Example #1
 def __init__(self, multidownload, connector, ratelimiter, choker, storage,
              max_chunk_length, max_rate_period, num_fast, infohash):
     assert isinstance(connector, BitTorrent.Connector.Connector)
     self.multidownload = multidownload
     self.connector = connector
     self.ratelimiter = ratelimiter
     self.infohash = infohash
     self.choker = choker
     self.num_fast = num_fast
     self.storage = storage
     self.max_chunk_length = max_chunk_length
     self.choked = True
     self.unchoke_time = None
     self.interested = False
     self.had_length_error = False
     self.had_max_requests_error = False
     self.buffer = []  # contains piece data about to be sent.
     self.measure = Measure(max_rate_period)
     connector.add_sent_listener(self.measure.update_rate)
     self.allowed_fast_pieces = []
     if connector.uses_fast_extension:
         if storage.get_amount_left() == 0:
             connector.send_have_all()
         elif storage.do_I_have_anything():
             connector.send_bitfield(storage.get_have_list())
         else:
             connector.send_have_none()
         self._send_allowed_fast_list()
     elif storage.do_I_have_anything():
         connector.send_bitfield(storage.get_have_list())
Example #2
class KRateLimiter:
    # Special rate limiter that drops entries that have been sitting in the
    # queue for longer than self.age seconds; by default we toss anything
    # that has less than 5 seconds to live.
    def __init__(self, transport, rate, call_later, rlcount, rate_period, age=(KRPC_TIMEOUT - 5)):
        self.q = []
        self.transport = transport
        self.rate = rate
        self.curr = 0
        self.running = False
        self.age = age
        self.last = 0
        self.call_later = call_later
        self.rlcount = rlcount
        self.measure = Measure(rate_period)
        self.sent = self.dropped = 0
        if self.rate == 0:
            self.rate = 1e10
            
    def sendto(self, s, i, addr):
        self.q.append((time(), (s, i, addr)))
        if not self.running:
            self.run(check=True)

    def run(self, check=False):
        t = time()
        self.expire(t)
        self.curr -= (t - self.last) * self.rate
        self.last = t
        if check:
            self.curr = max(self.curr, 0 - self.rate)

        shuffle(self.q)
        while self.q and self.curr <= 0:
            x, tup = self.q.pop()
            size = len(tup[0])
            self.curr += size
            try:
                self.transport.sendto(*tup)
                self.sent+=1
                self.rlcount(size)
                self.measure.update_rate(size)
            except:
                if tup[2][1] != 0:
                    print ">>> sendto exception", tup
                    print_exc()
        self.q.sort()
        if self.q or self.curr > 0:
            self.running = True
            # sleep for at least a half second
            self.call_later(max(self.curr / self.rate, 0.5), self.run)
        else:
            self.running = False
                          
    def expire(self, t=None):
        if t is None:
            t = time()  # evaluate at call time, not at class-definition time
        if self.q:
            expire_time = t - self.age
            while self.q and self.q[0][0] < expire_time:
                self.q.pop(0)
                self.dropped+=1
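The class above is a simple leaky-bucket send queue: run() credits the bucket by rate bytes for every elapsed second, sends queued packets while the bucket level curr is at or below zero, and expire() drops anything that has waited more than age seconds. Below is a minimal self-contained sketch of the same idea; the names (TinyRateLimiter, send, pump) are hypothetical stubs, and it is driven by the caller rather than by call_later.

import time
from collections import deque

class TinyRateLimiter(object):
    """Leaky-bucket send queue that drops entries older than `age` seconds."""

    def __init__(self, send, rate, age):
        self.send = send          # callable that actually transmits a payload
        self.rate = float(rate)   # budget in bytes per second
        self.age = age            # maximum time an entry may wait in the queue
        self.q = deque()
        self.curr = 0.0           # bucket level; we may send while it is <= 0
        self.last = time.time()

    def sendto(self, payload):
        self.q.append((time.time(), payload))
        self.pump()

    def pump(self):
        now = time.time()
        # Drop entries that have waited longer than `age` seconds.
        while self.q and self.q[0][0] < now - self.age:
            self.q.popleft()
        # Credit the bucket for elapsed time, allowing at most one second's
        # worth of burst (similar to the max(curr, -rate) clamp above).
        self.curr = max(self.curr - (now - self.last) * self.rate, -self.rate)
        self.last = now
        # Send while there is credit left.
        while self.q and self.curr <= 0:
            _, payload = self.q.popleft()
            self.curr += len(payload)
            self.send(payload)

sent = []
limiter = TinyRateLimiter(send=sent.append, rate=1000, age=10)
limiter.sendto('x' * 400)   # goes out immediately
limiter.sendto('x' * 400)   # stays queued: the bucket is still above zero
limiter.sendto('x' * 400)   # queued too; a later pump() (or expiry) drains it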
Example #3
File: Downloader.py Project: 3ft9/btdaemon
 def __init__(self, downloader, connection):
     self.downloader = downloader
     self.connection = connection
     self.choked = True
     self.interested = False
     self.active_requests = []
     self.measure = Measure(downloader.config['max_rate_period'])
     self.peermeasure = Measure(max(downloader.storage.piece_size / 10000,
                                    20))
     self.have = Bitfield(downloader.numpieces)
     self.last = 0
     self.example_interest = None
     self.backlog = 2
     self.guard = BadDataGuard(self)
Example #4
 def __init__(self, downloader, connection):
     self.downloader = downloader
     self.connection = connection
     self.choked = True
     self.interested = False
     self.active_requests = []
     self.measure = Measure(downloader.config['max_rate_period'])
     self.peermeasure = Measure(max(downloader.storage.piece_size / 10000,
                                    20))
     self.have = Bitfield(downloader.numpieces)
     self.last = 0
     self.example_interest = None
     self.backlog = 2
     self.guard = BadDataGuard(self)
Example #5
 def __init__(self, multidownload, connection):
     self.multidownload = multidownload
     self.connection = connection
     self.choked = True
     self.interested = False
     self.prefer_full = False
     self.active_requests = set()
     self.measure = Measure(multidownload.config['max_rate_period'])
     self.peermeasure = Measure(
         max(multidownload.storage.piece_size / 10000, 20))
     self.have = Bitfield(multidownload.numpieces)
     self.last = 0
     self.example_interest = None
     self.guard = BadDataGuard(self)
     self.suggested_pieces = []
     self.allowed_fast_pieces = []
Example #6
 def __init__(self, connection, ratelimiter, totalup, choker, storage,
              max_slice_length, max_rate_period, fudge):
     self.connection = connection
     self.ratelimiter = ratelimiter
     self.totalup = totalup
     self.choker = choker
     self.storage = storage
     self.max_slice_length = max_slice_length
     self.max_rate_period = max_rate_period
     self.choked = True
     self.unchoke_time = None
     self.interested = False
     self.buffer = []
     self.measure = Measure(max_rate_period, fudge)
     if storage.do_I_have_anything():
         connection.send_bitfield(storage.get_have_list())
Example #7
 def __init__(self, connection, ratelimiter, totalup, choker,
              storage, max_slice_length, max_rate_period, num_fast,
              torrent):
     assert isinstance(connection, BitTorrent.Connector.Connection)
     assert isinstance(torrent, BitTorrent.Torrent.Torrent)
     self.connection = connection
     self.ratelimiter = ratelimiter
     self.totalup = totalup
     self.torrent = torrent
     self.choker = choker
     self.num_fast = num_fast
     self.storage = storage
     self.max_slice_length = max_slice_length
     self.max_rate_period = max_rate_period
     self.choked = True
     self.unchoke_time = None
     self.interested = False
     self.buffer = []    # contains piece data about to be sent.
     self.measure = Measure(max_rate_period)
     self.allowed_fast_pieces = []
     if connection.uses_fast_extension:
         if storage.get_amount_left() == 0:
             connection.send_have_all()
         elif storage.do_I_have_anything():
             connection.send_bitfield(storage.get_have_list())
         else:
             connection.send_have_none()
         self._send_allowed_fast_list()
     elif storage.do_I_have_anything():
         connection.send_bitfield(storage.get_have_list())
Example #8
 def __init__(self, connection, ratelimiter, totalup, choker, storage,
              picker, config):
     self.connection = connection
     self.ratelimiter = ratelimiter
     self.totalup = totalup
     self.choker = choker
     self.storage = storage
     self.picker = picker
     self.config = config
     self.max_slice_length = config['max_slice_length']
     self.choked = True
     self.cleared = True
     self.interested = False
     self.super_seeding = False
     self.buffer = []
     self.measure = Measure(config['max_rate_period'], config['upload_rate_fudge'])
     self.was_ever_interested = False
     if storage.get_amount_left() == 0:
         if choker.super_seed:
             self.super_seeding = True   # flag, and don't send bitfield
             self.seed_have_list = []    # set from piecepicker
             self.skipped_count = 0
         else:
             if config['breakup_seed_bitfield']:
                 bitfield, msgs = storage.get_have_list_cloaked()
                 connection.send_bitfield(bitfield)
                 for have in msgs:
                     connection.send_have(have)
             else:
                 connection.send_bitfield(storage.get_have_list())
     else:
         if storage.do_I_have_anything():
             connection.send_bitfield(storage.get_have_list())
     self.piecedl = None
     self.piecebuf = None
Example #9
 def __init__(self, storage, picker, backlog, max_rate_period, numpieces,
              chunksize, measurefunc, snub_time, kickbans_ok, kickfunc,
              banfunc):
     self.storage = storage
     self.picker = picker
     self.backlog = backlog
     self.max_rate_period = max_rate_period
     self.measurefunc = measurefunc
     self.totalmeasure = Measure(max_rate_period * storage.piece_length /
                                 storage.request_size)
     self.numpieces = numpieces
     self.chunksize = chunksize
     self.snub_time = snub_time
     self.kickfunc = kickfunc
     self.banfunc = banfunc
     self.disconnectedseeds = {}
     self.downloads = []
     self.perip = {}
     self.gotbaddata = {}
     self.kicked = {}
     self.banned = {}
     self.kickbans_ok = kickbans_ok
     self.kickbans_halted = False
     self.super_seeding = False
     self.endgamemode = False
     self.endgame_queued_pieces = []
     self.all_requests = []
     self.discarded = 0L
     #        self.download_rate = 25000  # 25K/s test rate
     self.download_rate = 0
     self.bytes_requested = 0
     self.last_time = clock()
     self.queued_out = {}
     self.requeueing = False
     self.paused = False
Example #10
 def __init__(self, multidownload, connector, ratelimiter, choker, storage, 
              max_chunk_length, max_rate_period, num_fast, infohash):
     assert isinstance(connector, BitTorrent.Connector.Connector)
     self.multidownload = multidownload
     self.connector = connector
     self.ratelimiter = ratelimiter
     self.infohash = infohash 
     self.choker = choker
     self.num_fast = num_fast
     self.storage = storage
     self.max_chunk_length = max_chunk_length
     self.choked = True
     self.unchoke_time = None
     self.interested = False
     self.had_length_error = False
     self.had_max_requests_error = False
     self.buffer = []    # contains piece data about to be sent.
     self.measure = Measure(max_rate_period)
     connector.add_sent_listener(self.measure.update_rate) 
     self.allowed_fast_pieces = []
     if connector.uses_fast_extension:
         if storage.get_amount_left() == 0:
             connector.send_have_all()
         elif storage.do_I_have_anything():
             connector.send_bitfield(storage.get_have_list())
         else:
             connector.send_have_none()
         self._send_allowed_fast_list()
     elif storage.do_I_have_anything():
         connector.send_bitfield(storage.get_have_list())
Example #11
 def __init__(self, connection, ratelimiter, totalup, totalup2, choker,
              storage, max_slice_length, max_rate_period, logcollector):
     self.connection = connection
     self.ratelimiter = ratelimiter
     self.totalup = totalup
     self.totalup2 = totalup2
     self.choker = choker
     self.storage = storage
     self.max_slice_length = max_slice_length
     self.max_rate_period = max_rate_period
     self.choked = True
     self.unchoke_time = None
     self.interested = False
     # The buffer list contains one (index, begin, length) tuple for each
     # block requested by the remote peer. A non-empty buffer means there is
     # data the remote peer has requested that still has to be sent.
     self.buffer = []
     #PFS begin
     self.config = choker.config
     self.I = {}     # I[piece id] = block uploaded count in the piece id
     self.r = {}     # r[piece_id] = block requested count in the piece id
     #PFS end
     self.measure = Measure(max_rate_period)
     # Send our bitfield to the remote peer when it first connects.
     if storage.do_I_have_anything():
         connection.send_bitfield(storage.get_have_list())
     self.logcollector = logcollector
Example #12
    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        self.choked = True
        self.interested = False
        self.active_requests = []
        self.measure = Measure(downloader.max_rate_period)
        self.peermeasure = Measure(downloader.max_rate_period)
        self.have = Bitfield(downloader.numpieces)
        self.last = -1000
        self.last2 = -1000
        self.example_interest = None
#        self.backlog = 2
        self.backlog = 8
        self.ip = connection.get_ip()
        self.guard = BadDataGuard(self)
Example #13
 def __init__(self, downloader, connection):
     self.downloader = downloader
     self.connection = connection
     self.choked = True
     self.interested = False
     self.active_requests = []
     self.measure = Measure(downloader.max_rate_period)
     self.peermeasure = Measure(downloader.max_rate_period)
     self.have = Bitfield(downloader.numpieces)
     self.last = -1000
     self.last2 = -1000
     self.example_interest = None
     #        self.backlog = 2
     self.backlog = 8
     self.ip = connection.get_ip()
     self.guard = BadDataGuard(self)
Example #14
File: Upload.py Project: skn/floodgate
    def __init__(self, multidownload, connector, ratelimiter, choker, storage,
                 max_chunk_length, max_rate_period, num_fast, infohash):
        assert isinstance(connector, BitTorrent.Connector.Connector)
        self.multidownload = multidownload
        self.connector = connector
        self.ratelimiter = ratelimiter
        self.infohash = infohash
        self.choker = choker
        self.num_fast = num_fast
        self.storage = storage
        self.max_chunk_length = max_chunk_length
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.had_length_error = False
        self.had_max_requests_error = False
        self.buffer = []  # contains piece data about to be sent.
        self.measure = Measure(max_rate_period)
        connector.add_sent_listener(self.measure.update_rate)
        self.allowed_fast_pieces = []
        if connector.uses_fast_extension:
            if storage.get_amount_left() == 0:
                connector.send_have_all()
            elif storage.do_I_have_anything():
                connector.send_bitfield(storage.get_have_list())
            else:
                connector.send_have_none()
            self._send_allowed_fast_list()
        elif storage.do_I_have_anything():
            connector.send_bitfield(storage.get_have_list())
        # maps peer id -> pieces (idx, offset, len) with keys as values
        self.unchecked_key_rewards = {}
        self.blocked_piece_requests = []
        # (idx, offset, length): ("done" | "waiting" | "failed", retries)
        self.uploaded_piece_status = {}
        # whether we have already sent our certificate to the other side
        self.own_certificate_is_sent_to_peer = False

        self.elogger = logging.getLogger("ez")
        print "elogger", self.elogger
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        ch.setFormatter(formatter)
        self.elogger.addHandler(ch)
Example #15
 def __init__(self,
              transport,
              rate,
              call_later,
              rlcount,
              rate_period,
              age=(KRPC_TIMEOUT - 5)):
     self.q = []
     self.transport = transport
     self.rate = rate
     self.curr = 0
     self.running = False
     self.age = age
     self.last = 0
     self.call_later = call_later
     self.rlcount = rlcount
     self.measure = Measure(rate_period)
     self.sent = self.dropped = 0
     if self.rate == 0:
         self.rate = 1e10
Example #16
 def __init__(self, multidownload, connection):
     self.multidownload = multidownload
     self.connection = connection
     self.choked = True
     self.interested = False
     self.prefer_full = False
     self.active_requests = set()
     self.intro_size = self.multidownload.chunksize * 4 # just a guess
     self.measure = Measure(multidownload.config['max_rate_period'])
     self.peermeasure = Measure(
         max(multidownload.storage.piece_size / 10000, 20))
     self.have = Bitfield(multidownload.numpieces)
     self.last = 0
     self.example_interest = None
     self.guard = BadDataGuard(self)
     self.suggested_pieces = []
     self.allowed_fast_pieces = []
     self._received_listeners = set()
     self.add_useful_received_listener(self.measure.update_rate)
     self.total_bytes = 0
     def lambda_sucks(x): self.total_bytes += x
     self.add_useful_received_listener(lambda_sucks)
Example #17
 def __init__(self, transport, rate, call_later, rlcount, rate_period, age=(KRPC_TIMEOUT - 5)):
     self.q = []
     self.transport = transport
     self.rate = rate
     self.curr = 0
     self.running = False
     self.age = age
     self.last = 0
     self.call_later = call_later
     self.rlcount = rlcount
     self.measure = Measure(rate_period)
     self.sent=self.dropped=0
     if self.rate == 0:
         self.rate = 1e10
Example #18
 def __init__(self, connection, ratelimiter, totalup, choker, storage,
              picker, config):
     self.connection = connection
     self.ratelimiter = ratelimiter
     self.totalup = totalup
     self.choker = choker
     self.storage = storage
     self.picker = picker
     self.config = config
     self.max_slice_length = config['max_slice_length']
     self.choked = True
     self.cleared = True
     self.interested = False
     self.super_seeding = False
     self.buffer = []
     self.measure = Measure(config['max_rate_period'],
                            config['upload_rate_fudge'])
     self.was_ever_interested = False
     if storage.get_amount_left() == 0:
         if choker.super_seed:
             self.super_seeding = True  # flag, and don't send bitfield
             self.seed_have_list = []  # set from piecepicker
             self.skipped_count = 0
         else:
             if config['breakup_seed_bitfield']:
                 bitfield, msgs = storage.get_have_list_cloaked()
                 connection.send_bitfield(bitfield)
                 for have in msgs:
                     connection.send_have(have)
             else:
                 connection.send_bitfield(storage.get_have_list())
     else:
         if storage.do_I_have_anything():
             connection.send_bitfield(storage.get_have_list())
     self.piecedl = None
     self.piecebuf = None
Example #19
    def __init__(self, multidownload, connector):
        self.multidownload = multidownload
        self.connector = connector
        self.choked = True
        self.interested = False
        self.prefer_full = False
        self.active_requests = set()
        self.expecting_reject = set()
        self.intro_size = self.multidownload.chunksize * 4  # just a guess
        self.measure = Measure(multidownload.config['max_rate_period'])
        self.peermeasure = Measure(
            max(multidownload.storage.piece_size / 10000, 20))
        self.have = Bitfield(multidownload.numpieces)
        self.last = 0
        self.example_interest = None
        self.guard = BadDataGuard(self)
        self.suggested_pieces = []
        self.allowed_fast_pieces = []
        self._useful_received_listeners = set()
        self._raw_received_listeners = set()

        self.add_useful_received_listener(self.measure.update_rate)
        self.total_bytes = 0
        self.add_useful_received_listener(self.accumulate_total)
Example #20
 def __init__(self, connection, ratelimiter, totalup, totalup2, choker,
              storage, max_slice_length, max_rate_period):
     self.connection = connection
     self.ratelimiter = ratelimiter
     self.totalup = totalup
     self.totalup2 = totalup2
     self.choker = choker
     self.storage = storage
     self.max_slice_length = max_slice_length
     self.max_rate_period = max_rate_period
     self.choked = True
     self.unchoke_time = None
     self.interested = False
     self.buffer = []
     self.measure = Measure(max_rate_period)
     if storage.do_I_have_anything():
         connection.send_bitfield(storage.get_have_list())
Example #21
    def __init__(self, downloader, url):
        self.downloader = downloader
        self.baseurl = url
        try:
            (scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
        except:
            self.downloader.errorfunc('cannot parse http seed address: '+url)
            return
        if scheme != 'http':
            self.downloader.errorfunc('http seed url not http: '+url)
            return
        try:
            self.connection = HTTPConnection(self.netloc)
        except:
            self.downloader.errorfunc('cannot connect to http seed: '+url)
            return
        self.seedurl = path
        if pars:
            self.seedurl += ';'+pars
        self.seedurl += '?'
        if query:
            self.seedurl += query+'&'
        self.seedurl += 'info_hash='+quote(self.downloader.infohash)

        self.measure = Measure(downloader.max_rate_period)
        self.index = None
        self.url = ''
        self.requests = []
        self.request_size = 0
        self.endflag = False
        self.error = None
        self.retry_period = 30
        self._retry_period = None
        self.errorcount = 0
        self.goodseed = False
        self.active = False
        self.cancelled = False
        self.resched(randint(2,10))
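For illustration, here is a small self-contained sketch of what the seedurl assembly above produces: the path, any URL parameters, the original query string, and finally the quoted info_hash parameter. The URL and info hash values are made up for the example.

try:
    from urlparse import urlparse        # Python 2, as in the code above
    from urllib import quote
except ImportError:
    from urllib.parse import urlparse, quote

url = 'http://seed.example.com/files/demo.iso?auth=abc'   # hypothetical HTTP seed
infohash = '\x12\x34' * 10                                # hypothetical 20-byte hash

scheme, netloc, path, pars, query, fragment = urlparse(url)
seedurl = path
if pars:
    seedurl += ';' + pars
seedurl += '?'
if query:
    seedurl += query + '&'
seedurl += 'info_hash=' + quote(infohash)
print(seedurl)   # -> /files/demo.iso?auth=abc&info_hash=%124%124%124...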
Example #22
class Download(object):
    """Implements BitTorrent protocol semantics for downloading over a single
       connection.  See Upload for the protocol semantics in the upload
       direction.  See Connector.Connection for the protocol syntax
       implementation."""

    def __init__(self, multidownload, connection):
        self.multidownload = multidownload
        self.connection = connection
        self.choked = True
        self.interested = False
        self.prefer_full = False
        self.active_requests = set()
        self.measure = Measure(multidownload.config['max_rate_period'])
        self.peermeasure = Measure(
            max(multidownload.storage.piece_size / 10000, 20))
        self.have = Bitfield(multidownload.numpieces)
        self.last = 0
        self.example_interest = None
        self.guard = BadDataGuard(self)
        self.suggested_pieces = []
        self.allowed_fast_pieces = []
        
    def _backlog(self):
        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.multidownload.chunksize)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        return backlog

    def disconnected(self):
        self.multidownload.lost_peer(self)
        if self.have.numfalse == 0:
            self.multidownload.lost_have_all()
        else:
            # arg, slow
            count = 0
            target = len(self.have) - self.have.numfalse
            for i in xrange(len(self.have)):
                if count == target:
                    break
                if self.have[i]:
                    self.multidownload.lost_have(i)
                    count += 1
        self._letgo()
        self.guard.download = None
        
    def _letgo(self):
        if not self.active_requests:
            return
        if self.multidownload.storage.endgame:
            self.active_requests.clear()
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.multidownload.storage.request_lost(index, begin, length)
            self.multidownload.active_requests.remove(index)
            if index not in lost:
                lost.append(index)
        self.active_requests.clear()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and \
                      self.multidownload.storage.want_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break
        
    def got_choke(self):
        if not self.choked:
            self.choked = True
            if not self.connection.uses_fast_extension:
                self._letgo()
        
    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        req = (index, begin, len(piece))

        if req not in self.active_requests:
            self.multidownload.discarded_bytes += len(piece)
            if self.connection.uses_fast_extension:
                self.connection.close()
            return
        
        self.active_requests.remove(req)
        
        if self.multidownload.storage.endgame:
            if req not in self.multidownload.all_requests:
                self.multidownload.discarded_bytes += len(piece)
                return
            
            self.multidownload.all_requests.remove(req)

            for d in self.multidownload.downloads:
                if d.interested:
                    if not d.choked:
                        if req in d.active_requests:
                            d.connection.send_cancel(*req)
                            if not self.connection.uses_fast_extension:
                                d.active_requests.remove(req)
                    d.fix_download_endgame()
        else:
            self._request_more()
            
        self.last = bttime()
        self.update_rate(len(piece))
        df = self.multidownload.storage.write(index, begin, piece, self.guard)
        df.addCallback(self._got_piece, index)

    def _got_piece(self, hashchecked, index):
        if hashchecked:
            self.multidownload.hashchecked(index)
        
    def _want(self, index):
        return self.have[index] and \
               self.multidownload.storage.want_requests(index)

    def _request_more(self, indices = []):
        
        if self.choked:
            self._request_when_choked()
            return
        #log( "_request_more.active_requests=%s" % self.active_requests )
        b = self._backlog()
        if len(self.active_requests) >= b:
            return
        if self.multidownload.storage.endgame:
            self.fix_download_endgame()
            return

        self.suggested_pieces = [i for i in self.suggested_pieces 
            if not self.multidownload.storage.do_I_have(i)]
        lost_interests = []
        while len(self.active_requests) < b:
            if not indices:
                interest = self.multidownload.picker.next(self.have,
                                    self.multidownload.active_requests,
                                    self.multidownload.storage.full_pieces,
                                    self.suggested_pieces)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and \
                            self.multidownload.storage.want_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            # an example interest created by from_behind is preferable
            if self.example_interest is None:
                self.example_interest = interest

            # request as many chunks of interesting piece as fit in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.storage.new_request(interest,
                                                                       self.prefer_full)
                self.multidownload.active_requests_add(interest)
                self.active_requests.add((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.multidownload.storage.want_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        self._check_lost_interests(lost_interests)
        if self.multidownload.storage.endgame:
            self.multidownload.all_requests = set()
            for d in self.multidownload.downloads:
                self.multidownload.all_requests.update(d.active_requests)
            for d in self.multidownload.downloads:
                d.fix_download_endgame()

    def _check_lost_interests(self, lost_interests):
        """
           Notify other downloads that these pieces are no longer interesting.

           @param lost_interests: list of pieces that have been fully 
               requested.
        """
        if not lost_interests:
            return
        for d in self.multidownload.downloads:
            if d.active_requests or not d.interested:
                continue
            if d.example_interest is not None and not \
                    self.multidownload.storage.have[d.example_interest] and \
                    self.multidownload.storage.want_requests(d.example_interest):
                continue
            # any() does not exist until python 2.5
            #if not any([d.have[lost] for lost in lost_interests]):
            #    continue
            for lost in lost_interests:
                if d.have[lost]:
                    break
            else:
                continue
            interest = self.multidownload.picker.from_behind(self.have,
                            self.multidownload.storage.full_pieces)
            if interest is None:
                d.interested = False
                d.connection.send_not_interested()
            else:
                d.example_interest = interest

    def _request_when_choked(self):
        self.allowed_fast_pieces = [i for i in self.allowed_fast_pieces
            if not self.multidownload.storage.do_I_have(i)]
        if not self.allowed_fast_pieces:
            return
        fast = list(self.allowed_fast_pieces)

        b = self._backlog()
        lost_interests = []
        while len(self.active_requests) < b:

            while fast:
                piece = fast.pop()
                if self.have[piece] \
                   and self.multidownload.storage.want_requests(piece):
                    break
            else:
                break # no unrequested pieces among allowed fast.

            # request chunks until no more chunks or no more room in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.storage.new_request(piece,
                                                                       self.prefer_full)
                self.multidownload.active_requests_add(piece)
                self.active_requests.add((piece, begin, length))
                self.connection.send_request(piece, begin, length)
                if not self.multidownload.storage.want_requests(piece):
                    lost_interests.append(piece)
                    break
        self._check_lost_interests(lost_interests)
        if self.multidownload.storage.endgame:
            self.multidownload.all_requests = set()
            for d in self.multidownload.downloads:
                self.multidownload.all_requests.update(d.active_requests)
            for d in self.multidownload.downloads:
                d.fix_download_endgame()
                
    def fix_download_endgame(self):
        want = []
        for a in self.multidownload.all_requests:
            if not self.have[a[0]]:
                continue
            if a in self.active_requests:
                continue
            want.append(a)

        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked:
            return
        random.shuffle(want)
        for req in want[:self._backlog() - len(self.active_requests)]:
            self.active_requests.add(req)
            self.connection.send_request(*req)
        
    def got_have(self, index):
        if self.have[index]:
            return
        if index == self.multidownload.numpieces-1:
            self.peermeasure.update_rate(self.multidownload.storage.total_length-
              (self.multidownload.numpieces-1)*self.multidownload.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.multidownload.storage.piece_size)
        self.have[index] = True
        self.multidownload.got_have(index)
        if self.multidownload.storage.get_amount_left() == 0 and self.have.numfalse == 0:
            self.connection.close()
            return
        if self.multidownload.storage.endgame:
            self.fix_download_endgame()
        elif self.multidownload.storage.want_requests(index):
            self._request_more([index]) # call _request_more whether choked.
            if self.choked:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()
        
    def got_have_bitfield(self, have):
        if have.numfalse == 0:
            self._got_have_all(have)
            return
        self.have = have
        # arg, slow
        count = 0
        target = len(self.have) - self.have.numfalse
        for i in xrange(len(self.have)):
            if count == target:
                break
            if self.have[i]:
                self.multidownload.got_have(i)
                count += 1
        if self.multidownload.storage.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for piece in self.multidownload.storage.iter_want():
            if self.have[piece]:
                self.interested = True
                self.connection.send_interested()
                return

    def _got_have_all(self, have=None):
        if self.multidownload.storage.get_amount_left() == 0:
            self.connection.close()
            return
        if have is None:
            # bleh
            n = self.multidownload.numpieces
            rlen, extra = divmod(n, 8)
            if extra:
                extra = chr((0xFF << (8 - extra)) & 0xFF)
            else:
                extra = ''
            s = (chr(0xFF) * rlen) + extra
            have = Bitfield(n, s)
        self.have = have
        self.multidownload.got_have_all()
        if self.multidownload.storage.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                self.interested = True
                self.connection.send_interested()
                return
        for i in self.multidownload.storage.iter_want():
            self.interested = True
            self.connection.send_interested()
            return
        
    def update_rate(self, amount):
        self.measure.update_rate(amount)
        self.multidownload.update_rate(amount)

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        return bttime() - self.last > self.multidownload.snub_time

    def got_have_none(self):
        pass  # currently no action is taken when have_none is received.
              # The picker already assumes the remote peer has none of the
              # pieces until got_have is called.

    def got_have_all(self):
        assert self.connection.uses_fast_extension
        self._got_have_all()

    def got_suggest_piece(self, piece):
        assert self.connection.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece): 
          self.suggested_pieces.append(piece)
        self._request_more() # try to request more. Just returns if choked.

    def got_allowed_fast(self,piece):
        """Upon receiving this message, the multidownload knows that it is
           allowed to download the specified piece even when choked."""
        #log( "got_allowed_fast %d" % piece )
        assert self.connection.uses_fast_extension

        if not self.multidownload.storage.do_I_have(piece): 
            if piece not in self.allowed_fast_pieces:
                self.allowed_fast_pieces.append(piece)
                random.shuffle(self.allowed_fast_pieces)  # O(n) but n is small.
        self._request_more()  # will try to request.  Handles cases like
                              # whether neighbor has "allowed fast" piece.

    def got_reject_request(self, piece, begin, length):
        if not self.connection.uses_fast_extension:
            self.connection.close()
            return
        req = (piece, begin, length) 

        if req not in self.active_requests:
            self.connection.close()
            return
        self.active_requests.remove(req)

        if self.multidownload.storage.endgame:
            return

        self.multidownload.storage.request_lost(*req)
        if not self.choked:
            self._request_more()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more([piece])
            
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                if d.have[piece] and \
                  self.multidownload.storage.want_requests(piece):
                    d.interested = True
                    d.connection.send_interested()
                    break
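Several of these Download/SingleDownload examples share the same pipelining rule: _backlog() keeps roughly four seconds' worth of outstanding requests (2 + 4 * rate / chunksize), clamps the result to 50 once it passes 50, and only lets it grow again, at 7.5% of the uncapped value, for very fast peers. A hedged illustration of the arithmetic, assuming the common 16 KiB chunk size (the real chunksize comes from the multidownload config):

CHUNK = 16 * 1024   # assumed chunk size

def backlog(rate_bytes_per_sec):
    b = 2 + int(4 * rate_bytes_per_sec / CHUNK)
    if b > 50:
        b = max(50, int(.075 * b))
    return b

for rate in (0, 50 * 1024, 400 * 1024, 4 * 1024 * 1024):
    print((rate, backlog(rate)))
# 0 B/s     -> 2   (always keep a couple of requests pipelined)
# 50 KiB/s  -> 14  (about four seconds of data in flight)
# 400 KiB/s -> 50  (capped)
# 4 MiB/s   -> 76  (7.5% of the uncapped 1026 once that exceeds the cap range)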
Example #23
File: Downloader.py Project: 3ft9/btdaemon
class SingleDownload(object):

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        self.choked = True
        self.interested = False
        self.active_requests = []
        self.measure = Measure(downloader.config['max_rate_period'])
        self.peermeasure = Measure(max(downloader.storage.piece_size / 10000,
                                       20))
        self.have = Bitfield(downloader.numpieces)
        self.last = 0
        self.example_interest = None
        self.backlog = 2
        self.guard = BadDataGuard(self)

    def _backlog(self):
        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.downloader.chunksize)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        self.backlog = backlog
        return backlog

    def disconnected(self):
        self.downloader.lost_peer(self)
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        if not self.active_requests:
            return
        if self.downloader.storage.endgame:
            self.active_requests = []
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        try:
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            self.downloader.discarded_bytes += len(piece)
            return False
        if self.downloader.storage.endgame:
            self.downloader.all_requests.remove((index, begin, len(piece)))
        self.last = bttime()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                     self.guard):
            if self.downloader.storage.endgame:
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.storage.endgame:
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, len(piece)))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            for d in [i for i in self.downloader.downloads if i.have.numfalse == 0]:
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    def _want(self, index):
        return self.have[index] and self.downloader.storage.do_I_have_requests(index)

    def _request_more(self, indices = None):
        assert not self.choked
        if len(self.active_requests) >= self._backlog():
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            if indices is None:
                interest = self.downloader.picker.next(self._want, self.have.numfalse == 0)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            while len(self.active_requests) < (self.backlog-2) * 5 + 2:
                begin, length = self.downloader.storage.new_request(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d._want, d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.endgame:
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked or len(self.active_requests) >= self._backlog():
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        if self.have[index]:
            return
        if index == self.downloader.numpieces-1:
            self.peermeasure.update_rate(self.downloader.storage.total_length-
              (self.downloader.numpieces-1)*self.downloader.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.downloader.storage.piece_size)
        self.have[index] = True
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    def got_have_bitfield(self, have):
        if self.downloader.picker.am_I_complete() and have.numfalse == 0:
            self.connection.close()
            return
        self.have = have
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.got_have(i)
        if self.downloader.storage.endgame:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        return bttime() - self.last > self.downloader.snub_time
Example #24
class SingleDownload(object):

    def __init__(self, downloader, connection, logcollector):
        self.downloader = downloader
        self.connection = connection
        self.choked = True
        self.interested = False
        self.active_requests = []
        self.measure = Measure(downloader.config['max_rate_period'])
        self.peermeasure = Measure(max(downloader.storage.piece_size / 10000,
                                       20))
        # initialize a bitfield of length 'numpieces'
        self.have = Bitfield(downloader.numpieces)
        self.last = 0
        self.example_interest = None
        self.backlog = 2
        self.guard = BadDataGuard(self)
        self.logcollector = logcollector

    def _backlog(self):
        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.downloader.chunksize)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        self.backlog = backlog
        return backlog

    def disconnected(self):
        self.downloader.lost_peer(self)
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        if not self.active_requests:
            return
        if self.downloader.storage.endgame:
            self.active_requests = []
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.logcollector.log(None, 'R C ' + str(self.connection.ip))
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.logcollector.log(None, 'R UC ' + str(self.connection.ip))
            self.choked = False
            if self.interested:
                self._request_more()

    # This method returns True if the received block completes a piece and
    # False otherwise. The result is used to decide when to send the HAVE
    # message.
    def got_piece(self, index, begin, piece):        
        try:
            # If the received block is not in active_requests it was never
            # requested, so it is discarded.
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            self.downloader.discarded_bytes += len(piece)
            return False
        # Log every received block that was actually requested.
        self.logcollector.log(None, 'R P ' + str(self.connection.ip) + ' i ' + str(index) + ' b ' + str(begin))
        if self.downloader.storage.endgame:
            self.downloader.all_requests.remove((index, begin, len(piece)))
        self.last = bttime()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                     self.guard):
            if self.downloader.storage.endgame:
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.storage.endgame:
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, len(piece)))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            for d in [i for i in self.downloader.downloads if i.have.numfalse == 0]:
                self.logcollector.log(None, 'CON C ' + str(d.connection.ip) + ' S')
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    #return true if the remote peer has the piece and
    #there are blocks that can be requested for this piece.
    def _want(self, index):
        return self.have[index] and self.downloader.storage.do_I_have_requests(index)

    # indices is used for strict priority: when a connection is lost, its
    # pending pieces are re-requested first (see _letgo).
    def _request_more(self, indices = None):
        assert not self.choked
        if len(self.active_requests) >= self._backlog():
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            if indices is None:
                interest = self.downloader.picker.next(self._want, self.have.numfalse == 0)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            while len(self.active_requests) < (self.backlog-2) * 5 + 2:
                begin, length = self.downloader.storage.new_request(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d._want, d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.endgame:
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked or len(self.active_requests) >= self._backlog():
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        if self.have[index]:
            return
        self.logcollector.log(None, 'R H ' + str(self.connection.ip) + ' i ' + str(index))

        #Update peermeasure, the rate at which the remote peer acquires pieces:
        #each HAVE message credits one piece's worth of bytes over the last
        #period of time. As the final piece can be smaller than piece_size,
        #its exact size is used (see the worked example after this listing).
        if index == self.downloader.numpieces-1:
            self.peermeasure.update_rate(self.downloader.storage.total_length-
              (self.downloader.numpieces-1)*self.downloader.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.downloader.storage.piece_size)
            
        self.have[index] = True
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            self.logcollector.log(None, 'CON C '  + str(self.connection.ip) + ' S')
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    #initial bitfield
    def got_have_bitfield(self, have):
        if have.numfalse == 0:
            self.logcollector.log(None, 'P ' + str(self.connection.ip) + ' S')
        
        if self.downloader.picker.am_I_complete() and have.numfalse == 0:
            self.logcollector.log(None, 'CON C ' + str(self.connection.ip) + ' S')
            self.connection.close()
            return
        self.have = have
        
        #the string bitfield is just used for logging.
        bitfield=''
        for i in xrange(len(self.have)):
            if self.have[i]:
                bitfield += str(i) + ' '
                self.downloader.picker.got_have(i)
        self.logcollector.log(None, 'R BF ' + str(self.connection.ip) + ' ' + bitfield)
        #Bitfield received while in endgame mode: send INTERESTED as soon as the
        #remote peer advertises at least one piece that is still among the local
        #peer's outstanding requests.
        if self.downloader.storage.endgame:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        return self.measure.get_rate()

    #self.last is the time at which the last piece was received.
    #Once a peer is snubbed, the only way out of that state is to be
    #optimistically unchoked: being unsnubbed requires self.last to be
    #updated, and it is only updated in got_piece().
    def is_snubbed(self):
        return bttime() - self.last > self.downloader.snub_time
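
The peermeasure update in got_have() above credits one piece per HAVE message, using the exact size of the final piece because only that piece can be shorter than piece_size. A small worked example of the arithmetic, with made-up numbers (total_length and piece_size below are illustrative, not taken from any real torrent):

# Illustrative numbers only; a real torrent supplies total_length and piece_size.
total_length = 1000000   # bytes in the whole download
piece_size   = 262144    # nominal piece length
numpieces    = (total_length + piece_size - 1) // piece_size   # 4 pieces

last_piece_size = total_length - (numpieces - 1) * piece_size  # 213568 bytes

def have_size(index):
    # Size credited to peermeasure.update_rate() for a HAVE of piece `index`.
    if index == numpieces - 1:
        return last_piece_size
    return piece_size

assert sum(have_size(i) for i in range(numpieces)) == total_length
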
Example #25
0
class KRateLimiter:
    # special rate limiter that drops entries that have been sitting in the queue for longer than self.age seconds
    # by default we toss anything that has less than 5 seconds to live
    def __init__(self,
                 transport,
                 rate,
                 call_later,
                 rlcount,
                 rate_period,
                 age=(KRPC_TIMEOUT - 5)):
        self.q = []
        self.transport = transport
        self.rate = rate
        self.curr = 0
        self.running = False
        self.age = age
        self.last = 0
        self.call_later = call_later
        self.rlcount = rlcount
        self.measure = Measure(rate_period)
        self.sent = self.dropped = 0
        if self.rate == 0:
            self.rate = 1e10

    def sendto(self, s, i, addr):
        self.q.append((time(), (s, i, addr)))
        if not self.running:
            self.run(check=True)

    def run(self, check=False):
        t = time()
        self.expire(t)
        self.curr -= (t - self.last) * self.rate
        self.last = t
        if check:
            self.curr = max(self.curr, 0 - self.rate)

        shuffle(self.q)
        while self.q and self.curr <= 0:
            x, tup = self.q.pop()
            size = len(tup[0])
            self.curr += size
            try:
                self.transport.sendto(*tup)
                self.sent += 1
                self.rlcount(size)
                self.measure.update_rate(size)
            except:
                if tup[2][1] != 0:
                    print ">>> sendto exception", tup
                    print_exc()
        self.q.sort()
        if self.q or self.curr > 0:
            self.running = True
            # sleep for at least a half second
            self.call_later(max(self.curr / self.rate, 0.5), self.run)
        else:
            self.running = False

    def expire(self, t=None):
        # The timestamp must be computed per call; a default of time() in the
        # signature would be evaluated only once, at function definition time.
        if t is None:
            t = time()
        if self.q:
            expire_time = t - self.age
            while self.q and self.q[0][0] < expire_time:
                self.q.pop(0)
                self.dropped += 1
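
KRateLimiter above keeps a byte deficit in self.curr: each run() first drops entries older than self.age, then credits (now - last) * rate against the deficit and sends queued datagrams while the deficit stays non-positive. A self-contained sketch of that accounting, using a dummy transport and explicit timestamps instead of the real scheduler (all names and numbers here are illustrative):

import time

class DummyTransport(object):
    def __init__(self):
        self.sent = []
    def sendto(self, data, flags, addr):
        self.sent.append((data, addr))

def drain(queue, transport, rate, curr, last, age, now=None):
    """One pass of KRateLimiter-style accounting: expire old entries,
    credit elapsed time against the byte deficit, then send while allowed."""
    if now is None:
        now = time.time()
    # Drop entries that have waited longer than `age` seconds.
    queue = [(t, item) for (t, item) in queue if now - t <= age]
    curr -= (now - last) * rate        # credit for elapsed time
    while queue and curr <= 0:
        _, (data, flags, addr) = queue.pop(0)
        curr += len(data)              # each send adds its size to the deficit
        transport.sendto(data, flags, addr)
    return queue, curr, now

# Usage: queue three 100-byte datagrams against a 200 B/s budget.
transport = DummyTransport()
q = [(0.0, ("x" * 100, 0, ("127.0.0.1", 6881))) for _ in range(3)]
q, curr, last = drain(q, transport, rate=200, curr=0, last=0.0, age=25, now=0.5)
print(len(transport.sent))   # -> 2 sent (sends continue while curr <= 0), 1 still queued
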
Example #26
0
class SingleDownload(object):

    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        self.choked = True
        self.interested = False
        self.active_requests = []
        self.measure = Measure(downloader.config['max_rate_period'])
        self.peermeasure = Measure(max(downloader.storage.piece_size / 10000,
                                       20))
        self.have = Bitfield(downloader.numpieces)
        self.last = 0
        self.example_interest = None
        self.backlog = 2
        self.guard = BadDataGuard(self)

    def _backlog(self):
        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.downloader.chunksize)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))
        self.backlog = backlog
        return backlog

    def disconnected(self):
        self.downloader.lost_peer(self)
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.lost_have(i)
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        if not self.active_requests:
            return
        if self.downloader.storage.endgame:
            self.active_requests = []
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            if index not in lost:
                lost.append(index)
        self.active_requests = []
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.interested = True
                        d.connection.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        try:
            self.active_requests.remove((index, begin, len(piece)))
        except ValueError:
            self.downloader.discarded_bytes += len(piece)
            return False
        if self.downloader.storage.endgame:
            self.downloader.all_requests.remove((index, begin, len(piece)))
        self.last = time()
        self.measure.update_rate(len(piece))
        self.downloader.measurefunc(len(piece))
        self.downloader.downmeasure.update_rate(len(piece))
        if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                     self.guard):
            if self.downloader.storage.endgame:
                while self.downloader.storage.do_I_have_requests(index):
                    nb, nl = self.downloader.storage.new_request(index)
                    self.downloader.all_requests.append((index, nb, nl))
                for d in self.downloader.downloads:
                    d.fix_download_endgame()
                return False
            ds = [d for d in self.downloader.downloads if not d.choked]
            shuffle(ds)
            for d in ds:
                d._request_more([index])
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.storage.endgame:
            for d in self.downloader.downloads:
                if d is not self and d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, len(piece)))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, len(piece))
                        d.fix_download_endgame()
        self._request_more()
        if self.downloader.picker.am_I_complete():
            for d in [i for i in self.downloader.downloads if i.have.numfalse == 0]:
                d.connection.close()
        return self.downloader.storage.do_I_have(index)

    def _want(self, index):
        return self.have[index] and self.downloader.storage.do_I_have_requests(index)

    def _request_more(self, indices = None):
        assert not self.choked
        if len(self.active_requests) >= self._backlog():
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            if indices is None:
                interest = self.downloader.picker.next(self._want, self.have.numfalse == 0)
            else:
                interest = None
                for i in indices:
                    if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connection.send_interested()
            self.example_interest = interest
            self.downloader.picker.requested(interest, self.have.numfalse == 0)
            while len(self.active_requests) < (self.backlog-2) * 5 + 2:
                begin, length = self.downloader.storage.new_request(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connection.send_not_interested()
        if lost_interests:
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d._want, d.have.numfalse == 0)
                if interest is None:
                    d.interested = False
                    d.connection.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.endgame:
            self.downloader.all_requests = []
            for d in self.downloader.downloads:
                self.downloader.all_requests.extend(d.active_requests)
            for d in self.downloader.downloads:
                d.fix_download_endgame()

    def fix_download_endgame(self):
        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connection.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connection.send_interested()
        if self.choked or len(self.active_requests) >= self._backlog():
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)

    def got_have(self, index):
        if self.have[index]:
            return
        if index == self.downloader.numpieces-1:
            self.peermeasure.update_rate(self.downloader.storage.total_length-
              (self.downloader.numpieces-1)*self.downloader.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.downloader.storage.piece_size)
        self.have[index] = True
        self.downloader.picker.got_have(index)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            self.fix_download_endgame()
        elif self.downloader.storage.do_I_have_requests(index):
            if not self.choked:
                self._request_more([index])
            else:
                if not self.interested:
                    self.interested = True
                    self.connection.send_interested()

    def got_have_bitfield(self, have):
        self.have = have
        for i in xrange(len(self.have)):
            if self.have[i]:
                self.downloader.picker.got_have(i)
        if self.downloader.picker.am_I_complete() and self.have.numfalse == 0:
            self.connection.close()
            return
        if self.downloader.storage.endgame:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connection.send_interested()
                    return
        for i in xrange(len(self.have)):
            if self.have[i] and self.downloader.storage.do_I_have_requests(i):
                self.interested = True
                self.connection.send_interested()
                return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        return time() - self.last > self.downloader.snub_time
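
_backlog() in the example above scales the number of outstanding requests with the measured download rate (a floor of 2 plus roughly four chunks' worth of rate), then applies the over-50 adjustment. A minimal sketch of the same arithmetic, assuming a 16 KiB chunk size purely for illustration:

def backlog_for(rate_bytes_per_sec, chunksize=2 ** 14):
    # Mirrors the arithmetic in SingleDownload._backlog() above.
    backlog = 2 + int(4 * rate_bytes_per_sec / chunksize)
    if backlog > 50:
        backlog = max(50, int(.075 * backlog))
    return backlog

for rate in (0, 50 * 1024, 200 * 1024, 5 * 1024 * 1024):
    print((rate, backlog_for(rate)))
# roughly: 0 B/s -> 2, 50 KiB/s -> 14, 200 KiB/s -> 50, 5 MiB/s -> 96
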
Example #27
0
class _SingleTorrent(object):

    def __init__(self, rawserver, singleport_listener, ratelimiter, filepool,
                 config, dht):
        self._rawserver = rawserver
        self._singleport_listener = singleport_listener
        self._ratelimiter = ratelimiter
        self._filepool = filepool
        self._dht = dht
        self._storage = None
        self._storagewrapper = None
        self._ratemeasure = None
        self._upmeasure = None
        self._downmeasure = None
        self._encoder = None
        self._rerequest = None
        self._statuscollecter = None
        self._announced = False
        self._listening = False
        self.reserved_ports = []
        self.reported_port = None
        self._myfiles = None
        self.started = False
        self.is_seed = False
        self.closed = False
        self.infohash = None
        self.total_bytes = None
        self._doneflag = threading.Event()
        self.finflag = threading.Event()
        self._hashcheck_thread = None
        self._contfunc = None
        self._activity = (_("Initial startup"), 0)
        self.feedback = None
        self.errors = []
        self.rlgroup = None
        self.config = config
        
    def start_download(self, *args, **kwargs):
        it = self._start_download(*args, **kwargs)
        def cont():
            try:
                it.next()
            except StopIteration:
                self._contfunc = None
        def contfunc():
            self._rawserver.external_add_task(cont, 0, context=self)
        self._contfunc = contfunc
        contfunc()

    def _start_download(self, metainfo, feedback, save_path):
        self.feedback = feedback
        config = self.config

        self.infohash = metainfo.infohash
        self.total_bytes = metainfo.total_bytes
        if not metainfo.reported_errors:
            metainfo.show_encoding_errors(self._error)

        myid = self._make_id()
        seed(myid)
        def schedfunc(func, delay):
            self._rawserver.add_task(func, delay, context=self)
        def externalsched(func, delay):
            self._rawserver.external_add_task(func, delay, context=self)
        if metainfo.is_batch:
            myfiles = [os.path.join(save_path, f) for f in metainfo.files_fs]
        else:
            myfiles = [save_path]
        self._filepool.add_files(myfiles, self)
        self._myfiles = myfiles
        self._storage = Storage(config, self._filepool, zip(myfiles,
                                                            metainfo.sizes))
        resumefile = None
        if config['data_dir']:
            filename = os.path.join(config['data_dir'], 'resume',
                                    self.infohash.encode('hex'))
            if os.path.exists(filename):
                try:
                    resumefile = file(filename, 'rb')
                    if self._storage.check_fastresume(resumefile) == 0:
                        resumefile.close()
                        resumefile = None
                except Exception, e:
                    self._error(WARNING,
                                _("Could not load fastresume data: %s") % str(e)
                                + ' ' + _("Will perform full hash check."))
                    if resumefile is not None:
                        resumefile.close()
                    resumefile = None
        def data_flunked(amount, index):
            self._ratemeasure.data_rejected(amount)
            self._error(INFO,
                        _("piece %d failed hash check, re-downloading it")
                        % index)
        backthread_exception = []
        def errorfunc(level, text):
            def e():
                self._error(level, text)
            externalsched(e, 0)
        def hashcheck():
            def statusfunc(activity = None, fractionDone = 0):
                if activity is None:
                    activity = self._activity[0]
                self._activity = (activity, fractionDone)
            try:
                self._storagewrapper = StorageWrapper(self._storage,
                     config, metainfo.hashes, metainfo.piece_length,
                     self._finished, statusfunc, self._doneflag, data_flunked,
                     self.infohash, errorfunc, resumefile)
            except:
                backthread_exception.append(sys.exc_info())
            self._contfunc()
        thread = threading.Thread(target = hashcheck)
        thread.setDaemon(False)
        self._hashcheck_thread = thread
        thread.start()
        yield None
        self._hashcheck_thread = None
        if resumefile is not None:
            resumefile.close()
        if backthread_exception:
            a, b, c = backthread_exception[0]
            raise a, b, c

        if self._storagewrapper.amount_left == 0:
            self._finished()
        choker = Choker(config, schedfunc, self.finflag.isSet)
        upmeasure = Measure(config['max_rate_period'])
        upmeasure_seedtime = Measure(config['max_rate_period_seedtime'])
        downmeasure = Measure(config['max_rate_period'])
        self._upmeasure = upmeasure
        self._upmeasure_seedtime = upmeasure_seedtime
        self._downmeasure = downmeasure
        self._ratemeasure = RateMeasure(
            self._storagewrapper.amount_left_with_partials)
        picker = PiecePicker(len(metainfo.hashes), config)
        for i in xrange(len(metainfo.hashes)):
            if self._storagewrapper.do_I_have(i):
                picker.complete(i)
        for i in self._storagewrapper.stat_dirty:
            picker.requested(i)
        def kickpeer(connection):
            def kick():
                connection.close()
            schedfunc(kick, 0)
        def banpeer(ip):
            self._encoder.ban(ip)
        downloader = Downloader(config, self._storagewrapper, picker,
            len(metainfo.hashes), downmeasure, self._ratemeasure.data_came_in,
                                kickpeer, banpeer)
        def make_upload(connection):
            return Upload(connection, self._ratelimiter, upmeasure,
                        upmeasure_seedtime, choker, self._storagewrapper,
                        config['max_slice_length'], config['max_rate_period'])


        self.reported_port = self.config['forwarded_port']
        if not self.reported_port:
            self.reported_port = self._singleport_listener.get_port(self.change_port)
            self.reserved_ports.append(self.reported_port)

        if self._dht:
            addContact = self._dht.addContact
        else:
            addContact = None
        self._encoder = Encoder(make_upload, downloader, choker,
                     len(metainfo.hashes), self._ratelimiter, self._rawserver,
                     config, myid, schedfunc, self.infohash, self, addContact, self.reported_port)

        self._singleport_listener.add_torrent(self.infohash, self._encoder)
        self._listening = True
        if metainfo.is_trackerless:
            if not self._dht:
                self._error(CRITICAL, _("Attempt to download a trackerless torrent with trackerless client turned off."))
                return
            else:
                if len(self._dht.table.findNodes(metainfo.infohash, invalid=False)) < const.K:
                    for host, port in metainfo.nodes:
                        self._dht.addContact(host, port)
                self._rerequest = DHTRerequester(config,
                    schedfunc, self._encoder.how_many_connections,
                    self._encoder.start_connection, externalsched,
                    self._storagewrapper.get_amount_left, upmeasure.get_total,
                    downmeasure.get_total, self.reported_port, myid,
                    self.infohash, self._error, self.finflag, upmeasure.get_rate,
                    downmeasure.get_rate, self._encoder.ever_got_incoming,
                    self.internal_shutdown, self._announce_done, self._dht)
        else:
            self._rerequest = Rerequester(metainfo.announce, config,
                schedfunc, self._encoder.how_many_connections,
                self._encoder.start_connection, externalsched,
                self._storagewrapper.get_amount_left, upmeasure.get_total,
                downmeasure.get_total, self.reported_port, myid,
                self.infohash, self._error, self.finflag, upmeasure.get_rate,
                downmeasure.get_rate, self._encoder.ever_got_incoming,
                self.internal_shutdown, self._announce_done)

        self._statuscollecter = DownloaderFeedback(choker, upmeasure.get_rate,
            upmeasure_seedtime.get_rate, downmeasure.get_rate,
            upmeasure.get_total, downmeasure.get_total,
            self._ratemeasure.get_time_left, self._ratemeasure.get_size_left,
            self.total_bytes, self.finflag, downloader, self._myfiles,
            self._encoder.ever_got_incoming, self._rerequest)

        self._announced = True
        if self._dht and len(self._dht.table.findNodes(self.infohash)) == 0:
            self._rawserver.add_task(self._dht.findCloseNodes, 5)
            self._rawserver.add_task(self._rerequest.begin, 20)
        else:
            self._rerequest.begin()
        self.started = True
        if not self.finflag.isSet():
            self._activity = (_("downloading"), 0)
        self.feedback.started(self)
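
start_download() above drives _start_download() as a generator: the generator yields while the hash-check thread runs, and _contfunc schedules the next it.next() on the event loop when the background work completes. A stripped-down, self-contained sketch of that resumption pattern; the list-based scheduler below is a stand-in for rawserver.external_add_task, not the real API:

pending = []                     # stand-in for the rawserver task queue

def external_add_task(func, delay=0):
    pending.append(func)         # a real event loop would honour `delay`

def make_worker():
    def work():
        print("phase 1: kick off background work")
        yield                    # suspended until contfunc() resumes us
        print("phase 2: background work finished, continue setup")
    it = work()
    def cont():
        try:
            next(it)
        except StopIteration:
            pass
    def contfunc():
        external_add_task(cont)  # resume on the "event loop", not inline
    return contfunc

contfunc = make_worker()
contfunc()                       # schedules phase 1
# ... a background thread would call contfunc() again when its work is done ...
contfunc()                       # schedules phase 2
while pending:
    pending.pop(0)()             # drain the fake event loop
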
Example #28
0
File: MultiTorrent.py Project: hitzjd/DHT
    def __init__(self,
                 config,
                 rawserver,
                 data_dir,
                 listen_fail_ok=False,
                 init_torrents=True,
                 is_single_torrent=False,
                 resume_from_torrent_config=True,
                 bitstring=None):
        """
         @param config: program-wide configuration object.
         @param rawserver: object that manages main event loop and event
           scheduling.
         @param data_dir: where variable data such as fastresume information
           and GUI state is saved.
         @param listen_fail_ok: if false, a BTFailure is raised if
           a server socket cannot be opened to accept incoming peer
           connections.
         @param init_torrents: restore fast resume state from prior
           instantiations of MultiTorrent.
         @param is_single_torrent: if true then allow only one torrent
           at a time in this MultiTorrent.
         @param resume_from_torrent_config: resume from ui_state files.
        """
        # is_single_torrent will go away when we move MultiTorrent into
        # a separate process, in which case, single torrent applications like
        # curses and console will act as a client to the MultiTorrent daemon.
        #   --Dave

        # init_torrents refers to fast resume rather than torrent config.
        # If init_torrents is set to False, the UI state file is still
        # read and the paths to existing downloads still used. This is
        # not what we want for launchmany.
        #
        # resume_from_torrent_config is separate from
        # is_single_torrent because launchmany must be able to have
        # multiple torrents while not resuming from torrent config
        # state.  If launchmany resumes from torrent config then it
        # saves or seeds from the path in the torrent config even if
        # the file has moved in the directory tree.  Because
        # launchmany has no mechanism for removing torrents other than
        # to change the directory tree, the only way for the user to
        # eliminate the old state is to wipe out the files in the
        # .bittorrent/launchmany-*/ui_state directory.  This is highly
        # counterintuitive.  Best to simply ignore the ui_state
        # directory altogether.  --Dave

        assert isinstance(config, Preferences)
        #assert isinstance(data_dir, unicode)  # temporarily commented -Dave
        assert isinstance(listen_fail_ok, bool)
        assert not (is_single_torrent and resume_from_torrent_config)

        self.config = config
        self.data_dir = data_dir
        self.last_save_time = 0
        self.policies = []
        self.torrents = {}
        self.running = {}
        self.log_root = "core.MultiTorrent"
        self.logger = logging.getLogger(self.log_root)
        self.is_single_torrent = is_single_torrent
        self.resume_from_torrent_config = resume_from_torrent_config
        self.auto_update_policy_index = None
        self.dht = None
        self.rawserver = rawserver
        nattraverser = NatTraverser(self.rawserver)
        self.internet_watcher = get_internet_watcher(self.rawserver)
        self.singleport_listener = SingleportListener(
            self.rawserver,
            nattraverser,
            self.log_root,
            # config['use_local_discovery']
            False)
        self.choker = Choker(self.config, self.rawserver.add_task)
        self.up_ratelimiter = RateLimiter(self.rawserver.add_task)
        self.up_ratelimiter.set_parameters(config['max_upload_rate'],
                                           config['upload_unit_size'])
        self.down_ratelimiter = DownloadRateLimiter(
            config['download_rate_limiter_interval'],
            self.config['max_download_rate'])
        self.total_downmeasure = Measure(config['max_rate_period'])

        self._find_port(listen_fail_ok)

        self.filepool_doneflag = DeferredEvent()
        self.filepool = FilePool(self.filepool_doneflag,
                                 self.rawserver.add_task,
                                 self.rawserver.external_add_task,
                                 config['max_files_open'],
                                 config['num_disk_threads'])
        self.bitstring = bitstring

        if self.resume_from_torrent_config:
            try:
                self._restore_state(init_torrents)
            except BTFailure:
                # a corrupt ui_state file should not abort startup; log and continue.
                self.logger.exception("_restore_state failed")

        def no_dump_set_option(option, value):
            self.set_option(option, value, dump=False)

        self.bandwidth_manager = BandwidthManager(
            self.rawserver.external_add_task,
            config,
            no_dump_set_option,
            self.rawserver.get_remote_endpoints,
            get_rates=self.get_total_rates)

        self.rawserver.add_task(0, self.butle)
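
Nearly every example on this page feeds byte counts into Measure(max_rate_period) and reads them back with get_rate(). The real Measure class is not reproduced here, so the following is only a toy sketch of one way such a rolling rate estimator could behave (a sliding window of (timestamp, bytes) samples); the actual smoothing in Measure may differ:

import time

class WindowRate(object):
    """Toy rolling-rate estimator with the same update_rate()/get_rate()
    surface as Measure; the smoothing is NOT the real implementation."""
    def __init__(self, period):
        self.period = float(period)
        self.samples = []                      # list of (timestamp, bytes)

    def update_rate(self, amount, now=None):
        now = time.time() if now is None else now
        self.samples.append((now, amount))

    def get_rate(self, now=None):
        now = time.time() if now is None else now
        cutoff = now - self.period
        self.samples = [(t, n) for (t, n) in self.samples if t >= cutoff]
        return sum(n for _, n in self.samples) / self.period

m = WindowRate(20)                             # 20-second window, like max_rate_period
m.update_rate(16384, now=100.0)
m.update_rate(16384, now=110.0)
print(m.get_rate(now=115.0))                   # 32768 bytes over 20 s -> 1638.4 B/s
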
Example #29
0
File: Upload.py Project: hitzjd/DHT
class Upload(object):
    """Upload over a single connection."""
    def __init__(self, multidownload, connector, ratelimiter, choker, storage,
                 max_chunk_length, max_rate_period, num_fast, infohash):
        assert isinstance(connector, BitTorrent.Connector.Connector)
        self.multidownload = multidownload
        self.connector = connector
        self.ratelimiter = ratelimiter
        self.infohash = infohash
        self.choker = choker
        self.num_fast = num_fast
        self.storage = storage
        self.max_chunk_length = max_chunk_length
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.had_length_error = False
        self.had_max_requests_error = False
        self.buffer = []  # contains piece data about to be sent.
        self.measure = Measure(max_rate_period)
        connector.add_sent_listener(self.measure.update_rate)
        self.allowed_fast_pieces = []
        if connector.uses_fast_extension:
            if storage.get_amount_left() == 0:
                connector.send_have_all()
            elif storage.do_I_have_anything():
                connector.send_bitfield(storage.get_have_list())
            else:
                connector.send_have_none()
            self._send_allowed_fast_list()
        elif storage.do_I_have_anything():
            connector.send_bitfield(storage.get_have_list())

    def _send_allowed_fast_list(self):
        """Computes and sends the 'allowed fast' set.  """
        self.allowed_fast_pieces = self._compute_allowed_fast_list(
            self.infohash, self.connector.ip, self.num_fast,
            self.storage.get_num_pieces())

        for index in self.allowed_fast_pieces:
            self.connector.send_allowed_fast(index)

    def _compute_allowed_fast_list(self, infohash, ip, num_fast, num_pieces):

        # assume IPv4 for now.
        iplist = [int(x) for x in ip.split(".")]

        # classful heuristic.
        if iplist[0] | 0x7F == 0xFF or iplist[0] & 0xC0 == 0x80:  # class A or B
            iplist = [chr(iplist[0]), chr(iplist[1]), chr(0), chr(0)]
        else:
            iplist = [chr(iplist[0]), chr(iplist[1]), chr(iplist[2]), chr(0)]
        h = "".join(iplist)
        h = "".join([h, infohash])
        fastlist = []
        assert num_pieces < 2**32
        if num_pieces <= num_fast:
            return range(num_pieces)  # <---- this would be bizarre
        while True:
            h = sha(h).digest()  # rehash hash to generate new random string.
            #log("infohash=%s" % h.encode('hex'))
            for i in xrange(5):
                j = i * 4
                y = [ord(x) for x in h[j:j + 4]]
                z = (y[0] << 24) + (y[1] << 16) + (y[2] << 8) + y[3]
                index = int(z % num_pieces)
                #log("z=%s=%d, index=%d" % ( hex(z), z, index ))
                if index not in fastlist:
                    fastlist.append(index)
                    if len(fastlist) >= num_fast:
                        return fastlist

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            self.choker.not_interested(self.connector)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.choker.interested(self.connector)

    def get_upload_chunk(self, index, begin, length):
        df = self.storage.read(index, begin, length)
        df.addCallback(lambda piece: (index, begin, piece))
        df.addErrback(self._failed_get_upload_chunk)
        return df

    def _failed_get_upload_chunk(self, f):
        log("get_upload_chunk failed", exc_info=f.exc_info())
        self.connector.close()
        return f

    def got_request(self, index, begin, length):
        if not self.interested:
            self.connector.protocol_violation("request when not interested")
            self.connector.close()
            return
        if length > self.max_chunk_length:
            if not self.had_length_error:
                m = ("request length %r exceeds max %r" %
                     (length, self.max_chunk_length))
                self.connector.protocol_violation(m)
                self.had_length_error = True
            #self.connector.close()
            # we could still download...
            if self.connector.uses_fast_extension:
                self.connector.send_reject_request(index, begin, length)
            return
        if len(self.buffer) > MAX_REQUESTS:
            if not self.had_max_requests_error:
                m = ("max request limit %d" % MAX_REQUESTS)
                self.connector.protocol_violation(m)
                self.had_max_requests_error = True
            if self.connector.uses_fast_extension:
                self.connector.send_reject_request(index, begin, length)
            return
        if index in self.allowed_fast_pieces or not self.connector.choke_sent:
            df = self.get_upload_chunk(index, begin, length)
            df.addCallback(self._got_piece)
            df.addErrback(self.multidownload.errorfunc)
        elif self.connector.uses_fast_extension:
            self.connector.send_reject_request(index, begin, length)

    def _got_piece(self, piece_info):
        index, begin, piece = piece_info
        if self.connector.closed:
            return
        if self.choked:
            if not self.connector.uses_fast_extension:
                return
            if index not in self.allowed_fast_pieces:
                self.connector.send_reject_request(index, begin, len(piece))
                return
        self.buffer.append(((index, begin, len(piece)), piece))
        if self.connector.next_upload is None and \
               self.connector.connection.is_flushed():
            self.ratelimiter.queue(self.connector)

    def got_cancel(self, index, begin, length):
        req = (index, begin, length)
        for pos, (r, p) in enumerate(self.buffer):
            if r == req:
                del self.buffer[pos]
                if self.connector.uses_fast_extension:
                    self.connector.send_reject_request(*req)
                break

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connector.send_choke()

    def sent_choke(self):
        assert self.choked
        if self.connector.uses_fast_extension:
            b2 = []
            for r in self.buffer:
                ((index, begin, length), piecedata) = r
                if index not in self.allowed_fast_pieces:
                    self.connector.send_reject_request(index, begin, length)
                else:
                    b2.append(r)
            self.buffer = b2
        else:
            del self.buffer[:]

    def unchoke(self, time):
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connector.send_unchoke()

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
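
_compute_allowed_fast_list() above derives the allowed-fast set deterministically from the masked peer IP and the infohash by repeatedly hashing and taking 4-byte words modulo the piece count, in the spirit of BEP 6. A self-contained sketch of the same expansion using hashlib; the IP-masking heuristic is omitted and the inputs are invented, so the output will not match the method above byte-for-byte:

import hashlib
import struct

def allowed_fast(infohash, masked_ip_bytes, num_fast, num_pieces):
    # Deterministically expand SHA-1 output into distinct piece indices.
    h = masked_ip_bytes + infohash
    fastlist = []
    if num_pieces <= num_fast:
        return list(range(num_pieces))
    while True:
        h = hashlib.sha1(h).digest()
        for i in range(5):                       # five 4-byte words per digest
            (z,) = struct.unpack("!I", h[4 * i:4 * i + 4])
            index = z % num_pieces
            if index not in fastlist:
                fastlist.append(index)
                if len(fastlist) >= num_fast:
                    return fastlist

# Illustrative inputs: a fake 20-byte infohash and a /24-masked IP.
print(allowed_fast(b"\x01" * 20, b"\x50\x20\x30\x00", 4, 1000))
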
Example #30
0
File: Upload.py Project: skn/floodgate
class Upload(object):
    """Upload over a single connection."""
    def __init__(self, multidownload, connector, ratelimiter, choker, storage,
                 max_chunk_length, max_rate_period, num_fast, infohash):
        assert isinstance(connector, BitTorrent.Connector.Connector)
        self.multidownload = multidownload
        self.connector = connector
        self.ratelimiter = ratelimiter
        self.infohash = infohash
        self.choker = choker
        self.num_fast = num_fast
        self.storage = storage
        self.max_chunk_length = max_chunk_length
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.had_length_error = False
        self.had_max_requests_error = False
        self.buffer = []  # contains piece data about to be sent.
        self.measure = Measure(max_rate_period)
        connector.add_sent_listener(self.measure.update_rate)
        self.allowed_fast_pieces = []
        if connector.uses_fast_extension:
            if storage.get_amount_left() == 0:
                connector.send_have_all()
            elif storage.do_I_have_anything():
                connector.send_bitfield(storage.get_have_list())
            else:
                connector.send_have_none()
            self._send_allowed_fast_list()
        elif storage.do_I_have_anything():
            connector.send_bitfield(storage.get_have_list())
        #maps peer id -> {(idx, offset, len): payment key} for rewards not yet checked
        self.unchecked_key_rewards = {}
        self.blocked_piece_requests = []
        #(idx, offset, length) : ("done" | "waiting" | "failed", retries)
        self.uploaded_piece_status = {}
        #whether we have already sent our certificate to the other side
        self.own_certificate_is_sent_to_peer = False

        self.elogger = logging.getLogger("ez")
        print "elogger", self.elogger
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        ch.setFormatter(formatter)
        self.elogger.addHandler(ch)

    def _send_allowed_fast_list(self):
        """Computes and sends the 'allowed fast' set.  """
        self.allowed_fast_pieces = self._compute_allowed_fast_list(
            self.infohash, self.connector.ip, self.num_fast,
            self.storage.get_num_pieces())

        for index in self.allowed_fast_pieces:
            self.connector.send_allowed_fast(index)

    def _compute_allowed_fast_list(self, infohash, ip, num_fast, num_pieces):

        # assume IPv4 for now.
        iplist = [int(x) for x in ip.split(".")]

        # classful heuristic.
        if iplist[0] | 0x7F == 0xFF or iplist[0] & 0xC0 == 0x80:  # class A or B
            iplist = [chr(iplist[0]), chr(iplist[1]), chr(0), chr(0)]
        else:
            iplist = [chr(iplist[0]), chr(iplist[1]), chr(iplist[2]), chr(0)]
        h = "".join(iplist)
        h = "".join([h, infohash])
        fastlist = []
        assert num_pieces < 2**32
        if num_pieces <= num_fast:
            return range(num_pieces)  # <---- this would be bizarre
        while True:
            h = sha(h).digest()  # rehash hash to generate new random string.
            #log("infohash=%s" % h.encode('hex'))
            for i in xrange(5):
                j = i * 4
                y = [ord(x) for x in h[j:j + 4]]
                z = (y[0] << 24) + (y[1] << 16) + (y[2] << 8) + y[3]
                index = int(z % num_pieces)
                #log("z=%s=%d, index=%d" % ( hex(z), z, index ))
                if index not in fastlist:
                    fastlist.append(index)
                    if len(fastlist) >= num_fast:
                        return fastlist

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            self.choker.not_interested(self.connector)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.choker.interested(self.connector)

    def get_upload_chunk(self, index, begin, length):
        df = self.storage.read(index, begin, length)
        df.addCallback(lambda piece: (index, begin, piece))
        df.addErrback(self._failed_get_upload_chunk)
        return df

    def _failed_get_upload_chunk(self, f):
        self.elogger.warn("get_upload_chunk failed", exc_info=f.exc_info())
        self.connector.close()
        return f

    def got_mp_request(self, index, begin, length, der_cert, sig):
        self.elogger.warn("LOG 4P\n LOG 4P\n LOG4P\n")
        self.elogger.warn("got bad request invalid cert i=%d o=%d l=%d" %
                          (index, begin, length))
        peer_cert = self.connector.download.got_cert(der_cert)
        if not peer_cert:
            self.elogger.warn("got bad request invalid cert i=%d o=%d l=%d " %
                              (index, begin, length))
            return
        msg_to_sign = pack("!iii", index, begin, length)
        msg_ok = self.multidownload.pk_tools.check_signature_tls(
            peer_cert.publicKey, sig, msg_to_sign)
        if msg_ok:
            self.got_request(index, begin, length)
        else:
            self.elogger.warn("got bad request i=%d o=%d l=%d " %
                              (index, begin, length))

    def got_request(self, index, begin, length):
        if not self.interested:
            self.connector.protocol_violation("request when not interested")
            self.connector.close()
            return
        if length > self.max_chunk_length:
            if not self.had_length_error:
                m = ("request length %r exceeds max %r" %
                     (length, self.max_chunk_length))
                self.connector.protocol_violation(m)
                self.had_length_error = True
            #self.connector.close()
            # we could still download...
            if self.connector.uses_fast_extension:
                self.connector.send_reject_request(index, begin, length)
            return
        if len(self.buffer) > MAX_REQUESTS:
            if not self.had_max_requests_error:
                m = ("max request limit %d" % MAX_REQUESTS)
                self.connector.protocol_violation(m)
                self.had_max_requests_error = True
            if self.connector.uses_fast_extension:
                self.connector.send_reject_request(index, begin, length)
            return
        #EZ if micropayments, check outstanding upload rewards and send reminders
        if self.multidownload.micropayments:
            self.elogger.warn("micropayment true in upload")
            if self.connector.id in self.multidownload.waiting_for_reward \
               and len(self.multidownload.waiting_for_reward[self.connector.id]) > 0:
                print(
                    "waiting for reward true in upload, so returning after sending requests"
                )
                #if len(self.multidownload.waiting_for_reward[self.connector.id]) > MAX_UNREWARDED_UPLOADS:
                print("adding request to blocked_piece_requests")
                waiting_for_piece_rewards = self.multidownload.waiting_for_reward[
                    self.connector.id]
                self.blocked_piece_requests.append((index, begin, length))
                #ez: iterate to send all responses
                for (index, begin, length) in waiting_for_piece_rewards:
                    print(
                        "upload: sending key request to downloader for piece %d"
                        % index)
                    self.send_key_reward_response(index, begin, length, False)
                return

        if index in self.allowed_fast_pieces or not self.connector.choke_sent:
            df = self.get_upload_chunk(index, begin, length)
            df.addCallback(self._got_piece)
            df.addErrback(self.multidownload.errorfunc)
        elif self.connector.uses_fast_extension:
            self.connector.send_reject_request(index, begin, length)

    def _got_piece(self, piece_info):
        index, begin, piece = piece_info
        if self.connector.closed:
            return
        if self.choked:
            if not self.connector.uses_fast_extension:
                return
            if index not in self.allowed_fast_pieces:
                self.connector.send_reject_request(index, begin, len(piece))
                return
        if self.multidownload.micropayments:
            if self.connector.id not in self.multidownload.waiting_for_reward:
                self.multidownload.waiting_for_reward[self.connector.id] = []
            self.multidownload.waiting_for_reward[self.connector.id].append(
                (index, begin, len(piece)))
            if not self.own_certificate_is_sent_to_peer:
                der_cert = self.multidownload.certificate.bytes.tostring()
                self.own_certificate_is_sent_to_peer = True
                self.connector.download.own_certificate_is_sent_to_peer = True
            else:
                der_cert = ""  #leave out the certfificate because other side alread has it
            self.elogger.warn("length of der cert = %d" % len(der_cert))
            #todo do this before instead of realtime
            piece_sig = self.multidownload.pk_tools.get_sha_signature_tls(
                self.multidownload.private_key, piece)
            self.elogger.warn(
                "index= %d, begin=%d, len(piece)=%d,len(piece_sig)%d lendercert = %d"
                % (index, begin, len(piece), len(piece_sig), len(der_cert)))
            self.buffer.append(((index, begin, len(piece), len(piece_sig),
                                 len(der_cert)), piece, piece_sig, der_cert))
        else:
            self.buffer.append(((index, begin, len(piece)), piece))

        if self.connector.next_upload is None and \
               self.connector.connection.is_flushed():
            self.ratelimiter.queue(self.connector)

    def got_cancel_mp(self, index, begin, length):
        self.elogger.warn("got mp cancel")
        req = (index, begin, length)
        for entry in enumerate(self.buffer):
            pos, ((p_index, p_begin, p_length, length_sig, length_cert), p,
                  p_sig, p_cert) = entry
            buffer_req = (p_index, p_begin, p_length)
            if buffer_req == req:
                del self.buffer[pos]
                if self.connector.uses_fast_extension:
                    self.connector.send_reject_request(*req)
                break

    def got_cancel(self, index, begin, length):
        if self.multidownload.micropayments:
            self.got_cancel_mp(index, begin, length)
            return
        log("got non mp cancel")
        req = (index, begin, length)
        for pos, (r, p) in enumerate(self.buffer):
            if r == req:
                del self.buffer[pos]
                if self.connector.uses_fast_extension:
                    self.connector.send_reject_request(*req)
                break

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connector.send_choke()

    def sent_choke_mp(self):
        assert self.choked
        if self.connector.uses_fast_extension:
            b2 = []
            for r in self.buffer:
                ((index, begin, length, length_sig, length_cert), p, p_sig,
                 p_cert) = r
                if index not in self.allowed_fast_pieces:
                    self.connector.send_reject_request(index, begin, length)
                else:
                    b2.append(r)
            self.buffer = b2
        else:
            del self.buffer[:]

    def sent_choke(self):
        assert self.choked
        if self.multidownload.micropayments:
            self.sent_choke_mp()
            return
        if self.connector.uses_fast_extension:
            b2 = []
            for r in self.buffer:
                ((index, begin, length), piecedata) = r
                if index not in self.allowed_fast_pieces:
                    self.connector.send_reject_request(index, begin, length)
                else:
                    b2.append(r)
            self.buffer = b2
        else:
            del self.buffer[:]

    def unchoke(self, time):
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connector.send_unchoke()

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()

    #EZ
    def send_key_reward_response(self, index, begin, length, good):
        """
         Send a response to the keyreward received to indicate if it could be validated.
         Validation of the key reward is checking if the sha1 hash of the reward is the
         same as the hashed reward received from the tracker.
         
         @param index: index of the piece (piece number), 
           a piece is a block of which the has is in the meta file
         @param begin: offset of the subpiece is the piece,
           this is currently always zero, this enables to account a single piece
           hashcheck failing to single peer. 
         @param length: length of the piece 
         @return: nothing 
        """
        self.connector.send_key_reward_response(index, begin, length, good)

    def got_key_reward(self, index, begin, length, key):
        """
         Process a received key reward. Compares hash of keyreward with received
         hash from tracker. If hash from tracker not present, a tracker request is made to retrieve it.
         If the key is OK any pending requests that were blocked
         are now unblocked.
         A key reward reponse is send. 
         
         @param index: index of the piece (piece number), 
           a piece is a block of which the has is in the meta file
         @param begin: offset of the subpiece is the piece,
           this is currently always zero, this enables to account a single piece
           hashcheck failing to single peer. 
         @param length: length of the piece 
         @return: nothing 
        """

        log("in got key reward for piece %d %d %d and key [%s] len key=%d" %
            (index, begin, length, key.encode('hex'), len(key)))

        if len(key) < 127:
            log("received empty/too small key  got_key_reward indicating bad payment key"
                )
            proceed = self.update_upload_key_status(index, begin, length,
                                                    False)
            if not proceed:
                log("too many retries.. in upload (update key status)")
                return
            self.got_request(index, begin, length)
            return

        if (not self.connector.id in self.multidownload.payment_key_hash_cache.keys()) or\
         self.multidownload.payment_key_hash_cache[self.connector.id] == {}:
            self.elogger.warn(
                "download: got_key_reward has no key_cache, so retrieve it first")
            if self.connector.id not in self.multidownload.payment_key_hash_cache.keys(
            ):
                self.elogger.warn(
                    "dl: initializing payment_key_hash_cache to {}")
                self.multidownload.payment_key_hash_cache[
                    self.connector.id] = {}
            self.elogger.warn(
                "calling rerequester.get_key_hash_list_for_peer()")
            self.multidownload.rerequester.get_key_hash_list_for_peer(
                self.connector.id, self.got_key_hash_list)
            if self.connector.id not in self.unchecked_key_rewards:
                self.elogger.warn("initializing unchecked key rewards")
                self.unchecked_key_rewards[self.connector.id] = {}
            self.unchecked_key_rewards[self.connector.id][(index, begin,
                                                           length)] = key
            self.elogger.warn("added key to unchecked rewarsd")
            return

        self.elogger.warn("cached keys of peer already available")
        self.elogger.warn("encrypted key reward hex: %s" % key.encode('hex'))
        key = self.multidownload.pk_tools.decrypt_piece_tls(
            self.multidownload.private_key, key)
        self.elogger.warn("decrypted key reward: " + key)
        key_cache = self.multidownload.payment_key_hash_cache[
            self.connector.id]
        self.elogger.warn(
            "comparing recvd %s, to stored %s" %
            (key_cache[(index, begin, length)], sha(key).hexdigest()))
        result = (key_cache[(index, begin, length)] == sha(key).hexdigest())
        print("result is %d" % result)
        self.send_key_reward_response(index, begin, length, result)
        print("after send key reward response")
        if result:
            print("key matches stored key")
            if self.connector.id in self.multidownload.waiting_for_reward:
                print("waiting for reward true in upload")
                self.elogger.warn("waiting for reward true in upload")
                waiting_for_piece_rewards = self.multidownload.waiting_for_reward[
                    self.connector.id]
                #ez: iterate to send all responses
                if (index, begin, length) in waiting_for_piece_rewards:
                    self.elogger.warn("removing from waiting for keys: %d" %
                                      index)
                    waiting_for_piece_rewards.remove((index, begin, length))
                    if self.blocked_piece_requests:
                        (bidx, bbegin,
                         blen) = self.blocked_piece_requests.pop()
                        print(
                            "sending blocked request to got_request: %d %d %d"
                            % (bidx, bbegin, blen))
                        self.got_request(bidx, bbegin, blen)
                else:
                    self.elogger.warn(
                        "received key but wasn't waiting for it (error in code)")
            self.elogger.warn("not waiting for reward from this peer")
        else:
            self.elogger.warn("recieved bad key")

    def update_upload_key_status(self, index, offset, length, success):
        """
          Change status of reward that has been sent.
          Status contains textual status and retries
          Textual status can be waiting | done | failed
          waiting: still waiting for key reward response
          done: key reward response received
          failed: too many retries
          
          @param index: piecenumber that the key reward was for
          @param offset: offset in piece of the subpiece that the key reward was for (always 0)
          @param length: length of piece that the keyreward was for
          
          @return boolean that signifies if any further attempt to send the reward must be done.
          """

        if (self, index, offset) not in self.uploaded_piece_status:
            self.uploaded_piece_status[(self, index, offset)] = ("waiting", 0)
        (status, retries) = self.uploaded_piece_status[(self, index, offset)]
        if success == True:
            status = "done"
        else:
            if status == "failed":
                return False
            retries += 1
            if (retries > MAX_REWARD_RETRIES):
                status = "failed"
            else:
                status = "waiting"
        self.uploaded_piece_status[(self, index, offset)] = (status, retries)
        return retries < MAX_REWARD_RETRIES

    def got_key_hash_list(self, peerid, keyhashliststring):
        """
         Callback function called from the Rerequester when a keyhashlist is 
         received from the tracker
         The received string is put in a dictionary with a piece identifier
         (piecenr,offset,len) as dictonarykeyword and the keyhash as value.
         
         The keyhashlist is a list of hashed payment keys, that is used to check
         the validity of received payment keys.
         
         @param peerid: The peerid of the peer the keyhashlist is for
         @param keyhashliststring: A string of concatenated hex encoded keyhashes.
        """

        keyhashliststring = self.multidownload.pk_tools.decrypt_piece_tls(
            self.multidownload.private_key, keyhashliststring.decode('hex'))

        self.elogger.warn("setting hashes of keys")
        piece_length = self.storage.piece_size
        key_length = P_KEY_LENGTH * 2
        keylist_length = len(keyhashliststring)
        keyhashlist = {}
        for idx in range(0, keylist_length / key_length):
            start = idx * key_length
            if idx == self.storage.numpieces - 1:
                piece_length = self.storage.lastlen
                self.elogger.warn("setting last key length to %d" % key_length)
            keyhashlist[(idx, 0,
                         piece_length)] = keyhashliststring[start:(start +
                                                                   key_length)]
            print "set keyhashnumber %d to %s" % (
                idx, keyhashliststring[start:(start + key_length)])

        self.multidownload.payment_key_hash_cache[
            self.connector.id] = keyhashlist
        #todo move to multidownload
        self.elogger.warn("validating unchecked rewards")
        for (index, offset,
             length) in self.unchecked_key_rewards[peerid].keys():
            key_to_check = self.unchecked_key_rewards[peerid][(index, offset,
                                                               length)]
            self.got_key_reward(index, offset, length, key_to_check)
            self.unchecked_key_rewards[peerid].pop((index, offset, length))
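
# Illustrative sketch (not part of the original example): the key-reward check in
# got_key_reward() above boils down to "decrypt the received key, hash it, and
# compare it with the hash the tracker supplied for that sub-piece".  The helper
# below assumes the cache maps (index, begin, length) -> hex SHA-1 digest, as the
# code above does; hashlib.sha1 stands in for whatever sha() the module imports.
from hashlib import sha1

def key_reward_matches(key_hash_cache, request, decrypted_key):
    """Return True when the decrypted payment key hashes to the cached digest."""
    expected = key_hash_cache.get(request)  # request is (index, begin, length)
    return expected is not None and expected == sha1(decrypted_key).hexdigest()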
예제 #31
0
class Download(object):
    """Implements BitTorrent protocol semantics for downloading over a single
       connection.  See Upload for the protocol semantics in the upload
       direction.  See Connector for the protocol syntax implementation."""
    def __init__(self, multidownload, connector):
        self.multidownload = multidownload
        self.connector = connector
        self.choked = True
        self.interested = False
        self.prefer_full = False
        self.active_requests = set()
        self.expecting_reject = set()
        self.intro_size = self.multidownload.chunksize * 4  # just a guess
        self.measure = Measure(multidownload.config['max_rate_period'])
        self.peermeasure = Measure(
            max(multidownload.storage.piece_size / 10000, 20))
        self.have = Bitfield(multidownload.numpieces)
        self.last = 0
        self.example_interest = None
        self.guard = BadDataGuard(self)
        self.suggested_pieces = []
        self.allowed_fast_pieces = []
        self._useful_received_listeners = set()
        self._raw_received_listeners = set()

        self.add_useful_received_listener(self.measure.update_rate)
        self.total_bytes = 0
        self.add_useful_received_listener(self.accumulate_total)

    def accumulate_total(self, x):
        self.total_bytes += x

    def add_useful_received_listener(self, listener):
        # "useful received bytes are used in measuring goodput.
        self._useful_received_listeners.add(listener)

    def remove_useful_received_listener(self, listener):
        self._useful_received_listeners.remove(listener)

    def fire_useful_received_listeners(self, bytes):
        for f in self._useful_received_listeners:
            f(bytes)

    def add_raw_received_listener(self, listener):
        self._raw_received_listeners.add(listener)

    def remove_raw_received_listener(self, listener):
        self._raw_received_listeners.remove(listener)

    def fire_raw_received_listeners(self, bytes):
        for f in self._raw_received_listeners:
            f(bytes)

    def _backlog(self):
        # Dave's suggestion:
        # backlog = 2 + throughput-delay product in chunks.
        # Assume one-way download propagation delay is always less than 200ms.
        # backlog = 2 + int(0.2 * self.measure.get_rate() /
        #                   self.multidownload.chunksize)
        # Then eliminate the cap of 50 and the 0.075*backlog.

        backlog = 2 + int(
            4 * self.measure.get_rate() / self.multidownload.chunksize)
        if self.total_bytes < self.intro_size:
            # optimistic backlog to get things started
            backlog = max(10, backlog)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))

        if self.multidownload.rm.endgame:
            # OPTIONAL: zero pipelining during endgame
            #b = 1
            pass

        return backlog

    def disconnected(self):
        self.multidownload.lost_peer(self)
        if self.have.numfalse == 0:
            self.multidownload.lost_have_all()
        else:
            # arg, slow
            count = 0
            target = len(self.have) - self.have.numfalse
            for i in xrange(len(self.have)):
                if count == target:
                    break
                if self.have[i]:
                    self.multidownload.lost_have(i)
                    count += 1
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        if not self.active_requests:
            return
        if self.multidownload.rm.endgame:
            self.active_requests.clear()
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.multidownload.rm.request_lost(index, begin, length)
            self.multidownload.active_requests_remove(index)
            if index not in lost:
                lost.append(index)
        self.active_requests.clear()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d._want(l):
                        d.interested = True
                        d.connector.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            # ugly. instead, it should move all the requests to expecting_reject
            if not self.connector.uses_fast_extension:
                self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        req = (index, begin, len(piece))

        if req not in self.active_requests:
            self.multidownload.discarded_bytes += len(piece)
            if self.connector.uses_fast_extension:
                # getting a piece we sent a cancel for
                # is just like receiving a reject
                self.got_reject_request(*req)
            return

        self.active_requests.remove(req)

        # we still give the peer credit in endgame, since we did request
        # the piece (it was in active_requests)
        self.fire_useful_received_listeners(len(piece))

        if self.multidownload.rm.endgame:
            if req not in self.multidownload.all_requests:
                self.multidownload.discarded_bytes += len(piece)
                return

            self.multidownload.all_requests.remove(req)

            for d in self.multidownload.downloads:
                if d.interested:
                    if not d.choked and req in d.active_requests:
                        d.connector.send_cancel(*req)
                        d.active_requests.remove(req)
                        if d.connector.uses_fast_extension:
                            d.expecting_reject.add(req)
                    d.fix_download_endgame()
        else:
            self._request_more()

        self.last = bttime()
        df = self.multidownload.storage.write(index, begin, piece, self.guard)
        df.addCallback(self._got_piece, index)
        df.addErrback(self.multidownload.errorfunc)

    def _got_piece(self, hashchecked, index):
        if hashchecked:
            self.multidownload.hashchecked(index)

    def _want(self, index):
        return (self.have[index]
                and self.multidownload.rm.want_requests(index))

    def send_request(self, index, begin, length):
        piece_size = self.multidownload.storage.piece_size
        if begin + length > piece_size:
            raise ValueError("Issuing request that exceeds piece size: "
                             "(%d + %d == %d) > %d" %
                             (begin, length, begin + length, piece_size))
        self.multidownload.active_requests_add(index)
        self.active_requests.add((index, begin, length))
        self.connector.send_request(index, begin, length)

    def _request_more(self, indices=[]):

        if self.choked:
            self._request_when_choked()
            return
        #log( "_request_more.active_requests=%s" % self.active_requests )
        b = self._backlog()
        if len(self.active_requests) >= b:
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
            return

        self.suggested_pieces = [
            i for i in self.suggested_pieces
            if not self.multidownload.storage.do_I_have(i)
        ]
        lost_interests = []
        while len(self.active_requests) < b:
            if not indices:
                interest = self.multidownload.picker.next(
                    self.have, self.multidownload.rm.active_requests,
                    self.multidownload.rm.fully_active, self.suggested_pieces)
            else:
                interest = None
                for i in indices:
                    if self._want(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connector.send_interested()
            # an example interest created by from_behind is preferable
            if self.example_interest is None:
                self.example_interest = interest

            # request as many chunks of interesting piece as fit in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(
                    interest, self.prefer_full)
                self.send_request(interest, begin, length)

                if not self.multidownload.rm.want_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connector.send_not_interested()
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()

    def _check_lost_interests(self, lost_interests):
        """
           Notify other downloads that these pieces are no longer interesting.

           @param lost_interests: list of pieces that have been fully 
               requested.
        """
        if not lost_interests:
            return
        for d in self.multidownload.downloads:
            if d.active_requests or not d.interested:
                continue
            if (d.example_interest is not None and
                    self.multidownload.rm.want_requests(d.example_interest)):
                continue
            # any() does not exist until python 2.5
            #if not any([d.have[lost] for lost in lost_interests]):
            #    continue
            for lost in lost_interests:
                if d.have[lost]:
                    break
            else:
                continue
            interest = self.multidownload.picker.from_behind(
                d.have, self.multidownload.rm.fully_active)
            if interest is None:
                d.interested = False
                d.connector.send_not_interested()
            else:
                d.example_interest = interest

    def _request_when_choked(self):
        self.allowed_fast_pieces = [
            i for i in self.allowed_fast_pieces
            if not self.multidownload.storage.do_I_have(i)
        ]
        if not self.allowed_fast_pieces:
            return
        fast = list(self.allowed_fast_pieces)

        b = self._backlog()
        lost_interests = []
        while len(self.active_requests) < b:

            while fast:
                piece = fast.pop()
                if self._want(piece):
                    break
            else:
                break  # no unrequested pieces among allowed fast.

            # request chunks until no more chunks or no more room in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(
                    piece, self.prefer_full)
                self.send_request(piece, begin, length)
                if not self.multidownload.rm.want_requests(piece):
                    lost_interests.append(piece)
                    break
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()

    def fix_download_endgame(self):
        want = []
        for a in self.multidownload.all_requests:
            if not self.have[a[0]]:
                continue
            if a in self.active_requests:
                continue
            want.append(a)

        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connector.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connector.send_interested()
        if self.choked:
            return
        random.shuffle(want)
        for req in want[:self._backlog() - len(self.active_requests)]:
            self.send_request(*req)

    def got_have(self, index):
        if self.have[index]:
            return
        if index == self.multidownload.numpieces - 1:
            self.peermeasure.update_rate(
                self.multidownload.storage.total_length -
                (self.multidownload.numpieces - 1) *
                self.multidownload.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.multidownload.storage.piece_size)
        self.have[index] = True
        self.multidownload.got_have(index)
        if (self.multidownload.storage.get_amount_left() == 0
                and self.have.numfalse == 0):
            self.connector.close()
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
        elif self.multidownload.rm.want_requests(index):
            self._request_more([index])  # call _request_more whether choked.
            if self.choked and not self.interested:
                self.interested = True
                self.connector.send_interested()

    def got_have_bitfield(self, have):
        if have.numfalse == 0:
            self._got_have_all(have)
            return
        self.have = have
        # arg, slow
        count = 0
        target = len(self.have) - self.have.numfalse
        for i in xrange(len(self.have)):
            if count == target:
                break
            if self.have[i]:
                self.multidownload.got_have(i)
                count += 1
        if self.multidownload.rm.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connector.send_interested()
                    return
        for piece in self.multidownload.rm.iter_want():
            if self.have[piece]:
                self.interested = True
                self.connector.send_interested()
                return

    def _got_have_all(self, have=None):
        if self.multidownload.storage.get_amount_left() == 0:
            self.connector.close()
            return
        if have is None:
            # bleh
            n = self.multidownload.numpieces
            rlen, extra = divmod(n, 8)
            if extra:
                extra = chr((0xFF << (8 - extra)) & 0xFF)
            else:
                extra = ''
            s = (chr(0xFF) * rlen) + extra
            have = Bitfield(n, s)
        self.have = have
        self.multidownload.got_have_all()
        if self.multidownload.rm.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                self.interested = True
                self.connector.send_interested()
                return
        for i in self.multidownload.rm.iter_want():
            self.interested = True
            self.connector.send_interested()
            return

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        return bttime() - self.last > self.multidownload.snub_time

    def got_have_none(self):
        pass  # currently no action is taken when have_none is received.
        # The picker already assumes the local peer has none of the
        # pieces until got_have is called.

    def got_have_all(self):
        assert self.connector.uses_fast_extension
        self._got_have_all()

    def got_suggest_piece(self, piece):
        assert self.connector.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece):
            self.suggested_pieces.append(piece)
        self._request_more()  # try to request more. Just returns if choked.

    def got_allowed_fast(self, piece):
        """Upon receiving this message, the multidownload knows that it is
           allowed to download the specified piece even when choked."""
        #log( "got_allowed_fast %d" % piece )
        assert self.connector.uses_fast_extension

        if not self.multidownload.storage.do_I_have(piece):
            if piece not in self.allowed_fast_pieces:
                self.allowed_fast_pieces.append(piece)
                random.shuffle(
                    self.allowed_fast_pieces)  # O(n) but n is small.
        self._request_more()  # will try to request.  Handles cases like
        # whether neighbor has "allowed fast" piece.

    def got_reject_request(self, piece, begin, length):
        assert self.connector.uses_fast_extension
        req = (piece, begin, length)

        if req not in self.expecting_reject:
            if req not in self.active_requests:
                self.connector.protocol_violation("Reject received for "
                                                  "piece not pending")
                self.connector.close()
                return
            self.active_requests.remove(req)
        else:
            self.expecting_reject.remove(req)

        if self.multidownload.rm.endgame:
            return

        self.multidownload.rm.request_lost(*req)
        if not self.choked:
            self._request_more()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more([piece])

        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                if d._want(piece):
                    d.interested = True
                    d.connector.send_interested()
                    break
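
# Rough worked example of the _backlog() heuristic in the Download class above.
# It is a simplified mirror for illustration: the intro_size boost and the endgame
# branch are omitted, and the 16 KiB chunk size is only an assumption.
def backlog_estimate(rate_bytes_per_sec, chunksize=16384):
    backlog = 2 + int(4 * rate_bytes_per_sec / chunksize)
    if backlog > 50:
        backlog = max(50, int(.075 * backlog))
    return backlog

# backlog_estimate(40 * 1024) == 12 outstanding requests at 40 KiB/s,
# backlog_estimate(400 * 1024) == 50 (the cap) at 400 KiB/s.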
예제 #32
0
class Download(object):
    """Implements BitTorrent protocol semantics for downloading over a single
       connection.  See Upload for the protocol semantics in the upload
       direction.  See Connector for the protocol syntax implementation."""

    def __init__(self, multidownload, connector):
        self.multidownload = multidownload
        self.connector = connector
        self.choked = True
        self.interested = False
        self.prefer_full = False
        self.active_requests = set()
        self.expecting_reject = set()
        self.intro_size = self.multidownload.chunksize * 4 # just a guess
        self.measure = Measure(multidownload.config['max_rate_period'])
        self.peermeasure = Measure(
            max(multidownload.storage.piece_size / 10000, 20))
        self.have = Bitfield(multidownload.numpieces)
        self.last = 0
        self.example_interest = None
        self.guard = BadDataGuard(self)
        self.suggested_pieces = []
        self.allowed_fast_pieces = []
        self._useful_received_listeners = set()
        self._raw_received_listeners = set()
        
        self.add_useful_received_listener(self.measure.update_rate)
        self.total_bytes = 0
        self.add_useful_received_listener(self.accumulate_total)

    def accumulate_total(self, x):
        self.total_bytes += x        

    def add_useful_received_listener(self, listener):
        # "useful received bytes are used in measuring goodput.
        self._useful_received_listeners.add(listener)

    def remove_useful_received_listener(self, listener):
        self._useful_received_listeners.remove(listener)

    def fire_useful_received_listeners(self, bytes):
        for f in self._useful_received_listeners:
            f(bytes)

    def add_raw_received_listener(self, listener):
        self._raw_received_listeners.add(listener)

    def remove_raw_received_listener(self, listener):
        self._raw_received_listeners.remove(listener)

    def fire_raw_received_listeners(self, bytes):
        for f in self._raw_received_listeners:
            f(bytes)

    def _backlog(self):
        # Dave's suggestion:
        # backlog = 2 + throughput-delay product in chunks.
        # Assume one-way download propagation delay is always less than 200ms.
        # backlog = 2 + int(0.2 * self.measure.get_rate() /
        #                   self.multidownload.chunksize)
        # Then eliminate the cap of 50 and the 0.075*backlog.

        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.multidownload.chunksize)
        if self.total_bytes < self.intro_size:
            # optimistic backlog to get things started
            backlog = max(10, backlog)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))

        if self.multidownload.rm.endgame:
            # OPTIONAL: zero pipelining during endgame
            #b = 1
            pass

        return backlog

    def disconnected(self):
        self.multidownload.lost_peer(self)
        if self.have.numfalse == 0:
            self.multidownload.lost_have_all()
        else:
            # arg, slow
            count = 0
            target = len(self.have) - self.have.numfalse
            for i in xrange(len(self.have)):
                if count == target:
                    break
                if self.have[i]:
                    self.multidownload.lost_have(i)
                    count += 1
        self._letgo()
        self.guard.download = None
        
    def _letgo(self):
        if not self.active_requests:
            return
        if self.multidownload.rm.endgame:
            self.active_requests.clear()
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.multidownload.rm.request_lost(index, begin, length)
            self.multidownload.active_requests_remove(index)
            if index not in lost:
                lost.append(index)
        self.active_requests.clear()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d._want(l):
                        d.interested = True
                        d.connector.send_interested()
                        break
        
    def got_choke(self):
        if not self.choked:
            self.choked = True
            if not self.connector.uses_fast_extension:
                self._letgo()
        
    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    def got_piece(self, index, begin, piece):
        req = (index, begin, len(piece))

        if req not in self.active_requests:
            self.multidownload.discarded_bytes += len(piece)
            if self.connector.uses_fast_extension:
                # getting a piece we sent a cancel for
                # is just like receiving a reject
                self.got_reject_request(*req)
            return

        self.active_requests.remove(req)
        
        # we still give the peer credit in endgame, since we did request
        # the piece (it was in active_requests)
        self.fire_useful_received_listeners(len(piece))

        if self.multidownload.rm.endgame:
            if req not in self.multidownload.all_requests:
                self.multidownload.discarded_bytes += len(piece)
                return

            self.multidownload.all_requests.remove(req)

            for d in self.multidownload.downloads:
                if d.interested:
                    if not d.choked and req in d.active_requests:
                        d.connector.send_cancel(*req)
                        d.active_requests.remove(req)
                        if d.connector.uses_fast_extension:
                            d.expecting_reject.add(req)
                    d.fix_download_endgame()
        else:
            self._request_more()
            
        self.last = bttime()
        df = self.multidownload.storage.write(index, begin, piece, self.guard)
        df.addCallback(self._got_piece, index)
        df.addErrback(self.multidownload.errorfunc)

    def _got_piece(self, hashchecked, index):
        if hashchecked:
            self.multidownload.hashchecked(index)
        
    def _want(self, index):
        return (self.have[index] and 
                self.multidownload.rm.want_requests(index))

    def send_request(self, index, begin, length):
        piece_size = self.multidownload.storage.piece_size
        if begin + length > piece_size:
            raise ValueError("Issuing request that exceeds piece size: "
                             "(%d + %d == %d) > %d" %
                             (begin, length, begin + length, piece_size))
        self.multidownload.active_requests_add(index)
        self.active_requests.add((index, begin, length))
        self.connector.send_request(index, begin, length)

    def _request_more(self, indices = []):
        
        if self.choked:
            self._request_when_choked()
            return
        #log( "_request_more.active_requests=%s" % self.active_requests )
        b = self._backlog()
        if len(self.active_requests) >= b:
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
            return

        self.suggested_pieces = [i for i in self.suggested_pieces 
            if not self.multidownload.storage.do_I_have(i)]
        lost_interests = []
        while len(self.active_requests) < b:
            if not indices:
                interest = self.multidownload.picker.next(self.have,
                                    self.multidownload.rm.active_requests,
                                    self.multidownload.rm.fully_active,
                                    self.suggested_pieces)
            else:
                interest = None
                for i in indices:
                    if self._want(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connector.send_interested()
            # an example interest created by from_behind is preferable
            if self.example_interest is None:
                self.example_interest = interest

            # request as many chunks of interesting piece as fit in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(interest,
                                                                  self.prefer_full)
                self.send_request(interest, begin, length)

                if not self.multidownload.rm.want_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connector.send_not_interested()
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()

    def _check_lost_interests(self, lost_interests):
        """
           Notify other downloads that these pieces are no longer interesting.

           @param lost_interests: list of pieces that have been fully 
               requested.
        """
        if not lost_interests:
            return
        for d in self.multidownload.downloads:
            if d.active_requests or not d.interested:
                continue
            if (d.example_interest is not None and 
                self.multidownload.rm.want_requests(d.example_interest)):
                continue
            # any() does not exist until python 2.5
            #if not any([d.have[lost] for lost in lost_interests]):
            #    continue
            for lost in lost_interests:
                if d.have[lost]:
                    break
            else:
                continue
            interest = self.multidownload.picker.from_behind(d.have,
                            self.multidownload.rm.fully_active)
            if interest is None:
                d.interested = False
                d.connector.send_not_interested()
            else:
                d.example_interest = interest

    def _request_when_choked(self):
        self.allowed_fast_pieces = [i for i in self.allowed_fast_pieces
            if not self.multidownload.storage.do_I_have(i)]
        if not self.allowed_fast_pieces:
            return
        fast = list(self.allowed_fast_pieces)

        b = self._backlog()
        lost_interests = []
        while len(self.active_requests) < b:

            while fast:
                piece = fast.pop()
                if self._want(piece):
                    break
            else:
                break # no unrequested pieces among allowed fast.

            # request chunks until no more chunks or no more room in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(piece,
                                                                  self.prefer_full)
                self.send_request(piece, begin, length)
                if not self.multidownload.rm.want_requests(piece):
                    lost_interests.append(piece)
                    break
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()
                
    def fix_download_endgame(self):
        want = []
        for a in self.multidownload.all_requests:
            if not self.have[a[0]]:
                continue
            if a in self.active_requests:
                continue
            want.append(a)

        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connector.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connector.send_interested()
        if self.choked:
            return
        random.shuffle(want)
        for req in want[:self._backlog() - len(self.active_requests)]:
            self.send_request(*req)
        
    def got_have(self, index):
        if self.have[index]:
            return
        if index == self.multidownload.numpieces-1:
            self.peermeasure.update_rate(self.multidownload.storage.total_length-
              (self.multidownload.numpieces-1)*self.multidownload.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.multidownload.storage.piece_size)
        self.have[index] = True
        self.multidownload.got_have(index)
        if (self.multidownload.storage.get_amount_left() == 0 and
            self.have.numfalse == 0):
            self.connector.close()
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
        elif self.multidownload.rm.want_requests(index):
            self._request_more([index]) # call _request_more whether choked.
            if self.choked and not self.interested:
                self.interested = True
                self.connector.send_interested()
        
    def got_have_bitfield(self, have):
        if have.numfalse == 0:
            self._got_have_all(have)
            return
        self.have = have
        # arg, slow
        count = 0
        target = len(self.have) - self.have.numfalse
        for i in xrange(len(self.have)):
            if count == target:
                break
            if self.have[i]:
                self.multidownload.got_have(i)
                count += 1
        if self.multidownload.rm.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connector.send_interested()
                    return
        for piece in self.multidownload.rm.iter_want():
            if self.have[piece]:
                self.interested = True
                self.connector.send_interested()
                return

    def _got_have_all(self, have=None):
        if self.multidownload.storage.get_amount_left() == 0:
            self.connector.close()
            return
        if have is None:
            # bleh
            n = self.multidownload.numpieces
            rlen, extra = divmod(n, 8)
            if extra:
                extra = chr((0xFF << (8 - extra)) & 0xFF)
            else:
                extra = ''
            s = (chr(0xFF) * rlen) + extra
            have = Bitfield(n, s)
        self.have = have
        self.multidownload.got_have_all()
        if self.multidownload.rm.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                self.interested = True
                self.connector.send_interested()
                return
        for i in self.multidownload.rm.iter_want():
            self.interested = True
            self.connector.send_interested()
            return
        

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        return bttime() - self.last > self.multidownload.snub_time

    def got_have_none(self):
        pass  # currently no action is taken when have_none is received.
              # The picker already assumes the local peer has none of the
              # pieces until got_have is called.

    def got_have_all(self):
        assert self.connector.uses_fast_extension
        self._got_have_all()

    def got_suggest_piece(self, piece):
        assert self.connector.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece):
            self.suggested_pieces.append(piece)
        self._request_more() # try to request more. Just returns if choked.

    def got_allowed_fast(self,piece):
        """Upon receiving this message, the multidownload knows that it is
           allowed to download the specified piece even when choked."""
        #log( "got_allowed_fast %d" % piece )
        assert self.connector.uses_fast_extension

        if not self.multidownload.storage.do_I_have(piece): 
            if piece not in self.allowed_fast_pieces:
                self.allowed_fast_pieces.append(piece)
                random.shuffle(self.allowed_fast_pieces)  # O(n) but n is small.
        self._request_more()  # will try to request.  Handles cases like
                              # whether neighbor has "allowed fast" piece.

    def got_reject_request(self, piece, begin, length):
        assert self.connector.uses_fast_extension
        req = (piece, begin, length) 

        if req not in self.expecting_reject:
            if req not in self.active_requests:
                self.connector.protocol_violation("Reject received for "
                                                  "piece not pending")
                self.connector.close()
                return
            self.active_requests.remove(req)
        else:
            self.expecting_reject.remove(req)

        if self.multidownload.rm.endgame:
            return

        self.multidownload.rm.request_lost(*req)
        if not self.choked:
            self._request_more()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more([piece])
            
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                if d._want(piece):
                    d.interested = True
                    d.connector.send_interested()
                    break
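
# Illustrative check of the all-ones bitfield built in _got_have_all() above:
# for n pieces the string has n // 8 full 0xFF bytes plus, when n is not a
# multiple of 8, one trailing byte with only its top (n % 8) bits set.
def all_ones_bitfield_string(n):
    rlen, extra = divmod(n, 8)
    tail = chr((0xFF << (8 - extra)) & 0xFF) if extra else ''
    return chr(0xFF) * rlen + tail

# e.g. all_ones_bitfield_string(10) is '\xff\xc0': pieces 0-9 set, padding bits clear.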
예제 #33
0
class DownloadPeer:
    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        self.choked = True
        self.interested = False
        self.active_requests = []
        self.measure = Measure(downloader.max_rate_period)
        self.peermeasure = Measure(downloader.max_rate_period)
        self.have = Bitfield(downloader.numpieces)
        self.last = -1000
        self.last2 = -1000
        self.example_interest = None
#        self.backlog = 2
        self.backlog = 8
        self.ip = connection.get_ip()
        self.guard = BadDataGuard(self)

    def _backlog(self, just_unchoked):
#        self.backlog = min(
#            2+int(4*self.measure.get_rate()/self.downloader.chunksize),
#            (2*just_unchoked)+self.downloader.queue_limit() )
#        if self.backlog > 50:
#            self.backlog = max(50, self.backlog * 0.075)
#        return self.backlog
        self.backlog = 4 + int(8 * self.measure.get_rate() / self.downloader.chunksize)
        return self.backlog
    
    def disconnected(self):
        self.downloader.lost_peer(self)
        if self.have.complete():
            self.downloader.picker.lost_seed()
        else:
            for i in xrange(len(self.have)):
                if self.have[i]:
                    self.downloader.picker.lost_have(i)
        if self.have.complete() and self.downloader.storage.is_endgame():
            self.downloader.add_disconnected_seed(self.connection.get_readable_id())
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        if self.downloader.queued_out.has_key(self):
            del self.downloader.queued_out[self]
        if not self.active_requests:
            return
        if self.downloader.endgamemode:
            self.active_requests = []
            return
        lost = {}
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            lost[index] = 1
        lost = lost.keys()
        self.active_requests = []
        if self.downloader.paused:
            return
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more()
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
                        d.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more(new_unchoke = True)
            self.last2 = clock()

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def send_interested(self):
        if not self.interested:
            self.interested = True
            self.connection.send_interested()
            if not self.choked:
                self.last2 = clock()

    def send_not_interested(self):
        if self.interested:
            self.interested = False
            self.connection.send_not_interested()

    def got_piece(self, index, begin, piece):
        length = len(piece)
        try:
            self.active_requests.remove((index, begin, length))
        except ValueError:
            self.downloader.discarded += length
            return False
        if self.downloader.endgamemode:
            self.downloader.all_requests.remove((index, begin, length))
        self.last = clock()
        self.last2 = clock()
        self.measure.update_rate(length)
        self.downloader.measurefunc(length)
        if not self.downloader.storage.piece_came_in(index, begin, piece, self.guard):
            self.downloader.piece_flunked(index)
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.endgamemode:
            for d in self.downloader.downloads:
                if d is not self:
                    if d.interested:
                        if d.choked:
                            assert not d.active_requests
                            d.fix_download_endgame()
                        else:
                            try:
                                d.active_requests.remove((index, begin, length))
                            except ValueError:
                                continue
                            d.connection.send_cancel(index, begin, length)
                            d.fix_download_endgame()
                    else:
                        assert not d.active_requests
        self._request_more()
        self.downloader.check_complete(index)
        return self.downloader.storage.do_I_have(index)

    def _request_more(self, new_unchoke = False):
        assert not self.choked
        if self.downloader.endgamemode:
            self.fix_download_endgame(new_unchoke)
            return
        if self.downloader.paused:
            return
        if len(self.active_requests) >= self._backlog(new_unchoke):
            if not (self.active_requests or self.backlog):
                self.downloader.queued_out[self] = 1
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            interest = self.downloader.picker.next(self.have,
                               self.downloader.storage.do_I_have_requests,
                               self.downloader.too_many_partials())
            if interest is None:
                break
            self.example_interest = interest
            self.send_interested()
            loop = True
            while len(self.active_requests) < self.backlog and loop:
                begin, length = self.downloader.storage.new_request(interest)
                self.downloader.picker.requested(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                self.downloader.chunk_requested(length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    loop = False
                    lost_interests.append(interest)
        if not self.active_requests:
            self.send_not_interested()
        if lost_interests:
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(d.have,
                                   self.downloader.storage.do_I_have_requests,
                                   self.downloader.too_many_partials())
                if interest is None:
                    d.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.is_endgame():
            self.downloader.start_endgame()


    def fix_download_endgame(self, new_unchoke = False):
        if self.downloader.paused:
            return
        if len(self.active_requests) >= self._backlog(new_unchoke):
            if not (self.active_requests or self.backlog) and not self.choked:
                self.downloader.queued_out[self] = 1
            return
        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
        if not (self.active_requests or want):
            self.send_not_interested()
            return
        if want:
            self.send_interested()
        if self.choked:
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)
            self.downloader.chunk_requested(length)

    def got_have(self, index):
        if index == self.downloader.numpieces-1:
            self.downloader.totalmeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
            self.peermeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
        else:
            self.downloader.totalmeasure.update_rate(self.downloader.storage.piece_length)
            self.peermeasure.update_rate(self.downloader.storage.piece_length)
        if not self.have[index]:
            self.have[index] = True
            self.downloader.picker.got_have(index)
            if self.have.complete():
                self.downloader.picker.became_seed()
                if self.downloader.storage.am_I_complete():
                    self.downloader.add_disconnected_seed(self.connection.get_readable_id())
                    self.connection.close()
            elif self.downloader.endgamemode:
                self.fix_download_endgame()
            elif ( not self.downloader.paused
                   and not self.downloader.picker.is_blocked(index)
                   and self.downloader.storage.do_I_have_requests(index) ):
                if not self.choked:
                    self._request_more()
                else:
                    self.send_interested()
        return self.have.complete()

    def _check_interests(self):
        if self.interested or self.downloader.paused:
            return
        for i in xrange(len(self.have)):
            if ( self.have[i] and not self.downloader.picker.is_blocked(i)
                 and ( self.downloader.endgamemode
                       or self.downloader.storage.do_I_have_requests(i) ) ):
                self.send_interested()
                return

    def got_have_bitfield(self, have):
        if self.downloader.storage.am_I_complete() and have.complete():
            if self.downloader.super_seeding:
                self.connection.send_bitfield(have.tostring()) # be nice, show you're a seed too
            self.connection.close()
            self.downloader.add_disconnected_seed(self.connection.get_readable_id())
            return False
        self.have = have
        if have.complete():
            self.downloader.picker.got_seed()
        else:
            temp = 4
            for i in xrange(len(have)):
                if have[i]:
                    self.downloader.picker.got_have(i)
        if self.downloader.endgamemode and not self.downloader.paused:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.send_interested()
                    break
        else:
            self._check_interests()
        return have.complete()

    def get_rate(self):
        return self.measure.get_rate()
        
    def get_peer_completion(self):
        if len(self.have) > 0:
            return float(len(self.have) - self.have.numfalse) / float(len(self.have))
        else:
            return 1.0

    def is_snubbed(self):
        if ( self.interested and not self.choked
             and clock() - self.last2 > self.downloader.snub_time ):
            for index, begin, length in self.active_requests:
                self.connection.send_cancel(index, begin, length)
            self.got_choke()    # treat it just like a choke
        return clock() - self.last > self.downloader.snub_time
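
# Worked arithmetic for the simpler backlog rule in this older DownloadPeer class:
# backlog = 4 + int(8 * rate / chunksize), with no upper cap.  Assuming the usual
# 16 KiB chunk size, a peer measured at 64 KiB/s gets 4 + int(8 * 65536 / 16384) = 36
# outstanding requests, and roughly one more request per additional 2 KiB/s of rate.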
예제 #34
0
class Upload(object):
    """Upload over a single connection."""
    
    def __init__(self, connection, ratelimiter, totalup, choker,
                 storage, max_slice_length, max_rate_period, num_fast,
                 torrent):
        assert isinstance(connection, BitTorrent.Connector.Connection)
        assert isinstance(torrent, BitTorrent.Torrent.Torrent)
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.torrent = torrent
        self.choker = choker
        self.num_fast = num_fast
        self.storage = storage
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.buffer = []    # contains piece data about to be sent.
        self.measure = Measure(max_rate_period)
        self.allowed_fast_pieces = []
        if connection.uses_fast_extension:
            if storage.get_amount_left() == 0:
                connection.send_have_all()
            elif storage.do_I_have_anything():
                connection.send_bitfield(storage.get_have_list())
            else:
                connection.send_have_none()
            self._send_allowed_fast_list()
        elif storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())


    def _send_allowed_fast_list(self):
        """Computes and sends the 'allowed fast' set.  """

        self.allowed_fast_pieces = _compute_allowed_fast_list(
                        self.torrent.infohash,
                        self.connection.ip, self.num_fast,
                        self.storage.get_num_pieces())

        for index in self.allowed_fast_pieces:
            self.connection.send_allowed_fast(index)


    def got_not_interested(self):
        if self.interested:
            self.interested = False
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self, index, begin, length):
        df = self.storage.read(index, begin, length)
        def fail(e):
            log( "get_upload_chunk failed", exc_info=e )
            self.connection.close()
            return None
        def update_rate(piece):  # piece is actual data.
            if piece is None:
                return fail("Piece is None")
            return (index, begin, piece)
        df.addCallback(update_rate)
        df.addErrback(fail)
        return df

    def update_rate(self, bytes):
        self.measure.update_rate(bytes)
        self.totalup.update_rate(bytes)

    def got_request(self, index, begin, length):
        if not self.interested or length > self.max_slice_length:
            self.connection.close()
            return
        if index in self.allowed_fast_pieces or not self.connection.choke_sent:
            df = self.get_upload_chunk(index, begin, length)
            def got_piece(piece):  # 3rd elem in tuple is piece data.
                if self.connection.closed or piece is None:
                    return
                index, begin, piece = piece # piece changes from tuple to data.
                if self.choked:
                    if not self.connection.uses_fast_extension:
                        return
                    if index not in self.allowed_fast_pieces:
                        self.connection.send_reject_request(
                            index, begin, len(piece))
                        return
                self.buffer.append(((index, begin, len(piece)), piece))
                if self.connection.next_upload is None and \
                       self.connection.connection.is_flushed():
                    self.ratelimiter.queue(self.connection)
            df.addCallback(got_piece)
        elif self.connection.uses_fast_extension:
            self.connection.send_reject_request( index, begin, length )
            
    def got_cancel(self, index, begin, length):
        req = (index, begin, length)
        for pos, (r, p) in enumerate(self.buffer):
            if r == req:
                del self.buffer[pos]
                if self.connection.uses_fast_extension:
                    self.connection.send_reject_request(*req)
                break

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()

    def sent_choke(self):
        assert self.choked
        if self.connection.uses_fast_extension:
            b2 = []
            for r in self.buffer:
                ((index,begin,length),piecedata) = r
                if index not in self.allowed_fast_pieces:
                    self.connection.send_reject_request( index, begin, length )
                else:
                    b2.append(r)
            self.buffer = b2
        else:
            del self.buffer[:]

    def unchoke(self, time):
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connection.send_unchoke()

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
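
# _compute_allowed_fast_list() is referenced above but not shown in this example.
# BEP 6 describes the usual construction: mask the peer's IPv4 address to /24,
# append the infohash, and repeatedly SHA-1 hash, reading 4-byte chunks modulo the
# piece count until num_fast distinct indices are collected.  The sketch below
# follows that description and is only an assumption about what the real helper does.
import socket
import struct
from hashlib import sha1

def compute_allowed_fast_sketch(infohash, ip, num_fast, num_pieces):
    num_fast = min(num_fast, num_pieces)  # cannot collect more distinct pieces than exist
    x = struct.pack('>L', struct.unpack('>L', socket.inet_aton(ip))[0] & 0xFFFFFF00)
    x += infohash
    allowed = []
    while len(allowed) < num_fast:
        x = sha1(x).digest()
        for i in range(0, 20, 4):
            if len(allowed) >= num_fast:
                break
            index = struct.unpack('>L', x[i:i + 4])[0] % num_pieces
            if index not in allowed:
                allowed.append(index)
    return allowed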
예제 #35
0
class Upload(object):

    def __init__(self, connection, ratelimiter, totalup, totalup2, choker,
                 storage, max_slice_length, max_rate_period):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.totalup2 = totalup2
        self.choker = choker
        self.storage = storage
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.buffer = []
        self.measure = Measure(max_rate_period)
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            del self.buffer[:]
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self):
        if not self.buffer:
            return None
        index, begin, length = self.buffer.pop(0)
        piece = self.storage.get_piece(index, begin, length)
        if piece is None:
            self.connection.close()
            return None
        self.measure.update_rate(len(piece))
        self.totalup.update_rate(len(piece))
        self.totalup2.update_rate(len(piece))
        return (index, begin, piece)

    def got_request(self, index, begin, length):
        if not self.interested or length > self.max_slice_length:
            self.connection.close()
            return
        if not self.connection.choke_sent:
            self.buffer.append((index, begin, length))
            if self.connection.next_upload is None and \
                   self.connection.connection.is_flushed():
                self.ratelimiter.queue(self.connection)

    def got_cancel(self, index, begin, length):
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()

    def sent_choke(self):
        assert self.choked
        del self.buffer[:]

    def unchoke(self, time):
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connection.send_unchoke()

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
예제 #36
0
class Upload(object):

    def __init__(self, connection, ratelimiter, totalup, totalup2, choker,
                 storage, max_slice_length, max_rate_period, logcollector):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.totalup2 = totalup2
        self.choker = choker
        self.storage = storage
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        #the buffer list contains a tuple (index, begin, length) for each
        #block requested by the remote peer. A non-empty buffer means that
        #there is data to send to the remote peer that the remote peer has
        #already requested.
        self.buffer = []
        #PFS begin
        self.config = choker.config
        self.I = {}     # I[piece id] = block uploaded count in the piece id
        self.r = {}     # r[piece_id] = block requested count in the piece id
        #PFS end
        self.measure = Measure(max_rate_period)
        #send the bitfield to the peer the first time it connects.
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())
        self.logcollector = logcollector

    def got_not_interested(self):
        if self.interested:
            self.logcollector.log(None, 'R NI ' + str(self.connection.ip))
            self.interested = False
            del self.buffer[:]
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.logcollector.log(None, 'R I ' + str(self.connection.ip))
            self.interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self):
        if not self.buffer:
            return None
        #buffer.pop(0) returns the element at index 0 and removes it from the
        #buffer.
        index, begin, length = self.buffer.pop(0)

        #PFS begin
        if self.choker.done():
            if index in self.I:
                self.I[index] += 1
            else:
                self.I[index] = 1
                if index in self.choker.I:
                    self.choker.I[index] += 1
                else:
                    self.choker.I[index] = 1
                self.logcollector.log(None, 'PFS ' + str(self.connection.ip) + \
                                      ' theta(' + str(index) + ') ' + str(self.choker.I[index]))
            if index not in self.choker.theta:
                self.choker.theta[index] = 1.0
        #PFS end

        piece = self.storage.get_piece(index, begin, length)
        if piece is None:
            self.logcollector.log(None, 'CON C ' + str(self.connection.ip) +  ' E 1')
            self.connection.close()
            return None
        self.measure.update_rate(len(piece))
        self.totalup.update_rate(len(piece))
        self.totalup2.update_rate(len(piece))
        return (index, begin, piece)

    def got_request(self, index, begin, length):
        if not self.interested or length > self.max_slice_length:
            self.logcollector.log(None, 'CON C ' + str(self.connection.ip) +  ' E 2')
            self.connection.close()
            return
        self.logcollector.log(None, 'R R ' + str(self.connection.ip) + ' i ' + str(index) + ' b ' + str(begin) + \
                              ' l ' + str(length))            
        if not self.connection.choke_sent:
            self.buffer.append((index, begin, length))
            if self.connection.next_upload is None and \
                   self.connection.connection.is_flushed():
                self.ratelimiter.queue(self.connection)

            # EPFS begin
            if self.choker.done():
                # update vector of requests {r1,...}
                self.PFS_update_r(index)
            # EPFS end


    # EPFS step 5: Seed updates his data structure when receiving REQUEST from leechers
    def PFS_update_r(self, index):
        if self.config['scheduling_algorithm'] == 'BT':
            return False
        if self.choker.tm_first_req == 0:
            self.choker.tm_first_req = bttime()
        if index in self.r:
            self.r[index] += 1
        else:
            self.r[index] = 1
            if index in self.choker.r:
                self.choker.r[index] += 1.0
            else:
                self.choker.r[index] = 1.0
                self.logcollector.log(None, 'PFS ' + str(self.connection.ip) + \
                                      ' r[' + str(index) + '] ' + str(self.choker.r[index]))
        return True
    # EPFS end
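    # Bookkeeping note: self.r counts block requests from this peer per piece,
    # while self.choker.r is bumped only the first time this connection asks
    # for a piece, so it effectively counts how many peers requested the piece.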

    def got_cancel(self, index, begin, length):
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()

    def sent_choke(self):
        assert self.choked
        del self.buffer[:]

    def unchoke(self, time):
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connection.send_unchoke()

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
예제 #37
0
class Upload(object):
    """Upload over a single connection."""
    
    def __init__(self, multidownload, connector, ratelimiter, choker, storage, 
                 max_chunk_length, max_rate_period, num_fast, infohash):
        assert isinstance(connector, BitTorrent.Connector.Connector)
        self.multidownload = multidownload
        self.connector = connector
        self.ratelimiter = ratelimiter
        self.infohash = infohash 
        self.choker = choker
        self.num_fast = num_fast
        self.storage = storage
        self.max_chunk_length = max_chunk_length
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.had_length_error = False
        self.had_max_requests_error = False
        self.buffer = []    # contains piece data about to be sent.
        self.measure = Measure(max_rate_period)
        connector.add_sent_listener(self.measure.update_rate) 
        self.allowed_fast_pieces = []
        if connector.uses_fast_extension:
            if storage.get_amount_left() == 0:
                connector.send_have_all()
            elif storage.do_I_have_anything():
                connector.send_bitfield(storage.get_have_list())
            else:
                connector.send_have_none()
            self._send_allowed_fast_list()
        elif storage.do_I_have_anything():
            connector.send_bitfield(storage.get_have_list())


    def _send_allowed_fast_list(self):
        """Computes and sends the 'allowed fast' set.  """
        self.allowed_fast_pieces = self._compute_allowed_fast_list(
                        self.infohash,
                        self.connector.ip, self.num_fast,
                        self.storage.get_num_pieces())

        for index in self.allowed_fast_pieces:
            self.connector.send_allowed_fast(index)

    def _compute_allowed_fast_list(self, infohash, ip, num_fast, num_pieces):

        # assume IPv4 for now.
        iplist = [int(x) for x in ip.split(".")]

        # classful heuristic.
        if iplist[0] | 0x7F==0xFF or iplist[0] & 0xC0==0x80: # class A or B
            iplist = [chr(iplist[0]),chr(iplist[1]),chr(0),chr(0)]
        else:
            iplist = [chr(iplist[0]),chr(iplist[1]),chr(iplist[2]),chr(0)]
        h = "".join(iplist)
        h = "".join([h,infohash])
        fastlist = []
        assert num_pieces < 2**32
        if num_pieces <= num_fast:
            return range(num_pieces) # <---- this would be bizarre
        while True:
            h = sha1(h).digest() # rehash hash to generate new random string.
            #log("infohash=%s" % h.encode('hex'))
            for i in xrange(5):
                j = i*4
                y = [ord(x) for x in h[j:j+4]]
                z = (y[0] << 24) + (y[1]<<16) + (y[2]<<8) + y[3]
                index = int(z % num_pieces)
                #log("z=%s=%d, index=%d" % ( hex(z), z, index ))
                if index not in fastlist:
                    fastlist.append(index)
                    if len(fastlist) >= num_fast:
                        return fastlist

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            self.choker.not_interested(self.connector)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.choker.interested(self.connector)

    def get_upload_chunk(self, index, begin, length):
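        # storage.read() is asynchronous: it returns a deferred whose callback
        # repackages the data as (index, begin, piece); read failures are
        # routed to _failed_get_upload_chunk below.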
        df = self.storage.read(index, begin, length)
        df.addCallback(lambda piece: (index, begin, piece))
        df.addErrback(self._failed_get_upload_chunk)
        return df

    def _failed_get_upload_chunk(self, f):
        log("get_upload_chunk failed", exc_info=f.exc_info())
        self.connector.close()
        return f

    def got_request(self, index, begin, length):
        if not self.interested:
            self.connector.protocol_violation("request when not interested")
            self.connector.close()
            return
        if length > self.max_chunk_length:
            if not self.had_length_error:
                m = ("request length %r exceeds max %r" %
                     (length, self.max_chunk_length))
                self.connector.protocol_violation(m)
                self.had_length_error = True
            #self.connector.close()
            # we could still download...
            if self.connector.uses_fast_extension:
                self.connector.send_reject_request(index, begin, length)
            return
        if len(self.buffer) > MAX_REQUESTS:
            if not self.had_max_requests_error:
                m = ("max request limit %d" % MAX_REQUESTS)
                self.connector.protocol_violation(m)
                self.had_max_requests_error = True
            if self.connector.uses_fast_extension:
                self.connector.send_reject_request(index, begin, length)
            return
        if index in self.allowed_fast_pieces or not self.connector.choke_sent:
            df = self.get_upload_chunk(index, begin, length)
            df.addCallback(self._got_piece)
            df.addErrback(self.multidownload.errorfunc)
        elif self.connector.uses_fast_extension:
            self.connector.send_reject_request(index, begin, length)

    def _got_piece(self, piece_info):
        index, begin, piece = piece_info
        if self.connector.closed:
            return
        if self.choked:
            if not self.connector.uses_fast_extension:
                return
            if index not in self.allowed_fast_pieces:
                self.connector.send_reject_request(index, begin, len(piece))
                return
        self.buffer.append(((index, begin, len(piece)), piece))
        if self.connector.next_upload is None and \
               self.connector.connection.is_flushed():
            self.ratelimiter.queue(self.connector)
            
    def got_cancel(self, index, begin, length):
        req = (index, begin, length)
        for pos, (r, p) in enumerate(self.buffer):
            if r == req:
                del self.buffer[pos]
                if self.connector.uses_fast_extension:
                    self.connector.send_reject_request(*req)
                break

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connector.send_choke()

    def sent_choke(self):
        assert self.choked
        if self.connector.uses_fast_extension:
            b2 = []
            for r in self.buffer:
                ((index,begin,length),piecedata) = r
                if index not in self.allowed_fast_pieces:
                    self.connector.send_reject_request(index, begin, length)
                else:
                    b2.append(r)
            self.buffer = b2
        else:
            del self.buffer[:]

    def unchoke(self, time):
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connector.send_unchoke()

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
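
The allowed-fast set above is derived purely from the peer's masked IP address and the torrent infohash, so both sides can compute it independently. A minimal standalone sketch of that derivation follows; the function name, the hashlib/struct imports, and the example values are illustrative and not part of the original module.

from hashlib import sha1
import struct

def allowed_fast_set(infohash, ip, num_fast, num_pieces):
    # Mask the peer's IPv4 address with the same classful heuristic used above.
    octets = [int(x) for x in ip.split('.')]
    if octets[0] | 0x7F == 0xFF or octets[0] & 0xC0 == 0x80:
        masked = [octets[0], octets[1], 0, 0]
    else:
        masked = [octets[0], octets[1], octets[2], 0]
    h = ''.join([chr(o) for o in masked]) + infohash
    if num_pieces <= num_fast:
        return range(num_pieces)
    pieces = []
    while True:
        h = sha1(h).digest()              # rehash to extend the pseudo-random stream
        for i in xrange(5):               # five 32-bit words per 20-byte digest
            (word,) = struct.unpack('!I', h[i * 4:i * 4 + 4])
            index = int(word % num_pieces)
            if index not in pieces:
                pieces.append(index)
                if len(pieces) >= num_fast:
                    return pieces

# Illustrative call: a fake 20-byte infohash, a /24-masked peer, 4 fast pieces.
#print allowed_fast_set('\x01' * 20, '10.0.0.5', 4, 100)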
예제 #38
0
class Download(object):
    """Implements BitTorrent protocol semantics for downloading over a single
       connection.  See Upload for the protocol semantics in the upload
       direction.  See Connector for the protocol syntax implementation."""
       

    def __init__(self, multidownload, connector):
        self.multidownload = multidownload
        self.connector = connector
        self.choked = True
        self.interested = False
        self.prefer_full = False
        self.active_requests = set()
        self.expecting_reject = set()
        self.intro_size = self.multidownload.chunksize * 4 # just a guess
        self.measure = Measure(multidownload.config['max_rate_period'])
        self.peermeasure = Measure(
            max(multidownload.storage.piece_size / 10000, 20))
        self.have = Bitfield(multidownload.numpieces)
        self.last = 0
        self.example_interest = None
        self.guard = BadDataGuard(self)
        self.suggested_pieces = []
        self.allowed_fast_pieces = []
        self._useful_received_listeners = set()
        self._raw_received_listeners = set()
        
        self.add_useful_received_listener(self.measure.update_rate)
        self.total_bytes = 0
        self.add_useful_received_listener(self.accumulate_total)
        
        self.payment_key_status = {} #maps (idx, offset, len) to ("statusstring", retries, good) for sent payment keys
                                     #TODO: move to multidownload, and later to a separate package
        self.peer_certificate = None
        self.own_certificate_is_sent_to_peer = False
        
    def accumulate_total(self, x):
        self.total_bytes += x        

    def add_useful_received_listener(self, listener):
        # "useful received bytes are used in measuring goodput.
        self._useful_received_listeners.add(listener)

    def remove_useful_received_listener(self, listener):
        self._useful_received_listeners.remove(listener)

    def fire_useful_received_listeners(self, bytes):
        for f in self._useful_received_listeners:
            f(bytes)

    def add_raw_received_listener(self, listener):
        self._raw_received_listeners.add(listener)

    def remove_raw_received_listener(self, listener):
        self._raw_received_listeners.remove(listener)

    def fire_raw_received_listeners(self, bytes):
        for f in self._raw_received_listeners:
            f(bytes)

    def _backlog(self):
        # Dave's suggestion:
        # backlog = 2 + throughput-delay product in chunks.
        # Assume one-way download propagation delay is always less than 200ms.
        # backlog = 2 + int(0.2 * self.measure.get_rate() / 
        #                 self.multidownload.chunksize
        # Then eliminate the cap of 50 and the 0.075*backlog. 
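        # Worked example (assuming the usual 16 KiB chunk size): at a measured
        # rate of 400000 bytes/s, backlog = 2 + int(4 * 400000 / 16384) = 99,
        # which the cap below reduces to max(50, int(0.075 * 99)) = 50.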

        backlog = 2 + int(4 * self.measure.get_rate() /
                          self.multidownload.chunksize)
        if self.total_bytes < self.intro_size:
            # optimistic backlog to get things started
            backlog = max(10, backlog)
        if backlog > 50:
            backlog = max(50, int(.075 * backlog))

        if self.multidownload.rm.endgame:
            # OPTIONAL: zero pipelining during endgame
            #b = 1
            pass

        return backlog

    def disconnected(self):
        self.multidownload.lost_peer(self)
        if self.have.numfalse == 0:
            self.multidownload.lost_have_all()
        else:
            # arg, slow
            count = 0
            target = len(self.have) - self.have.numfalse
            for i in xrange(len(self.have)):
                if count == target:
                    break
                if self.have[i]:
                    self.multidownload.lost_have(i)
                    count += 1
        self._letgo()
        self.guard.download = None
        
    def _letgo(self):
        if not self.active_requests:
            return
        if self.multidownload.rm.endgame:
            self.active_requests.clear()
            return
        lost = []
        for index, begin, length in self.active_requests:
            self.multidownload.rm.request_lost(index, begin, length)
            self.multidownload.active_requests_remove(index)
            if index not in lost:
                lost.append(index)
        self.active_requests.clear()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more(lost)
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d._want(l):
                        d.interested = True
                        d.connector.send_interested()
                        break
        
    def got_choke(self):
        if not self.choked:
            self.choked = True
            if not self.connector.uses_fast_extension:
                self._letgo()
        
    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more()

    
    
    def got_cert(self, cert):
        """
         checks certificate and stores it into an object from which 
         the public public key can be read
         
         @param cert: a DER encoded X509 certificate string 
         @return: the certificate object or None in case of an invalid certificate         
        """
        if self.peer_certificate and not cert:
            return self.peer_certificate
       
        print "before parsing der certficate in got_cert length= %d" % len(cert)
        peer_certificate = self.multidownload.pk_tools.parseDERCert_tls(cert)
        print "after parsing cert = %s" % str(peer_certificate)
        if peer_certificate == self.peer_certificate and self.peer_certificate is not None:
            print "peer certificate already present so skipping check, len cert = %d == len self.cert = %d" % (len(peer_certificate), len(self.peer_certificate))
            return peer_certificate
        else:
            cert_ok = self.multidownload.pk_tools.validate_in_mem_certificate(peer_certificate)
            if cert_ok:
                print "peer certificate validated"
                self.peer_certificate = peer_certificate
                return peer_certificate
            else:
                print "peer certificate NOT OK!!!"
                return None
           
    def got_mp_piece(self, index, begin, piece, sig):
        """
         check if the certificate and the signature of the received piece message are OK
         if they are OK process piece with got_piece, otherwise send a empty keyreward message
         
         @param index: index of the piece (piece number), 
           a piece is a block of which the has is in the meta file
         @param begin: offset of the subpiece is the piece,
           this is currently always zero, this enables to account a single piece
           hashcheck failing to single peer. 
         @param piece: The piece itself 
         @param sig: the signature of the piece , created by encrypting a zero padded
          sha-1 hash.
         @return: nothing 
        """
        
        if self.peer_certificate and self.peer_certificate.publicKey:
                    
            sig_ok = self.multidownload.pk_tools.check_sha_signature_tls(self.peer_certificate.publicKey,sig,piece)
            print "after checking signature sig_OK is %d" % sig_ok
            if sig_ok:
                print "signature ok in got_mp_piece"
                self.got_piece(index,begin,piece)
                return
            else:
                print "signature not OK in got_mp_piece"
        #TODO maybe also increment retries
        else:
            print "no peer certificate or certificate.publickey"
            print "cert= %s\n\n\n publicKey= %s" % (str(self.peer_certificate),str(self.peer_certificate.publicKey))
        self.send_key_reward(index,begin,len(piece),False)
    
    def got_piece(self, index, begin, piece):
        req = (index, begin, len(piece))

        if req not in self.active_requests:
            self.multidownload.discarded_bytes += len(piece)
            if self.connector.uses_fast_extension:
                # getting a piece we sent a cancel for
                # is just like receiving a reject
                self.got_reject_request(*req)
            return

        self.active_requests.remove(req)
        
        # we still give the peer credit in endgame, since we did request
        # the piece (it was in active_requests)
        self.fire_useful_received_listeners(len(piece))

        if self.multidownload.rm.endgame:
            if req not in self.multidownload.all_requests:
                self.multidownload.discarded_bytes += len(piece)
                return

            self.multidownload.all_requests.remove(req)

            for d in self.multidownload.downloads:
                if d.interested:
                    if not d.choked and req in d.active_requests:
                        d.connector.send_cancel(*req)
                        d.active_requests.remove(req)
                        if d.connector.uses_fast_extension:
                            d.expecting_reject.add(req)
                    d.fix_download_endgame()
        else:
            self._request_more()
            
        self.last = bttime()
        df = self.multidownload.storage.write(index, begin, piece, self.guard)
        df.addCallback(self._got_piece, index)
        df.addErrback(self.multidownload.errorfunc)

    def _got_piece(self, hashchecked, index):
        if hashchecked:
            self.multidownload.hashchecked(index)
        
    def _want(self, index):
        return (self.have[index] and 
                self.multidownload.rm.want_requests(index))

    def send_request(self, index, begin, length):
        piece_size = self.multidownload.storage.piece_size
        if begin + length > piece_size:
            raise ValueError("Issuing request that exceeds piece size: "
                             "(%d + %d == %d) > %d" %
                             (begin, length, begin + length, piece_size))
        self.multidownload.active_requests_add(index)
        self.active_requests.add((index, begin, length))
        if self.multidownload.micropayments:
            msg_to_sign = pack("!iii",index,begin,length)
            sig = self.multidownload.pk_tools.get_signature_tls(self.multidownload.private_key, msg_to_sign)
            if self.own_certificate_is_sent_to_peer:
                cert_to_send = ""
            else:
                cert_to_send = self.multidownload.certificate.writeBytes().tostring()
                self.own_certificate_is_sent_to_peer = True
                self.connector.upload.own_certificate_is_sent_to_peer = True
            self.connector.send_mp_request(index, begin, length,sig,cert_to_send)
        else:
            self.connector.send_request(index, begin, length)

    def _request_more(self, indices = []):
        
        if self.choked:
            self._request_when_choked()
            return
        #log( "_request_more.active_requests=%s" % self.active_requests )
        b = self._backlog()
        if len(self.active_requests) >= b:
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
            return

        self.suggested_pieces = [i for i in self.suggested_pieces 
            if not self.multidownload.storage.do_I_have(i)]
        lost_interests = []
        while len(self.active_requests) < b:
            if not indices:
                interest = self.multidownload.picker.next(self.have,
                                    self.multidownload.rm.active_requests,
                                    self.multidownload.rm.fully_active,
                                    self.suggested_pieces)
            else:
                interest = None
                for i in indices:
                    if self._want(i):
                        interest = i
                        break
            if interest is None:
                break
            if not self.interested:
                self.interested = True
                self.connector.send_interested()
            # an example interest created by from_behind is preferable
            if self.example_interest is None:
                self.example_interest = interest

            # request as many chunks of interesting piece as fit in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(interest,
                                                                  self.prefer_full)
                self.send_request(interest, begin, length)

                if not self.multidownload.rm.want_requests(interest):
                    lost_interests.append(interest)
                    break
        if not self.active_requests and self.interested:
            self.interested = False
            self.connector.send_not_interested()
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()

    def _check_lost_interests(self, lost_interests):
        """
           Notify other downloads that these pieces are no longer interesting.

           @param lost_interests: list of pieces that have been fully 
               requested.
        """
        if not lost_interests:
            return
        for d in self.multidownload.downloads:
            if d.active_requests or not d.interested:
                continue
            if (d.example_interest is not None and 
                self.multidownload.rm.want_requests(d.example_interest)):
                continue
            # any() does not exist until python 2.5
            #if not any([d.have[lost] for lost in lost_interests]):
            #    continue
            for lost in lost_interests:
                if d.have[lost]:
                    break
            else:
                continue
            interest = self.multidownload.picker.from_behind(d.have,
                            self.multidownload.rm.fully_active)
            if interest is None:
                d.interested = False
                d.connector.send_not_interested()
            else:
                d.example_interest = interest

    def _request_when_choked(self):
        self.allowed_fast_pieces = [i for i in self.allowed_fast_pieces
            if not self.multidownload.storage.do_I_have(i)]
        if not self.allowed_fast_pieces:
            return
        fast = list(self.allowed_fast_pieces)

        b = self._backlog()
        lost_interests = []
        while len(self.active_requests) < b:

            while fast:
                piece = fast.pop()
                if self._want(piece):
                    break
            else:
                break # no unrequested pieces among allowed fast.

            # request chunks until no more chunks or no more room in backlog.
            while len(self.active_requests) < b:
                begin, length = self.multidownload.rm.new_request(piece,
                                                                  self.prefer_full)
                self.send_request(piece, begin, length)
                if not self.multidownload.rm.want_requests(piece):
                    lost_interests.append(piece)
                    break
        self._check_lost_interests(lost_interests)
        self.multidownload.check_enter_endgame()
                
    def fix_download_endgame(self):
        want = []
        for a in self.multidownload.all_requests:
            if not self.have[a[0]]:
                continue
            if a in self.active_requests:
                continue
            want.append(a)

        if self.interested and not self.active_requests and not want:
            self.interested = False
            self.connector.send_not_interested()
            return
        if not self.interested and want:
            self.interested = True
            self.connector.send_interested()
        if self.choked:
            return
        random.shuffle(want)
        for req in want[:self._backlog() - len(self.active_requests)]:
            self.send_request(*req)
        
    def got_have(self, index):
        if self.have[index]:
            return
        if index == self.multidownload.numpieces-1:
            self.peermeasure.update_rate(self.multidownload.storage.total_length-
              (self.multidownload.numpieces-1)*self.multidownload.storage.piece_size)
        else:
            self.peermeasure.update_rate(self.multidownload.storage.piece_size)
        self.have[index] = True
        self.multidownload.got_have(index)
        if (self.multidownload.storage.get_amount_left() == 0 and
            self.have.numfalse == 0):
            self.connector.close()
            return
        if self.multidownload.rm.endgame:
            self.fix_download_endgame()
        elif self.multidownload.rm.want_requests(index):
            self._request_more([index]) # call _request_more whether choked.
            if self.choked and not self.interested:
                self.interested = True
                self.connector.send_interested()
        
    def got_have_bitfield(self, have):
        if have.numfalse == 0:
            self._got_have_all(have)
            return
        self.have = have
        # arg, slow
        count = 0
        target = len(self.have) - self.have.numfalse
        for i in xrange(len(self.have)):
            if count == target:
                break
            if self.have[i]:
                self.multidownload.got_have(i)
                count += 1
        if self.multidownload.rm.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                if self.have[piece]:
                    self.interested = True
                    self.connector.send_interested()
                    return
        for piece in self.multidownload.rm.iter_want():
            if self.have[piece]:
                self.interested = True
                self.connector.send_interested()
                return

    def _got_have_all(self, have=None):
        if self.multidownload.storage.get_amount_left() == 0:
            self.connector.close()
            return
        if have is None:
            # bleh
            n = self.multidownload.numpieces
            rlen, extra = divmod(n, 8)
            if extra:
                extra = chr((0xFF << (8 - extra)) & 0xFF)
            else:
                extra = ''
            s = (chr(0xFF) * rlen) + extra
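            # e.g. numpieces = 10: rlen = 1, extra = 2, so the trailing byte is
            # chr(0xC0) and s == '\xff\xc0', i.e. all ten bits set.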
            have = Bitfield(n, s)
        self.have = have
        self.multidownload.got_have_all()
        if self.multidownload.rm.endgame:
            for piece, begin, length in self.multidownload.all_requests:
                self.interested = True
                self.connector.send_interested()
                return
        for i in self.multidownload.rm.iter_want():
            self.interested = True
            self.connector.send_interested()
            return
        

    def get_rate(self):
        return self.measure.get_rate()

    def is_snubbed(self):
        return bttime() - self.last > self.multidownload.snub_time

    def got_have_none(self):
        pass  # currently no action is taken when have_none is received.
              # The picker already assumes the local peer has none of the
              # pieces until got_have is called.

    def got_have_all(self):
        assert self.connector.uses_fast_extension
        self._got_have_all()

    def got_suggest_piece(self, piece):
        assert self.connector.uses_fast_extension
        if not self.multidownload.storage.do_I_have(piece):
            self.suggested_pieces.append(piece)
        self._request_more() # try to request more. Just returns if choked.

    def got_allowed_fast(self,piece):
        """Upon receiving this message, the multidownload knows that it is
           allowed to download the specified piece even when choked."""
        #log( "got_allowed_fast %d" % piece )
        assert self.connector.uses_fast_extension

        if not self.multidownload.storage.do_I_have(piece): 
            if piece not in self.allowed_fast_pieces:
                self.allowed_fast_pieces.append(piece)
                random.shuffle(self.allowed_fast_pieces)  # O(n) but n is small.
        self._request_more()  # will try to request.  Handles cases like
                              # whether neighbor has "allowed fast" piece.

    def got_reject_request(self, piece, begin, length):
        assert self.connector.uses_fast_extension
        req = (piece, begin, length) 

        if req not in self.expecting_reject:
            if req not in self.active_requests:
                self.connector.protocol_violation("Reject received for "
                                                  "piece not pending")
                self.connector.close()
                return
            self.active_requests.remove(req)
        else:
            self.expecting_reject.remove(req)

        if self.multidownload.rm.endgame:
            return

        self.multidownload.rm.request_lost(*req)
        if not self.choked:
            self._request_more()
        ds = [d for d in self.multidownload.downloads if not d.choked]
        random.shuffle(ds)
        for d in ds:
            d._request_more([piece])
            
        for d in self.multidownload.downloads:
            if d.choked and not d.interested:
                if d._want(piece):
                    d.interested = True
                    d.connector.send_interested()
                    break


    def got_key_reward_response(self,index, begin, length, result):
        """
         Process a payment key response message. If the response indicates succesful
         reception of the key update status to "done". If the response  indicates
         failure (invalid key or no key recvd) resend key.
         
         The keyreward message is also sent as a reply to a piece request if the 
         downloader has too many outstanding unpaid pieces.
         
         @param index: index of the piece (piece number) the reward is for. 
           A piece is a block of which the has is in the meta file.
         @param begin: offset of the subpiece is the piece the reward is for.
           This is currently always zero, it enables to account a single piece
           hashcheck failing to single peer. 
         @param length: The length of the piece the reward is for. 
         @param result: boolean indicating if the uploader validated the sent key
         @return: nothing 
        """
  
        print("received reward for piece %d %d %d" % (index,begin,length))
        if not (index,begin,length) in self.payment_key_status:
            print("received reward for unsent piece %d %d %d" % (index,begin,length))
            return
        
        (status,retries,old_good) = self.payment_key_status[(index,begin,length)]  
        if status == "done":
            print "received key reward response for already done piece"
            #we already received this one
            return
        elif status is None:
            #something weird happened, received an unwanted key
            print("received unwanted key_reward response idx=%06d begin=%06d len=%06d" % (index, begin, length))
            return
        elif status == "waiting":
            (old_status,old_retries, good) = self.payment_key_status[(index,begin,length)]
            if result != 0:
                #success
               print "received positive key reward response for %d %d %d" % (index,begin,length)
               self.payment_key_status[(index,begin,length)] = ("done",retries, good)
            #elif retries > MAX_REWARD_RETRIES:
                #permanent failure
            #            log("failure, too many retries")
            ##            self.payment_key_status[ (index,begin,length)] = ("failed",retries,good)
            #            self.multidownload.ban(self.ip)
            #            return
            else:
                #temporary failure
                #TODO add check for validity of response
                self.send_key_reward(index,begin,length,good)
                retries+=1
                self.payment_key_status[ (index,begin,length)] = ("waiting",retries,good)
        else:
            print("bad status for key_reward response idx=%06d begin=%06d len=%06d" % (index, begin, length))
                
                
    def send_key_reward(self,index,begin,length,good):
        """
        Send a key reward message. The message may or may not contain the keyreward.
        If the message is send as response to corrupt piece, no keyreward is included.
         
         @param index: index of the piece (piece number) the reward is for. 
           A piece is a block of which the has is in the meta file.
         @param begin: offset of the subpiece is the piece the reward is for.
           This is currently always zero, it enables to account a single piece
           hashcheck failing to single peer. 
         @param length: The length of the piece the reward is for. 
         @param good: boolean indicating if the key reward has to be included.
         @return: nothing 
        """
        print "sending key reward for piece  %d %d %d result= %d" % (index,begin,length,good)
        if  (index,begin,length) not in self.payment_key_status:
            print "first time send reward for piece  %d %d %d result= %d" % (index,begin,length,good)
            self.payment_key_status[ (index,begin,length)] = ("waiting", 0,good)
        (status,retries,good) = self.payment_key_status[ (index,begin,length)]  
        if status == "done":
            print "received double key reward response %d %d %d" %  (index,begin,length)
            #we already received the reward for this one
            return
        if good and (index,begin,length) in self.multidownload.key_rewards: #only send a reward if we already received the keylist from tracker.
            print("sending key reward ")
            key = self.multidownload.key_rewards[(index,begin,length)]
            print "encrypting key:"+key
            key = self.multidownload.pk_tools.encrypt_piece_tls(self.peer_certificate.publicKey, key)
            
                
            print "encrypted key hex %s" % key.encode('hex')  
        else:
            key = "";
            print "sending empty reward"
        self.connector.send_key_reward(index, begin, length,key)
예제 #39
0
class UploadPeer:
    def __init__(self, connection, ratelimiter, totalup, choker, storage,
                 picker, config):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.choker = choker
        self.storage = storage
        self.picker = picker
        self.config = config
        self.max_slice_length = config['max_slice_length']
        self.choked = True
        self.cleared = True
        self.interested = False
        self.super_seeding = False
        self.buffer = []
        self.measure = Measure(config['max_rate_period'],
                               config['upload_rate_fudge'])
        self.was_ever_interested = False
        if storage.get_amount_left() == 0:
            if choker.super_seed:
                self.super_seeding = True  # flag, and don't send bitfield
                self.seed_have_list = []  # set from piecepicker
                self.skipped_count = 0
            else:
                if config['breakup_seed_bitfield']:
                    bitfield, msgs = storage.get_have_list_cloaked()
                    connection.send_bitfield(bitfield)
                    for have in msgs:
                        connection.send_have(have)
                else:
                    connection.send_bitfield(storage.get_have_list())
        else:
            if storage.do_I_have_anything():
                connection.send_bitfield(storage.get_have_list())
        self.piecedl = None
        self.piecebuf = None

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            del self.buffer[:]
            self.piecedl = None
            if self.piecebuf:
                self.piecebuf.release()
            self.piecebuf = None
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.was_ever_interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self):
        if self.choked or not self.buffer:
            return None
        index, begin, length = self.buffer.pop(0)
        if self.config['buffer_reads']:
            if index != self.piecedl:
                if self.piecebuf:
                    self.piecebuf.release()
                self.piecedl = index
                self.piecebuf = self.storage.get_piece(index, 0, -1)
            piece = None
            if self.piecebuf:
                piece = self.piecebuf[begin:begin + length]
            # fails if storage.get_piece returns None or if out of range
            if not piece or len(piece) != length:
                self.connection.close()
                return None
        else:
            if self.piecebuf:
                self.piecebuf.release()
                self.piecedl = None
            piece = self.storage.get_piece(index, begin, length)
            if piece is None:
                self.connection.close()
                return None
        self.measure.update_rate(len(piece))
        self.totalup.update_rate(len(piece))
        return (index, begin, piece)

    def got_request(self, index, begin, length):
        if ((self.super_seeding and not index in self.seed_have_list)
                or not self.interested or length > self.max_slice_length):
            self.connection.close()
            return
        if not self.cleared:
            self.buffer.append((index, begin, length))
        if not self.choked and self.connection.next_upload is None:
            self.ratelimiter.queue(self.connection)

    def got_cancel(self, index, begin, length):
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()
        self.piecedl = None
        if self.piecebuf:
            self.piecebuf.release()
            self.piecebuf = None

    def choke_sent(self):
        del self.buffer[:]
        self.cleared = True

    def unchoke(self):
        if self.choked:
            self.choked = False
            self.cleared = False
            self.connection.send_unchoke()

    def disconnected(self):
        if self.piecebuf:
            self.piecebuf.release()
            self.piecebuf = None

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def has_queries(self):
        return not self.choked and len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
예제 #40
0
class DownloadPeer:
    def __init__(self, downloader, connection):
        self.downloader = downloader
        self.connection = connection
        self.choked = True
        self.interested = False
        self.active_requests = []
        self.measure = Measure(downloader.max_rate_period)
        self.peermeasure = Measure(downloader.max_rate_period)
        self.have = Bitfield(downloader.numpieces)
        self.last = -1000
        self.last2 = -1000
        self.example_interest = None
        #        self.backlog = 2
        self.backlog = 8
        self.ip = connection.get_ip()
        self.guard = BadDataGuard(self)

    def _backlog(self, just_unchoked):
        #        self.backlog = min(
        #            2+int(4*self.measure.get_rate()/self.downloader.chunksize),
        #            (2*just_unchoked)+self.downloader.queue_limit() )
        #        if self.backlog > 50:
        #            self.backlog = max(50, self.backlog * 0.075)
        #        return self.backlog
        self.backlog = 4 + int(
            8 * self.measure.get_rate() / self.downloader.chunksize)
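        # Worked example (assuming a 16 KiB chunk size): at 200000 bytes/s this
        # gives 4 + int(8 * 200000 / 16384) = 4 + 97 = 101 outstanding requests.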
        return self.backlog

    def disconnected(self):
        self.downloader.lost_peer(self)
        if self.have.complete():
            self.downloader.picker.lost_seed()
        else:
            for i in xrange(len(self.have)):
                if self.have[i]:
                    self.downloader.picker.lost_have(i)
        if self.have.complete() and self.downloader.storage.is_endgame():
            self.downloader.add_disconnected_seed(
                self.connection.get_readable_id())
        self._letgo()
        self.guard.download = None

    def _letgo(self):
        if self.downloader.queued_out.has_key(self):
            del self.downloader.queued_out[self]
        if not self.active_requests:
            return
        if self.downloader.endgamemode:
            self.active_requests = []
            return
        lost = {}
        for index, begin, length in self.active_requests:
            self.downloader.storage.request_lost(index, begin, length)
            lost[index] = 1
        lost = lost.keys()
        self.active_requests = []
        if self.downloader.paused:
            return
        ds = [d for d in self.downloader.downloads if not d.choked]
        shuffle(ds)
        for d in ds:
            d._request_more()
        for d in self.downloader.downloads:
            if d.choked and not d.interested:
                for l in lost:
                    if d.have[l] and self.downloader.storage.do_I_have_requests(
                            l):
                        d.send_interested()
                        break

    def got_choke(self):
        if not self.choked:
            self.choked = True
            self._letgo()

    def got_unchoke(self):
        if self.choked:
            self.choked = False
            if self.interested:
                self._request_more(new_unchoke=True)
            self.last2 = clock()

    def is_choked(self):
        return self.choked

    def is_interested(self):
        return self.interested

    def send_interested(self):
        if not self.interested:
            self.interested = True
            self.connection.send_interested()
            if not self.choked:
                self.last2 = clock()

    def send_not_interested(self):
        if self.interested:
            self.interested = False
            self.connection.send_not_interested()

    def got_piece(self, index, begin, piece):
        length = len(piece)
        try:
            self.active_requests.remove((index, begin, length))
        except ValueError:
            self.downloader.discarded += length
            return False
        if self.downloader.endgamemode:
            self.downloader.all_requests.remove((index, begin, length))
        self.last = clock()
        self.last2 = clock()
        self.measure.update_rate(length)
        self.downloader.measurefunc(length)
        if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                     self.guard):
            self.downloader.piece_flunked(index)
            return False
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)
        if self.downloader.endgamemode:
            for d in self.downloader.downloads:
                if d is not self:
                    if d.interested:
                        if d.choked:
                            assert not d.active_requests
                            d.fix_download_endgame()
                        else:
                            try:
                                d.active_requests.remove(
                                    (index, begin, length))
                            except ValueError:
                                continue
                            d.connection.send_cancel(index, begin, length)
                            d.fix_download_endgame()
                    else:
                        assert not d.active_requests
        self._request_more()
        self.downloader.check_complete(index)
        return self.downloader.storage.do_I_have(index)

    def _request_more(self, new_unchoke=False):
        assert not self.choked
        if self.downloader.endgamemode:
            self.fix_download_endgame(new_unchoke)
            return
        if self.downloader.paused:
            return
        if len(self.active_requests) >= self._backlog(new_unchoke):
            if not (self.active_requests or self.backlog):
                self.downloader.queued_out[self] = 1
            return
        lost_interests = []
        while len(self.active_requests) < self.backlog:
            interest = self.downloader.picker.next(
                self.have, self.downloader.storage.do_I_have_requests,
                self.downloader.too_many_partials())
            if interest is None:
                break
            self.example_interest = interest
            self.send_interested()
            loop = True
            while len(self.active_requests) < self.backlog and loop:
                begin, length = self.downloader.storage.new_request(interest)
                self.downloader.picker.requested(interest)
                self.active_requests.append((interest, begin, length))
                self.connection.send_request(interest, begin, length)
                self.downloader.chunk_requested(length)
                if not self.downloader.storage.do_I_have_requests(interest):
                    loop = False
                    lost_interests.append(interest)
        if not self.active_requests:
            self.send_not_interested()
        if lost_interests:
            for d in self.downloader.downloads:
                if d.active_requests or not d.interested:
                    continue
                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(
                        d.example_interest):
                    continue
                for lost in lost_interests:
                    if d.have[lost]:
                        break
                else:
                    continue
                interest = self.downloader.picker.next(
                    d.have, self.downloader.storage.do_I_have_requests,
                    self.downloader.too_many_partials())
                if interest is None:
                    d.send_not_interested()
                else:
                    d.example_interest = interest
        if self.downloader.storage.is_endgame():
            self.downloader.start_endgame()

    def fix_download_endgame(self, new_unchoke=False):
        if self.downloader.paused:
            return
        if len(self.active_requests) >= self._backlog(new_unchoke):
            if not (self.active_requests or self.backlog) and not self.choked:
                self.downloader.queued_out[self] = 1
            return
        want = [
            a for a in self.downloader.all_requests
            if self.have[a[0]] and a not in self.active_requests
        ]
        if not (self.active_requests or want):
            self.send_not_interested()
            return
        if want:
            self.send_interested()
        if self.choked:
            return
        shuffle(want)
        del want[self.backlog - len(self.active_requests):]
        self.active_requests.extend(want)
        for piece, begin, length in want:
            self.connection.send_request(piece, begin, length)
            self.downloader.chunk_requested(length)

    def got_have(self, index):
        if index == self.downloader.numpieces - 1:
            self.downloader.totalmeasure.update_rate(
                self.downloader.storage.total_length -
                (self.downloader.numpieces - 1) *
                self.downloader.storage.piece_length)
            self.peermeasure.update_rate(self.downloader.storage.total_length -
                                         (self.downloader.numpieces - 1) *
                                         self.downloader.storage.piece_length)
        else:
            self.downloader.totalmeasure.update_rate(
                self.downloader.storage.piece_length)
            self.peermeasure.update_rate(self.downloader.storage.piece_length)
        if not self.have[index]:
            self.have[index] = True
            self.downloader.picker.got_have(index)
            if self.have.complete():
                self.downloader.picker.became_seed()
                if self.downloader.storage.am_I_complete():
                    self.downloader.add_disconnected_seed(
                        self.connection.get_readable_id())
                    self.connection.close()
            elif self.downloader.endgamemode:
                self.fix_download_endgame()
            elif (not self.downloader.paused
                  and not self.downloader.picker.is_blocked(index)
                  and self.downloader.storage.do_I_have_requests(index)):
                if not self.choked:
                    self._request_more()
                else:
                    self.send_interested()
        return self.have.complete()

    def _check_interests(self):
        if self.interested or self.downloader.paused:
            return
        for i in xrange(len(self.have)):
            if (self.have[i] and not self.downloader.picker.is_blocked(i)
                    and (self.downloader.endgamemode
                         or self.downloader.storage.do_I_have_requests(i))):
                self.send_interested()
                return

    def got_have_bitfield(self, have):
        if self.downloader.storage.am_I_complete() and have.complete():
            if self.downloader.super_seeding:
                self.connection.send_bitfield(
                    have.tostring())  # be nice, show you're a seed too
            self.connection.close()
            self.downloader.add_disconnected_seed(
                self.connection.get_readable_id())
            return False
        self.have = have
        if have.complete():
            self.downloader.picker.got_seed()
        else:
            temp = 4
            for i in xrange(len(have)):
                if have[i]:
                    self.downloader.picker.got_have(i)
        if self.downloader.endgamemode and not self.downloader.paused:
            for piece, begin, length in self.downloader.all_requests:
                if self.have[piece]:
                    self.send_interested()
                    break
        else:
            self._check_interests()
        return have.complete()

    def get_rate(self):
        return self.measure.get_rate()

    def get_peer_completion(self):
        if len(self.have) > 0:
            return float(len(self.have) - self.have.numfalse) / float(
                len(self.have))
        else:
            return 1.0

    def is_snubbed(self):
        if (self.interested and not self.choked
                and clock() - self.last2 > self.downloader.snub_time):
            for index, begin, length in self.active_requests:
                self.connection.send_cancel(index, begin, length)
            self.got_choke()  # treat it just like a choke
        return clock() - self.last > self.downloader.snub_time
예제 #41
0
class UploadPeer:
    def __init__(self, connection, ratelimiter, totalup, choker, storage,
                 picker, config):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.choker = choker
        self.storage = storage
        self.picker = picker
        self.config = config
        self.max_slice_length = config['max_slice_length']
        self.choked = True
        self.cleared = True
        self.interested = False
        self.super_seeding = False
        self.buffer = []
        self.measure = Measure(config['max_rate_period'],
                               config['upload_rate_fudge'])
        self.was_ever_interested = False
        if storage.get_amount_left() == 0:
            if choker.super_seed:
                self.super_seeding = True   # flag, and don't send bitfield
                self.seed_have_list = []    # set from piecepicker
                self.skipped_count = 0
            else:
                if config['breakup_seed_bitfield']:
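                    # presumably cloaks seed status: send a partial bitfield
                    # and announce the remaining pieces as individual HAVEs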
                    bitfield, msgs = storage.get_have_list_cloaked()
                    connection.send_bitfield(bitfield)
                    for have in msgs:
                        connection.send_have(have)
                else:
                    connection.send_bitfield(storage.get_have_list())
        else:
            if storage.do_I_have_anything():
                connection.send_bitfield(storage.get_have_list())
        self.piecedl = None
        self.piecebuf = None

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            del self.buffer[:]
            self.piecedl = None
            if self.piecebuf:
                self.piecebuf.release()
            self.piecebuf = None
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.was_ever_interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self):
        if self.choked or not self.buffer:
            return None
        index, begin, length = self.buffer.pop(0)
        if self.config['buffer_reads']:
            if index != self.piecedl:
                if self.piecebuf:
                    self.piecebuf.release()
                self.piecedl = index
                self.piecebuf = self.storage.get_piece(index, 0, -1)
            piece = None
            if self.piecebuf:
                piece = self.piecebuf[begin:begin+length]
            # fails if storage.get_piece returns None or if out of range
            if not piece or len(piece) != length:
                self.connection.close()
                return None
        else:
            if self.piecebuf:
                self.piecebuf.release()
                self.piecedl = None
            piece = self.storage.get_piece(index, begin, length)
            if piece is None:
                self.connection.close()
                return None
        self.measure.update_rate(len(piece))
        self.totalup.update_rate(len(piece))
        return (index, begin, piece)

    def got_request(self, index, begin, length):
        if ((self.super_seeding and index not in self.seed_have_list)
                or not self.interested or length > self.max_slice_length):
            self.connection.close()
            return
        if not self.cleared:
            self.buffer.append((index, begin, length))
        if not self.choked and self.connection.next_upload is None:
            self.ratelimiter.queue(self.connection)

    def got_cancel(self, index, begin, length):
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()
        self.piecedl = None
        if self.piecebuf:
            self.piecebuf.release()
            self.piecebuf = None

    def choke_sent(self):
        del self.buffer[:]
        self.cleared = True

    def unchoke(self):
        if self.choked:
            self.choked = False
            self.cleared = False
            self.connection.send_unchoke()
        
    def disconnected(self):
        if self.piecebuf:
            self.piecebuf.release()
            self.piecebuf = None

    def is_choked(self):
        return self.choked
        
    def is_interested(self):
        return self.interested

    def has_queries(self):
        return not self.choked and len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
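
A minimal usage sketch (not part of the original listing) showing how an unchoked UploadPeer's request buffer is typically drained; send_piece is a hypothetical callable standing in for the connection's piece-sending method.

def drain_requests(upload_peer, send_piece):
    # pop queued (index, begin, data) chunks until the peer is choked,
    # the buffer is empty, or a failed storage read closed the connection
    while True:
        chunk = upload_peer.get_upload_chunk()
        if chunk is None:
            break
        index, begin, data = chunk
        send_piece(index, begin, data)
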
Example #42
0
class Upload(object):
    def __init__(self, connection, ratelimiter, totalup, totalup2, choker,
                 storage, max_slice_length, max_rate_period):
        self.connection = connection
        self.ratelimiter = ratelimiter
        self.totalup = totalup
        self.totalup2 = totalup2
        self.choker = choker
        self.storage = storage
        self.max_slice_length = max_slice_length
        self.max_rate_period = max_rate_period
        self.choked = True
        self.unchoke_time = None
        self.interested = False
        self.buffer = []
        self.measure = Measure(max_rate_period)
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())

    def got_not_interested(self):
        if self.interested:
            self.interested = False
            del self.buffer[:]
            self.choker.not_interested(self.connection)

    def got_interested(self):
        if not self.interested:
            self.interested = True
            self.choker.interested(self.connection)

    def get_upload_chunk(self):
        if not self.buffer:
            return None
        index, begin, length = self.buffer.pop(0)
        if 'BF2-0-0' in self.connection.id:
            piece = self.storage.get_DF_piece(index, begin, length)
        else:
            piece = self.storage.get_piece(index, begin, length)
        if piece is None:
            self.connection.close()
            return None
        return (index, begin, piece)

    def update_rate(self, bytes):
        self.measure.update_rate(bytes)
        self.totalup.update_rate(bytes)
        self.totalup2.update_rate(bytes)

    def got_request(self, index, begin, length):
        if not self.interested or length > self.max_slice_length:
            self.connection.close()
            return
        if not self.connection.choke_sent:
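            # debug trace; begin / length gives the request's ordinal within
            # the piece, assuming every request uses the same block length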
            print 'IN  reqst: (piece %d[%d:%d] - SN: %d)' % (
                index, begin, begin + length, begin / length)
            self.buffer.append((index, begin, length))
            if self.connection.next_upload is None and \
                   self.connection.connection.is_flushed():
                self.ratelimiter.queue(self.connection,
                                       self.connection.encoder.context.rlgroup)

    def got_cancel(self, index, begin, length):
        try:
            self.buffer.remove((index, begin, length))
        except ValueError:
            pass

    def choke(self):
        if not self.choked:
            self.choked = True
            self.connection.send_choke()

    def sent_choke(self):
        assert self.choked
        del self.buffer[:]

    def unchoke(self, time):
        if self.choked:
            self.choked = False
            self.unchoke_time = time
            self.connection.send_unchoke()

    def has_queries(self):
        return len(self.buffer) > 0

    def get_rate(self):
        return self.measure.get_rate()
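
A minimal sketch (not part of the original listing) of the rate accounting done by Upload.update_rate: every uploaded block is credited both to the per-connection measure and to the shared totals. SimpleMeasure is a simplified stand-in for the Measure class used throughout these examples.

from time import time

class SimpleMeasure(object):
    # simplified stand-in for Measure: average rate since creation
    def __init__(self):
        self.start = time()
        self.total = 0

    def update_rate(self, amount):
        self.total += amount

    def get_rate(self):
        return self.total / max(time() - self.start, 1e-6)

per_peer = SimpleMeasure()   # one per connection (self.measure)
total_up = SimpleMeasure()   # shared across all connections (self.totalup)

for block in (16384, 16384, 8192):   # three uploaded blocks, in bytes
    per_peer.update_rate(block)      # this connection's upload rate
    total_up.update_rate(block)      # aggregate upload rate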