Example #1
0
    def got_ut_pex(self, connection, d):
        """Handle a ut_pex message on a RePEX connection: sample the
        advertised peers into self.live_peers and close the connection.

        @param connection: the BitTorrent connection the message came in on.
        @param d: the bdecoded ut_pex payload (a dict).
        """
        infohash, dns = c2infohash_dns(connection)
        is_tribler_peer = connection.is_tribler_peer()
        # Cap both lists at the maximum PEX message size we accept.
        added = check_ut_pex_peerlist(d, 'added')[:REPEX_PEX_MSG_MAX_PEERS]
        addedf = map(ord, d.get('addedf', []))[:REPEX_PEX_MSG_MAX_PEERS]
        # Pad flags with 0 so added and addedf stay parallel lists.
        addedf.extend([0] * (len(added) - len(addedf)))
        IS_SEED = 2
        IS_SAME = 4
        # Ignore messages for other torrents or connections without a
        # usable (ip, port) key.
        if infohash != self.infohash or dns is None:
            return
        if DEBUG:
            print >> sys.stderr, 'RePEXer: got_ut_pex: %s:%s pex_size = %s' % (
                dns + (len(added), ))
        # Drop bogus addresses like 0.x.x.x.
        for i in range(len(added) - 1, -1, -1):
            if added[i][0].startswith('0.'):
                added.pop(i)
                addedf.pop(i)

        # Only store the peer when it is sufficiently connected.
        if len(added) >= REPEX_PEX_MINSIZE:
            # Clear IS_SAME for non-Tribler peers.
            if not is_tribler_peer:
                addedf = [flag & ~IS_SAME for flag in addedf]
            # Store a random sample of the PEX message.
            picks = range(len(added))
            shuffle(picks)
            pex_peers = [(added[i], addedf[i])
                         for i in picks[:REPEX_STORED_PEX_SIZE]]
            self.live_peers[dns] = {
                'last_seen': ts_now(),
                'pex': pex_peers,
                'version': self.dns2version[dns]
            }
        self.datacost['pex_connections'] += 1
        # Each RePEX connection only harvests one PEX message.
        connection.close()
    def got_ut_pex(self, connection, d):
        """Handle a ut_pex message on a RePEX connection: sample the
        advertised peers into self.live_peers and close the connection.

        @param connection: the BitTorrent connection the message came in on.
        @param d: the bdecoded ut_pex payload (a dict).
        """
        infohash, dns = c2infohash_dns(connection)
        is_tribler_peer = connection.is_tribler_peer()
        # Cap both lists at the maximum PEX message size we accept.
        added = check_ut_pex_peerlist(d, 'added')[:REPEX_PEX_MSG_MAX_PEERS]
        addedf = map(ord, d.get('addedf', []))[:REPEX_PEX_MSG_MAX_PEERS]
        # Pad flags with 0 so added and addedf stay parallel lists.
        addedf.extend([0] * (len(added) - len(addedf)))
        IS_SEED = 2
        IS_SAME = 4
        # Ignore messages for other torrents or connections without a
        # usable (ip, port) key.
        if infohash != self.infohash or dns is None:
            return
        if DEBUG:
            print >> sys.stderr, 'RePEXer: got_ut_pex: %s:%s pex_size = %s' % (dns + (len(added),))
        # Drop bogus addresses like 0.x.x.x.
        for i in range(len(added) - 1, -1, -1):
            if added[i][0].startswith('0.'):
                added.pop(i)
                addedf.pop(i)

        # Only store the peer when it is sufficiently connected.
        if len(added) >= REPEX_PEX_MINSIZE:
            # Clear IS_SAME for non-Tribler peers.
            if not is_tribler_peer:
                addedf = [ flag & ~IS_SAME for flag in addedf ]
            # Store a random sample of the PEX message.
            picks = range(len(added))
            shuffle(picks)
            pex_peers = [ (added[i], addedf[i]) for i in picks[:REPEX_STORED_PEX_SIZE] ]
            self.live_peers[dns] = {'last_seen': ts_now(),
             'pex': pex_peers,
             'version': self.dns2version[dns]}
        self.datacost['pex_connections'] += 1
        # Each RePEX connection only harvests one PEX message.
        connection.close()
Example #3
0
 def repex_ready(self, infohash, connecter, encoder, rerequester):
     """Begin the RePEX round: record the hooks handed to us by the
     download and seed the connect queue with the stored peers plus
     every peer previously learned through PEX messages."""
     # Guard clauses: wrong torrent, or a round that already finished.
     if infohash != self.infohash:
         print >>sys.stderr, time.asctime(),'-', "RePEXer: repex_ready: wrong infohash:", b2a_hex(infohash)
         return
     if self.done:
         print >>sys.stderr, time.asctime(),'-', "RePEXer: repex_ready: already done"
         return
     if DEBUG:
         print >>sys.stderr, time.asctime(),'-', "RePEXer: repex_ready:", b2a_hex(infohash)
     self.ready = True
     self.ready_ts = ts_now()
     self.rerequest = rerequester
     self.encoder = encoder
     self.connecter = connecter
     # The initial connect queue holds every peer of the stored table.
     self.to_pex = self.starting_peertable.keys()
     self.max_sockets = REPEX_INITIAL_SOCKETS
     # Also enqueue all peers found in the stored PEX messages.
     # TODO: a smarter scheme might only enqueue these after a failure.
     table = self.starting_peertable
     for peer in table:
         self.to_pex.extend([entry[0] for entry in table[peer].get('pex', [])])
     self.connect_queue()
Example #4
0
    def send_done(self):
        """Finish the RePEX round: build the final SwarmCache from the
        live peers, record why peers were shuffled out, and notify all
        observers via repex_done."""
        self.done = True
        self.end_ts = ts_now()
        # Construct the new SwarmCache by trimming excess peers, keeping
        # peers from the starting table in preference to new ones.
        swarmcache = dict(self.live_peers)
        to_delete = max(len(swarmcache) - REPEX_SWARMCACHE_SIZE, 0)
        deleted = 0
        for dns in swarmcache.keys():
            if deleted == to_delete:
                break
            if dns not in self.starting_peertable:
                del swarmcache[dns]
                deleted += 1

        # Record, per shuffled-out peer, why it was dropped:
        # (was connectable, spoke PEX, last time it was seen).
        shufflepeers = {}
        for dns in self.starting_peertable:
            if dns not in swarmcache:
                shufflepeers[dns] = (dns in self.bt_connectable, dns
                                     in self.bt_pex,
                                     self.starting_peertable[dns].get(
                                         'last_seen', 0))

        self.final_peertable = swarmcache
        for observer in self._observers:
            if DEBUG:
                print >> sys.stderr, 'RePEXer: send_done: calling repex_done on', ` observer `
            try:
                observer.repex_done(self, swarmcache, self.shufflecount,
                                    shufflepeers, self.bootstrap_counter,
                                    self.datacost)
            except:
                # A failing observer must not break the notification loop.
                print_exc()
Example #5
0
 def repex_ready(self, infohash, connecter, encoder, rerequester):
     """Begin the RePEX round: record the hooks handed to us by the
     download and seed the connect queue with the stored peers plus
     every peer previously learned through PEX messages."""
     # Guard clauses: wrong torrent, or a round that already finished.
     if infohash != self.infohash:
         print >>sys.stderr, "RePEXer: repex_ready: wrong infohash:", b2a_hex(infohash)
         return
     if self.done:
         print >>sys.stderr, "RePEXer: repex_ready: already done"
         return
     if DEBUG:
         print >>sys.stderr, "RePEXer: repex_ready:", b2a_hex(infohash)
     self.ready = True
     self.ready_ts = ts_now()
     self.rerequest = rerequester
     self.encoder = encoder
     self.connecter = connecter
     # The initial connect queue holds every peer of the stored table.
     self.to_pex = self.starting_peertable.keys()
     self.max_sockets = REPEX_INITIAL_SOCKETS
     # Also enqueue all peers found in the stored PEX messages.
     # TODO: a smarter scheme might only enqueue these after a failure.
     table = self.starting_peertable
     for peer in table:
         self.to_pex.extend([entry[0] for entry in table[peer].get('pex', [])])
     self.connect_queue()
    def send_done(self):
        """Finish the RePEX round: build the final SwarmCache from the
        live peers, record why peers were shuffled out, and notify all
        observers via repex_done."""
        self.done = True
        self.end_ts = ts_now()
        # Construct the new SwarmCache by trimming excess peers, keeping
        # peers from the starting table in preference to new ones.
        swarmcache = dict(self.live_peers)
        to_delete = max(len(swarmcache) - REPEX_SWARMCACHE_SIZE, 0)
        deleted = 0
        for dns in swarmcache.keys():
            if deleted == to_delete:
                break
            if dns not in self.starting_peertable:
                del swarmcache[dns]
                deleted += 1

        # Record, per shuffled-out peer, why it was dropped:
        # (was connectable, spoke PEX, last time it was seen).
        shufflepeers = {}
        for dns in self.starting_peertable:
            if dns not in swarmcache:
                shufflepeers[dns] = (dns in self.bt_connectable, dns in self.bt_pex, self.starting_peertable[dns].get('last_seen', 0))

        self.final_peertable = swarmcache
        for observer in self._observers:
            if DEBUG:
                print >> sys.stderr, 'RePEXer: send_done: calling repex_done on', `observer`
            try:
                observer.repex_done(self, swarmcache, self.shufflecount, shufflepeers, self.bootstrap_counter, self.datacost)
            except:
                # A failing observer must not break the notification loop.
                print_exc()
Example #7
0
 def repex_done(self, repexer, swarmcache, shufflecount, shufflepeers, bootstrapcount, datacost):
     """Observer callback fired when a RePEX round finished; stops the
     repexed download and re-arms the periodic download-state scan."""
     infohash = repexer.infohash
     if DEBUG:
         fmt = 'RePEXScheduler: repex_done: %s\n\ttable size/shuffle/bootstrap %s/%s/%s'
         print >>sys.stderr, fmt % (b2a_hex(infohash), len(swarmcache), shufflecount, bootstrapcount)
     # Record the attempt time so this swarm is not retried too soon.
     self.current_repex = None
     self.last_attempts[infohash] = ts_now()
     self.downloads[infohash].stop()
     self.session.set_download_states_callback(self.network_scan)
Example #8
0
 def repex_done(self, repexer, swarmcache, shufflecount, shufflepeers, bootstrapcount, datacost):
     """Observer callback fired when a RePEX round finished; stops the
     repexed download and re-arms the periodic download-state scan."""
     infohash = repexer.infohash
     if DEBUG:
         fmt = 'RePEXScheduler: repex_done: %s\n\ttable size/shuffle/bootstrap %s/%s/%s'
         print >>sys.stderr, time.asctime(),'-', fmt % (b2a_hex(infohash), len(swarmcache), shufflecount, bootstrapcount)
     # Record the attempt time so this swarm is not retried too soon.
     self.current_repex = None
     self.last_attempts[infohash] = ts_now()
     self.downloads[infohash].stop()
     self.session.set_download_states_callback(self.network_scan)
Example #9
0
 def repex_aborted(self, repexer, dlstatus=None):
     """Observer callback fired when a RePEX round was aborted; records
     the attempt and re-arms the periodic download-state scan.

     @param repexer: the RePEXer instance that was aborted.
     @param dlstatus: optional download status explaining the abort."""
     if DEBUG:
         status_string = str(None) if dlstatus is None else dlstatus_strings[dlstatus]
         print >>sys.stderr, time.asctime(),'-', "RePEXScheduler: repex_aborted:", b2a_hex(repexer.infohash), status_string
     # Even an aborted attempt counts for rate-limiting retries.
     self.current_repex = None
     self.last_attempts[repexer.infohash] = ts_now()
     self.session.set_download_states_callback(self.network_scan)
Example #10
0
 def repex_aborted(self, repexer, dlstatus=None):
     """Observer callback fired when a RePEX round was aborted; records
     the attempt and re-arms the periodic download-state scan.

     @param repexer: the RePEXer instance that was aborted.
     @param dlstatus: optional download status explaining the abort."""
     if DEBUG:
         status_string = str(None) if dlstatus is None else dlstatus_strings[dlstatus]
         print >>sys.stderr, "RePEXScheduler: repex_aborted:", b2a_hex(repexer.infohash), status_string
     # Even an aborted attempt counts for rate-limiting retries.
     self.current_repex = None
     self.last_attempts[repexer.infohash] = ts_now()
     self.session.set_download_states_callback(self.network_scan)
    def network_scan(self, dslist):
        """Scan the given DownloadStates for a stopped, finished download
        whose SwarmCache is stale enough to repex, and start a RePEX
        phase on the best (oldest-cache) candidate.

        @param dslist: list of DownloadState objects to inspect.
        @return: (delay, getpeerlist) tuple for the next states callback;
            -1 means no rescheduling (a RePEX phase was started or the
            scheduler is inactive/busy).
        """
        if DEBUG:
            print >> sys.stderr, 'RePEXScheduler: network_scan: %s DownloadStates' % len(dslist)
        self.lock.acquire()
        exception = None
        try:
            # Only one RePEX at a time; bail out if busy or disabled.
            if not self.active or self.current_repex is not None:
                return (-1, False)
            now = ts_now()
            found_infohash = None
            found_download = None
            found_age = -1
            for ds in dslist:
                download = ds.get_download()
                infohash = download.tdef.get_infohash()
                debug_msg = None
                if DEBUG:
                    print >> sys.stderr, 'RePEXScheduler: network_scan: checking', `(download.tdef.get_name_as_unicode())`
                # Only finished, stopped downloads are eligible for RePEX.
                if ds.get_status() == DLSTATUS_STOPPED and ds.get_progress() == 1.0:
                    age = now - (swarmcache_ts(ds.get_swarmcache()) or 0)
                    last_attempt_ago = now - self.last_attempts.get(infohash, 0)
                    if last_attempt_ago < REPEX_MIN_INTERVAL:
                        debug_msg = '...too soon to try again, last attempt was %ss ago' % last_attempt_ago
                    elif age < REPEX_INTERVAL:
                        debug_msg = '...SwarmCache too fresh: %s seconds' % age
                    elif age >= REPEX_INTERVAL:
                        debug_msg = '...suitable for RePEX!'
                        # Prefer the candidate with the oldest SwarmCache.
                        if age > found_age:
                            found_download = download
                            found_infohash = infohash
                            found_age = age
                else:
                    debug_msg = '...not repexable: %s %s%%' % (dlstatus_strings[ds.get_status()], ds.get_progress() * 100)
                if DEBUG:
                    print >> sys.stderr, 'RePEXScheduler: network_scan:', debug_msg

            if found_download is None:
                if DEBUG:
                    print >> sys.stderr, 'RePEXScheduler: network_scan: nothing found yet'
                return (REPEX_SCAN_INTERVAL, False)
            if DEBUG:
                print >> sys.stderr, 'RePEXScheduler: network_scan: found %s, starting RePEX phase.' % `(found_download.tdef.get_name_as_unicode())`
            # Restart the chosen download in REPEXING mode.
            self.current_repex = found_infohash
            self.downloads[found_infohash] = found_download
            found_download.set_mode(DLMODE_NORMAL)
            found_download.restart(initialdlstatus=DLSTATUS_REPEXING)
            return (-1, False)
        except Exception as e:
            # Defer the exception so it is raised outside the lock.
            exception = e
        finally:
            self.lock.release()

        if exception is not None:
            raise exception
Example #12
0
    def got_ut_pex(self, connection, d):
        """Handle a ut_pex message on a RePEX connection: sample the
        advertised peers into self.live_peers and close the connection.

        @param connection: the BitTorrent connection the message came in on.
        @param d: the bdecoded ut_pex payload (a dict).
        """
        infohash, dns = c2infohash_dns(connection)
        is_tribler_peer = connection.is_tribler_peer()
        added = check_ut_pex_peerlist(d, 'added')[:REPEX_PEX_MSG_MAX_PEERS]
        addedf = map(ord, d.get('addedf', []))[:REPEX_PEX_MSG_MAX_PEERS]
        # Pad flags with 0 so added and addedf stay parallel lists.
        addedf.extend([0] * (len(added) - len(addedf)))
        IS_SEED = 2
        IS_SAME = 4
        # Also bail out when the connection has no known (ip, port): a
        # None key would crash the debug print below (None + tuple) and
        # corrupt live_peers / fail the dns2version lookup. The sibling
        # variants of this handler perform the same check.
        if infohash != self.infohash or dns is None:
            return
        if DEBUG:
            print >> sys.stderr, "RePEXer: got_ut_pex: %s:%s pex_size = %s" % (
                dns + (len(added), ))

        # Remove bad IPs like 0.x.x.x (often received from Transmission peers)
        for i in range(len(added) - 1, -1, -1):
            if added[i][0].startswith('0.'):
                added.pop(i)
                addedf.pop(i)

        # only store peer when sufficiently connected
        if len(added) >= REPEX_PEX_MINSIZE:
            # Clear flag IS_SAME if it was not a Tribler peer
            if not is_tribler_peer:
                addedf = [flag & ~IS_SAME for flag in addedf]

            # sample PEX message and
            picks = range(len(added))
            shuffle(picks)
            pex_peers = [(added[i], addedf[i])
                         for i in picks[:REPEX_STORED_PEX_SIZE]]
            self.live_peers[dns] = {
                'last_seen': ts_now(),
                'pex': pex_peers,
                'version': self.dns2version[dns]
            }
            # Should we do the following? Might lower the load on the tracker even more?
            # self.to_pex.extend(zip(*pex_peers)[0])
            # Possible danger: too much crawling, wasting resources?

            # TODO: Might be more sophisticated to sampling of PEX msg at the end?
            # (allows us to get more diversity and perhaps also security?)

        self.datacost['pex_connections'] += 1

        # Closing time
        connection.close()
 def repex_aborted(self, infohash, dlstatus):
     """Mark this RePEX round as aborted and notify all observers.

     @param infohash: infohash of the torrent the abort applies to.
     @param dlstatus: download status explaining the abort (may be None)."""
     # Ignore duplicate aborts and aborts for other torrents.
     if self.done:
         return
     if infohash != self.infohash:
         print >> sys.stderr, 'RePEXer: repex_aborted: wrong infohash:', b2a_hex(infohash)
         return
     if DEBUG:
         status_string = str(None) if dlstatus is None else dlstatus_strings[dlstatus]
         print >> sys.stderr, 'RePEXer: repex_aborted:', b2a_hex(infohash), status_string
     self.done = True
     self.aborted = True
     self.end_ts = ts_now()
     # Let every registered observer know the round was cut short.
     for watcher in self._observers:
         watcher.repex_aborted(self, dlstatus)
Example #14
0
 def repex_aborted(self, infohash, dlstatus):
     """Mark this RePEX round as aborted and notify all observers.

     @param infohash: infohash of the torrent the abort applies to.
     @param dlstatus: download status explaining the abort (may be None)."""
     # Ignore duplicate aborts and aborts for other torrents.
     if self.done:
         return
     if infohash != self.infohash:
         print >>sys.stderr, "RePEXer: repex_aborted: wrong infohash:", b2a_hex(infohash)
         return
     if DEBUG:
         status_string = str(None) if dlstatus is None else dlstatus_strings[dlstatus]
         print >>sys.stderr, "RePEXer: repex_aborted:", b2a_hex(infohash), status_string
     self.done = True
     self.aborted = True
     self.end_ts = ts_now()
     # Let every registered observer know the round was cut short.
     for watcher in self._observers:
         watcher.repex_aborted(self, dlstatus)
Example #15
0
 def got_ut_pex(self, connection, d):
     """Handle a ut_pex message on a RePEX connection: sample the
     advertised peers into self.live_peers and close the connection.

     @param connection: the BitTorrent connection the message came in on.
     @param d: the bdecoded ut_pex payload (a dict).
     """
     infohash, dns = c2infohash_dns(connection)
     is_tribler_peer = connection.is_tribler_peer()
     added = check_ut_pex_peerlist(d,'added')[:REPEX_PEX_MSG_MAX_PEERS]
     addedf = map(ord, d.get('addedf',[]))[:REPEX_PEX_MSG_MAX_PEERS]
     addedf.extend( [0]*(len(added)-len(addedf)) )
     IS_SEED = 2
     IS_SAME = 4
     # Also bail out when the connection has no known (ip, port): a None
     # key would crash the debug print below (None + tuple) and corrupt
     # live_peers / fail the dns2version lookup. The sibling variants of
     # this handler perform the same check.
     if infohash != self.infohash or dns is None:
         return
     if DEBUG:
         print >>sys.stderr, time.asctime(),'-', "RePEXer: got_ut_pex: %s:%s pex_size = %s" % (dns + (len(added),))
     
     # Remove bad IPs like 0.x.x.x (often received from Transmission peers)
     for i in range(len(added)-1,-1,-1):
         if added[i][0].startswith('0.'):
             added.pop(i)
             addedf.pop(i)
     
     # only store peer when sufficiently connected
     if len(added) >= REPEX_PEX_MINSIZE:
         # Clear flag IS_SAME if it was not a Tribler peer
         if not is_tribler_peer:
             addedf = [flag & ~IS_SAME for flag in addedf]
                 
         # sample PEX message and
         picks = range(len(added))
         shuffle(picks)
         pex_peers = [(added[i],addedf[i]) for i in picks[:REPEX_STORED_PEX_SIZE]]
         self.live_peers[dns] = {'last_seen' : ts_now(),
                                 'pex' : pex_peers,
                                 'version' : self.dns2version[dns]}
         # Should we do the following? Might lower the load on the tracker even more?
         # self.to_pex.extend(zip(*pex_peers)[0])
         # Possible danger: too much crawling, wasting resources?
         
         # TODO: Might be more sophisticated to sampling of PEX msg at the end?
         # (allows us to get more diversity and perhaps also security?)
     
     self.datacost['pex_connections'] += 1
     
     # Closing time
     connection.close()
    def repex_ready(self, infohash, connecter, encoder, rerequester):
        """Begin the RePEX round for this torrent.

        Records the connecter/encoder/rerequester handed over by the
        download and fills the connect queue with the stored peer table
        plus every peer previously learned through PEX messages."""
        # Guard clauses: wrong torrent, or a round that already finished.
        if infohash != self.infohash:
            print >> sys.stderr, 'RePEXer: repex_ready: wrong infohash:', b2a_hex(infohash)
            return
        if self.done:
            print >> sys.stderr, 'RePEXer: repex_ready: already done'
            return
        if DEBUG:
            print >> sys.stderr, 'RePEXer: repex_ready:', b2a_hex(infohash)
        self.ready = True
        self.ready_ts = ts_now()
        self.rerequest = rerequester
        self.encoder = encoder
        self.connecter = connecter
        # Connect queue: stored peers first, then everything from PEX.
        self.to_pex = self.starting_peertable.keys()
        self.max_sockets = REPEX_INITIAL_SOCKETS
        table = self.starting_peertable
        for peer in table:
            self.to_pex.extend([entry[0] for entry in table[peer].get('pex', [])])
        self.connect_queue()
Example #17
0
 def send_done(self):
     """Finish the RePEX round: build the final SwarmCache from the
     live peers, record why peers were shuffled out, and notify all
     observers via repex_done."""
     self.done = True
     self.end_ts = ts_now()
     
     # Construct the new SwarmCache by removing excess peers
     swarmcache = dict(self.live_peers)
     to_delete = max(len(swarmcache) - REPEX_SWARMCACHE_SIZE, 0)
     deleted = 0
     for dns in swarmcache.keys():
         if deleted == to_delete:
             break
         # Prefer to keep peers that were already in the starting table.
         if dns not in self.starting_peertable:
             del swarmcache[dns]
             deleted += 1
     
     # TODO: Should we change the shuffle algorithm such that we 
     # prefer to replace an offline peer with one of the peers
     # in its PEX message?
     
     # create shufflepeers dict, allowing us to deduce why a peer was shuffled out
     # value: (was connectable, spoke PEX, last time the peer was seen)
     shufflepeers = {}
     for dns in self.starting_peertable:
         if dns not in swarmcache:
             shufflepeers[dns] = (dns in self.bt_connectable, dns in self.bt_pex, self.starting_peertable[dns].get('last_seen',0))
     
     self.final_peertable = swarmcache
     for observer in self._observers:
         if DEBUG:
             print >>sys.stderr, "RePEXer: send_done: calling repex_done on", `observer`
         try:
             observer.repex_done(self,
                                 swarmcache,
                                 self.shufflecount,
                                 shufflepeers,
                                 self.bootstrap_counter,
                                 self.datacost)
         except:
             # A failing observer must not break the notification loop.
             print_exc()
Example #18
0
 def send_done(self):
     """Finish the RePEX round: build the final SwarmCache from the
     live peers, record why peers were shuffled out, and notify all
     observers via repex_done."""
     self.done = True
     self.end_ts = ts_now()
     
     # Construct the new SwarmCache by removing excess peers
     swarmcache = dict(self.live_peers)
     to_delete = max(len(swarmcache) - REPEX_SWARMCACHE_SIZE, 0)
     deleted = 0
     for dns in swarmcache.keys():
         if deleted == to_delete:
             break
         # Prefer to keep peers that were already in the starting table.
         if dns not in self.starting_peertable:
             del swarmcache[dns]
             deleted += 1
     
     # TODO: Should we change the shuffle algorithm such that we 
     # prefer to replace an offline peer with one of the peers
     # in its PEX message?
     
     # create shufflepeers dict, allowing us to deduce why a peer was shuffled out
     # value: (was connectable, spoke PEX, last time the peer was seen)
     shufflepeers = {}
     for dns in self.starting_peertable:
         if dns not in swarmcache:
             shufflepeers[dns] = (dns in self.bt_connectable, dns in self.bt_pex, self.starting_peertable[dns].get('last_seen',0))
     
     self.final_peertable = swarmcache
     for observer in self._observers:
         if DEBUG:
             print >>sys.stderr, "RePEXer: send_done: calling repex_done on", `observer`
         try:
             observer.repex_done(self,
                                 swarmcache,
                                 self.shufflecount,
                                 shufflepeers,
                                 self.bootstrap_counter,
                                 self.datacost)
         except:
             # A failing observer must not break the notification loop.
             print_exc()
Example #19
0
    def repex_ready(self, infohash, connecter, encoder, rerequester):
        """Begin the RePEX round for this torrent.

        Records the connecter/encoder/rerequester handed over by the
        download and fills the connect queue with the stored peer table
        plus every peer previously learned through PEX messages."""
        # Guard clauses: wrong torrent, or a round that already finished.
        if infohash != self.infohash:
            print >> sys.stderr, 'RePEXer: repex_ready: wrong infohash:', b2a_hex(infohash)
            return
        if self.done:
            print >> sys.stderr, 'RePEXer: repex_ready: already done'
            return
        if DEBUG:
            print >> sys.stderr, 'RePEXer: repex_ready:', b2a_hex(infohash)
        self.ready = True
        self.ready_ts = ts_now()
        self.rerequest = rerequester
        self.encoder = encoder
        self.connecter = connecter
        # Connect queue: stored peers first, then everything from PEX.
        self.to_pex = self.starting_peertable.keys()
        self.max_sockets = REPEX_INITIAL_SOCKETS
        table = self.starting_peertable
        for peer in table:
            self.to_pex.extend([entry[0] for entry in table[peer].get('pex', [])])
        self.connect_queue()
Example #20
0
    def network_scan(self, dslist):
        """Scan the given DownloadStates for a stopped, finished download
        whose SwarmCache is stale enough to repex, and start a RePEX
        phase on the best (oldest-cache) candidate.

        @param dslist: list of DownloadState objects to inspect.
        @return: (delay, getpeerlist) tuple for the next states callback;
            -1 means no rescheduling (a RePEX phase was started or the
            scheduler is inactive/busy).
        """
        if DEBUG:
            print >> sys.stderr, 'RePEXScheduler: network_scan: %s DownloadStates' % len(
                dslist)
        self.lock.acquire()
        exception = None
        try:
            # Only one RePEX at a time; bail out if busy or disabled.
            if not self.active or self.current_repex is not None:
                return (-1, False)
            now = ts_now()
            found_infohash = None
            found_download = None
            found_age = -1
            for ds in dslist:
                download = ds.get_download()
                infohash = download.tdef.get_infohash()
                debug_msg = None
                if DEBUG:
                    print >> sys.stderr, 'RePEXScheduler: network_scan: checking', ` (
                        download.tdef.get_name_as_unicode()) `
                # Only finished, stopped downloads are eligible for RePEX.
                if ds.get_status() == DLSTATUS_STOPPED and ds.get_progress(
                ) == 1.0:
                    age = now - (swarmcache_ts(ds.get_swarmcache()) or 0)
                    last_attempt_ago = now - self.last_attempts.get(
                        infohash, 0)
                    if last_attempt_ago < REPEX_MIN_INTERVAL:
                        debug_msg = '...too soon to try again, last attempt was %ss ago' % last_attempt_ago
                    elif age < REPEX_INTERVAL:
                        debug_msg = '...SwarmCache too fresh: %s seconds' % age
                    elif age >= REPEX_INTERVAL:
                        debug_msg = '...suitable for RePEX!'
                        # Prefer the candidate with the oldest SwarmCache.
                        if age > found_age:
                            found_download = download
                            found_infohash = infohash
                            found_age = age
                else:
                    debug_msg = '...not repexable: %s %s%%' % (
                        dlstatus_strings[ds.get_status()],
                        ds.get_progress() * 100)
                if DEBUG:
                    print >> sys.stderr, 'RePEXScheduler: network_scan:', debug_msg

            if found_download is None:
                if DEBUG:
                    print >> sys.stderr, 'RePEXScheduler: network_scan: nothing found yet'
                return (REPEX_SCAN_INTERVAL, False)
            if DEBUG:
                print >> sys.stderr, 'RePEXScheduler: network_scan: found %s, starting RePEX phase.' % ` (
                    found_download.tdef.get_name_as_unicode()) `
            # Restart the chosen download in REPEXING mode.
            self.current_repex = found_infohash
            self.downloads[found_infohash] = found_download
            found_download.set_mode(DLMODE_NORMAL)
            found_download.restart(initialdlstatus=DLSTATUS_REPEXING)
            return (-1, False)
        except Exception as e:
            # Defer the exception so it is raised outside the lock.
            exception = e
        finally:
            self.lock.release()

        if exception is not None:
            raise exception
Example #21
0
 def network_scan(self, dslist):
     """
     Called by session thread. Scans for stopped downloads and stores
     them in a queue.
     @param dslist List of DownloadStates"""
     # TODO: only repex last X Downloads instead of all.
     if DEBUG:
         print >>sys.stderr, "RePEXScheduler: network_scan: %s DownloadStates" % len(dslist)
     self.lock.acquire()
     exception = None
     try:
         try:
             # Only one RePEX at a time; bail out if busy or disabled.
             if not self.active or self.current_repex is not None:
                 return -1, False
             
             now = ts_now()
             found_infohash = None
             found_download = None
             found_age = -1
             for ds in dslist:
                 download = ds.get_download()
                 infohash = download.get_def().get_id()
                 debug_msg = None
                 if DEBUG:
                     print >>sys.stderr, "RePEXScheduler: network_scan: checking", `download.get_def().get_name()`
                 # Only finished, stopped downloads are eligible for RePEX.
                 if ds.get_status() == DLSTATUS_STOPPED and ds.get_progress()==1.0:
                     # TODO: only repex finished downloads or also prematurely stopped ones?
                     age = now - (swarmcache_ts(ds.get_swarmcache()) or 0)
                     last_attempt_ago = now - self.last_attempts.get(infohash, 0)
                     
                     if last_attempt_ago < REPEX_MIN_INTERVAL:
                         debug_msg = "...too soon to try again, last attempt was %ss ago" % last_attempt_ago
                     elif age < REPEX_INTERVAL:
                         debug_msg = "...SwarmCache too fresh: %s seconds" % age
                     else:
                         if age >= REPEX_INTERVAL:
                             debug_msg = "...suitable for RePEX!"
                             # Prefer the candidate with the oldest SwarmCache.
                             if age > found_age:
                                 found_download = download
                                 found_infohash = infohash
                                 found_age = age
                 else:
                     debug_msg = "...not repexable: %s %s%%" % (dlstatus_strings[ds.get_status()], ds.get_progress()*100)
                 if DEBUG:
                     print >>sys.stderr, "RePEXScheduler: network_scan:", debug_msg
             
             if found_download is None:
                 if DEBUG:
                     print >>sys.stderr, "RePEXScheduler: network_scan: nothing found yet"
                 return REPEX_SCAN_INTERVAL, False
             else:
                 if DEBUG:
                     print >>sys.stderr, "RePEXScheduler: network_scan: found %s, starting RePEX phase." % `found_download.get_def().get_name()`
                 # Restart the chosen download in REPEXING mode.
                 self.current_repex = found_infohash
                 self.downloads[found_infohash] = found_download
                 found_download.set_mode(DLMODE_NORMAL)
                 found_download.restart(initialdlstatus=DLSTATUS_REPEXING)
                 return -1, False
         except Exception, e:
             # Defer the exception so it is raised outside the lock.
             exception = e
     finally:
         self.lock.release()
     if exception is not None:
         # [E0702, RePEXScheduler.network_scan] Raising NoneType
         # while only classes, instances or string are allowed
         # pylint: disable-msg=E0702
         raise exception
Example #22
0
 def network_scan(self, dslist):
     """
     Called by network thread. Scans for stopped downloads and stores
     them in a queue.
     @param dslist List of DownloadStates"""
     # TODO: only repex last X Downloads instead of all.
     if DEBUG:
         print >>sys.stderr, time.asctime(),'-', "RePEXScheduler: network_scan: %s DownloadStates" % len(dslist)
     self.lock.acquire()
     exception = None
     try:
         try:
             # Only one RePEX at a time; bail out if busy or disabled.
             if not self.active or self.current_repex is not None:
                 return -1, False
             
             now = ts_now()
             found_infohash = None
             found_download = None
             found_age = -1
             for ds in dslist:
                 download = ds.get_download()
                 infohash = download.tdef.get_infohash()
                 debug_msg = None
                 if DEBUG:
                     print >>sys.stderr, time.asctime(),'-', "RePEXScheduler: network_scan: checking", `download.tdef.get_name_as_unicode()`
                 # Only finished, stopped downloads are eligible for RePEX.
                 if ds.get_status() == DLSTATUS_STOPPED and ds.get_progress()==1.0:
                     # TODO: only repex finished downloads or also prematurely stopped ones?
                     age = now - (swarmcache_ts(ds.get_swarmcache()) or 0)
                     last_attempt_ago = now - self.last_attempts.get(infohash, 0)
                     
                     if last_attempt_ago < REPEX_MIN_INTERVAL:
                         debug_msg = "...too soon to try again, last attempt was %ss ago" % last_attempt_ago
                     elif age < REPEX_INTERVAL:
                         debug_msg = "...SwarmCache too fresh: %s seconds" % age
                     else:
                         if age >= REPEX_INTERVAL:
                             debug_msg = "...suitable for RePEX!"
                             # Prefer the candidate with the oldest SwarmCache.
                             if age > found_age:
                                 found_download = download
                                 found_infohash = infohash
                                 found_age = age
                 else:
                     debug_msg = "...not repexable: %s %s%%" % (dlstatus_strings[ds.get_status()], ds.get_progress()*100)
                 if DEBUG:
                     print >>sys.stderr, time.asctime(),'-', "RePEXScheduler: network_scan:", debug_msg
             
             if found_download is None:
                 if DEBUG:
                     print >>sys.stderr, time.asctime(),'-', "RePEXScheduler: network_scan: nothing found yet"
                 return REPEX_SCAN_INTERVAL, False
             else:
                 if DEBUG:
                     print >>sys.stderr, time.asctime(),'-', "RePEXScheduler: network_scan: found %s, starting RePEX phase." % `found_download.tdef.get_name_as_unicode()`
                 # Restart the chosen download in REPEXING mode.
                 self.current_repex = found_infohash
                 self.downloads[found_infohash] = found_download
                 found_download.set_mode(DLMODE_NORMAL)
                 found_download.restart(initialdlstatus=DLSTATUS_REPEXING)
                 return -1, False
         except Exception, e:
             # Defer the exception so it is raised outside the lock.
             exception = e
     finally:
         self.lock.release()
     # Re-raise outside the lock to avoid releasing it twice.
     if exception is not None: raise exception