Example 1
 def socket_cb(self, sock):
     """ Called by P2PConnection after connect() has completed """
     if sock.connected:
         log.info('Connected to %s' % str(sock.addr))
         for id, v in self.incomplete.iteritems():
             if v == sock.addr:
                 break
         else:
             return  # loc wasn't found
         AnomosNeighborInitializer(self, sock, id)
     else:
         # Remove nid,loc pair from incomplete
         torm = []
         for k, v in self.incomplete.items():
             if v == sock.addr:
                 log.info('Failed to connect, discarding \\x%02x' % ord(k))
                 torm.append(k)
         for j in torm:
             self.rm_neighbor(j)
         if sock.addr is None:
             if self.incomplete:
                 log.info("Remaining incomplete peers: %d" % len(self.incomplete))
             else:
                 log.info("No remaining incomplete peers")
         else:
             log.info("Failed to open connection to %s\n" % str(sock.addr))
Example 2
 def handle_error(self):
     t, v, tb = sys.exc_info()
     if isinstance(v, KeyboardInterrupt):
         raise
     else:
         log.info(traceback.format_exc())
         self.close()
Example 3
    def _close(self):
        if self.closed:
            return
        self.closed = True

        # GTK Crash Hack
        import time
        time.sleep(.2)

        self.event_handler.remove_context(self)

        self._doneflag.set()
        log.info("Closing connections, please wait...")
        if self._announced:
            self._rerequest.announce_stop()
            self._rerequest.cleanup()
        if self._hashcheck_thread is not None:
            self._hashcheck_thread.join() # should die soon after doneflag set
        if self._myfiles is not None:
            self._filepool.remove_files(self._myfiles)
        if self._listening:
            self.neighbors.remove_torrent(self.infohash)
        for port in self.reserved_ports:
            self._singleport_listener.release_port(port)
        if self._storage is not None:
            self._storage.close()
        self.schedule(0, gc.collect)
Example 4
 def connection_completed(self):
     log.info("Relay connection [%02x:%d] established" %
              (int(ord(self.neighbor.id)), self.stream_id))
     self.complete = True
     self.flush_pre_buffer()
     self.orelay.complete = True
     self.orelay.flush_pre_buffer()
Example 5
 def found_terminator(self):
     creq = self.req
     self.req = ''
     if not self.next_func:
         log.info("Malformed request from %s:%d" % self.socket.addr)
         self.handle_close()
         return
     self.next_func = self.next_func(creq)
Example 6
 def close(self):
     if self.closed:
         log.warning("Double close")
         return
     log.info("Closing %s"%self.uniq_id())
     if self.complete:
         self.send_break()
     self.shutdown()
Example 7
 def send_tracking_code(self, trackcode):
     #XXX: Just a test, Throw tcodes into the PMQ instead of sending them
     # immediately
     #self.network_ctl_msg(TCODE, trackcode)
     log.info("Queuing tracking code")
     self.neighbor.queue_message(self.stream_id, TCODE+trackcode)
     if self.next_upload is None:
         log.info("Queuing self")
         self.ratelimiter.queue(self)
Example 8
 def close(self):
     # Connection was closed locally (as opposed to
     # being closed by receiving a BREAK message)
     if self.closed:
         log.warning("%s: Double close" % self.uniq_id())
         return
     log.info("Closing %s" % self.uniq_id())
     if self.complete and not self.sent_break:
         self.send_break()
     self.shutdown()
Example 9
 def _parsepeers(self, p):
     peers = []
     if type(p) == str:
         for x in xrange(0, len(p), 6):
             ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
             log.info("Got peer %s"%ip)
             port = (ord(p[x+4]) << 8) | ord(p[x+5])
             peers.append((ip, port, None))
     else:
         for x in p:
             log.info("Got peer %s"%str(x['ip']))
             peers.append((x['ip'], x['port'], x.get('nid')))
     return peers
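The string branch above decodes the tracker's compact peer list, in which each peer occupies six bytes: four IP octets followed by a two-byte big-endian port. A minimal standalone sketch of the same decoding using the standard struct module (the helper name parse_compact_peers is illustrative, not part of Anomos):

import struct

def parse_compact_peers(data):
    """Decode 6-byte compact peer entries into (ip, port, nid) tuples."""
    peers = []
    for x in xrange(0, len(data), 6):
        # Four unsigned bytes for the IP, one big-endian unsigned short for the port
        a, b, c, d, port = struct.unpack('>BBBBH', data[x:x+6])
        peers.append(('%d.%d.%d.%d' % (a, b, c, d), port, None))
    return peers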
Example 10
 def connection_completed(self, socket, id):
     """Called by AnomosNeighborInitializer"""
     if self.incomplete.has_key(id):
         del self.incomplete[id]
     if id == NAT_CHECK_ID:
         log.info("NAT check ok.")
         return
     self.add_neighbor(socket, id)
     tasks = self.waiting_tcs.get(id)
     if tasks is None:
         return
     for task in tasks:
         #TODO: Would a minimum wait between these tasks aid anonymity?
         self.schedule(0, task)
     del self.waiting_tcs[id]
Example 11
 def start_endpoint_stream(self, torrent, aeskey, data=None):
     """ Starts an EndPoint stream
         @param torrent: Torrent to be uploaded/downloaded
         @param aeskey: AES-256 key to be used for transfer communication
         @param data: Tracking Code to be sent
         @type torrent: Anomos.Torrent.Torrent
         @type aeskey: Anomos.crypto.AESKey
         @type data: String
         @return: Newly created EndPoint object"""
     if data is None: # Incoming stream
         nxtid = self.incoming_stream_id
         self.next_stream_id = nxtid + 1
     else:  # Locally initialized stream
         nxtid = self.next_stream_id
         self.next_stream_id += 1
     self.streams[nxtid] = EndPoint(nxtid, self, torrent, aeskey, data)
     self.manager.schedule(180, self.streams[nxtid].completion_timeout)
     log.info("Starting endpoint")
     return self.streams[nxtid]
Example 12
 def _rerequest(self, query):
     """ Make an HTTP GET request to the tracker
         Note: This runs in its own thread.
     """
     log.info("Making announce to " + self.url + ":" + str(self.remote_port))
     if not self.https:
         log.warning("Warning: Will not connect to non HTTPS server")
         return
     try:
         if self.proxy_url:
             h = ProxyHTTPSConnection(self.proxy_url, \
                                      username=self.proxy_username, \
                                      password=self.proxy_password, \
                                      ssl_context=self.ssl_ctx)
             s = "https://%s:%d%s%s" % (self.url, self.remote_port, self.path, query)
             h.putrequest('GET', s)
             
             # I suggest that for now, until there is a better solution in python, 
             # that connections with socks proxies be done with:
             #  socat TCP4-LISTEN:5555,fork SOCKS4A:s,socksport=9050 
             #  or use Privoxy:
             #  127.0.0.1:8118
                                 
         else:
             #No proxy url, use normal connection
             h = HTTPSConnection(self.url, self.remote_port, ssl_context=self.ssl_ctx)
             h.putrequest('GET', self.path+query)
         h.endheaders()
         resp = h.getresponse()
         data = resp.read()
         resp.close()
         h.close()
         h = None
     # urllib2 can raise various crap that doesn't have a common base
     # exception class especially when proxies are used, at least
     # ValueError and stuff from httplib
     except Exception, g:
         def f(r='Problem connecting to ' + self.url + ':  ' + str(g)):
             self._postrequest(errormsg=r)
Example 13
    def _start_download(self, metainfo, feedback, save_path):
    
        # GTK Crash Hack
        import time
        time.sleep(.2)
        
        self.feedback = feedback
        self._set_auto_uploads()

        self.infohash = metainfo.infohash
        self.file_size = metainfo.file_size
        if not metainfo.reported_errors:
            metainfo.show_encoding_errors(log.error)

        if metainfo.is_batch:
            myfiles = [os.path.join(save_path, f) for f in metainfo.files_fs]
        else:
            myfiles = [save_path]
        self._filepool.add_files(myfiles, self)
        self._myfiles = myfiles
        self._storage = Storage(self.config, self._filepool, zip(myfiles,
                                                            metainfo.sizes))
        resumefile = None
        if self.config['data_dir']:
            filename = os.path.join(self.config['data_dir'], 'resume',
                                    self.infohash.encode('hex'))
            if os.path.exists(filename):
                try:
                    resumefile = file(filename, 'rb')
                    if self._storage.check_fastresume(resumefile) == 0:
                        resumefile.close()
                        resumefile = None
                except Exception, e:
                    log.info("Could not load fastresume data: "+
                                str(e) + ". Will perform full hash check.")
                    if resumefile is not None:
                        resumefile.close()
                    resumefile = None
Example 14
    def __init__(self, stream_id, neighbor, torrent, aes, data=None):
        AnomosEndPointProtocol.__init__(self)
        self.partial_recv = ''
        self.sent_break = False

        self.stream_id = stream_id
        self.neighbor = neighbor
        self.manager = neighbor.manager
        self.ratelimiter = neighbor.ratelimiter
        self.torrent = torrent
        self.e2e_key = aes
        self.complete = False
        self.closed = False
        self.choker = None
        self.choke_sent = False
        self.upload = None
        self.next_upload = None
        if data is not None:
            self.send_tracking_code(data)
        else:
            self.send_confirm()
            self.connection_completed()
            log.info("Sent confirm")
Example 15
 def add_neighbor(self, socket, id):
     log.info("Adding Neighbor: \\x%02x" % ord(id))
     self.neighbors[id] = NeighborLink(self, socket, id, \
             self.config, self.ratelimiter)
Example 16
class NeighborManager(object):
    """NeighborManager keeps track of the neighbors a peer is connected to
    and which tracker those neighbors are on.
    """
    def __init__(self, config, certificate, ssl_ctx, sessionid, schedule,
                 ratelimiter):
        self.config = config
        self.certificate = certificate
        self.ssl_ctx = ssl_ctx
        self.sessionid = sessionid
        self.schedule = schedule
        self.ratelimiter = ratelimiter
        self.neighbors = {}
        self.relay_measure = Measure(self.config['max_rate_period'])
        self.relay_count = 0
        self.incomplete = {}
        self.torrents = {}
        self.waiting_tcs = {}
        self.failedPeers = []

    ## Got new neighbor list from the tracker ##
    def update_neighbor_list(self, list):
        freshids = dict([(i[2], (i[0], i[1]))
                         for i in list])  #{nid : (ip, port)}
        # Remove neighbors not found in freshids
        for id in self.neighbors.keys():
            if not freshids.has_key(id):
                self.rm_neighbor(id)
        # Start connections with the new neighbors
        for id, loc in freshids.iteritems():
            if self.nid_collision(id, loc):
                # Already had neighbor by the given id at a different location
                log.warning('NID collision - x%02x' % ord(id))
                # To be safe, kill connection with the neighbor we already
                # had with the requested ID and add ID to the failed list
                self.rm_neighbor(id)
            elif (not self.has_neighbor(id)) and (id not in self.failedPeers):
                self.start_connection(id, loc)

    ## Start a new neighbor connection ##
    def start_connection(self, id, loc):
        """ Start a new SSL connection to the peer at loc and 
            assign them the NeighborID id
            @param loc: (IP, Port)
            @param id: The neighbor ID to assign to this connection
            @type loc: tuple
            @type id: str (single-byte neighbor ID) """

        if self.config['one_connection_per_ip'] and self.has_ip(loc[0]):
            log.warning('Got duplicate IP address in neighbor list. ' \
                        'Multiple connections to the same IP are disabled ' \
                        'in your config.')
            return
        self.incomplete[id] = loc
        conn = P2PConnection(addr=loc,
                             ssl_ctx=self.ssl_ctx,
                             connect_cb=self.socket_cb,
                             schedule=self.schedule)

    def socket_cb(self, sock):
        """ Called by P2PConnection after connect() has completed """
        if sock.connected:
            log.info('Connected to %s' % str(sock.addr))
            for id, v in self.incomplete.iteritems():
                if v == sock.addr:
                    break
            else:
                return  #loc wasn't found
            AnomosNeighborInitializer(self, sock, id)
        else:
            #Remove nid,loc pair from incomplete
            torm = []
            for k, v in self.incomplete.items():
                if v == sock.addr:
                    log.info('Failed to connect, discarding \\x%02x' % ord(k))
                    torm.append(k)
            for j in torm:
                self.rm_neighbor(j)
            if sock.addr is None:
                if self.incomplete:
                    log.info("Remaining incomplete peers: %d" %
                             len(self.incomplete))
                else:
                    log.info("No remaining incomplete peers")
            else:
                log.info("Failed to open connection to %s\n" % str(sock.addr))

    def failed_connections(self):
        return self.failedPeers

    def remove_reported_failids(self, failids):
        for i in failids:
            if i in self.failedPeers:
                self.failedPeers.remove(i)

    ## AnomosNeighborInitializer got a full handshake ##
    def add_neighbor(self, socket, id):
        log.info("Adding Neighbor: \\x%02x" % ord(id))
        self.neighbors[id] = NeighborLink(self, socket, id, \
                self.config, self.ratelimiter)

    def rm_neighbor(self, nid):
        if self.incomplete.has_key(nid):
            self.incomplete.pop(nid)
        if self.neighbors.has_key(nid):
            self.neighbors.pop(nid)
        if nid is not None:
            self.failedPeers.append(nid)

    #TODO: implement banning
    def ban(self, ip):
        pass

    def has_neighbor(self, nid):
        return self.neighbors.has_key(nid) or self.incomplete.has_key(nid)

    def nid_collision(self, nid, loc):
        # If the locations are the same, there's no collision
        if self.neighbors.has_key(nid):
            return self.neighbors[nid].get_loc()[0] != loc[0]
        elif self.incomplete.has_key(nid):
            return self.incomplete[nid][0] != loc[0]
        return False

    def check_session_id(self, sid):
        return sid == self.sessionid

    def has_ip(self, ip):
        return ip in [n.socket.addr[0] for n in self.neighbors.values()] \
                or ip in [x for x,y in self.incomplete.values()]

    def get_ips(self):
        ips = []
        for n in self.neighbors.values():
            ips.append([n.socket.addr[0], n.socket.addr[1], n.id])
        return ips

    def is_incomplete(self, nid):
        return self.incomplete.has_key(nid)

    def count(self, tracker=None):
        return len(self.neighbors)

    def connection_completed(self, socket, id):
        """Called by AnomosNeighborInitializer"""
        if self.incomplete.has_key(id):
            del self.incomplete[id]
        if id == NAT_CHECK_ID:
            log.info("NAT check ok.")
            return
        self.add_neighbor(socket, id)
        tasks = self.waiting_tcs.get(id)
        if tasks is None:
            return
        for task in tasks:
            #TODO: Would a minimum wait between these tasks aid anonymity?
            self.schedule(0, task)
        del self.waiting_tcs[id]

    def lost_neighbor(self, id):
        self.rm_neighbor(id)

    def initializer_failed(self, id):
        """Connection closed before finishing initialization"""
        self.rm_neighbor(id)

    def start_circuit(self, tc, infohash, aeskey):
        """Called from Rerequester to initialize new circuits we've
        just gotten TCs for from the Tracker"""
        if self.count_streams() >= self.config['max_initiate']:
            log.warning("Not starting circuit -- Stream count exceeds maximum")
            return

        tcreader = TCReader(self.certificate)
        try:
            tcdata = tcreader.parseTC(tc)
        except Anomos.Crypto.CryptoError, e:
            log.error("Decryption Error: %s" % str(e))
            return
        nid = tcdata.neighborID
        sid = tcdata.sessionID
        torrent = self.get_torrent(infohash)
        nextTC = tcdata.nextLayer
        if sid != self.sessionid:
            log.error("Not starting circuit -- SessionID mismatch!")
        elif torrent is None:
            log.error("Not starting circuit -- Unknown torrent")
        elif nid in self.incomplete:
            log.info("Postponing circuit until neighbor \\x%02x completes " %
                     ord(nid))
            self.schedule_tc(nid, infohash, aeskey, nextTC)
        elif nid not in self.neighbors:
            log.error("Not starting circuit -- NID \\x%02x is not assigned" %
                      ord(nid))
        else:
            self.neighbors[nid].start_endpoint_stream(torrent,
                                                      aeskey,
                                                      data=nextTC)
Example 17
class HTTPSConnection(Dispatcher):
    def __init__(self, socket, getfunc):
        Dispatcher.__init__(self, socket)
        self.req = ''
        self.set_terminator('\n')
        self.getfunc = getfunc
        self.next_func = self.read_type

    ## HTTP handling methods ##
    def read_type(self, data):
        self.header = data.strip()
        words = data.split()
        if len(words) == 3:
            self.command, self.path, garbage = words
            self.pre1 = False
        elif len(words) == 2:
            self.command, self.path = words
            self.pre1 = True
            if self.command != 'GET':
                return None
        else:
            return None
        if self.command not in ('HEAD', 'GET'):
            return None
        self.headers = {}
        return self.read_header

    def read_header(self, data):
        data = data.strip()
        if data == '':
            # check for an Accept-Encoding: header and pick an encoding
            if self.headers.has_key('accept-encoding'):
                ae = self.headers['accept-encoding']
                log.debug("Got Accept-Encoding: " + ae + "\n")
            else:
                #identity assumed if no header
                ae = 'identity'
            # this eventually needs to support multiple acceptable types
            # q-values and all that fancy HTTP crap
            # for now assume we're only communicating with our own client
            if ae.find('gzip') != -1:
                self.encoding = 'gzip'
            else:
                #default to identity.
                self.encoding = 'identity'
            r = self.getfunc(self, self.path, self.headers)
            if r is not None:
                self.answer(r)
                return None
        try:
            i = data.index(':')
        except ValueError:
            return None
        self.headers[data[:i].strip().lower()] = data[i+1:].strip()
        log.debug(data[:i].strip() + ": " + data[i+1:].strip())
        return self.read_header

    def answer(self, (responsecode, responsestring, headers, data)):
        if self.encoding == 'gzip':
            #transform data using gzip compression
            #this is nasty but i'm unsure of a better way at the moment
            compressed = StringIO()
            gz = GzipFile(fileobj = compressed, mode = 'wb', compresslevel = 9)
            gz.write(data)
            gz.close()
            compressed.seek(0,0)
            cdata = compressed.read()
            compressed.close()
            if len(cdata) >= len(data):
                self.encoding = 'identity'
            else:
                log.debug("Compressed: %i  Uncompressed: %i\n" % (len(cdata),len(data)))
                data = cdata
                headers['Content-Encoding'] = 'gzip'

        # i'm abusing the identd field here, but this should be ok
        if self.encoding == 'identity':
            ident = '-'
        else:
            ident = self.encoding
        username = '******'
        referer = self.headers.get('referer','-')
        useragent = self.headers.get('user-agent','-')
        timestamp = strftime("%d/%b/%Y:%H:%M:%S")
        log.info('%s %s %s [%s] "%s" %i %i "%s" "%s"' % (
                  self.socket.addr[0], ident, username, timestamp, self.header,
                  responsecode, len(data), referer, useragent))

        r = StringIO()
        r.write('HTTP/1.0 ' + str(responsecode) + ' ' + responsestring + '\r\n')
        if not self.pre1:
            headers['Content-Length'] = len(data)
            for key, value in headers.items():
                r.write(key + ': ' + str(value) + '\r\n')
            r.write('\r\n')
        if self.command != 'HEAD':
            r.write(data)

        self.push(r.getvalue())
        self.close_when_done()
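read_type and read_header above use the same handler-chaining style seen in the found_terminator example earlier: each handler parses one line and returns the handler for the next line, or None to stop. A condensed sketch of that pattern, with illustrative names that are not part of Anomos:

class LineParser(object):
    """Toy line-driven parser: each step returns the next step, or None."""

    def __init__(self):
        self.headers = {}
        self.next_func = self.read_request_line

    def feed(self, line):
        # Mirrors found_terminator: advance to whatever the handler returns.
        if self.next_func is not None:
            self.next_func = self.next_func(line)
        return self.next_func is not None

    def read_request_line(self, line):
        words = line.split()
        if len(words) != 3:
            return None                      # malformed request line
        self.command, self.path, self.version = words
        return self.read_header

    def read_header(self, line):
        line = line.strip()
        if line == '':
            return None                      # blank line ends the headers
        key, sep, value = line.partition(':')
        if not sep:
            return None                      # malformed header line
        self.headers[key.strip().lower()] = value.strip()
        return self.read_header

# Example: feed three lines; parsing stops after the blank line.
p = LineParser()
for raw in ('GET /announce HTTP/1.0', 'Host: tracker.example.org', ''):
    p.feed(raw)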
Example 18
 def send_break(self):
     log.info("breaking %s" % str(self))
     self.network_ctl_msg(BREAK)
     self.sent_break = True
Example 19
 def socket_closed(self):
     if self.id != NAT_CHECK_ID and self.id != '':
         log.info("Failed to initialize connection to %s" % str(self.id))
     if not self.complete:
         self.manager.initializer_failed(self.id)
     self.socket = None
Example 20
 def data_flunked(amount, index):
     self._ratemeasure.data_rejected(amount)
     log.info("piece %d failed hash check, " "re-downloading it" % index)