def portCheck(self):
    """Determine whether our fileserver port is reachable over ipv4/ipv6.

    If config.ip_external is set, the configured addresses are trusted and no
    active probing is done. Side effects: fills self.ip_external_list and
    self.port_opened, appends our own (ip, port) pairs to
    SiteManager.peer_blacklist and refreshes the UI websocket before/after.
    Returns a dict {"ipv4": opened, "ipv6": opened}.
    """
    if config.ip_external:
        # External ip(s) supplied by configuration: skip the port checker
        for ip_external in config.ip_external:
            SiteManager.peer_blacklist.append((ip_external, self.port))  # Add myself to peer blacklist
        ip_external_types = set([helper.getIpType(ip) for ip in config.ip_external])
        res = {
            "ipv4": "ipv4" in ip_external_types,
            "ipv6": "ipv6" in ip_external_types
        }
        self.ip_external_list = config.ip_external
        self.port_opened.update(res)
        self.log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
        return res

    self.port_opened = {}
    if self.ui_server:
        self.ui_server.updateWebsocket()

    # Probe ipv6 in a background greenlet while ipv4 runs in this one
    if "ipv6" in self.supported_ip_types:
        res_ipv6_thread = gevent.spawn(self.portchecker.portCheck, self.port, "ipv6")
    else:
        res_ipv6_thread = None

    res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")
    if not res_ipv4["opened"]:
        # Closed: try to open the port (presumably UPnP), then re-check
        if self.portchecker.portOpen(self.port):
            res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")

    if res_ipv6_thread is None:
        res_ipv6 = {"ip": None, "opened": None}
    else:
        res_ipv6 = res_ipv6_thread.get()
        # Discard checkers that report a non-ipv6 address for the ipv6 probe
        if res_ipv6["opened"] and not helper.getIpType(res_ipv6["ip"]) == "ipv6":
            self.log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
            res_ipv6["opened"] = False

    # Record discovered external addresses and blacklist ourselves
    self.ip_external_list = []
    for res_ip in [res_ipv4, res_ipv6]:
        if res_ip["ip"] and res_ip["ip"] not in self.ip_external_list:
            self.ip_external_list.append(res_ip["ip"])
            SiteManager.peer_blacklist.append((res_ip["ip"], self.port))

    self.log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))

    res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]}
    self.port_opened.update(res)
    if self.ui_server:
        self.ui_server.updateWebsocket()
    return res
def actionFindHashIds(self, params):
    """Handle a peer "findHashIds" request: list peers holding the given optional-file hash ids.

    Responds with {"peers": ipv4 peers, "peers_onion": ..., "peers_ipv6": ...};
    also inserts our own packed address for hash ids we hold ourselves.
    Returns False (after an error response) if the site is unknown or not served.
    """
    site = self.sites.get(params["site"])
    s = time.time()
    if not site or not site.settings["serving"]:  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        self.connection.badAction(5)
        return False

    # Throttle expensive lookups per (peer ip, site, request size); heavy or
    # repeated callers get a reduced result limit instead of a full search
    event_key = "%s_findHashIds_%s_%s" % (self.connection.ip, params["site"], len(params["hash_ids"]))
    if self.connection.cpu_time > 0.5 or not RateLimit.isAllowed(event_key, 60 * 5):
        time.sleep(0.1)
        back = self.findHashIds(site, params["hash_ids"], limit=10)
    else:
        back = self.findHashIds(site, params["hash_ids"])
    RateLimit.called(event_key)

    # Check my hashfield: figure out how we are reachable, then advertise it
    if self.server.tor_manager and self.server.tor_manager.getOnion(site.address):  # Running onion
        my_ip = helper.packOnionAddress(self.server.tor_manager.getOnion(site.address), self.server.port)
        my_ip_type = "onion"
    elif config.ip_external:  # External ip defined
        my_ip = helper.packAddress(config.ip_external, self.server.port)
        my_ip_type = helper.getIpType(config.ip_external)
    elif self.server.ip and self.server.ip != "*":  # No external ip defined
        my_ip = helper.packAddress(self.server.ip, self.server.port)
        my_ip_type = helper.getIpType(self.server.ip)
    else:
        my_ip = None
        my_ip_type = "ipv4"

    my_hashfield_set = set(site.content_manager.hashfield)
    for hash_id in params["hash_ids"]:
        if hash_id in my_hashfield_set:
            if hash_id not in back[my_ip_type]:
                back[my_ip_type][hash_id] = []
            if my_ip:
                back[my_ip_type][hash_id].append(my_ip)  # Add myself

    if config.verbose:
        self.log.debug(
            "Found: %s for %s hashids in %.3fs" %
            ({key: len(val) for key, val in back.iteritems()}, len(params["hash_ids"]), time.time() - s)
        )
    self.response({"peers": back["ipv4"], "peers_onion": back["onion"], "peers_ipv6": back["ipv6"]})
def announceTrackerUdp(self, tracker_address, mode="start", num_want=10):
    """Announce the site on a UDP tracker and return the peer list it replies with.

    Raises AnnounceError when udp is disabled, a proxy is active, the tracker
    is unreachable, times out, or replies with an unexpected payload.
    """
    time_started = time.time()
    if config.disable_udp:
        raise AnnounceError("Udp disabled by config")
    if config.trackers_proxy != "disable":
        raise AnnounceError("Udp trackers not available with proxies")

    tracker_ip, tracker_port = tracker_address.split("/")[0].split(":")
    client = UdpTrackerClient(tracker_ip, int(tracker_port))
    # Only advertise our fileserver port if peers of this address type can reach us
    if helper.getIpType(tracker_ip) in self.getOpenedServiceTypes():
        client.peer_port = self.fileserver_port
    else:
        client.peer_port = 0

    client.connect()
    if not client.poll_once():
        raise AnnounceError("Could not connect")

    client.announce(info_hash=hashlib.sha1(self.site.address).hexdigest(), num_want=num_want, left=431102370)

    back = client.poll_once()
    if not back:
        raise AnnounceError("No response after %.0fs" % (time.time() - time_started))
    if type(back) is dict and "response" in back:
        return back["response"]["peers"]
    raise AnnounceError("Invalid response: %r" % back)
def start(self, check_connections=True):
    """Bind the server socket and start the background connection checker.

    Returns False when no port is configured; bind failures are logged rather
    than raised, leaving self.stream_server unset.
    """
    self.running = True
    if check_connections:
        self.thread_checker = gevent.spawn(self.checkConnections)
    CryptConnection.manager.loadCerts()
    if config.tor != "disable":
        self.tor_manager.start()
    if not self.port:
        self.log.info("No port found, not binding")
        return False

    self.log.debug(
        "Binding to: %s:%s, (msgpack: %s), supported crypt: %s, supported ip types: %s" %
        (self.ip, self.port, ".".join(map(str, msgpack.version)),
         CryptConnection.manager.crypt_supported, self.supported_ip_types)
    )
    try:
        self.pool = Pool(500)  # do not accept more than 500 connections
        # IPv6 sockets bind with a 4-tuple (host, port, flowinfo, scopeid)
        if helper.getIpType(self.ip) == "ipv6":
            sock_address = (self.ip, self.port, 0, 0)
        else:
            sock_address = (self.ip, self.port)
        self.stream_server = StreamServer(
            sock_address, self.handleIncomingConnection, spawn=self.pool, backlog=100
        )
    except Exception as err:  # Fixed: "as" syntax, consistent with the rest of the file and Py3-compatible
        self.log.info("StreamServer bind error: %s" % err)
def createSocket(self):
    """Create an outgoing TCP socket in the address family matching self.ip."""
    # Create IPv6 connection as IPv4 when using proxy
    use_ipv6 = helper.getIpType(self.ip) == "ipv6" and not hasattr(socket, "socket_noproxy")
    family = socket.AF_INET6 if use_ipv6 else socket.AF_INET
    return socket.socket(family, socket.SOCK_STREAM)
def getOpenedServiceTypes(self):
    """Return the address types (ipv4/ipv6/onion) on which remote peers can reach me."""
    reachable_types = []  # Type of addresses they can reach me
    connection_server = self.site.connection_server
    if connection_server.port_opened and config.trackers_proxy == "disable":
        reachable_types.append(helper.getIpType(connection_server.ip))
    if connection_server.tor_manager.start_onions:
        reachable_types.append("onion")
    return reachable_types
def __init__(self, ip=config.fileserver_ip, port=config.fileserver_port, ip_type=config.fileserver_ip_type):
    """File server: binds the peer-to-peer port and serves site file requests.

    ip_type may be "ipv4", "ipv6" or "dual"; in dual mode an extra ipv4 proxy
    listener is created next to the ipv6 one. Mutates config.fileserver_port
    when Tor is forced or a random port is selected.
    """
    self.site_manager = SiteManager.site_manager
    self.portchecker = PeerPortchecker.PeerPortchecker(self)
    self.log = logging.getLogger("FileServer")
    self.ip_type = ip_type
    self.ip_external_list = []

    self.supported_ip_types = ["ipv4"]  # Outgoing ip_type support
    if helper.getIpType(ip) == "ipv6" or self.isIpv6Supported():
        self.supported_ip_types.append("ipv6")

    # Expand the "*" wildcard bind address according to the chosen ip type
    if ip_type == "ipv6" or (ip_type == "dual" and "ipv6" in self.supported_ip_types):
        ip = ip.replace("*", "::")
    else:
        ip = ip.replace("*", "0.0.0.0")

    if config.tor == "always":
        # Behind a Tor hidden service: listen on the hidden service port
        port = config.tor_hs_port
        config.fileserver_port = port
    elif port == 0:  # Use random port
        port_range_from, port_range_to = list(map(int, config.fileserver_port_range.split("-")))
        port = self.getRandomPort(ip, port_range_from, port_range_to)
        config.fileserver_port = port
    if not port:
        raise Exception("Can't find bindable port")
    if not config.tor == "always":
        config.saveValue("fileserver_port", port)  # Save random port value for next restart
        config.arguments.fileserver_port = port

    ConnectionServer.__init__(self, ip, port, self.handleRequest)
    self.log.debug("Supported IP types: %s" % self.supported_ip_types)

    if ip_type == "dual" and ip == "::":
        # Also bind to ipv4 addres in dual mode
        try:
            self.log.debug("Binding proxy to %s:%s" % ("::", self.port))
            self.stream_server_proxy = StreamServer(
                ("0.0.0.0", self.port), self.handleIncomingConnection, spawn=self.pool, backlog=100
            )
        except Exception as err:
            self.log.info("StreamServer proxy create error: %s" % Debug.formatException(err))

    self.port_opened = {}  # Filled by portCheck(): {"ipv4": bool, "ipv6": bool}

    self.sites = self.site_manager.sites
    self.last_request = time.time()
    self.files_parsing = {}
    self.ui_server = None
def announceTrackerHttp(self, tracker_address, mode="start", num_want=10):
    """Announce the site on an HTTP tracker; return the decoded compact peer list.

    Each peer is a dict {"addr": dotted-quad ip, "port": int}. Raises
    AnnounceError on timeout or an undecodable response.
    """
    tracker_ip, tracker_port = tracker_address.rsplit(":", 1)
    # Advertise a dummy port (1) when peers of this address type can't reach us
    if helper.getIpType(tracker_ip) in self.getOpenedServiceTypes():
        port = self.fileserver_port
    else:
        port = 1
    params = {
        'info_hash': hashlib.sha1(self.site.address).digest(),
        'peer_id': self.peer_id, 'port': port,
        'uploaded': 0, 'downloaded': 0, 'left': 431102370, 'compact': 1, 'numwant': num_want,
        'event': 'started'
    }
    url = "http://" + tracker_address + "?" + urllib.urlencode(params)
    s = time.time()
    response = None
    # Load url
    if config.tor == "always" or config.trackers_proxy != "disable":
        timeout = 60  # Proxied requests get a longer deadline
    else:
        timeout = 30
    with gevent.Timeout(timeout, False):  # Make sure of timeout
        req = self.httpRequest(url)
        response = req.read()
        req.fp._sock.recv = None  # Hacky avoidance of memory leak for older python versions
        req.close()
        req = None
    if not response:
        raise AnnounceError("No response after %.0fs" % (time.time() - s))
    # Decode peers: compact format packs each peer into 6 bytes (4 ip + 2 port)
    try:
        peer_data = bencode.decode(response)["peers"]
        response = None
        peer_count = len(peer_data) / 6
        peers = []
        for peer_offset in xrange(peer_count):
            off = 6 * peer_offset
            peer = peer_data[off:off + 6]
            addr, port = struct.unpack('!LH', peer)
            peers.append({"addr": socket.inet_ntoa(struct.pack('!L', addr)), "port": port})
    except Exception as err:
        raise AnnounceError("Invalid response: %r (%s)" % (response, err))
    return peers
def findHashIds(self, site, hash_ids, limit=100):
    """Collect peers holding the requested optional-file hash ids.

    Returns a nested mapping: ip type -> hash id -> list of packed peer
    addresses (at most 20 per hash id and ip type).
    """
    result = collections.defaultdict(lambda: collections.defaultdict(list))
    found = site.worker_manager.findOptionalHashIds(hash_ids, limit=limit)
    for hash_id, peers in found.iteritems():
        for peer in peers:
            peer_list = result[helper.getIpType(peer.ip)][hash_id]
            if len(peer_list) < 20:  # Cap the peers returned per hash/type
                peer_list.append(peer.packMyAddress())
    return result
def actionCheckport(self, params):
    """Probe the requesting peer's port from our side and report open/closed."""
    # IPv6 requires the 4-tuple socket address form (host, port, flowinfo, scopeid)
    if helper.getIpType(self.connection.ip) == "ipv6":
        sock_address = (self.connection.ip, params["port"], 0, 0)
    else:
        sock_address = (self.connection.ip, params["port"])

    with closing(helper.createSocket(self.connection.ip)) as sock:
        sock.settimeout(5)
        is_open = sock.connect_ex(sock_address) == 0

    if is_open:
        self.response({"status": "open", "ip_external": self.connection.ip})
    else:
        self.response({"status": "closed", "ip_external": self.connection.ip})
def getSupportedTrackers(self): trackers = self.getTrackers() if not self.site.connection_server.tor_manager.enabled: trackers = [tracker for tracker in trackers if ".onion" not in tracker] trackers = [tracker for tracker in trackers if self.getAddressParts(tracker)] # Remove trackers with unknown address if "ipv6" not in self.site.connection_server.supported_ip_types: trackers = [tracker for tracker in trackers if helper.getIpType(self.getAddressParts(tracker)["ip"]) != "ipv6"] return trackers
def getSupportedTrackers(self):
    """Return trackers usable under the current udp/proxy/Tor/ip-type settings."""
    no_udp = config.disable_udp or config.trackers_proxy != "disable"
    no_tor = not self.site.connection_server.tor_manager.enabled
    no_ipv6 = "ipv6" not in self.site.connection_server.supported_ip_types

    usable = []
    for tracker in self.getTrackers():
        if no_udp and tracker.startswith("udp://"):
            continue  # Udp disabled or unusable through a proxy
        if no_tor and ".onion" in tracker:
            continue  # Onion trackers need Tor
        if no_ipv6 and helper.getIpType(self.getAddressParts(tracker)["ip"]) == "ipv6":
            continue  # No outgoing IPv6 support
        usable.append(tracker)
    return usable
def portCheck(self):
    """Check whether the fileserver port is reachable from outside.

    Also records the discovered external ip into config.ip_external and
    blacklists our own address. Returns True if the port is open.
    """
    ip_type = helper.getIpType(self.ip)
    res = self.portchecker.portCheck(self.port, ip_type)
    # Closed? Try opening the port first, then re-check.
    if not res["opened"] and self.portchecker.portOpen(self.port):
        res = self.portchecker.portCheck(self.port, ip_type)

    if res["ip"]:
        config.ip_external = res["ip"]
        SiteManager.peer_blacklist.append((config.ip_external, self.port))  # Add myself to peer blacklist
    else:
        config.ip_external = False

    if res["opened"]:
        self.log.info("Server port on %s:%s: Open" % (self.ip, self.port))
        return True
    self.log.info("Server port on %s:%s: Closed" % (self.ip, self.port))
    return False
def getPeerLocations(self, peers):
    """Geolocate the site's peers (plus our own external ips) for the map UI.

    peers: mapping of peer key -> peer object. Returns a list of location
    dicts (lat/lon plus ping in ms, 0 for ourselves), or False when no GeoIP
    database is available.
    """
    import maxminddb
    db_path = self.getGeoipDb()
    if not db_path:
        self.log.debug("Not showing peer locations: no GeoIP database")
        return False
    self.log.info("Loading GeoIP database from: %s" % db_path)
    geodb = maxminddb.open_database(db_path)

    peers = list(peers.values())
    # Place bars
    peer_locations = []
    placed = {}  # Already placed bars here
    for peer in peers:
        # Height of bar
        if peer.connection and peer.connection.last_ping_delay:
            ping = round(peer.connection.last_ping_delay * 1000)
        else:
            ping = None
        loc = self.getLoc(geodb, peer.ip)
        if not loc:
            continue
        # Create position array
        lat, lon = loc["lat"], loc["lon"]
        latlon = "%s,%s" % (lat, lon)
        # Dont place more than 1 bar to same place, fake repos using ip address last two part
        if latlon in placed and helper.getIpType(peer.ip) == "ipv4":
            lat += float(128 - int(peer.ip.split(".")[-2])) / 50
            lon += float(128 - int(peer.ip.split(".")[-1])) / 50
            latlon = "%s,%s" % (lat, lon)
        placed[latlon] = True
        peer_location = {}
        peer_location.update(loc)
        peer_location["lat"] = lat
        peer_location["lon"] = lon
        peer_location["ping"] = ping
        peer_locations.append(peer_location)

    # Append myself
    for ip in self.site.connection_server.ip_external_list:
        my_loc = self.getLoc(geodb, ip)
        if my_loc:
            my_loc["ping"] = 0
            peer_locations.append(my_loc)
    return peer_locations
def getPeerLocations(self, peers):
    """Geolocate the site's peers (plus our configured external ips) for the map UI.

    Downloads the GeoLite2 database on demand. Returns a list of location
    dicts (lat/lon plus ping in ms, 0 for ourselves), or False when the
    database could not be obtained.
    """
    import maxminddb
    db_path = config.data_dir + '/GeoLite2-City.mmdb'
    # (Re)download the database if it is missing or empty
    if not os.path.isfile(db_path) or os.path.getsize(db_path) == 0:
        if not self.downloadGeoLiteDb(db_path):
            return False
    geodb = maxminddb.open_database(db_path)

    peers = peers.values()
    # Place bars
    peer_locations = []
    placed = {}  # Already placed bars here
    for peer in peers:
        # Height of bar
        if peer.connection and peer.connection.last_ping_delay:
            ping = round(peer.connection.last_ping_delay * 1000)
        else:
            ping = None
        loc = self.getLoc(geodb, peer.ip)
        if not loc:
            continue
        # Create position array
        lat, lon = loc["lat"], loc["lon"]
        latlon = "%s,%s" % (lat, lon)
        # Dont place more than 1 bar to same place, fake repos using ip address last two part
        if latlon in placed and helper.getIpType(peer.ip) == "ipv4":
            lat += float(128 - int(peer.ip.split(".")[-2])) / 50
            lon += float(128 - int(peer.ip.split(".")[-1])) / 50
            latlon = "%s,%s" % (lat, lon)
        placed[latlon] = True
        peer_location = {}
        peer_location.update(loc)
        peer_location["lat"] = lat
        peer_location["lon"] = lon
        peer_location["ping"] = ping
        peer_locations.append(peer_location)

    # Append myself
    for ip in config.ip_external:
        my_loc = self.getLoc(geodb, ip)
        if my_loc:
            my_loc["ping"] = 0
            peer_locations.append(my_loc)
    return peer_locations
def isIpv6Supported(self):
    """Return True if this machine can make outgoing IPv6 connections.

    Detection: "connect" a UDP socket to a public IPv6 address (no packet is
    actually sent for UDP connect) and inspect which local address the OS
    picked. Loopback (::1) means there is no usable IPv6 route.
    """
    if helper.getIpType(self.ip) == "ipv6":
        return True
    # Test if we can connect to ipv6 address
    ipv6_testip = "2001:19f0:6c01:e76:5400:1ff:fed6:3eca"
    sock = None
    try:
        sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        sock.connect((ipv6_testip, 80))
        local_ipv6 = sock.getsockname()[0]
        return local_ipv6 != "::1"
    except Exception:
        return False
    finally:
        # Fixed: the probe socket was previously leaked on every call
        if sock is not None:
            sock.close()
def portCheck(self):
    """Check whether our fileserver port is reachable over ipv4 (and ipv6 when supported).

    Side effects: sets config.ip_external, appends our own addresses to
    SiteManager.peer_blacklist, fills self.port_opened and refreshes the UI
    websocket before/after. Returns {"ipv4": opened, "ipv6": opened-or-None}.
    """
    self.port_opened = {}
    if self.ui_server:
        self.ui_server.updateWebsocket()

    # Probe ipv6 in a background greenlet while ipv4 runs in this one
    if "ipv6" in self.supported_ip_types:
        res_ipv6_thread = gevent.spawn(self.portchecker.portCheck, self.port, "ipv6")
    else:
        res_ipv6_thread = None

    res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")
    if not res_ipv4["opened"]:
        # Closed: try to open the port, then re-check
        if self.portchecker.portOpen(self.port):
            res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")

    if res_ipv6_thread is None:  # Fixed: identity comparison instead of "== None"
        res_ipv6 = {"ip": None, "opened": None}
    else:
        res_ipv6 = res_ipv6_thread.get()
        # Discard checkers that report a non-ipv6 address for the ipv6 probe
        if res_ipv6["opened"] and not helper.getIpType(res_ipv6["ip"]) == "ipv6":
            self.log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
            res_ipv6["opened"] = False

    if res_ipv4["ip"]:
        config.ip_external = res_ipv4["ip"]
        SiteManager.peer_blacklist.append((res_ipv4["ip"], self.port))  # Add myself to peer blacklist
    else:
        config.ip_external = False

    if res_ipv6["ip"]:
        SiteManager.peer_blacklist.append((res_ipv6["ip"], self.port))

    self.log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))

    res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]}
    self.port_opened.update(res)
    if self.ui_server:
        self.ui_server.updateWebsocket()
    return res
def testPassive(self, file_server, bootstrapper_db):
    """A peer announced without an address must not be returned to other peers."""
    peer = Peer(file_server.ip, 1544, connection_server=file_server)
    ip_type = helper.getIpType(file_server.ip)
    hash1 = hashlib.sha256(b"hash1").digest()

    # Announce with no address (passive peer)
    bootstrapper_db.peerAnnounce(ip_type, address=None, port=15441, hashes=[hash1])

    request_params = {
        "hashes": [hash1],
        "port": 15441,
        "need_types": [ip_type],
        "need_num": 10,
        "add": []
    }
    res = peer.request("announce", request_params)
    assert len(res["peers"][0]["ipv4"]) == 0  # Empty result
def testHashCache(self, file_server, bootstrapper_db):
    """updateHashCache must leave the hash_id mapping unchanged."""
    ip_type = helper.getIpType(file_server.ip)
    peer = Peer(file_server.ip, 1544, connection_server=file_server)
    hash1 = hashlib.sha256(b"site1").digest()
    hash2 = hashlib.sha256(b"site2").digest()
    hash3 = hashlib.sha256(b"site3").digest()

    # Verify empty result
    announce_params = {
        "hashes": [hash1, hash2],
        "port": 15441,
        "need_types": [ip_type],
        "need_num": 10,
        "add": [ip_type]
    }
    res = peer.request("announce", announce_params)
    assert len(res["peers"][0][ip_type]) == 0  # Empty result

    # Rebuilding the cache must reproduce the same mapping
    hash_ids_before = bootstrapper_db.hash_ids.copy()
    bootstrapper_db.updateHashCache()
    assert hash_ids_before == bootstrapper_db.hash_ids
def portCheck(self):
    """Determine whether our fileserver port is reachable over ipv4/ipv6.

    Trusts config.ip_external when set; otherwise probes both families and
    additionally treats public addresses found on local interfaces as
    reachable. Side effects: fills self.ip_external_list and self.port_opened,
    blacklists our own addresses and refreshes the UI websocket.
    Returns {"ipv4": opened, "ipv6": opened}.
    """
    if config.ip_external:
        # External ip(s) supplied by configuration: skip the port checker
        for ip_external in config.ip_external:
            SiteManager.peer_blacklist.append((ip_external, self.port))  # Add myself to peer blacklist
        ip_external_types = set([helper.getIpType(ip) for ip in config.ip_external])
        res = {
            "ipv4": "ipv4" in ip_external_types,
            "ipv6": "ipv6" in ip_external_types
        }
        self.ip_external_list = config.ip_external
        self.port_opened.update(res)
        self.log.info("Server port opened based on configuration ipv4: %s, ipv6: %s" % (res["ipv4"], res["ipv6"]))
        return res

    self.port_opened = {}
    if self.ui_server:
        self.ui_server.updateWebsocket()

    # Probe ipv6 in a background greenlet while ipv4 runs in this one
    if "ipv6" in self.supported_ip_types:
        res_ipv6_thread = gevent.spawn(self.portchecker.portCheck, self.port, "ipv6")
    else:
        res_ipv6_thread = None

    res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")
    # Don't try to open the port when running exclusively over Tor
    if not res_ipv4["opened"] and config.tor != "always":
        if self.portchecker.portOpen(self.port):
            res_ipv4 = self.portchecker.portCheck(self.port, "ipv4")

    if res_ipv6_thread is None:
        res_ipv6 = {"ip": None, "opened": None}
    else:
        res_ipv6 = res_ipv6_thread.get()
        # Discard checkers that report a non-ipv6 address for the ipv6 probe
        if res_ipv6["opened"] and not helper.getIpType(res_ipv6["ip"]) == "ipv6":
            self.log.info("Invalid IPv6 address from port check: %s" % res_ipv6["ip"])
            res_ipv6["opened"] = False

    # Record discovered external addresses and blacklist ourselves
    self.ip_external_list = []
    for res_ip in [res_ipv4, res_ipv6]:
        if res_ip["ip"] and res_ip["ip"] not in self.ip_external_list:
            self.ip_external_list.append(res_ip["ip"])
            SiteManager.peer_blacklist.append((res_ip["ip"], self.port))

    self.log.info("Server port opened ipv4: %s, ipv6: %s" % (res_ipv4["opened"], res_ipv6["opened"]))

    res = {"ipv4": res_ipv4["opened"], "ipv6": res_ipv6["opened"]}

    # Add external IPs from local interfaces
    interface_ips = helper.getInterfaceIps("ipv4")
    if "ipv6" in self.supported_ip_types:
        interface_ips += helper.getInterfaceIps("ipv6")
    for ip in interface_ips:
        if not helper.isPrivateIp(ip) and ip not in self.ip_external_list:
            self.ip_external_list.append(ip)
            res[helper.getIpType(ip)] = True  # We have opened port if we have external ip
            SiteManager.peer_blacklist.append((ip, self.port))
            self.log.debug("External ip found on interfaces: %s" % ip)

    self.port_opened.update(res)
    if self.ui_server:
        self.ui_server.updateWebsocket()
    return res
def setIp(self, ip):
    """Update the peer's ip (and the address type derived from it), then refresh its name."""
    self.ip = ip
    self.ip_type = helper.getIpType(ip)
    self.updateName()
def getConnection(self, ip=None, port=None, peer_id=None, create=True, site=None, is_tracker_connection=False):
    """Return a pooled connection to ip:port, optionally creating a new one.

    Lookup order: keyed pool (self.ips), then a linear scan of
    self.connections; only then a new Connection is created (unless create is
    False or config.offline). For Tor, connections may be bound to a
    site-unique onion address. Raises on unconnectable/blacklisted peers or
    failed connects; returns None when creation is disabled.
    """
    ip_type = helper.getIpType(ip)
    # Use a site-unique onion when talking to onions (or when our own port of
    # this type is known-closed) and onion services are running
    has_per_site_onion = (ip.endswith(".onion") or self.port_opened.get(ip_type, None) == False) and self.tor_manager.start_onions and site
    if has_per_site_onion:  # Site-unique connection for Tor
        if ip.endswith(".onion"):
            site_onion = self.tor_manager.getOnion(site.address)
        else:
            site_onion = self.tor_manager.getOnion("global")
        key = ip + site_onion
    else:
        key = ip

    # Find connection by ip
    if key in self.ips:
        connection = self.ips[key]
        if not peer_id or connection.handshake.get("peer_id") == peer_id:  # Filter by peer_id
            if not connection.connected and create:
                succ = connection.event_connected.get()  # Wait for connection
                if not succ:
                    raise Exception("Connection event return error")
            return connection

    # Recover from connection pool
    for connection in self.connections:
        if connection.ip == ip:
            if peer_id and connection.handshake.get("peer_id") != peer_id:  # Does not match
                continue
            if ip.endswith(".onion") and self.tor_manager.start_onions and ip.replace(".onion", "") != connection.target_onion:
                # For different site
                continue
            if not connection.connected and create:
                succ = connection.event_connected.get()  # Wait for connection
                if not succ:
                    raise Exception("Connection event return error")
            return connection

    # No connection found
    if create and not config.offline:  # Allow to create new connection if not found
        if port == 0:
            raise Exception("This peer is not connectable")
        if (ip, port) in self.peer_blacklist and not is_tracker_connection:
            raise Exception("This peer is blacklisted")
        try:
            if has_per_site_onion:  # Lock connection to site
                connection = Connection(self, ip, port, target_onion=site_onion, is_tracker_connection=is_tracker_connection)
            else:
                connection = Connection(self, ip, port, is_tracker_connection=is_tracker_connection)
            self.num_outgoing += 1
            # Register in the pool before connecting so parallel callers reuse it
            self.ips[key] = connection
            self.connections.append(connection)
            connection.log("Connecting... (site: %s)" % site)
            succ = connection.connect()
            if not succ:
                connection.close("Connection event return error")
                raise Exception("Connection event return error")
        except Exception as err:
            connection.close("%s Connect error: %s" % (ip, Debug.formatException(err)))
            raise err

        if len(self.connections) > config.global_connected_limit:
            gevent.spawn(self.checkMaxConnections)

        return connection
    else:
        return None
def actionAnnounce(self, params):
    """Handle a tracker-style "announce" request: register the peer, return peers per hash.

    Records the announcing peer's (possibly onion) addresses for its hashes in
    the bootstrapper db, then answers with a peer list per requested hash.
    The whole request is bounded to roughly one second of peer-list queries.
    """
    time_started = time.time()
    s = time.time()
    # Backward compatibility: old clients say "ip4" instead of "ipv4"
    if "ip4" in params["add"]:
        params["add"].append("ipv4")
    if "ip4" in params["need_types"]:
        params["need_types"].append("ipv4")

    hashes = params["hashes"]

    all_onions_signed = self.checkOnionSigns(params.get("onions", []), params.get("onion_signs"), params.get("onion_sign_this"))
    time_onion_check = time.time() - s

    ip_type = helper.getIpType(self.connection.ip)

    # Only trust the "my port is open" claim for publicly routable, non-onion peers
    if ip_type == "onion" or self.connection.ip in config.ip_local:
        is_port_open = False
    elif ip_type in params["add"]:
        is_port_open = True
    else:
        is_port_open = False

    s = time.time()
    # Separatley add onions to sites or at once if no onions present
    i = 0
    onion_to_hash = {}
    for onion in params.get("onions", []):
        if onion not in onion_to_hash:
            onion_to_hash[onion] = []
        onion_to_hash[onion].append(hashes[i])
        i += 1

    hashes_changed = 0
    for onion, onion_hashes in onion_to_hash.items():
        hashes_changed += db.peerAnnounce(
            ip_type="onion", address=onion, port=params["port"],
            hashes=onion_hashes, onion_signed=all_onions_signed
        )
    time_db_onion = time.time() - s

    s = time.time()
    if is_port_open:
        hashes_changed += db.peerAnnounce(
            ip_type=ip_type, address=self.connection.ip, port=params["port"],
            hashes=hashes, delete_missing_hashes=params.get("delete")
        )
    time_db_ip = time.time() - s

    s = time.time()
    # Query sites
    back = {}
    peers = []
    if params.get("onions") and not all_onions_signed and hashes_changed:
        back["onion_sign_this"] = "%.0f" % time.time()  # Send back nonce for signing

    # Large or no-op announces get a smaller, unordered peer list
    if len(hashes) > 500 or not hashes_changed:
        limit = 5
        order = False
    else:
        limit = 30
        order = True
    for hash in hashes:
        if time.time() - time_started > 1:  # 1 sec limit on request
            self.connection.log("Announce time limit exceeded after %s/%s sites" % (len(peers), len(hashes)))
            break
        hash_peers = db.peerList(
            hash, address=self.connection.ip, onions=list(onion_to_hash.keys()),
            port=params["port"], limit=min(limit, params["need_num"]),
            need_types=params["need_types"], order=order
        )
        if "ip4" in params["need_types"]:  # Backward compatibility
            hash_peers["ip4"] = hash_peers["ipv4"]
            del (hash_peers["ipv4"])
        peers.append(hash_peers)
    time_peerlist = time.time() - s

    back["peers"] = peers
    self.connection.log(
        "Announce %s sites (onions: %s, onion_check: %.3fs, db_onion: %.3fs, db_ip: %.3fs, peerlist: %.3fs, limit: %s)" %
        (len(hashes), len(onion_to_hash), time_onion_check, time_db_onion, time_db_ip, time_peerlist, limit)
    )
    self.response(back)
def testBootstrapperDb(self, file_server, bootstrapper_db):
    """End-to-end test of announce/peerList round trips and db cleanup."""
    ip_type = helper.getIpType(file_server.ip)
    peer = Peer(file_server.ip, 1544, connection_server=file_server)
    hash1 = hashlib.sha256(b"site1").digest()
    hash2 = hashlib.sha256(b"site2").digest()
    hash3 = hashlib.sha256(b"site3").digest()

    # Verify empty result
    res = peer.request("announce", {
        "hashes": [hash1, hash2], "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
    })
    assert len(res["peers"][0][ip_type]) == 0  # Empty result

    # Verify added peer on previous request
    bootstrapper_db.peerAnnounce(ip_type, file_server.ip_external, port=15441, hashes=[hash1, hash2], delete_missing_hashes=True)
    res = peer.request("announce", {
        "hashes": [hash1, hash2], "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
    })
    assert len(res["peers"][0][ip_type]) == 1
    assert len(res["peers"][1][ip_type]) == 1

    # hash2 deleted from 1.2.3.4
    bootstrapper_db.peerAnnounce(ip_type, file_server.ip_external, port=15441, hashes=[hash1], delete_missing_hashes=True)
    res = peer.request("announce", {
        "hashes": [hash1, hash2], "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
    })
    assert len(res["peers"][0][ip_type]) == 1
    assert len(res["peers"][1][ip_type]) == 0

    # Announce 3 hash again
    bootstrapper_db.peerAnnounce(ip_type, file_server.ip_external, port=15441, hashes=[hash1, hash2, hash3], delete_missing_hashes=True)
    res = peer.request("announce", {
        "hashes": [hash1, hash2, hash3], "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
    })
    assert len(res["peers"][0][ip_type]) == 1
    assert len(res["peers"][1][ip_type]) == 1
    assert len(res["peers"][2][ip_type]) == 1

    # Single hash announce
    res = peer.request("announce", {
        "hashes": [hash1], "port": 15441, "need_types": [ip_type], "need_num": 10, "add": [ip_type]
    })
    assert len(res["peers"][0][ip_type]) == 1

    # Test DB cleanup
    assert [row[0] for row in bootstrapper_db.execute("SELECT address FROM peer").fetchall()] == [file_server.ip_external]  # 127.0.0.1 never get added to db

    # Delete peers: related peer_to_hash rows must go away too, hashes remain
    bootstrapper_db.execute("DELETE FROM peer WHERE address = ?", [file_server.ip_external])
    assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer_to_hash").fetchone()["num"] == 0
    assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM hash").fetchone()["num"] == 3  # 3 sites
    assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer").fetchone()["num"] == 0  # 0 peer