def scan(self, nodes: RpcNodeList, remove_invalid=False):
    """
    Start threads checking known nodes to see if they're alive.

    :param nodes: nodes to probe
    :param remove_invalid: only return valid nodes when set to True
    :return: valid nodes
    """
    if not len(nodes):
        return nodes

    started = datetime.now()
    log_msg(
        'Scanning %d node(s) on port %d. This can take several minutes. Let it run.'
        % (len(nodes), self._m_rpc_port))

    # Fan the validity checks out over a process pool; each worker checks one
    # node against the current blockchain height.
    worker_pool = Pool(processes=CONFIG['concurrent_scans'])
    check = partial(RpcNode.is_valid, self._blockchain_height)
    scanned = worker_pool.map(check, nodes)
    worker_pool.close()
    worker_pool.join()
    nodes = RpcNodeList.from_list(scanned)

    elapsed = (datetime.now() - started).total_seconds()
    log_msg('Scanning %d node(s) done after %d seconds, found %d valid'
            % (len(nodes), elapsed, len(nodes.valid(valid=True))))

    if remove_invalid:
        nodes = nodes.valid(valid=True)
    return nodes
def get_records(self):
    """
    Fetch existing Cloudflare A records for the configured subdomain,
    retrying transient API failures.

    :return: RpcNodeList of discovered records, or None after repeated failures
    """
    max_retries = 5
    nodes = RpcNodeList()
    log_msg('Fetching existing record(s) (%s.%s)' % (self.subdomain_name,
                                                     self.domain_name))

    # One initial attempt plus `max_retries` retries; each failure is
    # logged and followed by a short pause before trying again.
    for _attempt in range(max_retries + 1):
        try:
            url = '%s/%s/dns_records/?type=A&name=%s.%s' % (
                self.api_base, self.zone_id, self.subdomain_name,
                self.domain_name)
            result = make_json_request(url, headers=self.headers)
            # filter on A records / subdomain
            for record in result.get('result'):
                if record.get('type') != 'A' or record.get('name') != self.fulldomain_name:
                    continue
                nodes.append(RpcNode(address=record.get('content'),
                                     uid=record.get('id')))
                log_msg('> A %s %s' % (record.get('name'),
                                       record.get('content')))
            return nodes
        except Exception as ex:
            log_err("Cloudflare record fetching failed: %s" % (str(ex)))
            time.sleep(1)
    # All attempts failed.
    return None
def main(self):
    """
    Periodic update: refresh the pool of known-good RPC nodes and sync the
    chosen subset to the DNS provider (inserting new A records, removing
    stale ones).
    """
    # get & set the current blockheight
    height = self.monerod_get_height(method=self.md_height_discovery_method)
    if not height or not isinstance(height, int):
        log_err("Unable to fetch the current blockchain height")
        return
    self._blockchain_height = height

    nodes = RpcNodeList()
    nodes += RpcNodeList.cache_read(PATH_CACHE)  # from `cached_nodes.json`
    if nodes:
        nodes = self.scan(nodes, remove_invalid=True)

    # Too few cached survivors: fall back to monerod's peer list.
    if len(nodes.nodes) <= 2:
        peers = self.monerod_get_peers()  # from monerod
        nodes += self.scan(peers, remove_invalid=True)

    if nodes.nodes:
        nodes.cache_write()

    nodes.shuffle()
    inserts = nodes.nodes[:self.dns_provider.max_records]
    insert_ips = [node.address for node in inserts]

    dns_nodes = self.dns_provider.get_records()
    if dns_nodes is None:
        # Robustness fix: the retrying provider's get_records() can return
        # None after repeated failures; don't add/delete records blindly.
        log_err('Could not fetch DNS records, skipping this update.')
        return

    # insert new records
    # NOTE(review): membership of an address string in an RpcNodeList —
    # presumably RpcNodeList.__contains__ matches on address; confirm.
    for node in inserts:
        if node.address not in dns_nodes:
            self.dns_provider.add_record(node)

    # remove old records
    # BUG FIX: the original tested `node.address not in inserts`, comparing
    # an address string against RpcNode objects, which would match nothing
    # and delete every record; compare against the plain address list.
    for node in dns_nodes:
        if node.address not in insert_ips:
            self.dns_provider.delete_record(node)
def get_records(self):
    """
    Fetch existing A records for the configured subdomain from the API.

    :return: RpcNodeList of matching records
    """
    nodes = RpcNodeList()
    log_msg('Fetching existing record(s) (%s.%s)' % (self.subdomain_name,
                                                     self.domain_name))

    endpoint = '%s/%s/dns_records/?type=A&name=%s.%s' % (
        self.api_base, self.zone_id, self.subdomain_name, self.domain_name)
    result = make_json_request(endpoint, headers=self.headers)

    # filter on A records / subdomain
    for record in result.get('result'):
        is_match = (record.get('type') == 'A'
                    and record.get('name') == self.fulldomain_name)
        if not is_match:
            continue
        nodes.append(RpcNode(address=record.get('content'),
                             uid=record.get('id')))
        log_msg('> A %s %s' % (record.get('name'), record.get('content')))
    return nodes
def monerod_get_peers(self):
    """Gets the last known peers from monerod"""
    nodes = RpcNodeList()
    output = self._daemon_command("print_pl")
    if not output:
        return nodes

    # Each peer line carries a color flag, a peer id, and ip:port.
    peer_pattern = re.compile(
        r"(gray|white)\s+(\w+)\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})")
    for match in peer_pattern.finditer(output):
        # Only peers flagged 'white' by monerod are kept.
        if match.group(1) == 'white':
            nodes.append(RpcNode(address=match.group(3)))

    log_msg('Got peers from RPC: %d node(s)' % len(nodes))
    return nodes
def get_records(self, all_records=False):
    """
    Fetch DNS entries for the domain via the SOAP API.

    :param all_records: when True, return every entry regardless of type
        and name instead of only A records for our subdomain
    :return: RpcNodeList of entries
    """
    nodes = RpcNodeList()
    cookie = self.build_cookie(mode=MODE_RO, method='getInfo',
                               parameters=[self.domain_name])
    self.update_cookie(cookie)

    result = self.soap_client.service.getInfo(self.domain_name)
    for entry in result.dnsEntries:
        # The SOAP response can carry objects other than DnsEntry; skip them.
        if entry.__class__.__name__ != 'DnsEntry':
            continue
        # Unless all_records is set, restrict to A records for our subdomain.
        if not all_records and (entry.type != 'A'
                                or entry.name != self.subdomain_name):
            continue
        nodes.append(RpcNode(address=entry.content, type=entry.type,
                             name=entry.name, expire=entry.expire))
    return nodes
def main(self):
    """
    Periodic update: refresh the pool of known-good RPC nodes (from cache
    and, when needed, monerod's peer list) and sync the chosen subset to
    the DNS provider.
    """
    # get & set the current blockheight
    height = self.monerod_get_height(method=self.md_height_discovery_method)
    if not height or not isinstance(height, int):
        log_err("Unable to fetch the current blockchain height")
        return
    self._blockchain_height = height

    nodes = RpcNodeList()
    nodes += RpcNodeList.cache_read(PATH_CACHE)  # from `cached_nodes.json`
    if nodes:
        nodes = self.scan(nodes, remove_invalid=True)

    now = time.time()
    this_round_uptime = now - self.last_mass_scan_time
    # Re-scan monerod's peer list when the cache can't fill max_records,
    # or when the periodic scan interval has elapsed.
    if (len(nodes.nodes) <= self.dns_provider.max_records
            or this_round_uptime > CONFIG['scan_interval']):
        peers = self.monerod_get_peers()  # from monerod
        nodes += self.scan(peers, remove_invalid=True)
        self.last_mass_scan_time = now

    if not nodes.nodes:
        log_err('Could not get any valid node, skipping this update.')
        return

    nodes.cache_write()
    nodes.shuffle()
    inserts = nodes.nodes[:self.dns_provider.max_records]
    insert_ips = [node.address for node in inserts]

    dns_nodes = self.dns_provider.get_records()
    if dns_nodes is None:  # fix: was `!= None`; use identity check for None
        log_err('Could not fetch DNS records, skipping this update.')
        return

    # insert new records
    # NOTE(review): membership of an address string in an RpcNodeList —
    # presumably RpcNodeList.__contains__ matches on address; confirm.
    for node in inserts:
        if node.address not in dns_nodes:
            self.dns_provider.add_record(node)
    # remove old records
    for node in dns_nodes:
        if node.address not in insert_ips:
            self.dns_provider.delete_record(node)