def discover_peers(self):
    logger.info("Discovering nearby peers..")
    try:
        for host, port in rpyc.discover('storage'):
            self.fire(ProbePeer(host))
    except Exception as e:
        logger.error('Got error while discovering nearby peers: %s', e)
def get_ip() -> str:
    ip = '0.0.0.0'
    try:
        debug('TrackerService.get_ip - Discovering nodes to establish a connection to obtain the IP')
        peers = discover(TrackerService.get_name(TrackerService))
        debug(f'TrackerService.get_ip - Nodes discovered to obtain IP: {peers}')
        for peer in peers:
            s = socket(AF_INET, SOCK_DGRAM)
            try:
                debug(f'TrackerService.get_ip - Attempting to connect to the node: {peer}')
                s.connect(peer)
                ip = s.getsockname()[0]
            except Exception as e:
                error(f'TrackerService.get_ip - Error connecting to node: {peer}. Exception: {e}')
                sleep(0.1)
                continue
            finally:
                s.close()
    except Exception as e:
        error(f'TrackerService.get_ip - Obtaining IP from a socket locally because no node was discovered. Exception: {e}')
        ip = gethostbyname(gethostname())  # This should never happen if the Registry is online
    return ip
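# A standalone sketch (not from the snippet above) of the trick get_ip() relies on:
# "connecting" a UDP socket sends no traffic, but it forces the OS to choose an
# outgoing interface, whose address getsockname() then reports. The default peer
# address below is an arbitrary assumption for illustration.
from socket import socket, AF_INET, SOCK_DGRAM

def local_ip_towards(peer=('8.8.8.8', 80)):
    s = socket(AF_INET, SOCK_DGRAM)
    try:
        s.connect(peer)              # no packet is sent for SOCK_DGRAM
        return s.getsockname()[0]    # local address of the interface the OS picked
    finally:
        s.close()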
def processor(self):
    '''
    Check whether any execution unit is currently idle; if so, take a task
    from the producer queue and assign it to that unit.
    '''
    to_wait = 0.1
    while True:
        try:
            result = rpyc.discover(self.service_name)
            self.discovery_error = False
            self.update_server(result)
            with self.lock:
                for item in self.record:
                    if self.record[item] <= 0:
                        node = self.producer.get()
                        thread = threading.Thread(target=self.sub_processor, args=(item, node))
                        thread.start()
                        self.record[item] += 1
                        self.time_record[item] = int(time.time())
        except rpyc.utils.factory.DiscoveryError:
            if not self.discovery_error:
                self.discovery_error = True
                trivial_util.print_t(f'not found server with name {self.service_name}!')
        # wait for a while before the next discovery attempt
        time.sleep(to_wait)
def service01():
    conn = rpyc.connect(host='localhost', port=18861)
    root = conn.root  # MyService object

    # object
    print(root)
    print(root.get_service_name())
    print(root.get_service_aliases())

    # custom method
    print(root.get_answer())          # 66
    print(root.exposed_get_answer())  # 66
    # print(root.get_question())      # AttributeError: cannot access 'get_question'

    registrar = UDPRegistryClient()
    list_of_servers = registrar.discover("foo")
    print(rpyc.discover(service_name='MY', host='localhost'))
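# A minimal sketch of the server-side counterpart assumed by the client above:
# a service is only discoverable once it has registered itself with a running
# registry (for example the bundled rpyc_registry script or
# rpyc.utils.registry.UDPRegistryServer). The service name "MY" and port 18861
# are assumptions chosen to match service01().
import rpyc
from rpyc.utils.server import ThreadedServer

class MyService(rpyc.Service):
    ALIASES = ['MY']  # advertised to the registry; found via rpyc.discover('MY')

    def exposed_get_answer(self):
        return 66

if __name__ == '__main__':
    # auto_register makes the server announce itself to the registry periodically
    ThreadedServer(MyService, port=18861, auto_register=True).start()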
def __init__(self, ip=None, port=None):
    '''
    @param (ip, port) namenode.
    '''
    if ip is None and port is None:
        # Discover the NAMENODE service via the registry and use the first address
        address = rpyc.discover('NAMENODE')[0]
        self.ip, self.port = address
    else:
        assert ip is not None and port is not None
        self.ip = ip
        self.port = port
def on_connect(self, conn):
    print('OPEN - {}'.format(conn))
    self.conns = []
    self.datanodes = []
    datanode_candidate = rpyc.discover('DATANODE')
    for node in datanode_candidate:
        ip, port = node
        try:
            conn = rpyc.connect(ip, port)
            self.conns.append(conn)
            self.datanodes.append((ip, port))
        except Exception:
            pass
def __refresh_cloud_state(self):
    """Refresh the cloud state list using the registry server"""

    # Check network with rpyc registry thread
    self.mutex_server_list.acquire()
    try:
        self.server_list = rpyc.discover("BLENDERSIM")
        logging.debug("Server list " + str(self.server_list))
    except DiscoveryError:
        if self.reg_found:
            logging.info("Simulation servers not found on the network!")
            self.reg_found = False
    self.mutex_server_list.release()

    if self.server_list and not self.reg_found:
        logging.info("Simulation servers found on the network: " + str(self.server_list))
        self.reg_found = True

    # Lock cloud_state and server_list resources
    self.mutex_cloud_state.acquire()
    self.mutex_server_list.acquire()

    # Transform server list into a dict
    serv_list_dict = []
    for item in map(lambda x: ["address", x[0], "port", x[1], "n_threads", 0], self.server_list):
        serv_list_dict.append(dict(zip(item[0::2], item[1::2])))
    serv_dict = dict(zip(map(hash, self.server_list), serv_list_dict))

    # Create sets for server_dict and cloud_state keys
    keys_serv_dict = set(serv_dict.keys())
    keys_cloud_state = set(self.cloud_state.keys())

    # Compare and update cloud_state set if needed
    for elem in keys_serv_dict.difference(keys_cloud_state):
        self.cloud_state[elem] = serv_dict[elem]
    for elem in keys_cloud_state.difference(keys_serv_dict):
        self.cloud_state.pop(elem)

    # Release resources
    self.mutex_cloud_state.release()
    self.mutex_server_list.release()
    logging.debug("Server list " + str(self.server_list) + " cloud " + str(self.cloud_state))
def test1():
    nodes = rpyc.discover('TRACKER')
    assert len(nodes) == 2
    node_c = nodes[0] if nodes[0][0].split('.')[3] == '13' else nodes[1]
    node_l = nodes[1] if nodes[0][0].split('.')[3] == '13' else nodes[0]
    c_c = rpyc.connect(*node_c)
    c_l = rpyc.connect(*node_l)
    print(c_c.root.client_store(leynier.get_id(), leynier.to_json()))
    c_c.root.client_data()
    c_c.root.client_table()
    c_l.root.client_data()
    c_l.root.client_table()
def __refresh_cloud_state(self):
    """Refresh the cloud state list using the registry server"""

    # Check network with rpyc registry thread
    try:
        self.server_list = rpyc.discover((self.simulator + "sim").capitalize(),
                                         registrar=UDPRegistryClient(ip=self.ip_register,
                                                                     port=REGISTRY_PORT))
        logging.debug("Server list " + str(self.server_list))
    except DiscoveryError:
        if self.reg_found:
            logging.info("No simulation Server found on the network!")
            self.reg_found = False
        self.server_list = []

    if self.server_list and not self.reg_found:
        logging.info("Simulation Server(s) found on the network: " + str(self.server_list))
        self.reg_found = True

    # Transform server list into a dict
    serv_list_dict = []
    for item in map(lambda x: ServerInfo(x[0], x[1]), self.server_list):
        serv_list_dict.append(item)
    serv_dict = dict(zip(map(hash, self.server_list), serv_list_dict))

    # Create sets for server_dict and cloud_state keys
    keys_serv_dict = set(serv_dict.keys())
    keys_cloud_state = set(self.cloud_state.keys())

    # Compare and update cloud_state set if needed
    for elem in keys_serv_dict.difference(keys_cloud_state):
        if len(self.rqt) > 0:
            self.mutex_cloud_state.acquire()
            self.cloud_state[elem] = serv_dict[elem]
            self.cloud_state[elem].status = True
            self.cloud_state[elem].nb_threads = 0
            self.mutex_cloud_state.release()
    for elem in keys_cloud_state.difference(keys_serv_dict):
        self.mutex_cloud_state.acquire()
        self.cloud_state.pop(elem)
        self.mutex_cloud_state.release()

    logging.debug("Server list " + str(self.server_list) + " cloud " + str(self.cloud_state))
def exposed_client_update_network(self):
    if not self.is_initialized:
        error(f'KademliaService.exposed_client_update_network - Instance not initialized')
    service_name = KademliaService.get_name(self.__class__)
    peers = discover(service_name)
    for peer in peers:
        tcontact = Contact(get_id(peer), *peer)
        debug(f'KademliaService.exposed_client_update_network - Making ping to peer: {tcontact}')
        result, _ = self.ping_to(tcontact)
        if result:
            debug(f'KademliaService.exposed_client_update_network - Successful ping to peer: {tcontact}')
        else:
            debug(f'KademliaService.exposed_client_update_network - Unsuccessful ping to peer: {tcontact}')
def __init__(self, service_name, database, host_name, registrar_ip):
    self._service_name = service_name
    self._database = database
    self._host_name = host_name
    self._ip = dlpxqa.get_database_ip(database)
    self._registrar = TCPRegistryClient(registrar_ip)
    self._addrs = []
    self._remote_host_ip = ''
    self._port = ''
    self._connection = None
    self._discovered_service = False
    self._connected = False
    try:
        self._addrs = rpyc.discover(self._service_name, host=self._ip, registrar=self._registrar)
        self._discovered_service = True
        self._remote_host_ip, self._port = self._addrs[0]
        try:
            self._connection = rpyc.connect(self._remote_host_ip, self._port)
            self._connected = True
        except socket.error:
            pass
    except DiscoveryError:
        pass
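# Hedged sketch of the discovery call used in the constructor above: pointing
# rpyc.discover at a TCP registry instead of the default UDP broadcast, and
# filtering results by host. The registry address and service name below are
# placeholder assumptions.
import rpyc
from rpyc.utils.registry import TCPRegistryClient

registrar = TCPRegistryClient('10.0.0.5')                  # registry reachable over TCP
addrs = rpyc.discover('MYSVC', registrar=registrar)        # tuple of (ip, port) pairs
addrs_on_host = rpyc.discover('MYSVC', host='10.0.0.7',
                              registrar=registrar)         # only entries on that host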
def exposed_connect_to_network(self, contact: str):
    self.exposed_init(contact)
    contact = Contact.from_json(contact)
    while not self.is_started_node:
        try:
            if not self.is_initialized:
                raise Exception('KademliaService.exposed_connect_to_network - Instance not initialized')
            try:
                service_name = KademliaService.get_name(self.__class__)
                debug(f'KademliaService.exposed_connect_to_network - Server name in the connect_to_network: {service_name}')
                nodes = discover(service_name)
                debug(f'KademliaService.exposed_connect_to_network - Discovered nodes: {nodes}')
            except DiscoveryError:
                raise Exception('KademliaService.exposed_connect_to_network - No service found')
            mark = False
            for ip, port in nodes:
                if ip == self.my_contact.ip and port == self.my_contact.port:
                    continue
                count = 0
                while count < 5:
                    try:
                        debug(f'KademliaService.exposed_connect_to_network - Establishing connection with {ip}:{port}')
                        conn = connect(ip, port)
                        debug(f'KademliaService.exposed_connect_to_network - Pinging to {ip}:{port}')
                        result, _ = conn.root.ping(self.my_contact.to_json(), self.lamport)
                        if result:
                            contact = Contact.from_json(result)
                        else:
                            raise Exception(f'KademliaService.exposed_connect_to_network - The contact with address {ip}:{port} is not initialized')
                        debug(f'KademliaService.exposed_connect_to_network - The contact {contact} responded to the ping correctly')
                        break
                    except Exception as e:
                        error(f'Exception: {e} when trying ping to node with ip: {ip} and port: {port}')
                        count += 1
                if count == 5:
                    debug(f'KademliaService.exposed_connect_to_network - The service with address {ip}:{port} does not respond')
                    continue
                if contact != self.my_contact:
                    mark = True
                    self.update_contact(contact)
            if not mark:
                raise Exception('KademliaService.exposed_connect_to_network - No node other than this one was discovered')
            try:
                self.exposed_client_find_node(self.my_contact.id)
            except Exception as e:
                raise Exception(f'KademliaService.exposed_connect_to_network - I can\'t perform the first iterative find node because: {e}')
            count_of_buckets = len(self.table)
            for i in range(count_of_buckets):
                if not self.table.get_bucket(i):
                    continue
                count = 0
                while count < 5:
                    key = randint(2 ** i, 2 ** (i + 1) - 1)
                    try:
                        self.exposed_client_find_node(key)
                        break
                    except Exception as e:
                        error(f'KademliaService.exposed_connect_to_network - I cannot perform the iterative find node. Exception: {e}')
                        count += 1
                if count == 5:
                    debug('KademliaService.exposed_connect_to_network - I cannot perform the iterative find node')
            self.is_started_node = True
            debug('KademliaService.exposed_connect_to_network - Finish method. Node is started')
            return True
        except Exception as e:
            error(e)
            debug('KademliaService.exposed_connect_to_network - Sleep for 5 seconds and try to connect to the network again')
            sleep(0.2)
    return False
options, args = parser.parse_args()
# ip = options.ip
# port = options.port

HOST, PORT = "192.168.1.13", 10003
# server = SocketServer.TCPServer((HOST, PORT), RoboServer)
# server.serve_forever()

import rpyc.utils.registry as reg

host = "192.168.1.27"
registrar = reg.TCPRegistryClient("192.168.1.27")
print(rpyc.discover("ENGINE", host, timeout=10))  # , registrar=registrar

# import rpyc.utils.registry as reg
# registrar = reg.TCPRegistryClient(options.server)
# lst = rpyc.discover(service, host, timeout=timeout, registrar=registrar)

# odometry
# o = rpyc.connect(ip, int(port))
# o.root.Get()[0][1]

# engine
# u = rpyc.connect(ip, int(port))
# u.root.SetBSpeed(var1)
# u.root.SetASpeed(var2)
# u.root.SetBits("00000000")
def get_workers(self):
    # discovers and returns the tuple of (addr, port) values of workers
    return rpyc.discover("FRONTIER")
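# Hypothetical usage of get_workers() above: connect to each discovered
# "FRONTIER" worker and skip unreachable ones. DiscoveryError is what
# rpyc.discover raises when the registry has no matching service.
import rpyc
from rpyc.utils.factory import DiscoveryError

def connect_to_workers(master):
    connections = []
    try:
        workers = master.get_workers()
    except DiscoveryError:
        return connections              # nothing registered yet
    for addr, port in workers:
        try:
            connections.append(rpyc.connect(addr, port))
        except OSError:
            continue                    # registered but currently unreachable
    return connections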