def get_next_leader_stub_manager(self, group_id=None): """다음 리더 peer, stub manager 을 식별한다. :param group_id: :return: peer, stub manager """ # TODO 피어 재시작 후의 접속하는 피어의 connected 상태 변경 확인할 것 # connected peer 만 순회하도록 수정할 것, 현재는 확인 되지 않았으므로 전체 순회로 구현함 # max_retry = self.get_connected_peer_count(group_id) if group_id is None: group_id = conf.ALL_GROUP_ID max_retry = self.get_peer_count(None) try_count = 0 next_leader_peer = self.__get_next_peer(self.get_leader_peer(group_id), group_id) while try_count < max_retry: stub_manager = StubManager(next_leader_peer.target, loopchain_pb2_grpc.PeerServiceStub) try: try_count += 1 response = stub_manager.call("GetStatus", loopchain_pb2.CommonRequest(request="")) logging.debug("Peer Status: " + str(response)) return next_leader_peer, stub_manager except Exception as e: logging.debug("try another stub..." + str(e)) next_leader_peer = self.__get_next_peer(next_leader_peer, group_id) logging.warning("fail found next leader stub") return None, None
def get_next_leader_stub_manager(self, group_id=None):
    """Identify the next leader peer and its stub manager.

    :param group_id: peer group id; defaults to conf.ALL_GROUP_ID when None
    :return: (peer, stub_manager) for the first reachable candidate,
             or (None, None) when no candidate answers within max_retry tries
    """
    util.logger.spam(f"peer_manager:get_next_leader_stub_manager")

    if group_id is None:
        group_id = conf.ALL_GROUP_ID

    max_retry = self.get_peer_count(None)
    try_count = 0
    next_leader_peer = self.__get_next_peer(self.get_leader_peer(group_id), group_id)

    while try_count < max_retry:
        stub_manager = StubManager(next_leader_peer.target,
                                   loopchain_pb2_grpc.PeerServiceStub,
                                   conf.GRPC_SSL_TYPE)
        try:
            try_count += 1
            # A successful GetStatus call means this candidate leader is reachable.
            response = stub_manager.call(
                "GetStatus", loopchain_pb2.CommonRequest(request=""))
            logging.debug("Peer Status: " + str(response))
            return next_leader_peer, stub_manager
        except Exception as e:
            # Candidate unreachable; fall through and advance to the next peer.
            logging.debug("try another stub..." + str(e))
        next_leader_peer = self.__get_next_peer(next_leader_peer, group_id)

    logging.warning("fail found next leader stub")
    return None, None
async def block_height_sync_channel(self):
    """Sync block height to locate the leader of the latest block.

    If this peer did not start as leader but its own info is registered as
    the leader peer, run block height sync to discover the real leader.
    """
    peer_manager = self.peer_manager
    peer_leader = peer_manager.get_leader_peer()
    self_peer_object = peer_manager.get_peer(ChannelProperty().peer_id)
    is_delay_announce_new_leader = False
    peer_old_leader = None

    if peer_leader:
        block_sync_target = peer_leader.target
        block_sync_target_stub = StubManager.get_stub_manager_to_server(
            block_sync_target,
            loopchain_pb2_grpc.PeerServiceStub,
            time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
            ssl_auth_type=conf.GRPC_SSL_TYPE)
    else:
        # No leader known yet: fall back to syncing from the radio station.
        block_sync_target = ChannelProperty().radio_station_target
        block_sync_target_stub = self.__radio_station_stub

    if block_sync_target != ChannelProperty().peer_target:
        if block_sync_target_stub is None:
            logging.warning(
                "You maybe Older from this network... or No leader in this network!"
            )
            # Leader is unreachable: complain to the RS to elect a new leader,
            # and delay announcing it until after the sync completes.
            is_delay_announce_new_leader = True
            peer_old_leader = peer_leader
            peer_leader = self.peer_manager.leader_complain_to_rs(
                conf.ALL_GROUP_ID, is_announce_new_peer=False)

            if peer_leader is not None and ChannelProperty(
            ).node_type == conf.NodeType.CommunityNode:
                block_sync_target_stub = StubManager.get_stub_manager_to_server(
                    peer_leader.target,
                    loopchain_pb2_grpc.PeerServiceStub,
                    time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
                    ssl_auth_type=conf.GRPC_SSL_TYPE)

        if self.is_support_node_function(conf.NodeFunction.Vote) and \
                (not peer_leader or peer_leader.peer_id == ChannelProperty().peer_id):
            # This peer itself is (or becomes) the leader: act as block generator.
            peer_leader = self_peer_object
            self.block_manager.set_peer_type(loopchain_pb2.BLOCK_GENERATOR)
        else:
            _, future = self.block_manager.block_height_sync(
                block_sync_target_stub)
            await future
            self.show_peers()

        if is_delay_announce_new_leader and ChannelProperty(
        ).node_type == conf.NodeType.CommunityNode:
            self.peer_manager.announce_new_leader(
                peer_old_leader.peer_id,
                peer_leader.peer_id,
                self_peer_id=ChannelProperty().peer_id)
def __load_score(self, score_package_name: str, score_container_stub: StubManager, peer_target: str):
    """Load a score package into the score container and connect it to this peer.

    :param score_package_name: score package name
    :param score_container_stub: stub toward the score container service
    :param peer_target: this peer's target address, passed to score_connect
    :return: parsed score_info dict, or None when the load/connect RPC gets no response
    """
    util.logger.spam(f"peer_service:__load_score --start--")
    logging.info("LOAD SCORE AND CONNECT TO SCORE SERVICE!")
    params = dict()
    params[message_code.MetaParams.ScoreLoad.
           repository_path] = conf.DEFAULT_SCORE_REPOSITORY_PATH
    params[message_code.MetaParams.ScoreLoad.
           score_package] = score_package_name
    params[
        message_code.MetaParams.ScoreLoad.base] = conf.DEFAULT_SCORE_BASE
    params[message_code.MetaParams.ScoreLoad.peer_id] = \
        None if ObjectManager().peer_service is None else ObjectManager().peer_service.peer_id
    meta = json.dumps(params)
    logging.debug(f"load score params : {meta}")

    if score_container_stub is None:
        util.exit_and_msg(f"there is no __stub_to_scoreservice!")

    util.logger.spam(f"peer_service:__load_score --1--")

    # Score Load is so slow ( load time out is more than GRPC_CONNECTION_TIMEOUT)
    response = score_container_stub.call(
        "Request",
        loopchain_pb2.Message(code=message_code.Request.score_load,
                              meta=meta), conf.SCORE_LOAD_TIMEOUT)
    logging.debug("try score load on score service: " + str(response))
    if response is None:
        return None

    util.logger.spam(f"peer_service:__load_score --2--")

    response_connect = score_container_stub.call(
        "Request",
        loopchain_pb2.Message(code=message_code.Request.score_connect,
                              message=peer_target),
        conf.GRPC_CONNECTION_TIMEOUT)
    logging.debug("try connect to score service: " + str(response_connect))
    if response_connect is None:
        return None

    if response.code == message_code.Response.success:
        logging.debug("Get Score from Score Server...")
        score_info = json.loads(response.meta)
    else:
        # NOTE(review): exit_and_msg presumably terminates the process — otherwise
        # score_info would be unbound below; confirm its behavior.
        util.exit_and_msg("Fail Get Score from Score Server...")

    logging.info("LOAD SCORE DONE!")
    util.logger.spam(f"peer_service:__load_score --end--")
    return score_info
def __handler_connect_to_leader(connect_to_leader_param):
    """Connect the tx process to the leader peer and resume tx forwarding.

    On failure, the self peer is notified through its inner stub so the error
    is not lost across the process boundary.
    """
    # logging.debug("(tx process) try... connect to leader: " + str(connect_to_leader_param))
    stub_to_leader = StubManager.get_stub_manager_to_server(
        connect_to_leader_param,
        loopchain_pb2_grpc.PeerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
        is_allow_null_stub=True)

    __process_variables[
        self.PROCESS_VARIABLE_STUB_TO_LEADER] = stub_to_leader

    stub_to_self_peer = __process_variables[
        self.PROCESS_VARIABLE_STUB_TO_SELF_PEER]

    __process_variables[
        self.PROCESS_VARIABLE_PEER_STATUS] = PeerProcessStatus.normal

    # TODO: verify the failure condition for connecting to the block generator.
    if stub_to_leader is None:
        stub_to_self_peer.call(
            "NotifyProcessError",
            loopchain_pb2.CommonRequest(
                request="Connect to leader Fail!"))
    else:
        try:
            create_tx_continue(stub_to_leader)
        except Exception as e:
            logging.warning(
                "in peer_process::connect_to_blockgenerator Exception: " +
                str(e))
            # Mark the leader as broken so a leader complaint can be raised.
            __process_variables[
                self.
                PROCESS_VARIABLE_PEER_STATUS] = PeerProcessStatus.leader_complained
            stub_to_self_peer.call(
                "NotifyLeaderBroken",
                loopchain_pb2.CommonRequest(
                    request="Fail Add Tx to Leader"))
def run_radio_station_as_process_and_stub_manager(port):
    """Start a radio-station process and return it with a ready stub manager."""
    rs_process = run_radio_station_as_process(port)
    rs_stub_manager = StubManager.get_stub_manager_to_server(
        f"localhost:{port}", loopchain_pb2_grpc.RadioStationStub)
    # Block until the radio station answers GetStatus, proving it is up.
    util.request_server_in_time(rs_stub_manager.stub.GetStatus,
                                loopchain_pb2.StatusRequest(request=""))
    return rs_process, rs_stub_manager
def __add_audience(self, audience_target):
    """Register a stub manager for *audience_target* unless one already exists."""
    util.logger.debug(f"audience_target({audience_target})")
    if audience_target in self.__audience:
        return
    self.__audience[audience_target] = StubManager(
        audience_target,
        loopchain_pb2_grpc.PeerServiceStub,
        ssl_auth_type=conf.GRPC_SSL_TYPE)
def __handler_connect_to_leader(self, request, context):
    """gRPC handler: connect the tx service to the leader named in *request*.

    :return: Message carrying success, fail_connect_to_leader, or
             fail_add_tx_to_leader
    """
    logging.debug(
        f"TxService handler connect to leader({request.message})")
    leader_target = request.message
    self.__stub_to_leader = StubManager.get_stub_manager_to_server(
        leader_target,
        loopchain_pb2_grpc.PeerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
        is_allow_null_stub=True)

    self.__peer_status = PeerProcessStatus.normal

    # TODO: verify the failure condition for connecting to the block generator.
    if self.__stub_to_leader is None:
        return loopchain_pb2.Message(
            code=message_code.Response.fail_connect_to_leader)
    else:
        try:
            self.__create_tx_continue()
        except Exception as e:
            logging.warning(
                "in tx service create tx continue() Exception: " + str(e))
            # Mark the leader as broken so a leader complaint can be raised.
            self.__peer_status = PeerProcessStatus.leader_complained
            return loopchain_pb2.Message(
                code=message_code.Response.fail_add_tx_to_leader)

    return loopchain_pb2.Message(code=message_code.Response.success)
def run_radio_station_as_process_and_stub_manager(port, timeout=None):
    """Launch a radio-station process and build a stub manager toward it.

    :param timeout: accepted for interface compatibility; not used here.
    """
    rs_process = run_radio_station_as_process(port)
    rs_stub = StubManager(f"localhost:{port}",
                          loopchain_pb2_grpc.RadioStationStub,
                          conf.GRPC_SSL_TYPE)
    # Wait until the radio station answers GetStatus before returning.
    util.request_server_in_time(rs_stub.stub.GetStatus,
                                loopchain_pb2.StatusRequest(request=""))
    return rs_process, rs_stub
def stub_to_radiostation(self) -> StubManager:
    """Return the radio-station stub manager, creating it on first use."""
    stub = self.__stub_to_radio_station
    if stub is None:
        stub = StubManager.get_stub_manager_to_server(
            self.__radio_station_target,
            loopchain_pb2_grpc.RadioStationStub,
            conf.CONNECTION_RETRY_TIMEOUT_TO_RS)
        self.__stub_to_radio_station = stub
    return stub
def __create_live_data(self):
    """Create the runtime stub manager (live data that cannot be serialized)."""
    try:
        manager = StubManager(self.__peer_info.target,
                              loopchain_pb2_grpc.PeerServiceStub,
                              conf.GRPC_SSL_TYPE)
        self.__stub_manager = manager
    except Exception as err:
        # Log and continue; callers see the previous (or missing) stub manager.
        logging.exception(f"Create Peer create stub_manager fail target : {self.__peer_info.target} \n"
                          f"exception : {err}")
def __handler_subscribe(subscribe_peer_target):
    """Add *subscribe_peer_target* to the audience map if not yet tracked."""
    # logging.debug("BroadcastProcess received subscribe command peer_target: " + str(subscribe_peer_target))
    if subscribe_peer_target in __audience:
        return
    __audience[subscribe_peer_target] = StubManager.get_stub_manager_to_server(
        subscribe_peer_target,
        loopchain_pb2_grpc.PeerServiceStub,
        is_allow_null_stub=True)
def __init_radio_station_stub(self):
    """Build the radio-station stub: gRPC for voting nodes, REST otherwise."""
    if self.is_support_node_function(conf.NodeFunction.Vote):
        rs_stub = StubManager.get_stub_manager_to_server(
            ChannelProperty().radio_station_target,
            loopchain_pb2_grpc.RadioStationStub,
            conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
            ssl_auth_type=conf.GRPC_SSL_TYPE)
    else:
        rs_stub = RestStubManager(ChannelProperty().radio_station_target,
                                  ChannelProperty().name)
    self.__radio_station_stub = rs_stub
def __handler_subscribe(subscribe_peer_target):
    """Add *subscribe_peer_target* to the audience map if not yet tracked."""
    # logging.debug("BroadcastProcess received subscribe command peer_target: " + str(subscribe_peer_target))
    if subscribe_peer_target in __audience:
        return
    __audience[subscribe_peer_target] = StubManager.get_stub_manager_to_server(
        subscribe_peer_target,
        loopchain_pb2_grpc.PeerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
        is_allow_null_stub=True)
def __handler_subscribe(self, audience_target):
    """Register *audience_target* as a broadcast audience member."""
    logging.debug("BroadcastThread received subscribe command peer_target: " + str(audience_target))
    if audience_target in self.__audience:
        return
    self.__audience[audience_target] = StubManager.get_stub_manager_to_server(
        audience_target,
        loopchain_pb2_grpc.PeerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
        is_allow_null_stub=True,
        ssl_auth_type=conf.GRPC_SSL_TYPE)
def run_peer_server_as_process_and_stub_manager(
        port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None):
    """Start a peer-server process and return it together with its stub manager."""
    peer_process = run_peer_server_as_process(port, radiostation_port, group_id, score)
    peer_stub_manager = StubManager.get_stub_manager_to_server(
        f"localhost:{port}", loopchain_pb2_grpc.PeerServiceStub)
    return peer_process, peer_stub_manager
def run_peer_server_as_process_and_stub_manager(
        port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None, timeout=None):
    """Start a peer-server process and return it together with its stub manager.

    :param timeout: accepted for interface compatibility; not used here.
    """
    peer_process = run_peer_server_as_process(port, radiostation_port, group_id, score)
    peer_stub = StubManager(f"localhost:{port}",
                            loopchain_pb2_grpc.PeerServiceStub,
                            ssl_auth_type=conf.GRPC_SSL_TYPE)
    return peer_process, peer_stub
def stub_manager(self):
    """Return this peer's stub manager, creating it lazily; None on failure."""
    if self.__stub_manager:
        return self.__stub_manager
    try:
        self.__stub_manager = StubManager(self.target,
                                          loopchain_pb2_grpc.PeerServiceStub,
                                          conf.GRPC_SSL_TYPE)
    except Exception as err:
        # Creation failed: log and fall through, returning the unset value.
        logging.exception(
            f"Create Peer create stub_manager fail target : {self.target} \n"
            f"exception : {err}")
    return self.__stub_manager
def __handler_connect_to_self_peer(connect_param):
    """Create a gRPC stub back to the parent peer that spawned this process.

    Returning through the pipe can fail when send/receive pairs get out of
    sync, so the parent process is also reached over a gRPC stub for safety.
    """
    logging.debug("try connect to self peer: " + str(connect_param))
    self_peer_stub = StubManager.get_stub_manager_to_server(
        connect_param,
        loopchain_pb2_grpc.InnerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
        is_allow_null_stub=True)
    __process_variables[self.SELF_PEER_TARGET_KEY] = connect_param
    __process_variables[self.PROCESS_VARIABLE_STUB_TO_SELF_PEER] = self_peer_stub
def get_peer_stub_manager(self, peer, group_id=None):
    """Return a cached stub manager for *peer*, creating one on cache miss.

    :param peer: peer info object (peer_id, group_id and target are used)
    :param group_id: cache group key; defaults to conf.ALL_GROUP_ID
    :return: StubManager, or None when stub creation fails
    """
    if group_id is None:
        group_id = conf.ALL_GROUP_ID
    try:
        return self.peer_stub_managers[group_id][peer.peer_id]
    except KeyError:
        try:
            # NOTE(review): the group is initialized with peer.group_id while the
            # stub is cached under group_id — confirm these are meant to differ.
            self.__init_peer_group(peer.group_id)
            stub_manager = StubManager(peer.target, loopchain_pb2_grpc.PeerServiceStub)
            self.peer_stub_managers[group_id][peer.peer_id] = stub_manager
            return stub_manager
        except Exception as e:
            logging.debug("try get peer stub except: " + str(e))
            logging.warning("fail make peer stub: " + peer.target)
            return None
def __create_live_data(self): """create live data that can't serialized""" # TODO live data 생성 실패 때 정책 설정 필요 try: self.__stub_manager = StubManager( self.__peer_info.target, loopchain_pb2_grpc.PeerServiceStub) except Exception as e: logging.exception( f"Create Peer create stub_manager fail target : {self.__peer_info.target} \n" f"exception : {e}") try: self.__cert_verifier = PublicVerifier(self.peer_info.cert) except Exception as e: logging.exception( f"create cert verifier error : {self.__peer_info.cert} \n" f"exception {e}")
def stub_to_radiostation(self):
    """Return (building lazily) the stub toward the radio station.

    Voting nodes talk gRPC to the RS when ENABLE_REP_RADIO_STATION is set
    (otherwise no stub); non-voting nodes use the REST stub.
    """
    if self.__radio_station_stub is None:
        if not self.is_support_node_function(conf.NodeFunction.Vote):
            self.__radio_station_stub = RestStubManager(
                self.__radio_station_target)
        elif conf.ENABLE_REP_RADIO_STATION:
            self.__radio_station_stub = StubManager.get_stub_manager_to_server(
                self.__radio_station_target,
                loopchain_pb2_grpc.RadioStationStub,
                conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
                ssl_auth_type=conf.GRPC_SSL_TYPE)
        else:
            self.__radio_station_stub = None
    return self.__radio_station_stub
def __handler_connect_to_inner_peer(self, request, context):
    """gRPC handler: build a stub toward the parent (inner) peer in *request*.

    :return: Message with success code
    """
    logging.debug(
        f"TxService handler connect to inner peer({request.message})")
    inner_peer_target = request.message
    # Create a stub to reach the parent peer that spawned this process.
    # Returning through the pipe can fail when send/receive pairs get out of
    # sync, so the parent process is also reached over a gRPC stub for safety.
    self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server(
        inner_peer_target,
        loopchain_pb2_grpc.InnerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
        is_allow_null_stub=True)
    logging.debug("try connect to inner peer: " + str(inner_peer_target))

    return loopchain_pb2.Message(code=message_code.Response.success)
def stub_to_radiostation(self) -> StubManager:
    """Return (building lazily) the radio-station stub.

    Voting nodes connect over gRPC; non-voting nodes get a REST stub.
    """
    if self.is_support_node_function(conf.NodeFunction.Vote):
        stub_type = loopchain_pb2_grpc.RadioStationStub
    else:
        stub_type = loopchain_pb2_grpc.PeerServiceStub

    if self.__radio_station_stub is None:
        if self.is_support_node_function(conf.NodeFunction.Vote):
            self.__radio_station_stub = StubManager.get_stub_manager_to_server(
                self.__radio_station_target,
                stub_type,
                conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
                ssl_auth_type=conf.GRPC_SSL_TYPE)
        else:
            self.__radio_station_stub = RestStubManager(
                self.__radio_station_target)
    return self.__radio_station_stub
def load_score_container_each(self, channel_name: str, score_package: str,
                              container_port: int, peer_target: str):
    """create score container and save score_info and score_stub

    :param channel_name: channel name
    :param score_package: load score package name
    :param container_port: score container port
    :param peer_target: this peer's target address, forwarded to score connect
    :return: True when the score was loaded, False after exhausting retries
    """
    score_info = None
    retry_times = 1

    while score_info is None:
        # Only spawn a new container when the port is still free;
        # otherwise reuse whatever is already listening there.
        if util.check_port_using(conf.IP_PEER, container_port) is False:
            util.logger.spam(
                f"channel_manager:load_score_container_each init ScoreService port({container_port})"
            )
            self.__score_containers[channel_name] = ScoreService(
                container_port)
            self.__score_stubs[
                channel_name] = StubManager.get_stub_manager_to_server(
                    conf.IP_PEER + ':' + str(container_port),
                    loopchain_pb2_grpc.ContainerStub,
                    is_allow_null_stub=True)
        score_info = self.__load_score(
            score_package, self.get_score_container_stub(channel_name),
            peer_target)

        if score_info is not None or retry_times >= conf.SCORE_LOAD_RETRY_TIMES:
            break
        else:
            # Load failed: wait and try again up to SCORE_LOAD_RETRY_TIMES.
            util.logger.spam(
                f"channel_manager:load_score_container_each score_info load fail retry({retry_times})"
            )
            retry_times += 1
            time.sleep(conf.SCORE_LOAD_RETRY_INTERVAL)

    if score_info is None:
        return False

    self.__score_infos[channel_name] = score_info
    return True
def __handler_connect_to_self_peer(connect_param):
    """Create a gRPC stub back to the parent peer that spawned this process."""
    # Returning through the pipe can fail when send/receive pairs get out of
    # sync, so the parent process is also reached over a gRPC stub for safety.
    logging.debug("try connect to self peer: " + str(connect_param))

    stub_to_self_peer = StubManager.get_stub_manager_to_server(
        connect_param,
        loopchain_pb2_grpc.InnerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
        is_allow_null_stub=True)
    __process_variables[
        self.PROCESS_VARIABLE_STUB_TO_SELF_PEER] = stub_to_self_peer

    # Confirm the inner channel actually answers before proceeding.
    response = util.request_server_wait_response(
        stub_to_self_peer.stub.GetStatus,
        loopchain_pb2.StatusRequest(
            request="(tx process) connect to self peer"))
    logging.debug("connect to inner channel: " + str(response))
def __run_inner_services(self, port):
    """Start the sibling container services (REST, score, tx) for this peer."""
    if conf.ENABLE_REST_SERVICE:
        self.__rest_service = RestService(int(port))

    self.__score_service = ScoreService(
        int(port) + conf.PORT_DIFF_SCORE_CONTAINER)

    # TODO: the tx service is no longer used, but removing this logic breaks
    # blockchain network formation; the root cause still needs investigation.
    self.__tx_service = TxService(int(port) + conf.PORT_DIFF_TX_CONTAINER)

    # TODO: make the connect-check for the score service stub available via util.
    # self.__stub_to_score_service = util.get_stub_to_server('localhost:' +
    #                                                        str(int(port) + conf.PORT_DIFF_SCORE_CONTAINER),
    #                                                        loopchain_pb2_grpc.ContainerStub)
    self.__stub_to_score_service = StubManager.get_stub_manager_to_server(
        conf.IP_PEER + ':' +
        str(int(port) + conf.PORT_DIFF_SCORE_CONTAINER),
        loopchain_pb2_grpc.ContainerStub,
        is_allow_null_stub=True)
def __create_live_data(self):
    """Create live data (stub manager and cert verifier) that can't be serialized."""
    try:
        self.__stub_manager = StubManager(
            self.__peer_info.target, loopchain_pb2_grpc.PeerServiceStub,
            conf.GRPC_SSL_TYPE)
    except Exception as e:
        logging.exception(
            f"Create Peer create stub_manager fail target : {self.__peer_info.target} \n"
            f"exception : {e}")
    try:
        # Verify the peer's address against its certificate/public key.
        self.__cert_verifier = IcxVerifier()
        self.__cert_verifier.init_and_verify_address(
            pubkey=self.peer_info.cert, address=self.peer_info.peer_id)
    except Exception as e:
        logging.exception(
            f"create cert verifier error : {self.__channel} {self.__peer_info.cert} \n"
            f"exception {e}")
def __create_live_data(self):
    """Create live data (stub manager and cert verifier) that can't be serialized."""
    try:
        self.__stub_manager = StubManager(self.__peer_info.target,
                                          loopchain_pb2_grpc.PeerServiceStub,
                                          conf.GRPC_SSL_TYPE)
    except Exception as e:
        logging.exception(f"Create Peer create stub_manager fail target : {self.__peer_info.target} \n"
                          f"exception : {e}")
    try:
        # Pick the verifier matching the channel's configured tx type.
        if conf.CHANNEL_OPTION[self.__channel]["send_tx_type"] == conf.SendTxType.icx:
            self.__cert_verifier = IcxVerifier()
            self.__cert_verifier.init_and_verify_address(pubkey=self.peer_info.cert,
                                                         address=self.peer_info.peer_id)
        else:
            self.__cert_verifier = PublicVerifier(self.__channel)
            self.__cert_verifier.load_public_for_tx_verify(self.peer_info.cert)
    except Exception as e:
        logging.exception(f"create cert verifier error : {self.__channel} {self.__peer_info.cert} \n"
                          f"exception {e}")
def set_stub_port(self, port):
    """Point the radio-station service stub at the given local port."""
    rs_target = conf.IP_LOCAL + ':' + str(port)
    self.__stub_to_rs_service = StubManager(
        rs_target,
        loopchain_pb2_grpc.RadioStationStub,
        ssl_auth_type=conf.GRPC_SSL_TYPE)