async def block_height_sync_channel(self):
    """Find the leader of the latest block via block height sync.

    If this peer did not start as leader but its own info matches the
    leader-peer info, run a block height sync to discover the leader of
    the most recent block.
    """
    peer_manager = self.peer_manager
    peer_leader = peer_manager.get_leader_peer()
    self_peer_object = peer_manager.get_peer(ChannelProperty().peer_id)
    is_delay_announce_new_leader = False
    peer_old_leader = None

    if peer_leader:
        # A known leader: sync directly from the leader's target.
        block_sync_target = peer_leader.target
        block_sync_target_stub = StubManager.get_stub_manager_to_server(
            block_sync_target,
            loopchain_pb2_grpc.PeerServiceStub,
            time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
            ssl_auth_type=conf.GRPC_SSL_TYPE)
    else:
        # No leader known: fall back to syncing from the radio station.
        block_sync_target = ChannelProperty().radio_station_target
        block_sync_target_stub = self.__radio_station_stub

    if block_sync_target != ChannelProperty().peer_target:
        if block_sync_target_stub is None:
            logging.warning(
                "You maybe Older from this network... or No leader in this network!"
            )
            # Could not reach the presumed leader: complain to the RS and
            # defer the new-leader announcement until after the sync.
            is_delay_announce_new_leader = True
            peer_old_leader = peer_leader
            peer_leader = self.peer_manager.leader_complain_to_rs(
                conf.ALL_GROUP_ID, is_announce_new_peer=False)

            if peer_leader is not None and ChannelProperty(
            ).node_type == conf.NodeType.CommunityNode:
                block_sync_target_stub = StubManager.get_stub_manager_to_server(
                    peer_leader.target,
                    loopchain_pb2_grpc.PeerServiceStub,
                    time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
                    ssl_auth_type=conf.GRPC_SSL_TYPE)

        if self.is_support_node_function(conf.NodeFunction.Vote) and \
                (not peer_leader or peer_leader.peer_id == ChannelProperty().peer_id):
            # This voting peer is (or becomes) the leader itself.
            peer_leader = self_peer_object
            self.block_manager.set_peer_type(loopchain_pb2.BLOCK_GENERATOR)
        else:
            # Otherwise pull blocks from the sync target and wait for completion.
            _, future = self.block_manager.block_height_sync(
                block_sync_target_stub)
            await future

        self.show_peers()

    if is_delay_announce_new_leader and ChannelProperty(
    ).node_type == conf.NodeType.CommunityNode:
        # Announce the leader change that was deferred above.
        self.peer_manager.announce_new_leader(
            peer_old_leader.peer_id,
            peer_leader.peer_id,
            self_peer_id=ChannelProperty().peer_id)
def run_radio_station_as_process_and_stub_manager(port):
    """Launch a radio-station process and return it with a ready stub manager."""
    rs_process = run_radio_station_as_process(port)
    rs_stub_manager = StubManager.get_stub_manager_to_server(
        f"localhost:{port}", loopchain_pb2_grpc.RadioStationStub)
    # Block until the radio station answers a status request.
    util.request_server_in_time(
        rs_stub_manager.stub.GetStatus,
        loopchain_pb2.StatusRequest(request=""))
    return rs_process, rs_stub_manager
def __handler_connect_to_leader(self, request, context):
    """Create a stub toward the leader named in *request* and resume tx flow."""
    logging.debug(f"TxService handler connect to leader({request.message})")
    leader_target = request.message
    self.__stub_to_leader = StubManager.get_stub_manager_to_server(
        leader_target,
        loopchain_pb2_grpc.PeerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
        is_allow_null_stub=True)
    self.__peer_status = PeerProcessStatus.normal

    # TODO: confirm the failure condition for connecting to the block generator.
    if self.__stub_to_leader is None:
        return loopchain_pb2.Message(
            code=message_code.Response.fail_connect_to_leader)

    try:
        self.__create_tx_continue()
    except Exception as e:
        logging.warning(
            "in tx service create tx continue() Exception: " + str(e))
        self.__peer_status = PeerProcessStatus.leader_complained
        return loopchain_pb2.Message(
            code=message_code.Response.fail_add_tx_to_leader)

    return loopchain_pb2.Message(code=message_code.Response.success)
def __handler_connect_to_leader(connect_to_leader_param):
    """Build a stub to the leader peer and restart continuous tx creation."""
    # logging.debug("(tx process) try... connect to leader: " + str(connect_to_leader_param))
    leader_stub = StubManager.get_stub_manager_to_server(
        connect_to_leader_param,
        loopchain_pb2_grpc.PeerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
        is_allow_null_stub=True)
    __process_variables[self.PROCESS_VARIABLE_STUB_TO_LEADER] = leader_stub
    self_peer_stub = __process_variables[
        self.PROCESS_VARIABLE_STUB_TO_SELF_PEER]
    __process_variables[
        self.PROCESS_VARIABLE_PEER_STATUS] = PeerProcessStatus.normal

    # TODO: confirm the failure condition for connecting to the block generator.
    if leader_stub is None:
        self_peer_stub.call(
            "NotifyProcessError",
            loopchain_pb2.CommonRequest(request="Connect to leader Fail!"))
        return

    try:
        create_tx_continue(leader_stub)
    except Exception as e:
        logging.warning(
            "in peer_process::connect_to_blockgenerator Exception: " + str(e))
        __process_variables[
            self.PROCESS_VARIABLE_PEER_STATUS] = PeerProcessStatus.leader_complained
        self_peer_stub.call(
            "NotifyLeaderBroken",
            loopchain_pb2.CommonRequest(request="Fail Add Tx to Leader"))
def __handler_subscribe(subscribe_peer_target):
    """Register *subscribe_peer_target* in the audience map with a fresh stub."""
    # logging.debug("BroadcastProcess received subscribe command peer_target: " + str(subscribe_peer_target))
    if subscribe_peer_target in __audience:
        return
    __audience[subscribe_peer_target] = StubManager.get_stub_manager_to_server(
        subscribe_peer_target,
        loopchain_pb2_grpc.PeerServiceStub,
        is_allow_null_stub=True)
def stub_to_radiostation(self) -> StubManager:
    """Lazily create and cache the gRPC stub toward the radio station."""
    if self.__stub_to_radio_station is not None:
        return self.__stub_to_radio_station
    self.__stub_to_radio_station = StubManager.get_stub_manager_to_server(
        self.__radio_station_target,
        loopchain_pb2_grpc.RadioStationStub,
        conf.CONNECTION_RETRY_TIMEOUT_TO_RS)
    return self.__stub_to_radio_station
def __init_radio_station_stub(self):
    """Pick the RS stub by node function: gRPC for voting nodes, REST otherwise."""
    if not self.is_support_node_function(conf.NodeFunction.Vote):
        self.__radio_station_stub = RestStubManager(
            ChannelProperty().radio_station_target, ChannelProperty().name)
        return
    self.__radio_station_stub = StubManager.get_stub_manager_to_server(
        ChannelProperty().radio_station_target,
        loopchain_pb2_grpc.RadioStationStub,
        conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
        ssl_auth_type=conf.GRPC_SSL_TYPE)
def __handler_subscribe(subscribe_peer_target):
    """Add *subscribe_peer_target* to the audience map unless already present."""
    # logging.debug("BroadcastProcess received subscribe command peer_target: " + str(subscribe_peer_target))
    if subscribe_peer_target in __audience:
        return
    __audience[subscribe_peer_target] = StubManager.get_stub_manager_to_server(
        subscribe_peer_target,
        loopchain_pb2_grpc.PeerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
        is_allow_null_stub=True)
def run_peer_server_as_process_and_stub_manager(
        port, radiostation_port=conf.PORT_RADIOSTATION, group_id=None, score=None):
    """Start a peer server process and return it together with a stub manager."""
    peer_process = run_peer_server_as_process(
        port, radiostation_port, group_id, score)
    peer_stub_manager = StubManager.get_stub_manager_to_server(
        f"localhost:{port}", loopchain_pb2_grpc.PeerServiceStub)
    return peer_process, peer_stub_manager
def __handler_subscribe(self, audience_target):
    """Subscribe *audience_target* for broadcasts, creating its stub once."""
    logging.debug("BroadcastThread received subscribe command peer_target: " + str(audience_target))
    if audience_target in self.__audience:
        return
    self.__audience[audience_target] = StubManager.get_stub_manager_to_server(
        audience_target,
        loopchain_pb2_grpc.PeerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
        is_allow_null_stub=True,
        ssl_auth_type=conf.GRPC_SSL_TYPE)
def __handler_connect_to_self_peer(connect_param):
    """Create a stub back to the parent peer that spawned this process.

    Returning results through the pipe can fail when send/receive pairs get
    out of sync, so a gRPC stub is used for safe communication with the
    parent process as well.
    """
    logging.debug("try connect to self peer: " + str(connect_param))
    self_peer_stub = StubManager.get_stub_manager_to_server(
        connect_param,
        loopchain_pb2_grpc.InnerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
        is_allow_null_stub=True)
    __process_variables[self.SELF_PEER_TARGET_KEY] = connect_param
    __process_variables[self.PROCESS_VARIABLE_STUB_TO_SELF_PEER] = self_peer_stub
def __handler_connect_to_inner_peer(self, request, context):
    """Connect back to the parent peer named in *request* via an inner stub.

    Returning results through the pipe can fail when send/receive pairs get
    out of sync, so a gRPC stub is used for safe communication with the
    parent process as well.
    """
    logging.debug(f"TxService handler connect to inner peer({request.message})")
    inner_peer_target = request.message
    self.__stub_to_inner_peer = StubManager.get_stub_manager_to_server(
        inner_peer_target,
        loopchain_pb2_grpc.InnerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
        is_allow_null_stub=True)
    logging.debug("try connect to inner peer: " + str(inner_peer_target))
    return loopchain_pb2.Message(code=message_code.Response.success)
def stub_to_radiostation(self):
    """Lazily build the radio-station stub appropriate for this node type."""
    if self.__radio_station_stub is not None:
        return self.__radio_station_stub

    if not self.is_support_node_function(conf.NodeFunction.Vote):
        # Non-voting nodes talk to the RS over REST.
        self.__radio_station_stub = RestStubManager(
            self.__radio_station_target)
    elif conf.ENABLE_REP_RADIO_STATION:
        self.__radio_station_stub = StubManager.get_stub_manager_to_server(
            self.__radio_station_target,
            loopchain_pb2_grpc.RadioStationStub,
            conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
            ssl_auth_type=conf.GRPC_SSL_TYPE)
    else:
        # Rep radio station disabled: deliberately no stub.
        self.__radio_station_stub = None

    return self.__radio_station_stub
def stub_to_radiostation(self) -> StubManager:
    """Lazily create and cache the stub toward the radio station.

    Voting nodes get a gRPC RadioStationStub; all other nodes use a REST
    stub manager instead.

    :return: the cached stub manager (gRPC or REST).
    """
    # Original code pre-computed a `stub_type` (defaulting to PeerServiceStub)
    # and tested is_support_node_function twice; the default was dead because
    # the non-vote branch never uses a gRPC stub. Folded into one branch.
    if self.__radio_station_stub is None:
        if self.is_support_node_function(conf.NodeFunction.Vote):
            self.__radio_station_stub = StubManager.get_stub_manager_to_server(
                self.__radio_station_target,
                loopchain_pb2_grpc.RadioStationStub,
                conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
                ssl_auth_type=conf.GRPC_SSL_TYPE)
        else:
            self.__radio_station_stub = RestStubManager(
                self.__radio_station_target)
    return self.__radio_station_stub
def load_score_container_each(self, channel_name: str, score_package: str,
                              container_port: int, peer_target: str) -> bool:
    """Create a score container for a channel and cache its info and stub.

    Retries the score load up to conf.SCORE_LOAD_RETRY_TIMES, sleeping
    conf.SCORE_LOAD_RETRY_INTERVAL between attempts.

    :param channel_name: channel name
    :param score_package: score package name to load
    :param container_port: port the score container listens on
    :param peer_target: target address of this peer, forwarded to the score load
    :return: True when the score info was loaded, False after all retries fail
    """
    score_info = None
    retry_times = 1
    while score_info is None:
        # Spawn the container only while the port is still free; on retries
        # the container started by the previous attempt is reused.
        if util.check_port_using(conf.IP_PEER, container_port) is False:
            util.logger.spam(
                f"channel_manager:load_score_container_each init ScoreService port({container_port})"
            )
            self.__score_containers[channel_name] = ScoreService(
                container_port)
            self.__score_stubs[
                channel_name] = StubManager.get_stub_manager_to_server(
                    conf.IP_PEER + ':' + str(container_port),
                    loopchain_pb2_grpc.ContainerStub,
                    is_allow_null_stub=True)
        score_info = self.__load_score(
            score_package, self.get_score_container_stub(channel_name),
            peer_target)

        # Exit on success or when the retry budget is spent; otherwise
        # wait and try again.
        if score_info is not None or retry_times >= conf.SCORE_LOAD_RETRY_TIMES:
            break
        else:
            util.logger.spam(
                f"channel_manager:load_score_container_each score_info load fail retry({retry_times})"
            )
            retry_times += 1
            time.sleep(conf.SCORE_LOAD_RETRY_INTERVAL)

    if score_info is None:
        return False

    self.__score_infos[channel_name] = score_info
    return True
def __handler_connect_to_self_peer(connect_param):
    """Create a stub back to the parent peer and verify it with a status call.

    Returning results through the pipe can fail when send/receive pairs get
    out of sync, so a gRPC stub is used for safe communication with the
    parent process as well.
    """
    logging.debug("try connect to self peer: " + str(connect_param))
    self_peer_stub = StubManager.get_stub_manager_to_server(
        connect_param,
        loopchain_pb2_grpc.InnerServiceStub,
        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
        is_allow_null_stub=True)
    __process_variables[self.PROCESS_VARIABLE_STUB_TO_SELF_PEER] = self_peer_stub

    response = util.request_server_wait_response(
        self_peer_stub.stub.GetStatus,
        loopchain_pb2.StatusRequest(
            request="(tx process) connect to self peer"))
    logging.debug("connect to inner channel: " + str(response))
def __run_inner_services(self, port):
    """Start the companion services (REST, score, tx) for this peer."""
    base_port = int(port)
    if conf.ENABLE_REST_SERVICE:
        self.__rest_service = RestService(base_port)
    self.__score_service = ScoreService(
        base_port + conf.PORT_DIFF_SCORE_CONTAINER)
    # TODO: the tx service is no longer used, but removing it breaks
    # blockchain network formation for an unknown reason — investigate.
    self.__tx_service = TxService(base_port + conf.PORT_DIFF_TX_CONTAINER)
    # TODO: verify the score-service connection via a util helper instead.
    self.__stub_to_score_service = StubManager.get_stub_manager_to_server(
        conf.IP_PEER + ':' + str(base_port + conf.PORT_DIFF_SCORE_CONTAINER),
        loopchain_pb2_grpc.ContainerStub,
        is_allow_null_stub=True)
def set_stub_port(self, port):
    """(Re)create the stub toward the local radio-station service on *port*."""
    rs_target = conf.IP_LOCAL + ':' + str(port)
    self.__stub_to_rs_service = StubManager.get_stub_manager_to_server(
        rs_target, loopchain_pb2_grpc.RadioStationStub)
def __connect_to_radiostation(self):
    """Connect to the radio station and update the local peer list.

    When auth is secure, performs the RS token handshake first (request a
    random nonce, sign it, then connect with the signed token).

    :return: True when the peer list came from the RS, False when the local
        peer list is used; None or the failing RS response on handshake errors.
    """
    logging.debug("try to connect to radiostation")
    self.__stub_to_radio_station = StubManager.get_stub_manager_to_server(
        self.__radio_station_target, loopchain_pb2_grpc.RadioStationStub,
        conf.CONNECTION_RETRY_TIMEOUT_TO_RS)

    if self.__stub_to_radio_station is None:
        logging.warning("fail make stub to Radio Station!!")
        return None

    token = None
    if self.__auth.is_secure:
        self.__peer_object = self.__peer_manager.get_peer(self.peer_id)
        token = None
        if self.__peer_object is not None:
            token = self.__peer_object.token
        logging.debug("Self Peer Token : %s", token)
        # Request a new token when the old one has expired.
        if token is not None and self.__auth.get_token_time(token) is None:
            token = None

        self.__auth.set_peer_info(self.peer_id, self.__peer_target,
                                  self.group_id, self.__peer_type)
        cert_bytes = self.__auth.get_cert_bytes()
        if token is None:
            # Receive a random nonce from the server.
            response = self.__stub_to_radio_station.call(
                "ConnectPeer",
                loopchain_pb2.PeerRequest(
                    peer_object=b'',
                    peer_id=self.peer_id,
                    peer_target=self.__peer_target,
                    group_id=self.group_id,
                    peer_type=self.__peer_type,
                    token=conf.TOKEN_TYPE_CERT + cert_bytes.hex()),
                conf.GRPC_TIMEOUT)

            rand_key = None
            if response is not None and response.status == message_code.Response.success:
                logging.debug("Received Random : %s", response.more_info)
                # BUGFIX: was `is not 32` — identity comparison with an int
                # literal is implementation-dependent; use `!=`.
                # 32 hex chars == 16 bytes.
                if len(response.more_info) != 32:
                    logging.debug('서버로부터 수신한 토큰 길이는 16바이트가 되어야 합니다.')
                else:
                    rand_key = response.more_info
            else:
                return response

            # Sign the nonce together with the peer info.
            if rand_key is None:
                return None
            else:
                sign = self.__auth.generate_request_sign(rand_key=rand_key)
                token = conf.TOKEN_TYPE_SIGN + sign.hex()
        else:
            self.__auth.add_token(token)

    # Common part: the actual ConnectPeer call with the (possibly new) token.
    response = self.__stub_to_radio_station.call(
        "ConnectPeer",
        loopchain_pb2.PeerRequest(
            peer_object=b'',
            peer_id=self.peer_id,
            peer_target=self.__peer_target,
            group_id=self.group_id,
            peer_type=self.__peer_type,
            token=token),
        conf.GRPC_CONNECTION_TIMEOUT)

    if response is not None and response.status == message_code.Response.success:
        if self.__auth.is_secure:
            logging.debug("Received Token : %s", response.more_info)
            # Verify the token received from the radio station
            # (at least a 2-char tag plus payload: 9 chars minimum).
            if len(response.more_info) < 9:
                logging.debug('서버로부터 수신한 토큰 길이는 9바이트 이상이 되어야 합니다.')
                response.status = message_code.Response.fail_validate_params
                response.more_info = "Invalid Token Data"
            else:
                token = response.more_info
                tag = token[:2]
                if tag == conf.TOKEN_TYPE_TOKEN:
                    if self.__auth.verify_token(token):
                        logging.debug("토큰 검증에 성공하였습니다.")
                        self.__auth.add_token(token)
                    else:
                        logging.debug("토큰 검증에 실패하였습니다.")
                        response.status = message_code.Response.fail_validate_params
                        response.more_info = "Invalid Token Signature"

    logging.debug("Connect to radiostation: " + str(response))

    is_peer_list_from_rs = False
    if response is not None and response.status == message_code.Response.success:
        # When the RS responds, the peer list is replaced with the RS result;
        # otherwise the list previously read from local level DB is kept.
        # TODO: how do we trust the RS? The peer list it sends should be
        # protected more robustly (e.g. recorded in a block after approval).
        # SECURITY NOTE(review): pickle.loads on data received over the
        # network is unsafe against a malicious/compromised RS — consider a
        # safe serialization format.
        peer_list_data = pickle.loads(response.peer_list)
        self.__peer_manager.load(peer_list_data, False)
        self.__common_service.save_peer_list(self.__peer_manager)
        logging.debug("peer list update: " +
                      self.__peer_manager.get_peers_for_debug())
        is_peer_list_from_rs = True
    else:
        logging.debug("using local peer list: " +
                      self.__peer_manager.get_peers_for_debug())

    return is_peer_list_from_rs
def serve(self, port, score=conf.DEFAULT_SCORE_PACKAGE):
    """Run the peer service.

    Starts companion services, connects to the radio station, determines
    the peer type (block generator vs. ordinary peer), syncs block height
    if needed, then starts the gRPC servers and blocks until shutdown.

    :param port: port the peer listens on
    :param score: score (chaincode) package the peer runs
    """
    stopwatch_start = timeit.default_timer()

    is_all_service_safe_start = True
    is_delay_announce_new_leader = False

    self.__port_init(port)
    self.__run_inner_services(port)

    inner_service_port = conf.PORT_INNER_SERVICE or (
        int(port) + conf.PORT_DIFF_INNER_SERVICE)
    self.__common_service = CommonService(loopchain_pb2, self.__peer_target,
                                          inner_service_port)
    self.peer_id = str(self.__common_service.get_peer_id())
    self.__peer_manager = self.__common_service.load_peer_manager()
    self.__block_manager = self.__load_block_manager()

    response = self.__connect_to_radiostation()
    logging.debug("Connect to radiostation: " + str(response))

    is_peer_list_from_rs = False
    if response is not None and response.status == message_code.Response.success:
        # When the RS responds, the peer list is replaced with the RS result;
        # otherwise the values read from the local level DB are the default.
        # TODO: how do we trust the RS? Even if the RS approves a new peer,
        # shouldn't the joined-peer list be protected more robustly? If
        # someone kills the RS and impersonates it with a fake peer list,
        # couldn't they destroy the network? One idea: record peer joins in
        # a block after RS approval.
        # NOTE(review): pickle.loads on network data is unsafe — verify.
        peer_list_data = pickle.loads(response.peer_list)
        self.__peer_manager.load(peer_list_data, False)
        self.__common_service.save_peer_list(self.__peer_manager)
        logging.debug("peer list update: " +
                      self.__peer_manager.get_peers_for_debug())
        is_peer_list_from_rs = True
    else:
        logging.debug("using local peer list: " +
                      self.__peer_manager.get_peers_for_debug())

    logging.debug("peer_id: " + str(self.peer_id))
    if self.__peer_manager.get_peer_count() == 0:
        util.exit_and_msg(
            "There is no peer_list, initial network is not allowed without RS!"
        )
    peer_self = self.__peer_manager.get_peer(self.peer_id, self.group_id)
    logging.debug("peer_self: " + str(peer_self))
    peer_leader = self.__peer_manager.get_leader_peer(
        is_complain_to_rs=True)
    logging.debug("peer_leader: " + str(peer_leader))

    # TODO LOOPCHAIN-61 load certificate
    _cert = None
    # TODO LOOPCHAIN-61 load certificate key
    _private_key = None
    # TODO request auth info

    # TODO: consider making this peer leader via leader complain rather
    # than this condition check.
    if peer_self.peer_id == peer_leader.peer_id:
        # Become block generator when this peer is the only connected peer
        # in the list or matches the RS leader info.
        if is_peer_list_from_rs is True or self.__peer_manager.get_connected_peer_count(
                None) == 1:
            logging.debug("Set Peer Type Block Generator!")
            self.__peer_type = loopchain_pb2.BLOCK_GENERATOR

    # Score load must happen after the score service has started and
    # before block height sync begins.
    is_all_service_safe_start &= self.__load_score(score)

    if self.__peer_type == loopchain_pb2.PEER:
        # If not started as leader but own info matches the leader peer
        # info, block height sync to find the leader of the last block.
        if peer_leader.target != self.__peer_target:
            block_sync_target_stub = StubManager.get_stub_manager_to_server(
                peer_leader.target,
                loopchain_pb2_grpc.PeerServiceStub,
                time_out_seconds=conf.GRPC_TIMEOUT)
        else:
            block_sync_target_stub = None

        if block_sync_target_stub is None:
            logging.warning(
                "You maybe Older from this network... or No leader in this network!"
            )
            # TODO: run a leader complain to the RS in this situation.
            is_delay_announce_new_leader = True
            peer_old_leader = peer_leader
            peer_leader = self.__peer_manager.leader_complain_to_rs(
                conf.ALL_GROUP_ID, is_announce_new_peer=False)

            if peer_leader is not None:
                block_sync_target_stub = StubManager.get_stub_manager_to_server(
                    peer_leader.target,
                    loopchain_pb2_grpc.PeerServiceStub,
                    time_out_seconds=conf.GRPC_TIMEOUT)

        if peer_leader is None or peer_leader.peer_id == peer_self.peer_id:
            peer_leader = peer_self
            self.__peer_type = loopchain_pb2.BLOCK_GENERATOR
        else:
            self.block_height_sync(block_sync_target_stub)
            # NOTE: legacy logic that determined the leader from the last
            # block was removed pending rework of the leader-complain
            # algorithm; the sync target is used as the block generator.
            self.__stub_to_blockgenerator = block_sync_target_stub

            if self.__stub_to_blockgenerator is None:
                util.exit_and_msg("Fail connect to leader!!")

            self.show_peers()

    self.__common_service.set_peer_type(self.__peer_type)
    if self.__peer_type == loopchain_pb2.BLOCK_GENERATOR:
        self.__block_manager.set_peer_type(self.__peer_type)

    loopchain_pb2_grpc.add_PeerServiceServicer_to_server(
        self.__outer_service, self.__common_service.outer_server)
    loopchain_pb2_grpc.add_InnerServiceServicer_to_server(
        self.__inner_service, self.__common_service.inner_server)
    logging.info("Start peer service at port: " + str(port))

    self.__block_manager.start()
    self.__common_service.start(port, self.peer_id, self.group_id)

    if self.__stub_to_radio_station is not None:
        self.__common_service.subscribe(self.__stub_to_radio_station)

    # Start Peer Process for gRPC send to Block Generator
    # But It use only when create tx (yet)
    logging.debug("peer_leader target is: " + str(peer_leader.target))
    self.__tx_process = self.__run_tx_process(
        blockgenerator_info=peer_leader.target,
        inner_channel_info=conf.IP_LOCAL + ":" + str(inner_service_port))

    if self.__stub_to_blockgenerator is not None:
        self.__common_service.subscribe(self.__stub_to_blockgenerator,
                                        loopchain_pb2.BLOCK_GENERATOR)

    if is_delay_announce_new_leader:
        self.__peer_manager.announce_new_leader(peer_old_leader.peer_id,
                                                peer_leader.peer_id)

    self.__send_to_process_thread = SendToProcess(self.__tx_process)
    self.__send_to_process_thread.start()

    stopwatch_duration = timeit.default_timer() - stopwatch_start
    logging.info(
        f"Start Peer Service start duration({stopwatch_duration})")

    # Wait for the service to end.
    if is_all_service_safe_start:
        self.__common_service.wait()
    else:
        self.service_stop()

    self.__send_to_process_thread.stop()
    self.__send_to_process_thread.wait()

    logging.info("Peer Service Ended.")
    self.__score_service.stop()
    if self.__rest_service is not None:
        self.__rest_service.stop()
    self.__tx_service.stop()
    self.__stop_tx_process()
def __connect_to_radiostation(self):
    """Connect to the radio station.

    When auth is secure, performs the RS token handshake first (request a
    random nonce, sign it, then connect with the signed token).

    :return: the RS ConnectPeer response, or None on failure.
    """
    logging.debug("try to connect to radiostation")
    self.__stub_to_radio_station = StubManager.get_stub_manager_to_server(
        self.__radio_station_target, loopchain_pb2_grpc.RadioStationStub,
        conf.CONNECTION_RETRY_TIMEOUT_TO_RS)

    if self.__stub_to_radio_station is None:
        logging.warning("fail make stub to Radio Station!!")
        return None

    token = None
    if self.__auth.is_secure:
        peer_self = self.__peer_manager.get_peer(self.peer_id)
        token = None
        if peer_self is not None:
            token = peer_self.token
        logging.debug("Self Peer Token : %s", token)
        # Request a new token when the old one has expired.
        if token is not None and self.__auth.get_token_time(token) is None:
            token = None

        self.__auth.set_peer_info(self.peer_id, self.__peer_target,
                                  self.group_id, self.__peer_type)
        cert_bytes = self.__auth.get_cert_bytes()
        if token is None:
            # Receive a random nonce from the server.
            response = self.__stub_to_radio_station.call(
                "ConnectPeer",
                loopchain_pb2.PeerRequest(
                    peer_object=b'',
                    peer_id=self.peer_id,
                    peer_target=self.__peer_target,
                    group_id=self.group_id,
                    peer_type=self.__peer_type,
                    token=conf.TOKEN_TYPE_CERT + cert_bytes.hex()),
                conf.GRPC_TIMEOUT)

            rand_key = None
            if response is not None and response.status == message_code.Response.success:
                logging.debug("Received Random : %s", response.more_info)
                # BUGFIX: was `is not 32` — identity comparison with an int
                # literal is implementation-dependent; use `!=`.
                # 32 hex chars == 16 bytes.
                if len(response.more_info) != 32:
                    logging.debug('서버로부터 수신한 토큰 길이는 16바이트가 되어야 합니다.')
                else:
                    rand_key = response.more_info
            else:
                return response

            # Sign the nonce together with the peer info.
            if rand_key is None:
                return None
            else:
                sign = self.__auth.generate_request_sign(rand_key=rand_key)
                token = conf.TOKEN_TYPE_SIGN + sign.hex()
        else:
            self.__auth.add_token(token)

    # Common part: the actual ConnectPeer call with the (possibly new) token.
    response = self.__stub_to_radio_station.call(
        "ConnectPeer",
        loopchain_pb2.PeerRequest(
            peer_object=b'',
            peer_id=self.peer_id,
            peer_target=self.__peer_target,
            group_id=self.group_id,
            peer_type=self.__peer_type,
            token=token),
        conf.GRPC_CONNECTION_TIMEOUT)

    if response is not None and response.status == message_code.Response.success:
        if self.__auth.is_secure:
            logging.debug("Received Token : %s", response.more_info)
            # Verify the token received from the radio station
            # (at least a 2-char tag plus payload: 9 chars minimum).
            if len(response.more_info) < 9:
                logging.debug('서버로부터 수신한 토큰 길이는 9바이트 이상이 되어야 합니다.')
                response.status = message_code.Response.fail_validate_params
                response.more_info = "Invalid Token Data"
            else:
                token = response.more_info
                tag = token[:2]
                if tag == conf.TOKEN_TYPE_TOKEN:
                    if self.__auth.verify_token(token):
                        logging.debug("토큰 검증에 성공하였습니다.")
                        self.__auth.add_token(token)
                    else:
                        logging.debug("토큰 검증에 실패하였습니다.")
                        response.status = message_code.Response.fail_validate_params
                        response.more_info = "Invalid Token Signature"

    return response
def __block_height_sync_channel(self, channel_name):
    """Find the leader of the latest block for *channel_name* via height sync.

    If this peer did not start as leader but its own info matches the
    leader-peer info, run a block height sync to discover the leader of
    the most recent block.
    """
    block_sync_target_stub = None
    peer_manager = self.__channel_manager.get_peer_manager(channel_name)
    peer_leader = peer_manager.get_leader_peer()
    self_peer_object = peer_manager.get_peer(self.__peer_id)
    is_delay_announce_new_leader = False
    peer_old_leader = None

    if peer_leader.target != self.__peer_target:
        block_sync_target_stub = StubManager.get_stub_manager_to_server(
            peer_leader.target,
            loopchain_pb2_grpc.PeerServiceStub,
            time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT)

        if block_sync_target_stub is None:
            logging.warning(
                "You maybe Older from this network... or No leader in this network!"
            )
            # TODO: run a leader complain to the RS in this situation.
            # Defer the new-leader announcement until after the sync.
            is_delay_announce_new_leader = True
            peer_old_leader = peer_leader
            peer_leader = self.__channel_manager.get_peer_manager(
                channel_name).leader_complain_to_rs(
                    conf.ALL_GROUP_ID, is_announce_new_peer=False)

            if peer_leader is not None:
                block_sync_target_stub = StubManager.get_stub_manager_to_server(
                    peer_leader.target,
                    loopchain_pb2_grpc.PeerServiceStub,
                    time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT)

        if peer_leader is None or peer_leader.peer_id == self.__peer_id:
            # This peer becomes the block generator itself.
            peer_leader = self_peer_object
            self.__channel_manager.get_block_manager(
                channel_name).set_peer_type(loopchain_pb2.BLOCK_GENERATOR)
        else:
            # Pull blocks from the sync target.
            self.__channel_manager.get_block_manager(
                channel_name).block_height_sync(block_sync_target_stub)
            # NOTE: legacy logic that determined the leader from the last
            # block was removed pending rework of the leader-complain
            # algorithm.
            if block_sync_target_stub is None:
                util.exit_and_msg("Fail connect to leader!!")

            self.show_peers(channel_name)

    if block_sync_target_stub is not None:
        self.__common_service.subscribe(channel_name, block_sync_target_stub,
                                        loopchain_pb2.BLOCK_GENERATOR)

    if is_delay_announce_new_leader:
        # Announce the leader change that was deferred above.
        self.__channel_manager.get_peer_manager(
            channel_name).announce_new_leader(peer_old_leader.peer_id,
                                              peer_leader.peer_id)