Example #1
    def __init__(self, radio_station_ip=conf.IP_RADIOSTATION, cert_path=None, cert_pass=None):
        """
        RadioStation Init

        :param radio_station_ip: RadioStation IP
        :param cert_path: RadioStation certificate directory path
        :param cert_pass: RadioStation private key password
        """
        self.__handler_map = {
            message_code.Request.status: self.__handler_status,
            message_code.Request.peer_get_leader: self.__handler_get_leader_peer,
            message_code.Request.peer_complain_leader: self.__handler_complain_leader,
            message_code.Request.rs_set_configuration: self.__handler_set_configuration,
            message_code.Request.rs_get_configuration: self.__handler_get_configuration
        }
        logging.info("Set RadioStationService IP: " + radio_station_ip)
        if cert_path is not None:
            logging.info("CA Certificate Path : " + cert_path)

        self._rs = RadioStation()
        self.__common_service = CommonService(loopchain_pb2)
        self.__common_service.set_peer_type(loopchain_pb2.RADIO_STATION)
        self.__peer_manager = self.__common_service.load_peer_manager()

        self.__rest_service = None

        # Certificate authorization class
        self.__ca = CertificateAuthorization()

        if cert_path is not None:
            # Load the certificate
            self.__ca.load_pki(cert_path, cert_pass)

        logging.info("Current group ID:"+self._rs.get_group_id())
        logging.info("Current RadioStation SECURITY_MODE : " + str(self.__ca.is_secure))
Example #2
    def run_common_service(self):
        inner_service_port = conf.PORT_INNER_SERVICE or (
            self.__peer_port + conf.PORT_DIFF_INNER_SERVICE)
        self.__inner_target = conf.IP_LOCAL + ":" + str(inner_service_port)

        self.__common_service = CommonService(loopchain_pb2,
                                              inner_service_port)
        self.__common_service.start(str(self.__peer_port), self.__peer_id,
                                    self.__group_id)

        loopchain_pb2_grpc.add_PeerServiceServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)
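
`conf.PORT_INNER_SERVICE or (...)` relies on Python's short-circuiting `or`, which returns the first truthy operand: a configured port of `0` (or `None`) falls back to `peer_port + PORT_DIFF_INNER_SERVICE`. A quick illustration with assumed values standing in for the conf constants:

# Assumed values, just to show the `or` fallback used above.
PORT_DIFF_INNER_SERVICE = 10000
peer_port = 7100

inner = 0 or (peer_port + PORT_DIFF_INNER_SERVICE)      # unset -> derived port
assert inner == 17100

inner = 7200 or (peer_port + PORT_DIFF_INNER_SERVICE)   # explicitly set -> kept
assert inner == 7200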
Example #3
    def __init__(self,
                 radio_station_ip=None,
                 cert_path=None,
                 cert_pass=None,
                 rand_seed=None):
        """RadioStation Init

        :param radio_station_ip: RadioStation IP
        :param cert_path: RadioStation certificate directory path
        :param cert_pass: RadioStation private key password
        """
        logger_preset = loggers.get_preset()
        logger_preset.peer_id = "RadioStation"
        logger_preset.update_logger()

        if radio_station_ip is None:
            radio_station_ip = conf.IP_RADIOSTATION
        logging.info("Set RadioStationService IP: " + radio_station_ip)
        if cert_path is not None:
            logging.info("CA Certificate Path : " + cert_path)

        self.__common_service = CommonService(loopchain_pb2)
        self.__admin_manager = AdminManager("station")
        self.__channel_manager = None
        self.__rest_service = None
        self.__timer_service = TimerService()

        # RS has two statuses (active, standby): active means the outer service is enabled;
        # standby means the outer service is stopped and heartbeats go to the other (active) RS
        self.__is_active = False

        # Certificate authorization class
        self.__ca = CertificateAuthorization()

        if cert_path is not None:
            # Load the certificate
            self.__ca.load_pki(cert_path, cert_pass)

        logging.info("Current RadioStation SECURITY_MODE : " +
                     str(self.__ca.is_secure))

        # gRPC service for Radiostation
        self.__outer_service = OuterService()
        self.__admin_service = AdminService(self.__admin_manager)

        # dictionary structured as {group_id: [{peer_id: IP}]}
        self.peer_groups = {conf.ALL_GROUP_ID: []}

        # Handles peer security
        self.auth = {}

        ObjectManager().rs_service = self
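
`ObjectManager().rs_service = self` publishes the service instance through a process-wide singleton so other modules can reach it without passing references around. A minimal sketch of that singleton pattern; loopchain's actual ObjectManager may differ in detail:

class ObjectManagerSketch:
    """Process-wide registry: every construction returns the same instance."""
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.rs_service = None
            cls._instance.peer_service = None
        return cls._instance

# ObjectManagerSketch().rs_service = service
# ObjectManagerSketch().rs_service is service  -> True from anywhere in the process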
Example #4
    def __start_base_services(self, score):
        """start base services >> common_service, channel_manager, tx_process

        :param score:
        :return:
        """
        inner_service_port = conf.PORT_INNER_SERVICE or (
            self.__peer_port + conf.PORT_DIFF_INNER_SERVICE)

        self.__common_service = CommonService(loopchain_pb2,
                                              inner_service_port)

        self.__channel_manager = ChannelManager(
            common_service=self.__common_service,
            level_db_identity=self.__peer_target)

        self.__tx_process = self.__run_tx_process(
            inner_channel_info=conf.IP_LOCAL + ":" + str(inner_service_port))
Example #5
class RadioStationService:
    """Radiostation 의 main Class
    peer 를 위한 outer service 와 관리용 admin service 두개의 gRPC interface 를 가진다.
    """

    # Certification handling
    __ca = None

    def __init__(self,
                 radio_station_ip=None,
                 cert_path=None,
                 cert_pass=None,
                 rand_seed=None):
        """RadioStation Init

        :param radio_station_ip: RadioStation IP
        :param cert_path: RadioStation certificate directory path
        :param cert_pass: RadioStation private key password
        """
        logger_preset = loggers.get_preset()
        logger_preset.peer_id = "RadioStation"
        logger_preset.update_logger()

        if radio_station_ip is None:
            radio_station_ip = conf.IP_RADIOSTATION
        logging.info("Set RadioStationService IP: " + radio_station_ip)
        if cert_path is not None:
            logging.info("CA Certificate Path : " + cert_path)

        self.__common_service = CommonService(loopchain_pb2)
        self.__admin_manager = AdminManager("station")
        self.__channel_manager = None
        self.__rest_service = None
        self.__timer_service = TimerService()

        # RS has two statuses (active, standby): active means the outer service is enabled;
        # standby means the outer service is stopped and heartbeats go to the other (active) RS
        self.__is_active = False

        # Certificate authorization class
        self.__ca = CertificateAuthorization()

        if cert_path is not None:
            # Load the certificate
            self.__ca.load_pki(cert_path, cert_pass)

        logging.info("Current RadioStation SECURITY_MODE : " +
                     str(self.__ca.is_secure))

        # gRPC service for Radiostation
        self.__outer_service = OuterService()
        self.__admin_service = AdminService(self.__admin_manager)

        # dictionary structured as {group_id: [{peer_id: IP}]}
        self.peer_groups = {conf.ALL_GROUP_ID: []}

        # Handles peer security
        self.auth = {}

        ObjectManager().rs_service = self

    def __del__(self):
        pass

    def launch_block_generator(self):
        pass

    @property
    def admin_manager(self) -> AdminManager:
        return self.__admin_manager

    @property
    def channel_manager(self) -> ChannelManager:
        return self.__channel_manager

    @property
    def common_service(self) -> CommonService:
        return self.__common_service

    @property
    def timer_service(self) -> TimerService:
        return self.__timer_service

    def check_peer_status(self, channel):
        """service loop for status heartbeat check to peer list

        :return:
        """
        util.logger.spam(
            f"rs_service:check_peer_status(Heartbeat...{channel}) "
            f"for reset Leader and delete no response Peer")

        peer_manager = self.__channel_manager.get_peer_manager(channel)
        peer_manager.check_peer_status()

    def __create_random_table(self, rand_seed: int) -> list:
        """create random_table using random_seed
        table size define in conf.RANDOM_TABLE_SIZE

        :param rand_seed: random seed for create random table
        :return: random table
        """
        random.seed(rand_seed)
        random_table = []
        for i in range(conf.RANDOM_TABLE_SIZE):
            random_num: int = random.getrandbits(conf.RANDOM_SIZE)
            random_table.append(random_num)

        return random_table

    def register_peers(self):
        util.logger.spam(f"register_peers() : start register to peer_manager")

        logging.debug(
            f"register_peers() : channel_list = {self.admin_manager.get_channel_list()}"
        )
        for channel_name, channel_data in self.admin_manager.json_data.items():
            peer_manager = self.channel_manager.get_peer_manager(channel_name)

            for peer_data in channel_data['peers']:
                peer_info = {
                    "id": peer_data['id'],
                    "peer_target": peer_data['peer_target'],
                    "order": peer_data['order']
                }
                logging.debug(
                    f"register Peer : channel = {channel_name}, peer_info = {peer_info}"
                )
                peer_manager.add_peer(peer_info)

            if conf.ENABLE_RADIOSTATION_HEARTBEAT:
                timer_key = f"{TimerService.TIMER_KEY_RS_HEARTBEAT}_{channel_name}"
                if timer_key not in self.timer_service.timer_list:
                    self.timer_service.add_timer(
                        timer_key,
                        Timer(target=timer_key,
                              duration=conf.SLEEP_SECONDS_IN_RADIOSTATION_HEARTBEAT,
                              is_repeat=True,
                              callback=self.check_peer_status,
                              callback_kwargs={"channel": channel_name}))

    def serve(self, port=None, event_for_init: multiprocessing.Event = None):
        """Peer(BlockGenerator Peer) to RadioStation

        :param port: RadioStation Peer
        :param event_for_init:
        """
        if port is None:
            port = conf.PORT_RADIOSTATION
        stopwatch_start = timeit.default_timer()

        self.__channel_manager = ChannelManager(self.__common_service)

        self.register_peers()

        # TODO: Currently, some environments fail to start RestServiceRS without this sleep.
        # The sleep works around the issue on this node, but it should be fixed properly after investigation.
        time.sleep(1)

        if conf.ENABLE_REST_SERVICE:
            self.__rest_service = RestServiceRS(int(port))

        loopchain_pb2_grpc.add_RadioStationServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)
        loopchain_pb2_grpc.add_AdminServiceServicer_to_server(
            self.__admin_service, self.__common_service.inner_server)

        logging.info("Start Radio Station service at port: " + str(port))

        self.__common_service.start(port)
        self.__timer_service.start()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Start Radio Station service at port: {port} start duration({stopwatch_duration})"
        )

        if event_for_init is not None:
            event_for_init.set()

        signal.signal(signal.SIGINT, self.close)
        signal.signal(signal.SIGTERM, self.close)

        # Wait for the services to stop.
        self.__common_service.wait()
        self.__timer_service.wait()

        if self.__rest_service is not None:
            self.__rest_service.stop()

    def close(self, sig, frame):
        self.__common_service.stop()
        self.__timer_service.stop()
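
`register_peers()` in this example arms one repeating heartbeat timer per channel through `TimerService`. Here is a self-contained sketch of the same repeat-timer idea, built on the standard library's `threading.Timer` rather than loopchain's `TimerService`, so the helper name is a stand-in:

import threading

def add_repeat_timer(duration, callback, **callback_kwargs):
    """Fire `callback` every `duration` seconds, like Timer(is_repeat=True)
    registered with TimerService above."""
    def _tick():
        callback(**callback_kwargs)
        add_repeat_timer(duration, callback, **callback_kwargs)  # re-arm

    t = threading.Timer(duration, _tick)
    t.daemon = True  # do not keep the process alive just for heartbeats
    t.start()
    return t

# e.g. add_repeat_timer(5, check_peer_status, channel="my_channel")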
Example #6
class RadioStationService:
    """Radiostation 의 main Class
    peer 를 위한 outer service 와 관리용 admin service 두개의 gRPC interface 를 가진다.
    """

    # Certification handling
    __ca = None

    def __init__(self,
                 radio_station_ip=None,
                 cert_path=None,
                 cert_pass=None,
                 rand_seed=None):
        """RadioStation Init

        :param radio_station_ip: RadioStation IP
        :param cert_path: RadioStation certificate directory path
        :param cert_pass: RadioStation private key password
        """
        logger_preset = loggers.get_preset()
        logger_preset.peer_id = "RadioStation"
        logger_preset.update_logger()

        if radio_station_ip is None:
            radio_station_ip = conf.IP_RADIOSTATION
        logging.info("Set RadioStationService IP: " + radio_station_ip)
        if cert_path is not None:
            logging.info("CA Certificate Path : " + cert_path)

        self.__common_service = CommonService(loopchain_pb2)
        self.__admin_manager = AdminManager("station")
        self.__channel_manager = None
        self.__rest_service = None
        self.__timer_service = TimerService()

        # RS has two statuses (active, standby): active means the outer service is enabled;
        # standby means the outer service is stopped and heartbeats go to the other (active) RS
        self.__is_active = False

        # Certificate authorization class
        self.__ca = CertificateAuthorization()

        if cert_path is not None:
            # Load the certificate
            self.__ca.load_pki(cert_path, cert_pass)

        logging.info("Current RadioStation SECURITY_MODE : " +
                     str(self.__ca.is_secure))

        # gRPC service for Radiostation
        self.__outer_service = OuterService()
        self.__admin_service = AdminService(self.__admin_manager)

        # dictionary structured as {group_id: [{peer_id: IP}]}
        self.peer_groups = {conf.ALL_GROUP_ID: []}

        # Handles peer security
        self.auth = {}

        ObjectManager().rs_service = self

    def __del__(self):
        pass

    def launch_block_generator(self):
        pass

    @property
    def admin_manager(self):
        return self.__admin_manager

    @property
    def channel_manager(self):
        return self.__channel_manager

    @property
    def common_service(self):
        return self.__common_service

    @property
    def timer_service(self):
        return self.__timer_service

    @property
    def random_table(self):
        return self.__random_table

    def __broadcast_new_peer(self, peer_request):
        """새로 들어온 peer 를 기존의 peer 들에게 announce 한다."""

        logging.debug("Broadcast New Peer.... " + str(peer_request))
        if self.__channel_manager is not None:
            self.__channel_manager.broadcast(peer_request.channel,
                                             "AnnounceNewPeer", peer_request)

    def check_peer_status(self, channel):
        """service loop for status heartbeat check to peer list

        :return:
        """
        util.logger.spam(
            f"rs_service:check_peer_status(Heartbeat...{channel}) "
            f"for reset Leader and delete no response Peer")

        peer_manager = self.__channel_manager.get_peer_manager(channel)
        delete_peer_list = peer_manager.check_peer_status()

        for delete_peer in delete_peer_list:
            logging.debug(f"delete peer {delete_peer.peer_id}")
            message = loopchain_pb2.PeerID(peer_id=delete_peer.peer_id,
                                           channel=channel,
                                           group_id=delete_peer.group_id)
            self.__channel_manager.broadcast(channel, "AnnounceDeletePeer",
                                             message)

        # save current peer_manager after heartbeat to peers.
        ObjectManager().rs_service.admin_manager.save_peer_manager(
            channel, peer_manager)

    def __create_random_table(self, rand_seed: int) -> list:
        """create random_table using random_seed
        table size define in conf.RANDOM_TABLE_SIZE

        :param rand_seed: random seed for create random table
        :return: random table
        """
        random.seed(rand_seed)
        random_table = []
        for i in range(conf.RANDOM_TABLE_SIZE):
            random_num: int = random.getrandbits(conf.RANDOM_SIZE)
            random_table.append(random_num)

        return random_table

    def serve(self, port=None, event_for_init: multiprocessing.Event = None):
        """Peer(BlockGenerator Peer) to RadioStation

        :param port: RadioStation Peer
        """
        if port is None:
            port = conf.PORT_RADIOSTATION
        stopwatch_start = timeit.default_timer()

        self.__channel_manager = ChannelManager(self.__common_service)

        if conf.ENABLE_REST_SERVICE:
            self.__rest_service = RestServiceRS(int(port))

        loopchain_pb2_grpc.add_RadioStationServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)
        loopchain_pb2_grpc.add_AdminServiceServicer_to_server(
            self.__admin_service, self.__common_service.inner_server)

        logging.info("Start Radio Station service at port: " + str(port))

        self.__common_service.start(port)
        self.__timer_service.start()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Start Radio Station service at port: {port} start duration({stopwatch_duration})"
        )

        if event_for_init is not None:
            event_for_init.set()

        signal.signal(signal.SIGINT, self.close)
        signal.signal(signal.SIGTERM, self.close)

        # Wait for the services to stop.
        self.__common_service.wait()
        self.__timer_service.wait()

        if self.__rest_service is not None:
            self.__rest_service.stop()

    def close(self, sig, frame):
        self.__common_service.stop()
        self.__timer_service.stop()
Example #7
class RadioStationService:
    """Radiostation 의 main Class
    peer 를 위한 outer service 와 관리용 admin service 두개의 gRPC interface 를 가진다.
    """

    # Certification handling
    __ca = None

    def __init__(self, radio_station_ip=None, cert_path=None, cert_pass=None, rand_seed=None):
        """RadioStation Init

        :param radio_station_ip: RadioStation IP
        :param cert_path: RadioStation certificate directory path
        :param cert_pass: RadioStation private key password
        """
        if radio_station_ip is None:
            radio_station_ip = conf.IP_RADIOSTATION
        logging.info("Set RadioStationService IP: " + radio_station_ip)
        if cert_path is not None:
            logging.info("CA Certificate Path : " + cert_path)

        self.__common_service = CommonService(loopchain_pb2)
        self.__admin_manager = AdminManager("station")
        self.__channel_manager = None
        self.__rest_service = None

        # RS has two statuses (active, standby): active means the outer service is enabled;
        # standby means the outer service is stopped and heartbeats go to the other (active) RS
        self.__is_active = False

        # Certificate authorization class
        self.__ca = CertificateAuthorization()

        if cert_path is not None:
            # Load the certificate
            self.__ca.load_pki(cert_path, cert_pass)

        if conf.ENABLE_KMS:
            if rand_seed is None:
                util.exit_and_msg("KMS needs input random seed \n"
                                  "you can put seed -s --seed")
            self.__random_table = self.__create_random_table(rand_seed)

        logging.info("Current RadioStation SECURITY_MODE : " + str(self.__ca.is_secure))

        # gRPC service for Radiostation
        self.__outer_service = OuterService()
        self.__admin_service = AdminService()

        # dictionary structured as {group_id: [{peer_id: IP}]}
        self.peer_groups = {conf.ALL_GROUP_ID: []}

        # Handles peer security
        self.auth = {}

        ObjectManager().rs_service = self

    def __del__(self):
        pass

    def launch_block_generator(self):
        pass

    def validate_group_id(self, group_id: str):
        # TODO A new way to validate group IDs is needed; for now everything passes temporarily.
        return 0, "Available group ID: " + group_id

    @property
    def admin_manager(self):
        return self.__admin_manager

    @property
    def channel_manager(self):
        return self.__channel_manager

    @property
    def common_service(self):
        return self.__common_service

    @property
    def random_table(self):
        return self.__random_table

    def __broadcast_new_peer(self, peer_request):
        """새로 들어온 peer 를 기존의 peer 들에게 announce 한다."""

        logging.debug("Broadcast New Peer.... " + str(peer_request))
        if self.__common_service is not None:
            self.__common_service.broadcast("AnnounceNewPeer", peer_request)

    def check_peer_status(self):
        """service loop for status heartbeat check to peer list

        :return:
        """
        time.sleep(conf.SLEEP_SECONDS_IN_RADIOSTATION_HEARTBEAT)
        util.logger.spam(f"rs_service:check_peer_status(Heartbeat.!.!) for reset Leader and delete no response Peer")

        for channel in self.__channel_manager.get_channel_list():
            delete_peer_list = self.__channel_manager.get_peer_manager(channel).check_peer_status()

            for delete_peer in delete_peer_list:
                logging.debug(f"delete peer {delete_peer.peer_id}")
                message = loopchain_pb2.PeerID(
                    peer_id=delete_peer.peer_id,
                    channel=channel,
                    group_id=delete_peer.group_id)
                self.__common_service.broadcast("AnnounceDeletePeer", message)

    def __create_random_table(self, rand_seed: int) -> list:
        """create random_table using random_seed
        table size define in conf.RANDOM_TABLE_SIZE

        :param rand_seed: random seed for create random table
        :return: random table
        """
        random.seed(rand_seed)
        random_table = []
        for i in range(conf.RANDOM_TABLE_SIZE):
            random_num: int = random.getrandbits(conf.RANDOM_SIZE)
            random_table.append(random_num)

        return random_table

    def serve(self, port=None):
        """Peer(BlockGenerator Peer) to RadioStation

        :param port: RadioStation Peer
        """
        if port is None:
            port = conf.PORT_RADIOSTATION
        stopwatch_start = timeit.default_timer()

        self.__channel_manager = ChannelManager(self.__common_service)

        if conf.ENABLE_REST_SERVICE:
            self.__rest_service = RestServiceRS(int(port))

        loopchain_pb2_grpc.add_RadioStationServicer_to_server(self.__outer_service, self.__common_service.outer_server)
        loopchain_pb2_grpc.add_AdminServiceServicer_to_server(self.__admin_service, self.__common_service.inner_server)

        logging.info("Start peer service at port: " + str(port))

        if conf.ENABLE_RADIOSTATION_HEARTBEAT:
            self.__common_service.add_loop(self.check_peer_status)
        self.__common_service.start(port)

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(f"Start Radio Station start duration({stopwatch_duration})")

        # Wait for the service to stop.
        self.__common_service.wait()

        if self.__rest_service is not None:
            self.__rest_service.stop()
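
Note why `__create_random_table` matters on the KMS path in this example: it seeds Python's global `random` module, so the same `rand_seed` always reproduces the same table, which is what lets a KMS-enabled RadioStation restart deterministically. A standalone sketch, with assumed sizes standing in for `conf.RANDOM_TABLE_SIZE` and `conf.RANDOM_SIZE`:

import random

RANDOM_TABLE_SIZE = 256  # assumed stand-in for conf.RANDOM_TABLE_SIZE
RANDOM_SIZE = 256        # assumed stand-in for conf.RANDOM_SIZE

def create_random_table(rand_seed: int) -> list:
    random.seed(rand_seed)  # deterministic: same seed -> same table
    return [random.getrandbits(RANDOM_SIZE) for _ in range(RANDOM_TABLE_SIZE)]

assert create_random_table(42) == create_random_table(42)
assert create_random_table(42) != create_random_table(43)  # distinct with overwhelming probability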
Example #8
class PeerService:
    """Peer Service 의 main Class
    outer 와 inner gRPC 인터페이스를 가진다.
    서비스 루프 및 공통 요소는 commonservice 를 통해서 처리한다.
    channel 관련 instance 는 channel manager 를 통해서 관리한다.
    """
    def __init__(self,
                 group_id=None,
                 radio_station_target=None,
                 node_type=None):
        """Peer는 Radio Station 에 접속하여 leader 및 다른 Peer에 대한 접속 정보를 전달 받는다.

        :param group_id: Peer Group 을 구분하기 위한 ID, None 이면 Single Peer Group 이 된다. (peer_id is group_id)
        conf.PEER_GROUP_ID 를 사용하면 configure 파일에 저장된 값을 group_id 로 사용하게 된다.
        :param radio_station_ip: RS IP
        :param radio_station_port: RS Port
        :return:
        """
        node_type = node_type or conf.NodeType.CommunityNode

        self.is_support_node_function = \
            partial(conf.NodeType.is_support_node_function, node_type=node_type)

        util.logger.spam(f"Your Peer Service runs on debugging MODE!")
        util.logger.spam(
            f"You can see many terrible garbage logs just for debugging, DO U Really want it?"
        )

        # process monitor must start before any subprocess
        if conf.ENABLE_PROCESS_MONITORING:
            Monitor().start()

        self.__node_type = node_type

        self.__radio_station_target = radio_station_target
        logging.info("Set Radio Station target is " +
                     self.__radio_station_target)

        self.__radio_station_stub = None

        self.__level_db = None
        self.__level_db_path = ""

        self.__peer_id = None
        self.__group_id = group_id
        if self.__group_id is None and conf.PEER_GROUP_ID != "":
            self.__group_id = conf.PEER_GROUP_ID

        self.__common_service = None
        self.__channel_infos = None

        self.__rest_service = None
        self.__rest_proxy_server = None

        # peer status cache for channel
        self.status_cache = {}  # {channel:status}

        self.__score = None
        self.__peer_target = None
        self.__rest_target = None
        self.__inner_target = None
        self.__peer_port = 0

        # gRPC service for Peer
        self.__inner_service: PeerInnerService = None
        self.__outer_service: PeerOuterService = None
        self.__channel_services = {}

        self.__reset_voter_in_progress = False
        self.__json_conf_path = None

        self.__node_keys: dict = {}

        ObjectManager().peer_service = self

    @property
    def common_service(self):
        return self.__common_service

    @property
    def inner_service(self):
        return self.__inner_service

    @property
    def outer_service(self):
        return self.__outer_service

    @property
    def peer_target(self):
        return self.__peer_target

    @property
    def rest_target(self):
        return self.__rest_target

    @property
    def json_conf_path(self):
        return self.__json_conf_path

    @property
    def channel_infos(self):
        return self.__channel_infos

    @property
    def node_type(self):
        return self.__node_type

    @property
    def radio_station_target(self):
        return self.__radio_station_target

    @property
    def stub_to_radiostation(self):
        if self.__radio_station_stub is None:
            if self.is_support_node_function(conf.NodeFunction.Vote):
                if conf.ENABLE_REP_RADIO_STATION:
                    self.__radio_station_stub = StubManager.get_stub_manager_to_server(
                        self.__radio_station_target,
                        loopchain_pb2_grpc.RadioStationStub,
                        conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
                        ssl_auth_type=conf.GRPC_SSL_TYPE)
                else:
                    self.__radio_station_stub = None
            else:
                self.__radio_station_stub = RestStubManager(
                    self.__radio_station_target)

        return self.__radio_station_stub

    @property
    def peer_port(self):
        return self.__peer_port

    @property
    def peer_id(self):
        return self.__peer_id

    @property
    def group_id(self):
        if self.__group_id is None:
            self.__group_id = self.__peer_id
        return self.__group_id

    @property
    def node_keys(self):
        return self.__node_keys

    def service_stop(self):
        self.__common_service.stop()

    def __get_channel_infos(self):
        # util.logger.spam(f"__get_channel_infos:node_type::{self.__node_type}")
        if self.is_support_node_function(conf.NodeFunction.Vote):
            if conf.ENABLE_REP_RADIO_STATION:
                response = self.stub_to_radiostation.call_in_times(
                    method_name="GetChannelInfos",
                    message=loopchain_pb2.GetChannelInfosRequest(
                        peer_id=self.__peer_id,
                        peer_target=self.__peer_target,
                        group_id=self.group_id),
                    retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS,
                    is_stub_reuse=False,
                    timeout=conf.CONNECTION_TIMEOUT_TO_RS)
                # util.logger.spam(f"__get_channel_infos:response::{response}")

                if not response:
                    return None
                logging.info(
                    f"Connect to channels({util.pretty_json(response.channel_infos)})"
                )
                channels = json.loads(response.channel_infos)
            else:
                channels = util.load_json_data(conf.CHANNEL_MANAGE_DATA_PATH)

                if conf.ENABLE_CHANNEL_AUTH:
                    filtered_channels = {
                        channel: channels[channel]
                        for channel in channels
                        for peer in channels[channel]['peers']
                        if self.__peer_id == peer['id']
                    }
                    channels = filtered_channels
        else:
            response = self.stub_to_radiostation.call_in_times(
                method_name="GetChannelInfos")
            channels = {
                channel: value
                for channel, value in response["channel_infos"].items()
            }

        return channels

    def __init_port(self, port):
        # Service initialization
        target_ip = util.get_private_ip()
        self.__peer_target = util.get_private_ip() + ":" + str(port)
        self.__peer_port = int(port)

        rest_port = int(port) + conf.PORT_DIFF_REST_SERVICE_CONTAINER
        self.__rest_target = f"{target_ip}:{rest_port}"

        logging.info("Start Peer Service at port: " + str(port))

    def __init_level_db(self):
        # level db for the peer service (not a channel); it stores unique peer info such as peer_id
        self.__level_db, self.__level_db_path = util.init_level_db(
            level_db_identity=self.__peer_target, allow_rename_path=False)

    def __run_rest_services(self, port):
        if conf.ENABLE_REST_SERVICE and conf.RUN_ICON_IN_LAUNCHER:
            logging.debug(
                f'Launch Sanic RESTful server. '
                f'Port = {int(port) + conf.PORT_DIFF_REST_SERVICE_CONTAINER}')
            self.__rest_service = RestService(int(port))

    def __init_key_by_channel(self):
        for channel in conf.CHANNEL_OPTION:
            signer = Signer.from_channel(channel)
            if channel == conf.LOOPCHAIN_DEFAULT_CHANNEL:
                self.__make_peer_id(signer.address)
            self.__node_keys[channel] = signer.private_key.private_key

    def __make_peer_id(self, address):
        self.__peer_id = address

        logger_preset = loggers.get_preset()
        logger_preset.peer_id = self.peer_id
        logger_preset.update_logger()

        logging.info(f"run peer_id : {self.__peer_id}")

    def timer_test_callback_function(self, message):
        logging.debug(f'timer test callback function :: ({message})')

    @staticmethod
    def __get_use_kms():
        if conf.GRPC_SSL_KEY_LOAD_TYPE == conf.KeyLoadType.KMS_LOAD:
            return True
        for value in conf.CHANNEL_OPTION.values():
            if value["key_load_type"] == conf.KeyLoadType.KMS_LOAD:
                return True
        return False

    def __init_kms_helper(self, agent_pin):
        if self.__get_use_kms():
            from loopchain.tools.kms_helper import KmsHelper
            KmsHelper().set_agent_pin(agent_pin)

    def __close_kms_helper(self):
        if self.__get_use_kms():
            from loopchain.tools.kms_helper import KmsHelper
            KmsHelper().remove_agent_pin()

    def run_common_service(self):
        inner_service_port = conf.PORT_INNER_SERVICE or (
            self.__peer_port + conf.PORT_DIFF_INNER_SERVICE)
        self.__inner_target = conf.IP_LOCAL + ":" + str(inner_service_port)

        self.__common_service = CommonService(loopchain_pb2,
                                              inner_service_port)
        self.__common_service.start(str(self.__peer_port), self.__peer_id,
                                    self.__group_id)

        loopchain_pb2_grpc.add_PeerServiceServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)

    def serve(self,
              port,
              agent_pin: str = None,
              amqp_target: str = None,
              amqp_key: str = None,
              event_for_init: multiprocessing.Event = None):
        """start func of Peer Service ===================================================================

        :param port:
        :param agent_pin: kms agent pin
        :param amqp_target: rabbitmq host target
        :param amqp_key: sharing queue key
        :param event_for_init: set when peer initiates
        """

        amqp_target = amqp_target or conf.AMQP_TARGET
        amqp_key = amqp_key or conf.AMQP_KEY

        stopwatch_start = timeit.default_timer()

        self.__init_kms_helper(agent_pin)
        self.__init_port(port)
        self.__init_level_db()
        self.__init_key_by_channel()

        StubCollection().amqp_target = amqp_target
        StubCollection().amqp_key = amqp_key

        peer_queue_name = conf.PEER_QUEUE_NAME_FORMAT.format(amqp_key=amqp_key)
        self.__outer_service = PeerOuterService()
        self.__inner_service = PeerInnerService(amqp_target,
                                                peer_queue_name,
                                                conf.AMQP_USERNAME,
                                                conf.AMQP_PASSWORD,
                                                peer_service=self)

        self.__channel_infos = self.__get_channel_infos()
        if not self.__channel_infos:
            util.exit_and_msg(
                "There is no peer_list, initial network is not allowed without RS!"
            )

        self.__run_rest_services(port)
        self.run_common_service()

        self.__close_kms_helper()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Start Peer Service at port: {port} start duration({stopwatch_duration})"
        )

        async def _serve():
            await self.ready_tasks()
            await self.__inner_service.connect(conf.AMQP_CONNECTION_ATTEMPS,
                                               conf.AMQP_RETRY_DELAY,
                                               exclusive=True)

            if conf.CHANNEL_BUILTIN:
                await self.serve_channels()

            if event_for_init is not None:
                event_for_init.set()

            logging.info(f'peer_service: init complete peer: {self.peer_id}')

        loop = self.__inner_service.loop
        loop.create_task(_serve())
        loop.add_signal_handler(signal.SIGINT, self.close)
        loop.add_signal_handler(signal.SIGTERM, self.close)

        try:
            loop.run_forever()
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()

        self.__common_service.wait()

        # process monitor must stop monitoring before any subprocess stop
        # Monitor().stop()

        logging.info("Peer Service Ended.")
        if self.__rest_service is not None:
            self.__rest_service.stop()

        if self.__rest_proxy_server is not None:
            self.__rest_proxy_server.stop()

    def close(self):
        async def _close():
            for channel_stub in StubCollection().channel_stubs.values():
                await channel_stub.async_task().stop("Close")

            self.service_stop()
            loop.stop()

        loop = self.__inner_service.loop
        loop.create_task(_close())

    async def serve_channels(self):
        for i, channel_name in enumerate(self.__channel_infos.keys()):
            score_port = self.__peer_port + conf.PORT_DIFF_SCORE_CONTAINER + conf.PORT_DIFF_BETWEEN_SCORE_CONTAINER * i

            args = ['python3', '-m', 'loopchain', 'channel']
            args += ['-p', str(score_port)]
            args += ['--channel', str(channel_name)]
            args += command_arguments.get_raw_commands_by_filter(
                command_arguments.Type.Develop,
                command_arguments.Type.AMQPTarget,
                command_arguments.Type.AMQPKey,
                command_arguments.Type.ConfigurationFilePath,
                command_arguments.Type.RadioStationTarget)

            service = CommonSubprocess(args)

            channel_stub = StubCollection().channel_stubs[channel_name]
            await channel_stub.async_task().hello()

            self.__channel_services[channel_name] = service

    async def ready_tasks(self):
        await StubCollection().create_peer_stub()  # for getting status info

        for channel_name, channel_info in self.__channel_infos.items():
            await StubCollection().create_channel_stub(channel_name)
            await StubCollection().create_channel_tx_receiver_stub(channel_name)

            await StubCollection().create_icon_score_stub(channel_name)
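
The `serve()`/`close()` pair above follows a common asyncio shutdown pattern: schedule the startup coroutine as a task, stop the loop from POSIX signal handlers, and drain async generators before closing. A minimal sketch of just that skeleton (the startup body is a placeholder for ready_tasks()/connect()/serve_channels()):

import asyncio
import signal

async def _startup():
    print("init complete")  # placeholder for the real startup work

def _shutdown(loop):
    # the real close() first stops every channel stub, then the common service
    loop.stop()

loop = asyncio.new_event_loop()
loop.create_task(_startup())
loop.add_signal_handler(signal.SIGINT, _shutdown, loop)
loop.add_signal_handler(signal.SIGTERM, _shutdown, loop)
try:
    loop.run_forever()
finally:
    loop.run_until_complete(loop.shutdown_asyncgens())
    loop.close()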
Example #9
class RadioStationService(loopchain_pb2_grpc.RadioStationServicer):
    """Radiostation의 gRPC service를 구동하는 Class.
    """
    # Certification handling
    __ca = None

    def __init__(self,
                 radio_station_ip=conf.IP_RADIOSTATION,
                 cert_path=None,
                 cert_pass=None):
        """
        RadioStation Init

        :param radio_station_ip: RadioStation IP
        :param cert_path: RadioStation certificate directory path
        :param cert_pass: RadioStation private key password
        """
        self.__handler_map = {
            message_code.Request.status: self.__handler_status,
            message_code.Request.peer_get_leader: self.__handler_get_leader_peer,
            message_code.Request.peer_complain_leader: self.__handler_complain_leader,
            message_code.Request.rs_set_configuration: self.__handler_set_configuration,
            message_code.Request.rs_get_configuration: self.__handler_get_configuration
        }
        logging.info("Set RadioStationService IP: " + radio_station_ip)
        if cert_path is not None:
            logging.info("CA Certificate Path : " + cert_path)

        self._rs = RadioStation()
        self.__common_service = CommonService(loopchain_pb2)
        self.__common_service.set_peer_type(loopchain_pb2.RADIO_STATION)
        self.__peer_manager = self.__common_service.load_peer_manager()

        self.__rest_service = None

        # Certificate authorization class
        self.__ca = CertificateAuthorization()

        if cert_path is not None:
            # Load the certificate
            self.__ca.load_pki(cert_path, cert_pass)

        logging.info("Current group ID:" + self._rs.get_group_id())
        logging.info("Current RadioStation SECURITY_MODE : " +
                     str(self.__ca.is_secure))

    def __handler_status(self, request, context):
        return loopchain_pb2.Message(code=message_code.Response.success)

    def __handler_get_leader_peer(self, request, context):
        """Get Leader Peer

        :param request: proto.Message {message=group_id}
        :param context:
        :return: proto.Message {object=leader_peer_object}
        """

        # TODO peer_list.get_leader_peer currently does not handle sub-group leaders.
        leader_peer = self.__peer_manager.get_leader_peer(
            group_id=request.message, is_peer=False)
        if leader_peer is not None:
            logging.debug(f"leader_peer ({leader_peer.peer_id})")
            peer_dump = pickle.dumps(leader_peer)

            return loopchain_pb2.Message(code=message_code.Response.success,
                                         object=peer_dump)

        return loopchain_pb2.Message(
            code=message_code.Response.fail_no_leader_peer)

    def __handler_complain_leader(self, request, context):
        """Complain Leader Peer

        :param request: proto.Message {message=group_id}
        :param context:
        :return: proto.Message {object=leader_peer_object}
        """

        # Check the current leader peer's status; if it is fine, respond with its peer id.
        # Otherwise, change the leader to the complaining peer and then respond.

        # Verify the selected peer is actually running as leader before designating it; if it
        # differs from what get_leader_peer returned, AnnounceNewLeader must be broadcast.

        logging.debug("in complain leader (radiostation)")
        leader_peer = self.__peer_manager.complain_leader(
            group_id=request.message)
        if leader_peer is not None:
            logging.warning(
                f"leader_peer after complain({leader_peer.peer_id})")
            peer_dump = pickle.dumps(leader_peer)
            return loopchain_pb2.Message(code=message_code.Response.success,
                                         object=peer_dump)

        return loopchain_pb2.Message(
            code=message_code.Response.fail_no_leader_peer)

    def __handler_get_configuration(self, request, context):
        """Get Configuration

        :param request: proto.Message {meta=configuration_name}
        :param context:
        :return: proto.Message {meta=configuration_info(s)}
        """

        if request.meta == '':
            result = conf.get_all_configurations()
        else:
            meta = json.loads(request.meta)
            conf_name = meta['name']
            result = conf.get_configuration(conf_name)

        if result is None:
            return loopchain_pb2.Message(
                code=message_code.Response.fail,
                message="'" + conf_name +
                "' is an incorrect configuration name.")
        else:
            json_result = json.dumps(result)
            return loopchain_pb2.Message(code=message_code.Response.success,
                                         meta=json_result)

    def __handler_set_configuration(self, request, context):
        """Set Configuration

        :param request: proto.Message {meta=configuration_info}
        :param context:
        :return: proto.Message
        """

        meta = json.loads(request.meta)

        if conf.set_configuration(meta['name'], meta['value']):
            return loopchain_pb2.Message(code=message_code.Response.success)
        else:
            return loopchain_pb2.Message(
                code=message_code.Response.fail,
                message='"' + meta['name'] +
                '" does not exist in the loopchain configuration list.')

    def Request(self, request, context):
        logging.debug("RadioStationService got request: " + str(request))

        if request.code in self.__handler_map.keys():
            return self.__handler_map[request.code](request, context)

        return loopchain_pb2.Message(
            code=message_code.Response.not_treat_message_code)

    def GetStatus(self, request, context):
        """RadioStation의 현재 상태를 요청한다.

        :param request:
        :param context:
        :return:
        """

        logging.debug("RadioStation GetStatus : %s", request)
        peer_status = self.__common_service.getstatus(None)

        return loopchain_pb2.StatusReply(
            status=json.dumps(peer_status),
            block_height=peer_status["block_height"],
            total_tx=peer_status["total_tx"])

    def Stop(self, request, context):
        """RadioStation을 종료한다.

        :param request: StopRequest
        :param context:
        :return: StopReply
        """
        logging.info('RadioStation will stop... by: ' + request.reason)
        self.__common_service.stop()
        return loopchain_pb2.StopReply(status="0")

    def ConnectPeer(self, request, context):
        """RadioStation 에 접속한다. 응답으로 기존의 접속된 Peer 목록을 받는다.

        :param request: PeerRequest
        :param context:
        :return: PeerReply
        """
        logging.info("Trying to connect peer: " + request.peer_id)

        res, info = self._rs.validate_group_id(request.group_id)
        if res < 0:  # On invalid input, return an empty list.
            return loopchain_pb2.PeerReply(status=message_code.Response.fail,
                                           peer_list=b'',
                                           more_info=info)

        logging.debug("Connect Peer " + "\nPeer_id : " + request.peer_id +
                      "\nGroup_id : " + request.group_id + "\nPeer_target : " +
                      request.peer_target)

        auth = ""
        token = ""
        logging.info("CA SECURITY_MODE : " + str(self.__ca.is_secure))
        if self.__ca.is_secure:
            logging.debug("RadioStation is secure mode")
            if request.token is None or request.token == "":
                info = "Peer Token is None"
                return loopchain_pb2.PeerReply(
                    status=message_code.Response.fail,
                    peer_list=b'',
                    more_info=info)

            else:
                # TOKEN : "00", CERT : "01", SIGN : "02"
                tag = request.token[:2]
                data = request.token[2:]
                logging.debug("TOKEN TYPE : %s", tag)

                if tag == conf.TOKEN_TYPE_TOKEN:
                    peer = self.__peer_manager.get_peer(
                        request.peer_id, request.group_id)
                    if peer is None:
                        info = "Invalid Peer_ID[" + request.peer_id + "], Group_ID[" + request.group_id + "%s]"
                        return loopchain_pb2.PeerReply(
                            status=message_code.Response.fail,
                            peer_list=b'',
                            more_info=info)
                    else:
                        peer_type = request.peer_type
                        if peer_type is loopchain_pb2.BLOCK_GENERATOR:
                            peer_type = loopchain_pb2.PEER

                        if self.__ca.verify_peer_token(peer_token=data,
                                                       peer=peer,
                                                       peer_type=peer_type):
                            auth = peer.auth
                            token = request.token
                        else:
                            info = "Invalid TOKEN"
                            return loopchain_pb2.PeerReply(
                                status=message_code.Response.fail,
                                peer_list=b'',
                                more_info=info)

                elif tag == conf.TOKEN_TYPE_CERT:
                    # TODO: Need a mechanism that ties the certificate to the Peer_ID (using info inside the certificate)
                    if self.__ca.verify_certificate_der(bytes.fromhex(data)):
                        rand_key = secrets.token_bytes(16).hex()
                        self._rs.auth[request.peer_id] = {
                            'rand_key': rand_key,
                            'auth': data
                        }
                        return loopchain_pb2.PeerReply(
                            status=message_code.Response.success,
                            peer_list=b'',
                            more_info=rand_key)
                    else:
                        info = "Invalid Peer Certificate"
                        return loopchain_pb2.PeerReply(
                            status=message_code.Response.fail,
                            peer_list=b'',
                            more_info=info)

                elif tag == conf.TOKEN_TYPE_SIGN:
                    try:
                        peer_auth = self._rs.auth[request.peer_id]
                        rand_key = peer_auth['rand_key']
                        auth = peer_auth['auth']
                    except KeyError:
                        info = "No Peer Info"
                        return loopchain_pb2.PeerReply(
                            status=message_code.Response.fail,
                            peer_list=b'',
                            more_info=info)
                    logging.debug("Get Rand_key: %s, auth: %s", rand_key, auth)

                    new_token = self.__ca.generate_peer_token(
                        peer_sign=bytes.fromhex(data),
                        peer_cert=bytes.fromhex(auth),
                        peer_id=request.peer_id,
                        peer_target=request.peer_target,
                        group_id=request.group_id,
                        peer_type=request.peer_type,
                        rand_key=bytes.fromhex(rand_key),
                        token_interval=conf.TOKEN_INTERVAL)

                    if new_token is not None:
                        self._rs.auth[request.peer_id] = {}

                    token = conf.TOKEN_TYPE_TOKEN + new_token

                else:
                    info = "Unknown token type(" + tag + ")"
                    return loopchain_pb2.PeerReply(
                        status=message_code.Response.fail,
                        peer_list=b'',
                        more_info=info)

        peer = self.__peer_manager.make_peer(request.peer_id,
                                             request.group_id,
                                             request.peer_target,
                                             PeerStatus.connected,
                                             peer_auth=auth,
                                             peer_token=token)

        peer_order = self.__peer_manager.add_peer_object(peer)
        self.__common_service.save_peer_list(self.__peer_manager)

        if peer_order > 0:
            # Connection complete
            try:
                peer_dump = pickle.dumps(peer)
            except pickle.PicklingError as e:
                logging.warning("Fail Peer Dump: " + str(e))
                peer_dump = b''

            request.peer_object = peer_dump
            request.peer_order = peer_order
            logging.debug("Connect Peer: " + str(request))

            # self.__broadcast_new_peer(request)
            # TODO When RS subscribe is used, a restarted RS fails to broadcast new peers to everyone.
            # If broadcasting moves to peer_list, the Subscribe structure toward RS or the leader is
            # no longer valid. But the broadcast process is not used for peer_list broadcasts, and
            # peer_list blocks while broadcasting. Either make the broadcast process usable for
            # peer_list broadcasts, have RS or the leader restore its Subscribe info after a restart,
            # or guarantee that peer_list broadcasting has no performance (concurrency) problems. TODO TODO TODO
            self.__peer_manager.announce_new_peer(request)

            logging.debug("get_IP_of_peers_in_group: " +
                          str(self.__peer_manager.get_IP_of_peers_in_group()))

            try:
                peer_list_dump = self.__peer_manager.dump()
                status, reason = message_code.get_response(
                    message_code.Response.success)
                # TODO reason is currently used to return the token; when used for this purpose it
                # must contain only the token. Refactor this when the message is extended later.
                reason = token
            except pickle.PicklingError as e:
                logging.warning("fail peer_list dump")
                peer_list_dump = b''
                status, reason = message_code.get_response(
                    message_code.Response.fail)
                reason += " " + str(e)

            return loopchain_pb2.PeerReply(status=status,
                                           peer_list=peer_list_dump,
                                           more_info=reason)

    def GetPeerList(self, request, context):
        """현재 RadioStation 에 접속된 Peer 목록을 구한다.

        :param request: CommonRequest
        :param context:
        :return: PeerList
        """
        try:
            peer_list_dump = self.__peer_manager.dump()
        except pickle.PicklingError as e:
            logging.warning("fail peer_list dump")
            peer_list_dump = b''

        return loopchain_pb2.PeerList(peer_list=peer_list_dump)

    def GetPeerStatus(self, request, context):
        # request parsing
        logging.debug(
            f"rs service GetPeerStatus peer_id({request.peer_id}) group_id({request.group_id})"
        )

        # get stub of target peer
        peer_stub_manager = self.__peer_manager.get_peer_stub_manager(
            self.__peer_manager.get_peer(request.peer_id))
        if peer_stub_manager is not None:
            try:
                response = peer_stub_manager.call_in_times(
                    "GetStatus",
                    loopchain_pb2.StatusRequest(
                        request="get peer status from rs"))
                if response is not None:
                    return response
            except Exception as e:
                logging.warning(f"fail GetStatus... ({e})")

        return loopchain_pb2.StatusReply(status="", block_height=0, total_tx=0)

    def AnnounceNewLeader(self, request, context):
        new_leader_peer = self.__peer_manager.get_peer(request.new_leader_id,
                                                       None)
        logging.debug(
            f"AnnounceNewLeader({request.new_leader_id})({new_leader_peer.target}): "
            + request.message)

        self.__peer_manager.set_leader_peer(new_leader_peer, None)

        return loopchain_pb2.CommonReply(
            response_code=message_code.Response.success, message="success")

    def Subscribe(self, request, context):
        """RadioStation 이 broadcast 하는 채널에 Peer 를 등록한다.

        :param request: SubscribeRequest
        :param context:
        :return: CommonReply
        """
        logging.debug("Radio Station Subscription peer_id: " + str(request))
        self.__common_service.add_audience(request)
        return loopchain_pb2.CommonReply(
            response_code=message_code.get_response_code(
                message_code.Response.success),
            message=message_code.get_response_msg(
                message_code.Response.success))

    def UnSubscribe(self, request, context):
        """RadioStation 의 broadcast 채널에서 Peer 를 제외한다.

        :param request: SubscribeRequest
        :param context:
        :return: CommonReply
        """
        logging.debug("Radio Station UnSubscription peer_id: " +
                      request.peer_target)
        self.__peer_manager.remove_peer(request.peer_id, request.group_id)
        self.__common_service.remove_audience(request.peer_id,
                                              request.peer_target)
        return loopchain_pb2.CommonReply(response_code=0, message="success")

    def __broadcast_new_peer(self, peer_request):
        """새로 들어온 peer 를 기존의 peer 들에게 announce 한다.
        """
        logging.debug("Broadcast New Peer.... " + str(peer_request))
        if self.__common_service is not None:
            self.__common_service.broadcast("AnnounceNewPeer", peer_request)

    def check_peer_status(self):
        """service loop for status heartbeat check to peer list

        :return:
        """
        delete_peer_list = self.__peer_manager.check_peer_status()

        for delete_peer in delete_peer_list:
            logging.debug(f"delete peer {delete_peer.peer_id}")
            message = loopchain_pb2.PeerID(peer_id=delete_peer.peer_id,
                                           group_id=delete_peer.group_id)
            self.__common_service.broadcast("AnnounceDeletePeer", message)

        time.sleep(conf.SLEEP_SECONDS_IN_RADIOSTATION_HEARTBEAT)

    def serve(self, port=conf.PORT_RADIOSTATION):
        """Peer(BlockGenerator Peer) to RadioStation

        :param port: RadioStation Peer
        """
        stopwatch_start = timeit.default_timer()

        if conf.ENABLE_REST_SERVICE:
            self.__rest_service = RestServiceRS(int(port))

        loopchain_pb2_grpc.add_RadioStationServicer_to_server(
            self, self.__common_service.outer_server)
        logging.info("Start peer service at port: " + str(port))

        self.__common_service.add_loop(self.check_peer_status)
        self.__common_service.start(port)

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Start Radio Station start duration({stopwatch_duration})")

        # Wait for the service to stop.
        self.__common_service.wait()

        if self.__rest_service is not None:
            self.__rest_service.stop()
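
When the CA is in secure mode, `ConnectPeer` above implements a three-step handshake keyed on a two-character tag (the source comments give the values: TOKEN "00", CERT "01", SIGN "02"): the peer first presents a certificate ("01") and gets back a random key, then signs that key ("02") to be issued a token, and afterwards reconnects with the token itself ("00"). A condensed sketch of just the tag routing; the handler names are hypothetical:

# Tag values as stated in the example's comments.
TOKEN_TYPE_TOKEN, TOKEN_TYPE_CERT, TOKEN_TYPE_SIGN = "00", "01", "02"

def route_token(token: str):
    """Split the 2-char tag from its payload, the way ConnectPeer does."""
    tag, data = token[:2], token[2:]
    if tag == TOKEN_TYPE_TOKEN:
        return ("verify_peer_token", data)   # -> __ca.verify_peer_token(...)
    if tag == TOKEN_TYPE_CERT:
        return ("issue_rand_key", data)      # -> __ca.verify_certificate_der(...)
    if tag == TOKEN_TYPE_SIGN:
        return ("issue_token", data)         # -> __ca.generate_peer_token(...)
    return ("fail", f"Unknown token type({tag})")

# route_token("01" + "cafe") -> ("issue_rand_key", "cafe")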
Example #10
    def serve(self, port, score=conf.DEFAULT_SCORE_PACKAGE):
        """피어 실행

        :param port: 피어의 실행포트
        :param score: 피어의 실행 체인코드
        """
        stopwatch_start = timeit.default_timer()

        is_all_service_safe_start = True
        is_delay_announce_new_leader = False

        self.__port_init(port)
        self.__run_inner_services(port)

        inner_service_port = conf.PORT_INNER_SERVICE or (
            int(port) + conf.PORT_DIFF_INNER_SERVICE)
        self.__common_service = CommonService(loopchain_pb2,
                                              self.__peer_target,
                                              inner_service_port)
        self.peer_id = str(self.__common_service.get_peer_id())
        self.__peer_manager = self.__common_service.load_peer_manager()
        self.__block_manager = self.__load_block_manager()

        response = self.__connect_to_radiostation()
        logging.debug("Connect to radiostation: " + str(response))

        is_peer_list_from_rs = False

        if response is not None and response.status == message_code.Response.success:
            # If RS responds, peer_list is updated with the result RS delivered.
            # Otherwise, the values read from the local LevelDB are used as the default.
            # TODO How can RS be trusted? Even if RS approves a new peer joining, shouldn't the list of participating peers be protected in a more trustworthy way?
            # If someone kills RS, couldn't an impersonator destroy the network by sending a fake peer list?
            # What about putting peer participation into a block after RS approval and adding it to the blockchain?

            peer_list_data = pickle.loads(response.peer_list)
            self.__peer_manager.load(peer_list_data, False)
            self.__common_service.save_peer_list(self.__peer_manager)
            logging.debug("peer list update: " +
                          self.__peer_manager.get_peers_for_debug())
            is_peer_list_from_rs = True
        else:
            logging.debug("using local peer list: " +
                          self.__peer_manager.get_peers_for_debug())

        logging.debug("peer_id: " + str(self.peer_id))

        if self.__peer_manager.get_peer_count() == 0:
            util.exit_and_msg(
                "There is no peer_list, initial network is not allowed without RS!"
            )
        peer_self = self.__peer_manager.get_peer(self.peer_id, self.group_id)
        logging.debug("peer_self: " + str(peer_self))
        peer_leader = self.__peer_manager.get_leader_peer(
            is_complain_to_rs=True)
        logging.debug("peer_leader: " + str(peer_leader))

        # TODO LOOPCHAIN-61 load certificate
        _cert = None
        # TODO LOOPCHAIN-61 load certificate key
        _private_key = None
        # TODO request authentication info

        # TODO consider letting this peer become leader via leader complain instead of this conditional check
        if peer_self.peer_id == peer_leader.peer_id:
            # Become the block generator when this peer is the only connected peer in peer_list or matches the leader info from RS.
            if is_peer_list_from_rs is True or self.__peer_manager.get_connected_peer_count(
                    None) == 1:
                logging.debug("Set Peer Type Block Generator!")
                self.__peer_type = loopchain_pb2.BLOCK_GENERATOR

        # load score must happen after the score service has started and before block height sync begins.
        is_all_service_safe_start &= self.__load_score(score)

        if self.__peer_type == loopchain_pb2.PEER:
            # We did not start as leader, but if our own info matches the leader peer info,
            # do a block height sync to find the leader of the latest block.
            if peer_leader.target != self.__peer_target:
                block_sync_target_stub = StubManager.get_stub_manager_to_server(
                    peer_leader.target,
                    loopchain_pb2_grpc.PeerServiceStub,
                    time_out_seconds=conf.GRPC_TIMEOUT)
            else:
                block_sync_target_stub = None

            if block_sync_target_stub is None:
                logging.warning(
                    "You may be behind this network... or there is no leader in this network!"
                )

                # TODO file a leader complain to RS in this situation
                is_delay_announce_new_leader = True
                peer_old_leader = peer_leader
                peer_leader = self.__peer_manager.leader_complain_to_rs(
                    conf.ALL_GROUP_ID, is_announce_new_peer=False)

                if peer_leader is not None:
                    block_sync_target_stub = StubManager.get_stub_manager_to_server(
                        peer_leader.target,
                        loopchain_pb2_grpc.PeerServiceStub,
                        time_out_seconds=conf.GRPC_TIMEOUT)

            if peer_leader is None or peer_leader.peer_id == peer_self.peer_id:
                peer_leader = peer_self
                self.__peer_type = loopchain_pb2.BLOCK_GENERATOR
            else:
                self.block_height_sync(block_sync_target_stub)

                # # TODO re-evaluate the logic that derives leader info from the last block after the leader complain algorithm is revised
                # last_block_peer_id = self.__block_manager.get_blockchain().last_block.peer_id
                #
                # if last_block_peer_id != "" and last_block_peer_id != self.__peer_list.get_leader_peer().peer_id:
                #     logging.debug("make leader stub after block height sync...")
                #     new_leader_peer = self.__peer_list.get_peer(last_block_peer_id)
                #
                #     if new_leader_peer is None:
                #         new_leader_peer = self.__peer_list.leader_complain_to_rs(conf.ALL_GROUP_ID)
                #
                #     self.__peer_list.set_leader_peer(new_leader_peer, None)
                #     # TODO verify that resetting the stub is safe when the leader equals next_leader_peer above
                #     self.__stub_to_blockgenerator = self.__peer_list.get_peer_stub_manager(new_leader_peer)
                #     peer_leader = new_leader_peer
                # else:
                #     self.__stub_to_blockgenerator = block_sync_target_stub

                self.__stub_to_blockgenerator = block_sync_target_stub

                if self.__stub_to_blockgenerator is None:
                    util.exit_and_msg("Fail connect to leader!!")

                self.show_peers()

        self.__common_service.set_peer_type(self.__peer_type)

        if self.__peer_type == loopchain_pb2.BLOCK_GENERATOR:
            self.__block_manager.set_peer_type(self.__peer_type)

        loopchain_pb2_grpc.add_PeerServiceServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)
        loopchain_pb2_grpc.add_InnerServiceServicer_to_server(
            self.__inner_service, self.__common_service.inner_server)
        logging.info("Start peer service at port: " + str(port))

        self.__block_manager.start()
        self.__common_service.start(port, self.peer_id, self.group_id)

        if self.__stub_to_radio_station is not None:
            self.__common_service.subscribe(self.__stub_to_radio_station)

        # Start the peer process for gRPC sends to the block generator.
        # For now it is only used when creating a tx.
        logging.debug("peer_leader target is: " + str(peer_leader.target))

        self.__tx_process = self.__run_tx_process(
            blockgenerator_info=peer_leader.target,
            inner_channel_info=conf.IP_LOCAL + ":" + str(inner_service_port))

        if self.__stub_to_blockgenerator is not None:
            self.__common_service.subscribe(self.__stub_to_blockgenerator,
                                            loopchain_pb2.BLOCK_GENERATOR)

        if is_delay_announce_new_leader:
            self.__peer_manager.announce_new_leader(peer_old_leader.peer_id,
                                                    peer_leader.peer_id)

        self.__send_to_process_thread = SendToProcess(self.__tx_process)
        self.__send_to_process_thread.start()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Peer Service start duration({stopwatch_duration})")

        # Wait for the service to stop.
        if is_all_service_safe_start:
            self.__common_service.wait()
        else:
            self.service_stop()

        self.__send_to_process_thread.stop()
        self.__send_to_process_thread.wait()

        logging.info("Peer Service Ended.")
        self.__score_service.stop()
        if self.__rest_service is not None:
            self.__rest_service.stop()
        self.__tx_service.stop()
        self.__stop_tx_process()
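
The serve() flow above decides the peer type from three inputs: whether this peer matches the RS-designated leader, whether the peer list actually came from RS, and how many peers are connected. A standalone restatement of that bootstrap rule, with hypothetical names that are not part of the example's API:

def decide_initial_peer_type(peer_self, peer_leader,
                             is_peer_list_from_rs, connected_peer_count):
    # Become the block generator only when this peer is the designated leader
    # AND either RS vouched for the peer list or it is the only connected peer.
    if peer_self.peer_id == peer_leader.peer_id and \
            (is_peer_list_from_rs or connected_peer_count == 1):
        return loopchain_pb2.BLOCK_GENERATOR
    return loopchain_pb2.PEER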
Example #11
class PeerService:
    """Implements the gRPC interface of the Peer Service.
    The service loop and common elements are handled through CommonService.
    """
    def __init__(self,
                 group_id=None,
                 radio_station_ip=conf.IP_RADIOSTATION,
                 radio_station_port=conf.PORT_RADIOSTATION,
                 cert_path=None,
                 cert_pass=None):
        """Peer는 Radio Station 에 접속하여 leader 및 다른 Peer에 대한 접속 정보를 전달 받는다.

        :param group_id: Peer Group 을 구분하기 위한 ID, None 이면 Single Peer Group 이 된다. (peer_id is group_id)
        conf.PEER_GROUP_ID 를 사용하면 configure 파일에 저장된 값을 group_id 로 사용하게 된다.
        :param radio_station_ip: RS IP
        :param radio_station_port: RS Port
        :param cert_path: Peer 인증서 디렉토리 경로
        :param cert_pass: Peer private key password
        :return:
        """
        self.__handler_map = {
            message_code.Request.status: self.__handler_status,
            message_code.Request.peer_peer_list: self.__handler_peer_list
        }
        self.__peer_type = loopchain_pb2.PEER
        self.__send_to_process_thread = None

        self.__radio_station_target = radio_station_ip + ":" + str(
            radio_station_port)
        self.__stub_to_radio_station = None
        logging.info("Set Radio Station target is " +
                     self.__radio_station_target)

        self.__peer_id = None
        self.__group_id = group_id
        if self.__group_id is None and conf.PEER_GROUP_ID != "":
            self.__group_id = conf.PEER_GROUP_ID

        self.__common_service = None
        self.__peer_manager = None
        self.__block_manager = None
        self.__score_service = None
        self.__rest_service = None
        self.__tx_service = None

        # Channels and stubs for servers; these can be set after serve()
        self.__stub_to_blockgenerator = None
        self.__stub_to_score_service = None

        # TODO remove .__score from the peer service; it is only used by the set chain code test. (remove after review)
        self.__score = None
        self.__score_info = None
        self.__peer_target = None
        self.__inner_target = None
        self.__peer_port = 0

        self.__block_height_sync_lock = False

        # For sending tx to the leader
        self.__tx_process = None

        self.__auth = PeerAuthorization()

        # gRPC service for Peer
        self.__inner_service = InnerService()
        self.__outer_service = OuterService()

        # load the certificate
        if cert_path is not None:
            self.__auth.load_pki(cert_path, cert_pass)

        self.__reset_voter_in_progress = False

    @property
    def common_service(self):
        return self.__common_service

    @property
    def block_manager(self):
        return self.__block_manager

    # TODO renamed to peer_manager, but the change is so widespread that the old name is kept temporarily.
    # Apply gradually to narrow the refactoring scope, then remove this once everything is migrated.
    @property
    def peer_list(self):
        return self.__peer_manager

    @property
    def peer_manager(self):
        return self.__peer_manager

    @property
    def stub_to_score_service(self):
        return self.__stub_to_score_service

    @property
    def score_info(self):
        return self.__score_info

    @property
    def send_to_process_thread(self):
        return self.__send_to_process_thread

    @property
    def peer_type(self):
        return self.__peer_type

    @property
    def auth(self):
        return self.__auth

    @property
    def stub_to_blockgenerator(self):
        return self.__stub_to_blockgenerator

    @property
    def stub_to_radiostation(self):
        return self.__stub_to_radio_station

    def __handler_status(self, request, context):
        return loopchain_pb2.Message(code=message_code.Response.success)

    def __handler_peer_list(self, request, context):
        message = "All Group Peers count: " + str(
            len(self.__peer_manager.peer_list[conf.ALL_GROUP_ID]))
        return loopchain_pb2.Message(code=message_code.Response.success,
                                     message=message,
                                     meta=str(self.__peer_manager.peer_list))

    def rotate_next_leader(self):
        """Find Next Leader Id from peer_list and reset leader to that peer

        :return:
        """

        # logging.debug("rotate next leader...")
        next_leader = self.__peer_manager.get_next_leader_peer(
            is_only_alive=True)

        # Check Next Leader is available...
        if next_leader is not None and next_leader.peer_id != self.peer_id:
            try:
                stub_manager = self.__peer_manager.get_peer_stub_manager(
                    next_leader)
                response = stub_manager.call(
                    "GetStatus",
                    loopchain_pb2.StatusRequest(request="get_leader_peer"),
                    is_stub_reuse=False)

                # Since it takes time for the peer to switch to leader, only check connectivity.
                # peer_status = json.loads(response.status)
                # if peer_status["peer_type"] != str(loopchain_pb2.BLOCK_GENERATOR):
                #     logging.warning("next rotate is not a leader")
                #     raise Exception

            except Exception as e:
                logging.warning(f"rotate next leader exceptions({e})")
                next_leader = self.__peer_manager.leader_complain_to_rs(
                    conf.ALL_GROUP_ID)

        if next_leader is not None:
            self.reset_leader(next_leader.peer_id)

    def reset_leader(self, new_leader_id):
        logging.warning("RESET LEADER: " + str(new_leader_id))

        complained_leader = self.__peer_manager.get_leader_peer()

        leader_peer = self.__peer_manager.get_peer(new_leader_id, None)
        if leader_peer is None:
            logging.warning(
                f"in peer_service::reset_leader There is no peer by peer_id({new_leader_id})"
            )
            return

        self.__peer_manager.set_leader_peer(leader_peer, None)

        peer_self = self.__peer_manager.get_peer(self.peer_id)
        peer_leader = self.__peer_manager.get_leader_peer()

        if peer_self.target == peer_leader.target:
            logging.debug("Set Peer Type Block Generator!")
            self.__peer_type = loopchain_pb2.BLOCK_GENERATOR
            self.__block_manager.get_blockchain().reset_made_block_count()

            # TODO the code below is semantically redundant, but rewriting it explicitly as code that waits for the leader change
            # causes errors from blockchain delays. For now, keep the version that shows more stable test results.
            response = self.peer_list.get_peer_stub_manager(peer_self).call(
                "GetStatus",
                loopchain_pb2.StatusRequest(request="reset_leader"),
                is_stub_reuse=True)

            status_json = json.loads(response.status)
            if status_json['peer_type'] == str(loopchain_pb2.BLOCK_GENERATOR):
                is_broadcast = True
            else:
                is_broadcast = False

            self.__peer_manager.announce_new_leader(complained_leader.peer_id,
                                                    new_leader_id,
                                                    is_broadcast=is_broadcast)
        else:
            logging.debug("Set Peer Type Peer!")
            self.__peer_type = loopchain_pb2.PEER
            self.__stub_to_blockgenerator = self.__peer_manager.get_peer_stub_manager(
                peer_leader)
            # subscribe to the new leader
            self.__common_service.subscribe(self.__stub_to_blockgenerator,
                                            loopchain_pb2.BLOCK_GENERATOR)

        self.__common_service.set_peer_type(self.__peer_type)
        # update candidate blocks
        self.__block_manager.get_candidate_blocks().set_last_block(
            self.__block_manager.get_blockchain().last_block)
        self.__block_manager.set_peer_type(self.__peer_type)

        if self.__tx_process is not None:
            # process the remaining jobs of peer_process (peer->leader case),
            # update the leader info of peer_process (peer->peer case)
            self.__tx_process_connect_to_leader(self.__tx_process,
                                                peer_leader.target)

    def show_peers(self):
        logging.debug("Peers: ")
        for peer in self.__peer_manager.get_IP_of_peers_in_group():
            logging.debug("peer_target: " + peer)

    def __load_score(self, score):
        """Load the score.

        :param score: score package name
        """
        if self.__score_info is None:
            logging.info("LOAD SCORE AND CONNECT TO SCORE SERVICE!")
            params = dict()
            params[message_code.MetaParams.ScoreLoad.
                   repository_path] = conf.DEFAULT_SCORE_REPOSITORY_PATH
            params[message_code.MetaParams.ScoreLoad.score_package] = score
            params[message_code.MetaParams.ScoreLoad.
                   base] = conf.DEFAULT_SCORE_BASE
            params[message_code.MetaParams.ScoreLoad.peer_id] = self.__peer_id
            meta = json.dumps(params)

            if self.__stub_to_score_service is None:
                logging.error(f"there is no __stub_to_scoreservice!")
                return False

            # Score load is slow (its load timeout exceeds GRPC_CONNECTION_TIMEOUT)
            response = self.__stub_to_score_service.call(
                "Request",
                loopchain_pb2.Message(code=message_code.Request.score_load,
                                      meta=meta), conf.SCORE_LOAD_TIMEOUT)
            logging.debug("try score load on score service: " + str(response))

            response_connect = self.__stub_to_score_service.call(
                "Request",
                loopchain_pb2.Message(code=message_code.Request.score_connect,
                                      message=self.__peer_target),
                conf.GRPC_CONNECTION_TIMEOUT)
            logging.debug("try connect to score service: " +
                          str(response_connect))

            if response.code == message_code.Response.success:
                logging.debug("Get Score from Score Server...")
                self.__score_info = json.loads(response.meta)
            else:
                logging.error("Fail Get Score from Score Server...")
                return False
            logging.info("LOAD SCORE DONE!")
        else:
            logging.info(
                "PEER SERVICE HAS SCORE BUT LOAD SCORE FUNCTION CALL!")
            score_dump = pickle.dumps(self.__score)
            response = self.__stub_to_score_service.call(
                "Request",
                loopchain_pb2.Message(code=message_code.Request.score_set,
                                      object=score_dump))
            if response.code != message_code.Response.success:
                logging.error("Fail Set Score!!")
            logging.info("LOAD SCORE DONE!")

        return True

    def service_stop(self):
        self.__block_manager.stop()
        self.__common_service.stop()

    def score_invoke(self, block):
        block_object = pickle.dumps(block)
        try:
            response = self.__stub_to_score_service.call(
                "Request",
                loopchain_pb2.Message(code=message_code.Request.score_invoke,
                                      object=block_object))
            # logging.debug("Score Server says: " + str(response))
            if response.code == message_code.Response.success:
                return json.loads(response.meta)
            else:
                raise ScoreInvokeError('score process grpc fail')
        except Exception as e:
            logging.warning("fail score invoke: " + str(e))
            return False

    def __load_block_manager(self):
        try:
            block_manager = BlockManager(self.__common_service)
            return block_manager
        except leveldb.LevelDBError as e:
            util.exit_and_msg("LevelDBError(" + str(e) + ")")

    def __connect_to_radiostation(self):
        """Connect to the RadioStation.

        :return: connection info, or None on failure
        """
        logging.debug("try to connect to radiostation")

        self.__stub_to_radio_station = StubManager.get_stub_manager_to_server(
            self.__radio_station_target, loopchain_pb2_grpc.RadioStationStub,
            conf.CONNECTION_RETRY_TIMEOUT_TO_RS)

        if self.__stub_to_radio_station is None:
            logging.warning("fail make stub to Radio Station!!")
            return None

        token = None
        if self.__auth.is_secure:
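            # Token handshake with RS, as implemented below: reuse a stored,
            # unexpired token when one exists; otherwise send our certificate,
            # receive a random challenge, sign it, and present the signature
            # as the token. On success RS returns a token we verify and store.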
            peer_self = self.__peer_manager.get_peer(self.peer_id)
            token = None
            if peer_self is not None:
                token = peer_self.token
            logging.debug("Self Peer Token : %s", token)

            # if the token's validity period has passed, request a new one
            if token is not None and self.__auth.get_token_time(token) is None:
                token = None

            self.__auth.set_peer_info(self.peer_id, self.__peer_target,
                                      self.group_id, self.__peer_type)
            cert_bytes = self.__auth.get_cert_bytes()
            if token is None:
                # receive a random value from the server
                # response = util.request_server_in_time(self.__stub_to_radio_station.ConnectPeer,
                #                                        loopchain_pb2.PeerRequest(
                #                                            peer_object=b'',
                #                                            peer_id=self.peer_id,
                #                                            peer_target=self.__peer_target,
                #                                            group_id=self.group_id,
                #                                            peer_type=self.__peer_type,
                #                                            token=conf.TOKEN_TYPE_CERT + cert_bytes.hex())
                #                                        )
                response = self.__stub_to_radio_station.call(
                    "ConnectPeer",
                    loopchain_pb2.PeerRequest(peer_object=b'',
                                              peer_id=self.peer_id,
                                              peer_target=self.__peer_target,
                                              group_id=self.group_id,
                                              peer_type=self.__peer_type,
                                              token=conf.TOKEN_TYPE_CERT +
                                              cert_bytes.hex()),
                    conf.GRPC_TIMEOUT)

                rand_key = None
                if response is not None and response.status == message_code.Response.success:
                    logging.debug("Received Random : %s", response.more_info)
                    if len(response.more_info) != 32:
                        # cannot connect unless the token is 16 bytes (32 hex characters)
                        logging.debug('The token received from the server must be 16 bytes long.')
                    else:
                        rand_key = response.more_info
                else:
                    return response

                # sign the random value and the peer info
                if rand_key is None:
                    return None
                else:
                    sign = self.__auth.generate_request_sign(rand_key=rand_key)
                    token = conf.TOKEN_TYPE_SIGN + sign.hex()
            else:
                self.__auth.add_token(token)

        # common part
        # response = util.request_server_in_time(self.__stub_to_radio_station.ConnectPeer,
        #                                        loopchain_pb2.PeerRequest(
        #                                            peer_object=b'',
        #                                            peer_id=self.peer_id,
        #                                            peer_target=self.__peer_target,
        #                                            group_id=self.group_id,
        #                                            peer_type=self.__peer_type,
        #                                            token=token
        #                                        ))
        response = self.__stub_to_radio_station.call(
            "ConnectPeer",
            loopchain_pb2.PeerRequest(peer_object=b'',
                                      peer_id=self.peer_id,
                                      peer_target=self.__peer_target,
                                      group_id=self.group_id,
                                      peer_type=self.__peer_type,
                                      token=token),
            conf.GRPC_CONNECTION_TIMEOUT)

        if response is not None and response.status == message_code.Response.success:
            if self.__auth.is_secure:
                logging.debug("Received Token : %s", response.more_info)
                # verify the token received from the RadioStation
                if len(response.more_info) < 9:
                    # cannot connect unless the token is at least 8 + 1 bytes
                    logging.debug('The token received from the server must be at least 9 bytes long.')
                    response.status = message_code.Response.fail_validate_params
                    response.more_info = "Invalid Token Data"
                else:
                    token = response.more_info
                    tag = token[:2]
                    if tag == conf.TOKEN_TYPE_TOKEN:
                        if self.__auth.verify_token(token):
                            logging.debug("토큰 검증에 성공하였습니다.")
                            self.__auth.add_token(token)
                        else:
                            logging.debug("토큰 검증에 실패하였습니다.")
                            response.status = message_code.Response.fail_validate_params
                            response.more_info = "Invalid Token Signature"

        return response

    def add_unconfirm_block(self, block_unloaded):
        block = pickle.loads(block_unloaded)
        block_hash = block.block_hash

        response_code, response_msg = message_code.get_response(
            message_code.Response.fail_validate_block)

        # validate the block
        block_is_validated = False
        try:
            block_is_validated = block.validate()
        except (BlockInValidError, BlockError, TransactionInValidError) as e:
            logging.error(e)

        if block_is_validated:
            # When a broadcast arrives, validate the received block and, if valid, register it as an unconfirmed block in the local blockchain.
            confirmed, reason = self.__block_manager.get_blockchain(
            ).add_unconfirm_block(block)
            if confirmed:
                response_code, response_msg = message_code.get_response(
                    message_code.Response.success_validate_block)
            elif reason == "block_height":
                # If the announced block's height differs from ours, retry block height sync.
                self.block_height_sync(self.__stub_to_blockgenerator)

        return response_code, response_msg, block_hash

    def __tx_process_connect_to_leader(self, peer_process, leader_target):
        logging.debug("try... Peer Process connect_to_blockgenerator: " +
                      leader_target)
        logging.debug("peer_process: " + str(peer_process))
        peer_process.send_to_process(
            ("connect_to_blockgenerator", leader_target))

    def __run_tx_process(self, blockgenerator_info, inner_channel_info):
        tx_process = TxProcess()
        tx_process.start()
        tx_process.send_to_process(("status", ""))

        wait_times = 0
        wait_for_process_start = None

        while wait_for_process_start is None:
            time.sleep(conf.SLEEP_SECONDS_FOR_SUB_PROCESS_START)
            logging.debug("wait for tx process to start....")
            wait_for_process_start = tx_process.get_receive("status")

            wait_times += 1  # count retries so the wait can actually time out
            if wait_for_process_start is None and wait_times > conf.WAIT_SUB_PROCESS_RETRY_TIMES:
                util.exit_and_msg("Tx Process start Fail!")

        logging.debug(f"Tx Process start({wait_for_process_start})")

        self.__tx_process_connect_to_leader(tx_process, blockgenerator_info)
        tx_process.send_to_process(
            ("make_self_connection", inner_channel_info))

        return tx_process

    def __stop_tx_process(self):
        if self.__tx_process is not None:
            self.__tx_process.stop()
            self.__tx_process.wait()

    @property
    def peer_id(self):
        return self.__peer_id

    @peer_id.setter
    def peer_id(self, peer_id):
        self.__peer_id = peer_id

    @property
    def group_id(self):
        if self.__group_id is None:
            self.__group_id = self.__peer_id
        return self.__group_id

    @property
    def peer_target(self):
        return self.__peer_target

    def block_height_sync(self, target_peer_stub=None):
        """Synchronize block data between peers.
        """
        if self.__block_height_sync_lock is True:
            # ***** This correction process also runs when a peer failed to handle an AnnounceConfirmBlock message due to a block height gap.
            # Therefore, requests made while a sync is already in progress are ignored.
            logging.warning("block height sync is already running...")
            return

        self.__block_height_sync_lock = True
        if target_peer_stub is None:
            target_peer_stub = self.__stub_to_blockgenerator

        ### Block height correction: the peer data synchronization process ###
        ### Love&Hate Algorithm ###
        logging.info("try block height sync...with love&hate")

        # Make Peer Stub List [peer_stub, ...] and get max_height of network
        max_height = 0
        peer_stubs = []
        for peer_target in self.__peer_manager.get_IP_of_peers_in_group():
            target = ":".join(peer_target.split(":")[1:])
            if target != self.__peer_target:
                logging.debug(f"try to target({target})")
                channel = grpc.insecure_channel(target)
                stub = loopchain_pb2_grpc.PeerServiceStub(channel)
                try:
                    response = stub.GetStatus(
                        loopchain_pb2.StatusRequest(request=""))
                    if response.block_height > max_height:
                        # Add peer as higher than this
                        max_height = response.block_height
                        peer_stubs.append(stub)
                except Exception as e:
                    logging.warning("Already bad.... I don't love you" +
                                    str(e))

        my_height = self.__block_manager.get_blockchain().block_height

        if max_height > my_height:  # TODO handle the case where this peer has the highest block
            logging.info(
                f"You need block height sync to: {max_height} yours: {my_height}"
            )
            # If the network is ahead of this peer, iterate over the peer list and request blocks
            # in reverse order, from the last block down to our own height.
            # (Because of the blockchain's block lookup logic, querying in ascending height order is inefficient.)

            preload_blocks = {}  # height : block dictionary

            # Start from the target peer's last block hash.
            response = target_peer_stub.call(
                "GetLastBlockHash", loopchain_pb2.StatusRequest(request=""))
            logging.debug(response)
            request_hash = response.block_hash

            max_try = max_height - my_height
            while self.__block_manager.get_blockchain(
            ).last_block.block_hash != request_hash and max_try > 0:

                for peer_stub in peer_stubs:
                    response = None
                    try:
                        # The requested peer sends its current height along with the block.
                        # TODO if some peer is higher than the target peer's last block, finish up to the current target height
                        # TODO and then run height sync again.
                        response = peer_stub.BlockSync(
                            loopchain_pb2.BlockSyncRequest(
                                block_hash=request_hash), conf.GRPC_TIMEOUT)
                    except Exception as e:
                        logging.warning("There is a bad peer, I hate you: " +
                                        str(e))

                    if response is not None and response.response_code == message_code.Response.success:
                        dump = response.block
                        block = pickle.loads(dump)

                        # Walk backwards from the last block.
                        request_hash = block.prev_block_hash

                        # add block to preload_blocks
                        logging.debug("Add preload_blocks Height: " +
                                      str(block.height))
                        preload_blocks[block.height] = block

                        if response.max_block_height > max_height:
                            max_height = response.max_block_height

                        if (my_height + 1) == block.height:
                            max_try = 0  # stop issuing further requests
                            logging.info("Block Height Sync Complete.")
                            break
                        max_try -= 1
                    else:
                        # A peer that did not respond during this request loop is not asked again within the loop.
                        # (TODO: a reporting strategy for bad peers will be handled separately.)
                        peer_stubs.remove(peer_stub)
                        logging.warning(
                            "Make this peer to bad (error above or no response): "
                            + str(peer_stub))

            if len(preload_blocks) > 0:
                while my_height < max_height:
                    add_height = my_height + 1
                    logging.debug("try add block height: " + str(add_height))
                    try:
                        self.__block_manager.add_block(
                            preload_blocks[add_height])
                        my_height = add_height
                    except KeyError as e:
                        logging.error("fail block height sync: " + str(e))
                        break
                    except exception.BlockError as e:
                        logging.error(
                            "Block Error Clear all block and restart peer.")
                        self.__block_manager.clear_all_blocks()
                        util.exit_and_msg(
                            "Block Error Clear all block and restart peer.")

            if my_height < max_height:
                # If block height sync did not finish, try again.
                logging.warning(
                    "fail block height sync in one time... try again...")
                self.__block_height_sync_lock = False
                self.block_height_sync(target_peer_stub)

        self.__block_height_sync_lock = False

    def reset_voter_count(self):
        """Refresh the liveness (gRPC response) of peers in peer_list and update the voter count.

        :return:
        """
        if self.__reset_voter_in_progress is not True:
            self.__reset_voter_in_progress = True
            logging.debug("reset voter count before: " +
                          str(self.__common_service.get_voter_count()))

            # TODO iterate over peer_list and remove_audience any peer with gRPC errors.
            self.__peer_manager.reset_peers(
                None, self.__common_service.remove_audience)
            logging.debug("reset voter count after: " +
                          str(self.__common_service.get_voter_count()))
            self.__reset_voter_in_progress = False

    def set_chain_code(self, score):
        """Interface to assign a score object directly instead of passing it as a path
        (the score argument of serve(...) is the score's file path). Must be set before calling serve.

        :param score: score object
        """
        # TODO currently used only for tests. Remove after review.
        self.__score = score
        self.__score_info = dict()
        self.__score_info[
            message_code.MetaParams.ScoreInfo.score_id] = self.__score.id()
        self.__score_info[message_code.MetaParams.ScoreInfo.
                          score_version] = self.__score.version()

    def __port_init(self, port):
        # service initialization
        self.__peer_target = util.get_private_ip() + ":" + str(port)
        self.__inner_target = conf.IP_LOCAL + ":" + str(port)
        self.__peer_port = int(port)

        # check whether the SCORE service port is already in use
        if util.check_port_using(conf.IP_PEER,
                                 int(port) + conf.PORT_DIFF_SCORE_CONTAINER):
            util.exit_and_msg('Score Service Port is Using ' +
                              str(int(port) + conf.PORT_DIFF_SCORE_CONTAINER))

    def __run_inner_services(self, port):
        if conf.ENABLE_REST_SERVICE:
            self.__rest_service = RestService(int(port))

        self.__score_service = ScoreService(
            int(port) + conf.PORT_DIFF_SCORE_CONTAINER)

        # TODO the tx service is no longer used, but removing this logic triggers a bug where the blockchain
        # network does not form properly. The cause needs investigation.
        self.__tx_service = TxService(int(port) + conf.PORT_DIFF_TX_CONTAINER)

        # TODO change this so the connection check for the stub to the score service can be done via util
        # self.__stub_to_score_service = util.get_stub_to_server('localhost:' +
        #                                                        str(int(port) + conf.PORT_DIFF_SCORE_CONTAINER),
        #                                                        loopchain_pb2_grpc.ContainerStub)
        self.__stub_to_score_service = StubManager.get_stub_manager_to_server(
            conf.IP_PEER + ':' +
            str(int(port) + conf.PORT_DIFF_SCORE_CONTAINER),
            loopchain_pb2_grpc.ContainerStub,
            is_allow_null_stub=True)

    def serve(self, port, score=conf.DEFAULT_SCORE_PACKAGE):
        """Run the peer.

        :param port: port the peer runs on
        :param score: chaincode (SCORE) package the peer runs
        """
        stopwatch_start = timeit.default_timer()

        is_all_service_safe_start = True
        is_delay_announce_new_leader = False

        self.__port_init(port)
        self.__run_inner_services(port)

        inner_service_port = conf.PORT_INNER_SERVICE or (
            int(port) + conf.PORT_DIFF_INNER_SERVICE)
        self.__common_service = CommonService(loopchain_pb2,
                                              self.__peer_target,
                                              inner_service_port)
        self.peer_id = str(self.__common_service.get_peer_id())
        self.__peer_manager = self.__common_service.load_peer_manager()
        self.__block_manager = self.__load_block_manager()

        response = self.__connect_to_radiostation()
        logging.debug("Connect to radiostation: " + str(response))

        is_peer_list_from_rs = False

        if response is not None and response.status == message_code.Response.success:
            # If RS responds, peer_list is updated with the result RS delivered.
            # Otherwise, the values read from the local LevelDB are used as the default.
            # TODO How can RS be trusted? Even if RS approves a new peer joining, shouldn't the list of participating peers be protected in a more trustworthy way?
            # If someone kills RS, couldn't an impersonator destroy the network by sending a fake peer list?
            # What about putting peer participation into a block after RS approval and adding it to the blockchain?

            peer_list_data = pickle.loads(response.peer_list)
            self.__peer_manager.load(peer_list_data, False)
            self.__common_service.save_peer_list(self.__peer_manager)
            logging.debug("peer list update: " +
                          self.__peer_manager.get_peers_for_debug())
            is_peer_list_from_rs = True
        else:
            logging.debug("using local peer list: " +
                          self.__peer_manager.get_peers_for_debug())

        logging.debug("peer_id: " + str(self.peer_id))

        if self.__peer_manager.get_peer_count() == 0:
            util.exit_and_msg(
                "There is no peer_list, initial network is not allowed without RS!"
            )
        peer_self = self.__peer_manager.get_peer(self.peer_id, self.group_id)
        logging.debug("peer_self: " + str(peer_self))
        peer_leader = self.__peer_manager.get_leader_peer(
            is_complain_to_rs=True)
        logging.debug("peer_leader: " + str(peer_leader))

        # TODO LOOPCHAIN-61 load certificate
        _cert = None
        # TODO LOOPCHAIN-61 load certificate key
        _private_key = None
        # TODO request authentication info

        # TODO consider letting this peer become leader via leader complain instead of this conditional check
        if peer_self.peer_id == peer_leader.peer_id:
            # Become the block generator when this peer is the only connected peer in peer_list or matches the leader info from RS.
            if is_peer_list_from_rs is True or self.__peer_manager.get_connected_peer_count(
                    None) == 1:
                logging.debug("Set Peer Type Block Generator!")
                self.__peer_type = loopchain_pb2.BLOCK_GENERATOR

        # load score must happen after the score service has started and before block height sync begins.
        is_all_service_safe_start &= self.__load_score(score)

        if self.__peer_type == loopchain_pb2.PEER:
            # We did not start as leader, but if our own info matches the leader peer info,
            # do a block height sync to find the leader of the latest block.
            if peer_leader.target != self.__peer_target:
                block_sync_target_stub = StubManager.get_stub_manager_to_server(
                    peer_leader.target,
                    loopchain_pb2_grpc.PeerServiceStub,
                    time_out_seconds=conf.GRPC_TIMEOUT)
            else:
                block_sync_target_stub = None

            if block_sync_target_stub is None:
                logging.warning(
                    "You may be behind this network... or there is no leader in this network!"
                )

                # TODO file a leader complain to RS in this situation
                is_delay_announce_new_leader = True
                peer_old_leader = peer_leader
                peer_leader = self.__peer_manager.leader_complain_to_rs(
                    conf.ALL_GROUP_ID, is_announce_new_peer=False)

                if peer_leader is not None:
                    block_sync_target_stub = StubManager.get_stub_manager_to_server(
                        peer_leader.target,
                        loopchain_pb2_grpc.PeerServiceStub,
                        time_out_seconds=conf.GRPC_TIMEOUT)

            if peer_leader is None or peer_leader.peer_id == peer_self.peer_id:
                peer_leader = peer_self
                self.__peer_type = loopchain_pb2.BLOCK_GENERATOR
            else:
                self.block_height_sync(block_sync_target_stub)

                # # TODO re-evaluate the logic that derives leader info from the last block after the leader complain algorithm is revised
                # last_block_peer_id = self.__block_manager.get_blockchain().last_block.peer_id
                #
                # if last_block_peer_id != "" and last_block_peer_id != self.__peer_list.get_leader_peer().peer_id:
                #     logging.debug("make leader stub after block height sync...")
                #     new_leader_peer = self.__peer_list.get_peer(last_block_peer_id)
                #
                #     if new_leader_peer is None:
                #         new_leader_peer = self.__peer_list.leader_complain_to_rs(conf.ALL_GROUP_ID)
                #
                #     self.__peer_list.set_leader_peer(new_leader_peer, None)
                #     # TODO verify that resetting the stub is safe when the leader equals next_leader_peer above
                #     self.__stub_to_blockgenerator = self.__peer_list.get_peer_stub_manager(new_leader_peer)
                #     peer_leader = new_leader_peer
                # else:
                #     self.__stub_to_blockgenerator = block_sync_target_stub

                self.__stub_to_blockgenerator = block_sync_target_stub

                if self.__stub_to_blockgenerator is None:
                    util.exit_and_msg("Fail connect to leader!!")

                self.show_peers()

        self.__common_service.set_peer_type(self.__peer_type)

        if self.__peer_type == loopchain_pb2.BLOCK_GENERATOR:
            self.__block_manager.set_peer_type(self.__peer_type)

        loopchain_pb2_grpc.add_PeerServiceServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)
        loopchain_pb2_grpc.add_InnerServiceServicer_to_server(
            self.__inner_service, self.__common_service.inner_server)
        logging.info("Start peer service at port: " + str(port))

        self.__block_manager.start()
        self.__common_service.start(port, self.peer_id, self.group_id)

        if self.__stub_to_radio_station is not None:
            self.__common_service.subscribe(self.__stub_to_radio_station)

        # Start the peer process for gRPC sends to the block generator.
        # For now it is only used when creating a tx.
        logging.debug("peer_leader target is: " + str(peer_leader.target))

        self.__tx_process = self.__run_tx_process(
            blockgenerator_info=peer_leader.target,
            inner_channel_info=conf.IP_LOCAL + ":" + str(inner_service_port))

        if self.__stub_to_blockgenerator is not None:
            self.__common_service.subscribe(self.__stub_to_blockgenerator,
                                            loopchain_pb2.BLOCK_GENERATOR)

        if is_delay_announce_new_leader:
            self.__peer_manager.announce_new_leader(peer_old_leader.peer_id,
                                                    peer_leader.peer_id)

        self.__send_to_process_thread = SendToProcess(self.__tx_process)
        self.__send_to_process_thread.start()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Peer Service start duration({stopwatch_duration})")

        # Wait for the service to stop.
        if is_all_service_safe_start:
            self.__common_service.wait()
        else:
            self.service_stop()

        self.__send_to_process_thread.stop()
        self.__send_to_process_thread.wait()

        logging.info("Peer Service Ended.")
        self.__score_service.stop()
        if self.__rest_service is not None:
            self.__rest_service.stop()
        self.__tx_service.stop()
        self.__stop_tx_process()
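
block_height_sync() above works in two phases: it walks backwards from the network's last block via prev_block_hash (ascending-height lookups are inefficient in this blockchain) into a height-keyed dict, then applies the preloaded blocks in ascending order. A self-contained sketch of that pattern, with the fetch and apply steps passed in as callables (illustrative names, not the project's API):

def sync_backward_then_apply(last_block_hash, my_height, fetch_block, add_block):
    # Phase 1: walk backwards from the network's last block down to my_height + 1.
    preload_blocks = {}  # height: block
    request_hash = last_block_hash
    while True:
        block = fetch_block(request_hash)  # e.g. one BlockSync gRPC call
        preload_blocks[block.height] = block
        if block.height <= my_height + 1:
            break
        request_hash = block.prev_block_hash

    # Phase 2: apply the preloaded blocks in ascending height order.
    for height in sorted(preload_blocks):
        add_block(preload_blocks[height])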
Example #12
class PeerService:
    """Main class of the Peer Service.
    It has outer and inner gRPC interfaces.
    The service loop and common elements are handled through CommonService.
    Channel-related instances are managed through the channel manager.
    """
    def __init__(self,
                 group_id=None,
                 radio_station_ip=None,
                 radio_station_port=None,
                 node_type=None):
        """Peer는 Radio Station 에 접속하여 leader 및 다른 Peer에 대한 접속 정보를 전달 받는다.

        :param group_id: Peer Group 을 구분하기 위한 ID, None 이면 Single Peer Group 이 된다. (peer_id is group_id)
        conf.PEER_GROUP_ID 를 사용하면 configure 파일에 저장된 값을 group_id 로 사용하게 된다.
        :param radio_station_ip: RS IP
        :param radio_station_port: RS Port
        :return:
        """
        radio_station_ip = radio_station_ip or conf.IP_RADIOSTATION
        radio_station_port = radio_station_port or conf.PORT_RADIOSTATION
        node_type = node_type or conf.NodeType.CommunityNode

        self.is_support_node_function = \
            partial(conf.NodeType.is_support_node_function, node_type=node_type)

        util.logger.spam(f"Your Peer Service runs on debugging MODE!")
        util.logger.spam(
            f"You can see many terrible garbage logs just for debugging, DO U Really want it?"
        )

        # process monitor must start before any subprocess
        if conf.ENABLE_PROCESS_MONITORING:
            Monitor().start()

        self.__node_type = node_type

        self.__radio_station_target = radio_station_ip + ":" + str(
            radio_station_port)
        logging.info("Set Radio Station target is " +
                     self.__radio_station_target)

        self.__radio_station_stub = None

        self.__level_db = None
        self.__level_db_path = ""

        self.__peer_id = None
        self.__group_id = group_id
        if self.__group_id is None and conf.PEER_GROUP_ID != "":
            self.__group_id = conf.PEER_GROUP_ID

        self.__common_service = None
        self.__channel_infos = None

        self.__rest_service = None
        self.__rest_proxy_server = None

        # peer status cache for channel
        self.status_cache = {}  # {channel:status}

        self.__score = None
        self.__peer_target = None
        self.__rest_target = None
        self.__inner_target = None
        self.__peer_port = 0

        # gRPC service for Peer
        self.__inner_service: PeerInnerService = None
        self.__outer_service: PeerOuterService = None
        self.__channel_services = {}

        self.__reset_voter_in_progress = False
        self.__json_conf_path = None

        ObjectManager().peer_service = self

    @property
    def common_service(self):
        return self.__common_service

    @property
    def inner_service(self):
        return self.__inner_service

    @property
    def outer_service(self):
        return self.__outer_service

    @property
    def peer_target(self):
        return self.__peer_target

    @property
    def rest_target(self):
        return self.__rest_target

    @property
    def json_conf_path(self):
        return self.__json_conf_path

    @property
    def channel_infos(self):
        return self.__channel_infos

    @property
    def node_type(self):
        return self.__node_type

    @property
    def radio_station_target(self):
        return self.__radio_station_target

    @property
    def stub_to_radiostation(self):
        stub_type = loopchain_pb2_grpc.PeerServiceStub
        if self.is_support_node_function(conf.NodeFunction.Vote):
            stub_type = loopchain_pb2_grpc.RadioStationStub

        if self.__radio_station_stub is None:
            if self.is_support_node_function(conf.NodeFunction.Vote):
                self.__radio_station_stub = StubManager.get_stub_manager_to_server(
                    self.__radio_station_target,
                    stub_type,
                    conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
                    ssl_auth_type=conf.GRPC_SSL_TYPE)
            else:
                self.__radio_station_stub = RestStubManager(
                    self.__radio_station_target)

        return self.__radio_station_stub

    @property
    def peer_port(self):
        return self.__peer_port

    @property
    def peer_id(self):
        return self.__peer_id

    @property
    def group_id(self):
        if self.__group_id is None:
            self.__group_id = self.__peer_id
        return self.__group_id

    def rotate_next_leader(self, channel_name):
        """Find Next Leader Id from peer_list and reset leader to that peer"""

        # logging.debug("rotate next leader...")
        util.logger.spam(f"peer_service:rotate_next_leader")
        peer_manager = self.__channel_manager.get_peer_manager(channel_name)
        next_leader = peer_manager.get_next_leader_peer(is_only_alive=True)

        # Check Next Leader is available...
        if next_leader is not None and next_leader.peer_id != self.peer_id:
            try:
                stub_manager = peer_manager.get_peer_stub_manager(next_leader)
                response = stub_manager.call(
                    "Request",
                    loopchain_pb2.Message(code=message_code.Request.status,
                                          channel=channel_name,
                                          message="get_leader_peer"),
                    is_stub_reuse=True)

                # Since it takes time for the peer to switch to leader, only check connectivity.
                # peer_status = json.loads(response.status)
                # if peer_status["peer_type"] != str(loopchain_pb2.BLOCK_GENERATOR):
                #     logging.warning("next rotate is not a leader")
                #     raise Exception

            except Exception as e:
                logging.warning(f"rotate next leader exceptions({e})")
                next_leader = peer_manager.leader_complain_to_rs(
                    conf.ALL_GROUP_ID)

        if next_leader is not None:
            self.reset_leader(next_leader.peer_id, channel_name)
        else:
            util.logger.warning(
                f"peer_service:rotate_next_leader next_leader is None({next_leader})"
            )

    def service_stop(self):
        self.__common_service.stop()

    def __get_channel_infos(self):
        # util.logger.spam(f"__get_channel_infos:node_type::{self.__node_type}")
        if self.is_support_node_function(conf.NodeFunction.Vote):
            response = self.stub_to_radiostation.call_in_times(
                method_name="GetChannelInfos",
                message=loopchain_pb2.GetChannelInfosRequest(
                    peer_id=self.__peer_id,
                    peer_target=self.__peer_target,
                    group_id=self.group_id),
                retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS,
                is_stub_reuse=False,
                timeout=conf.CONNECTION_TIMEOUT_TO_RS)
            # util.logger.spam(f"__get_channel_infos:response::{response}")

            if not response:
                return None
            logging.info(
                f"Connect to channels({util.pretty_json(response.channel_infos)})"
            )
            channels = json.loads(response.channel_infos)
        else:
            response = self.stub_to_radiostation.call_in_times(
                method_name="GetChannelInfos")
            channels = {
                channel: value
                for channel, value in response["channel_infos"].items()
                if util.channel_use_icx(channel)
            }

        return channels

    def __init_port(self, port):
        # service initialization
        target_ip = util.get_private_ip()
        self.__peer_target = util.get_private_ip() + ":" + str(port)
        self.__peer_port = int(port)

        rest_port = int(port) + conf.PORT_DIFF_REST_SERVICE_CONTAINER
        self.__rest_target = f"{target_ip}:{rest_port}"

        logging.info("Start Peer Service at port: " + str(port))

    def __init_level_db(self):
        # LevelDB for the peer service (not for a channel); it stores unique peer info such as peer_id
        self.__level_db, self.__level_db_path = util.init_level_db(
            level_db_identity=self.__peer_target, allow_rename_path=False)

    def __run_rest_services(self, port):
        if conf.ENABLE_REST_SERVICE and not conf.USE_EXTERNAL_REST:
            if conf.USE_GUNICORN_HA_SERVER:
                # Run web app on gunicorn in another process.
                self.__rest_proxy_server = RestProxyServer(int(port))
            else:
                # Run web app as it is.
                logging.debug(
                    f'Launch Sanic RESTful server. '
                    f'Port = {int(port) + conf.PORT_DIFF_REST_SERVICE_CONTAINER}'
                )
                self.__rest_service = RestService(int(port))

    def __make_peer_id(self):
        """네트워크에서 Peer 를 식별하기 위한 UUID를 level db 에 생성한다.
        """
        if util.channel_use_icx(conf.LOOPCHAIN_DEFAULT_CHANNEL):
            self.__peer_id = IcxAuthorization(
                conf.LOOPCHAIN_DEFAULT_CHANNEL).address
        else:
            try:
                uuid_bytes = bytes(
                    self.__level_db.Get(conf.LEVEL_DB_KEY_FOR_PEER_ID))
                peer_id = uuid.UUID(bytes=uuid_bytes)
            except KeyError:  # It's first Run
                peer_id = None

            if peer_id is None:
                peer_id = uuid.uuid1()
                logging.info("make new peer_id: " + str(peer_id))
                self.__level_db.Put(conf.LEVEL_DB_KEY_FOR_PEER_ID,
                                    peer_id.bytes)

            self.__peer_id = str(peer_id)

        logger_preset = loggers.get_preset()
        logger_preset.peer_id = self.peer_id
        logger_preset.update_logger()

        logging.info(f"run peer_id : {self.__peer_id}")

    def timer_test_callback_function(self, message):
        logging.debug(f'timer test callback function :: ({message})')

    @staticmethod
    def __get_use_kms():
        if conf.GRPC_SSL_KEY_LOAD_TYPE == conf.KeyLoadType.KMS_LOAD:
            return True
        for value in conf.CHANNEL_OPTION.values():
            if value[
                    PublicVerifier.KEY_LOAD_TYPE] == conf.KeyLoadType.KMS_LOAD:
                return True
        return False

    def __init_kms_helper(self, agent_pin):
        if self.__get_use_kms():
            from loopchain.tools.kms_helper import KmsHelper
            KmsHelper().set_agent_pin(agent_pin)

    def __close_kms_helper(self):
        if self.__get_use_kms():
            from loopchain.tools.kms_helper import KmsHelper
            KmsHelper().remove_agent_pin()

    def run_common_service(self):
        inner_service_port = conf.PORT_INNER_SERVICE or (
            self.__peer_port + conf.PORT_DIFF_INNER_SERVICE)
        self.__inner_target = conf.IP_LOCAL + ":" + str(inner_service_port)

        self.__common_service = CommonService(loopchain_pb2,
                                              inner_service_port)
        self.__common_service.start(str(self.__peer_port), self.__peer_id,
                                    self.__group_id)

        loopchain_pb2_grpc.add_PeerServiceServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)

    def serve(self,
              port,
              agent_pin: str = None,
              amqp_target: str = None,
              amqp_key: str = None,
              event_for_init: multiprocessing.Event = None):
        """start func of Peer Service ===================================================================

        :param port:
        :param agent_pin: kms agent pin
        :param amqp_target: rabbitmq host target
        :param amqp_key: sharing queue key
        :param event_for_init: set when peer initiates
        """

        amqp_target = amqp_target or conf.AMQP_TARGET
        amqp_key = amqp_key or conf.AMQP_KEY

        stopwatch_start = timeit.default_timer()

        self.__init_kms_helper(agent_pin)
        self.__init_port(port)
        self.__init_level_db()

        self.__make_peer_id()

        StubCollection().amqp_target = amqp_target
        StubCollection().amqp_key = amqp_key

        peer_queue_name = conf.PEER_QUEUE_NAME_FORMAT.format(amqp_key=amqp_key)
        self.__outer_service = PeerOuterService()
        self.__inner_service = PeerInnerService(amqp_target,
                                                peer_queue_name,
                                                conf.AMQP_USERNAME,
                                                conf.AMQP_PASSWORD,
                                                peer_service=self)

        self.__channel_infos = self.__get_channel_infos()
        if not self.__channel_infos:
            util.exit_and_msg(
                "There is no peer_list, initial network is not allowed without RS!"
            )

        self.__run_rest_services(port)
        self.run_common_service()

        self.__close_kms_helper()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Start Peer Service at port: {port} start duration({stopwatch_duration})"
        )

        async def _serve():
            await self.ready_tasks()
            await self.__inner_service.connect(conf.AMQP_CONNECTION_ATTEMPS,
                                               conf.AMQP_RETRY_DELAY,
                                               exclusive=True)

            if conf.CHANNEL_BUILTIN:
                await self.serve_channels()

            if event_for_init is not None:
                event_for_init.set()

            logging.info(f'peer_service: init complete peer: {self.peer_id}')

        loop = self.__inner_service.loop
        loop.create_task(_serve())
        loop.add_signal_handler(signal.SIGINT, self.close)
        loop.add_signal_handler(signal.SIGTERM, self.close)

        try:
            loop.run_forever()
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()

        self.__common_service.wait()

        # the process monitor must stop monitoring before any subprocess stops
        # Monitor().stop()

        logging.info("Peer Service Ended.")
        if self.__rest_service is not None:
            self.__rest_service.stop()

        if self.__rest_proxy_server is not None:
            self.__rest_proxy_server.stop()
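
    # Hedged usage note: serve() blocks on the inner service's asyncio loop
    # until SIGINT/SIGTERM triggers close(), so a launcher that needs to know
    # when init finished passes a multiprocessing.Event as event_for_init and
    # waits on it from its own process.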

    def close(self):
        async def _close():
            for channel_stub in StubCollection().channel_stubs.values():
                await channel_stub.async_task().stop("Close")

            self.service_stop()
            loop.stop()

        loop = self.__inner_service.loop
        loop.create_task(_close())

    async def serve_channels(self):
        for i, channel_name in enumerate(self.__channel_infos.keys()):
            score_port = self.__peer_port + conf.PORT_DIFF_SCORE_CONTAINER + conf.PORT_DIFF_BETWEEN_SCORE_CONTAINER * i
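            # e.g. (illustrative values) peer_port 7100, PORT_DIFF_SCORE_CONTAINER
            # 20021 and PORT_DIFF_BETWEEN_SCORE_CONTAINER 30 give channel score
            # ports 27121, 27151, 27181, ... (one per channel, spaced apart)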

            args = ['python3', '-m', 'loopchain', 'channel']
            args += ['-p', str(score_port)]
            args += ['--channel', str(channel_name)]
            args += command_arguments.get_raw_commands_by_filter(
                command_arguments.Type.Develop,
                command_arguments.Type.AMQPTarget,
                command_arguments.Type.AMQPKey,
                command_arguments.Type.ConfigurationFilePath)

            service = CommonSubprocess(args)

            channel_stub = StubCollection().channel_stubs[channel_name]
            await channel_stub.async_task().hello()

            self.__channel_services[channel_name] = service

    async def ready_tasks(self):
        await StubCollection().create_peer_stub()  # for getting status info

        for channel_name, channel_info in self.__channel_infos.items():
            await StubCollection().create_channel_stub(channel_name)

            if util.channel_use_icx(channel_name):
                await StubCollection().create_icon_score_stub(channel_name)
            else:
                await StubCollection().create_score_stub(
                    channel_name, channel_info['score_package'])
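
# A minimal, self-contained sketch of the peer-id round trip performed by
# __make_peer_id above, with a plain dict standing in for level db (the real
# code calls Get/Put on the db handle); all names below are illustrative:
import uuid

PEER_ID_KEY = b'peer_id'

def load_or_make_peer_id(db: dict) -> str:
    try:
        peer_id = uuid.UUID(bytes=bytes(db[PEER_ID_KEY]))
    except KeyError:  # first run: nothing stored yet
        peer_id = uuid.uuid1()
        db[PEER_ID_KEY] = peer_id.bytes  # persist for the next start
    return str(peer_id)

fake_db = {}
assert load_or_make_peer_id(fake_db) == load_or_make_peer_id(fake_db)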
Example #13
0
class PeerService:
    """Peer Service 의 main Class
    outer 와 inner gRPC 인터페이스를 가진다.
    서비스 루프 및 공통 요소는 commonservice 를 통해서 처리한다.
    channel 관련 instance 는 channel manager 를 통해서 관리한다.
    """
    def __init__(self,
                 group_id=None,
                 radio_station_ip=None,
                 radio_station_port=None,
                 public_path=None,
                 private_path=None,
                 cert_pass=None):
        """Peer는 Radio Station 에 접속하여 leader 및 다른 Peer에 대한 접속 정보를 전달 받는다.

        :param group_id: Peer Group 을 구분하기 위한 ID, None 이면 Single Peer Group 이 된다. (peer_id is group_id)
        conf.PEER_GROUP_ID 를 사용하면 configure 파일에 저장된 값을 group_id 로 사용하게 된다.
        :param radio_station_ip: RS IP
        :param radio_station_port: RS Port
        :param public_path: Peer 인증서 디렉토리 경로
        :param private_path: Cert Private key
        :param cert_pass: Peer private key password
        :return:
        """
        if radio_station_ip is None:
            radio_station_ip = conf.IP_RADIOSTATION
        if radio_station_port is None:
            radio_station_port = conf.PORT_RADIOSTATION
        if public_path is None:
            public_path = conf.PUBLIC_PATH
        if private_path is None:
            private_path = conf.PRIVATE_PATH
        if cert_pass is None:
            cert_pass = conf.DEFAULT_PW

        util.logger.spam(f"Your Peer Service runs on debugging MODE!")
        util.logger.spam(
            f"You can see many terrible garbage logs just for debugging, R U Really want it?"
        )

        self.__send_to_process_thread = SendToProcess()

        self.__radio_station_target = radio_station_ip + ":" + str(
            radio_station_port)
        logging.info("Set Radio Station target is " +
                     self.__radio_station_target)

        self.__stub_to_radio_station = None

        self.__level_db = None
        self.__level_db_path = ""

        self.__peer_id = None
        self.__group_id = group_id
        if self.__group_id is None and conf.PEER_GROUP_ID != "":
            self.__group_id = conf.PEER_GROUP_ID

        self.__common_service = None
        self.__channel_manager: ChannelManager = None

        self.__rest_service = None
        self.__timer_service = TimerService()

        # TODO Delete .__score from the peer service; it is used only by the set chain code test. (remove after review)
        self.__score = None
        self.__peer_target = None
        self.__inner_target = None
        self.__peer_port = 0

        # For Send tx to leader
        self.__tx_process = None

        if conf.ENABLE_KMS:
            rand_table = self.__get_random_table()
            self.__auth = PeerAuthorization(rand_table=rand_table)
        else:
            self.__auth = PeerAuthorization(public_path, private_path,
                                            cert_pass)

        # gRPC service for Peer
        self.__inner_service = InnerService()
        self.__outer_service = OuterService()

        self.__reset_voter_in_progress = False

    @property
    def common_service(self):
        return self.__common_service

    @property
    def timer_service(self):
        return self.__timer_service

    @property
    def channel_manager(self):
        return self.__channel_manager

    @property
    def send_to_process_thread(self):
        return self.__send_to_process_thread

    @property
    def tx_process(self):
        return self.__tx_process

    @property
    def peer_target(self):
        return self.__peer_target

    @property
    def auth(self):
        return self.__auth

    @property
    def stub_to_radiostation(self) -> StubManager:
        if self.__stub_to_radio_station is None:
            self.__stub_to_radio_station = StubManager.get_stub_manager_to_server(
                self.__radio_station_target,
                loopchain_pb2_grpc.RadioStationStub,
                conf.CONNECTION_RETRY_TIMEOUT_TO_RS)

        return self.__stub_to_radio_station

    @property
    def peer_id(self):
        return self.__peer_id

    @property
    def group_id(self):
        if self.__group_id is None:
            self.__group_id = self.__peer_id
        return self.__group_id


    def __get_random_table(self) -> list:
        """request get rand_table to rs

        :return: rand_table from rs
        """
        try:
            response = self.stub_to_radiostation.call_in_time(
                "GetRandomTable", loopchain_pb2.CommonRequest(request=""))
            if response.response_code == message_code.Response.success:
                random_table = json.loads(response.message)
            else:
                util.exit_and_msg(f"get random table fail \n"
                                  f"cause by {response.message}")
            return random_table
        except Exception as e:
            util.exit_and_msg(f"get random table and init peer_auth fail \n"
                              f"cause by : {e}")

    def rotate_next_leader(self, channel_name):
        """Find Next Leader Id from peer_list and reset leader to that peer"""

        # logging.debug("rotate next leader...")
        util.logger.spam(f"peer_service:rotate_next_leader")
        peer_manager = self.__channel_manager.get_peer_manager(channel_name)
        next_leader = peer_manager.get_next_leader_peer(is_only_alive=True)

        # Check Next Leader is available...
        if next_leader is not None and next_leader.peer_id != self.peer_id:
            try:
                stub_manager = peer_manager.get_peer_stub_manager(next_leader)
                response = stub_manager.call(
                    "GetStatus",
                    loopchain_pb2.StatusRequest(request="get_leader_peer"),
                    is_stub_reuse=True)

                # The Peer needs time to become leader, so only check that it is reachable.
                # peer_status = json.loads(response.status)
                # if peer_status["peer_type"] != str(loopchain_pb2.BLOCK_GENERATOR):
                #     logging.warning("next rotate is not a leader")
                #     raise Exception

            except Exception as e:
                logging.warning(f"rotate next leader exceptions({e})")
                next_leader = peer_manager.leader_complain_to_rs(
                    conf.ALL_GROUP_ID)

        if next_leader is not None:
            self.reset_leader(next_leader.peer_id, channel_name)
        else:
            util.logger.warning(
                f"peer_service:rotate_next_leader next_leader is None({next_leader})"
            )

    def reset_leader(self, new_leader_id, channel: str):
        logging.info(
            f"RESET LEADER channel({channel}) leader_id({new_leader_id})")

        block_manager = self.__channel_manager.get_block_manager(channel)
        peer_manager = self.__channel_manager.get_peer_manager(channel)
        complained_leader = peer_manager.get_leader_peer()
        leader_peer = peer_manager.get_peer(new_leader_id, None)

        if leader_peer is None:
            logging.warning(
                f"in peer_service:reset_leader There is no peer by peer_id({new_leader_id})"
            )
            return

        util.logger.spam(
            f"peer_service:reset_leader target({leader_peer.target})")

        peer_manager.set_leader_peer(leader_peer, None)

        self_peer_object = peer_manager.get_peer(self.__peer_id)
        peer_leader = peer_manager.get_leader_peer()
        peer_type = loopchain_pb2.PEER

        if self_peer_object.target == peer_leader.target:
            util.change_log_color_set(True)
            logging.debug("Set Peer Type Leader!")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
            block_manager.get_blockchain().reset_made_block_count()

            # TODO The code below is semantically redundant. However, if it is made explicit as
            # code that waits for the leader change, errors occur from blockchain processing delays.
            # For now, keep the version that shows more stable test results.
            response = peer_manager.get_peer_stub_manager(
                self_peer_object).call("Request",
                                       loopchain_pb2.Message(
                                           code=message_code.Request.status,
                                           channel=channel),
                                       is_stub_reuse=True)

            peer_status = json.loads(response.meta)
            if peer_status['peer_type'] == str(loopchain_pb2.BLOCK_GENERATOR):
                is_broadcast = True
            else:
                is_broadcast = False

            peer_manager.announce_new_leader(complained_leader.peer_id,
                                             new_leader_id,
                                             is_broadcast=is_broadcast)
        else:
            util.change_log_color_set()
            logging.debug("Set Peer Type Peer!")
            # subscribe to the new leader
            self.__common_service.subscribe(
                channel=channel,
                subscribe_stub=peer_manager.get_peer_stub_manager(peer_leader),
                peer_type=loopchain_pb2.BLOCK_GENERATOR)

        # update candidate blocks
        block_manager.get_candidate_blocks().set_last_block(
            block_manager.get_blockchain().last_block)
        block_manager.set_peer_type(peer_type)

        if self.__tx_process is not None:
            # Handle the remaining jobs of peer_process (peer->leader case),
            # and update peer_process with the new leader info (peer->peer case).
            self.__tx_process_connect_to_leader(self.__tx_process,
                                                peer_leader.target)

    def show_peers(self, channel_name):
        logging.debug(f"peer_service:show_peers ({channel_name}): ")
        for peer in self.__channel_manager.get_peer_manager(
                channel_name).get_IP_of_peers_in_group():
            logging.debug("peer_target: " + peer)

    def service_stop(self):
        self.__channel_manager.stop_block_managers()
        self.__common_service.stop()

    def score_invoke(self, block, channel) -> dict:
        block_object = pickle.dumps(block)
        response = self.channel_manager.get_score_container_stub(channel).call(
            method_name="Request",
            message=loopchain_pb2.Message(
                code=message_code.Request.score_invoke, object=block_object),
            timeout=conf.SCORE_INVOKE_TIMEOUT,
            is_raise=True)
        # logging.debug("Score Server says: " + str(response))
        if response.code == message_code.Response.success:
            return json.loads(response.meta)

    def __connect_to_all_channel(self) -> bool:
        """connect to radiostation with all channel

        :return: is radiostation connected
        """
        response = self.__get_channel_infos()
        is_radiostation_connected = response is not None

        if is_radiostation_connected:
            logging.info(f"Connect to channels({response.channel_infos})")
            channels = json.loads(response.channel_infos)
            score_container_port_diff = 0

            for channel in list(channels.keys()):
                logging.debug(f"Try join channel({channel})")
                self.__channel_manager.load_block_manager(peer_id=self.peer_id,
                                                          channel=channel)
                self.__channel_manager.load_peer_manager(channel=channel)

                is_score_container_loaded = self.__channel_manager.load_score_container_each(
                    channel_name=channel,
                    score_package=channels[channel]["score_package"],
                    container_port=self.__peer_port +
                    conf.PORT_DIFF_SCORE_CONTAINER + score_container_port_diff,
                    peer_target=self.__peer_target)

                if is_score_container_loaded is False:
                    util.exit_and_msg(
                        f"peer_service:__connect_to_all_channel score container load Fail ({channel})"
                    )

                score_container_port_diff = score_container_port_diff + conf.PORT_DIFF_BETWEEN_SCORE_CONTAINER
                response = self.connect_to_radiostation(channel=channel)
                if response is not None:
                    self.__channel_manager.save_peer_manager(
                        self.__channel_manager.get_peer_manager(channel),
                        channel)

        return is_radiostation_connected

    def __get_channel_infos(self):
        response = self.stub_to_radiostation.call_in_times(
            method_name="GetChannelInfos",
            message=loopchain_pb2.GetChannelInfosRequest(
                peer_id=self.__peer_id,
                peer_target=self.__peer_target,
                group_id=self.group_id,
                cert=self.__auth.get_public_der()),
            retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS,
            is_stub_reuse=True,
            timeout=conf.CONNECTION_TIMEOUT_TO_RS)

        return response

    def connect_to_radiostation(
            self,
            channel: str,
            is_reconnect: bool = False) -> loopchain_pb2.ConnectPeerReply:
        """connect to radiostation with channel

        :return: 접속정보, 실패시 None
        """
        logging.debug(f"try to connect to radiostation channel({channel})")

        if self.stub_to_radiostation is None:
            logging.warning("fail make stub to Radio Station!!")
            return None

        # common part
        response = self.stub_to_radiostation.call_in_times(
            method_name="ConnectPeer",
            message=loopchain_pb2.ConnectPeerRequest(
                channel=channel,
                peer_object=b'',
                peer_id=self.__peer_id,
                peer_target=self.__peer_target,
                group_id=self.group_id,
                cert=self.__auth.get_public_der()),
            retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS,
            is_stub_reuse=True,
            timeout=conf.CONNECTION_TIMEOUT_TO_RS)

        if not is_reconnect:
            if response is not None and response.status == message_code.Response.success:
                peer_list_data = pickle.loads(response.peer_list)
                self.__channel_manager.get_peer_manager(channel).load(
                    peer_list_data, False)
                logging.debug("peer list update: " +
                              self.__channel_manager.get_peer_manager(
                                  channel).get_peers_for_debug())
            else:
                logging.debug("using local peer list: " +
                              self.__channel_manager.get_peer_manager(
                                  channel).get_peers_for_debug())

        return response

    def add_unconfirm_block(self, block_unloaded, channel_name=None):
        if channel_name is None:
            channel_name = conf.LOOPCHAIN_DEFAULT_CHANNEL

        block = pickle.loads(block_unloaded)
        block_hash = block.block_hash

        response_code, response_msg = message_code.get_response(
            message_code.Response.fail_validate_block)

        # validate the block
        block_is_validated = False
        try:
            block_is_validated = Block.validate(block)
        except Exception as e:
            logging.error(e)

        if block_is_validated:
            # When a broadcast is received, validate the block; if it passes,
            # register it as an unconfirmed block on our own blockchain.
            confirmed, reason = \
                self.__channel_manager.get_block_manager(channel_name).get_blockchain().add_unconfirm_block(block)

            if confirmed:
                response_code, response_msg = message_code.get_response(
                    message_code.Response.success_validate_block)
            elif reason == "block_height":
                # If the announced block's height differs from ours, retry Block Height Sync.
                self.__channel_manager.get_block_manager(
                    channel_name).block_height_sync()

        return response_code, response_msg, block_hash
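
    # Note: add_unconfirm_block always answers with a (response_code,
    # response_msg, block_hash) tuple; a height mismatch is not a failure
    # reply to the caller but triggers block_height_sync() as a side effect.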

    def __tx_process_connect_to_leader(self, peer_process, leader_target):
        logging.debug("try... Peer Process connect_to_leader: " +
                      leader_target)
        logging.debug("peer_process: " + str(peer_process))
        peer_process.send_to_process(
            (BroadcastProcess.CONNECT_TO_LEADER_COMMAND, leader_target))
        peer_process.send_to_process(
            (BroadcastProcess.SUBSCRIBE_COMMAND, leader_target))

    def __run_tx_process(self, inner_channel_info):
        tx_process = BroadcastProcess("Tx Process")
        tx_process.start()
        tx_process.send_to_process(("status", ""))

        wait_times = 0
        wait_for_process_start = None

        # TODO The process wait loop was restored and its timing adjusted; in this state,
        # verify that the tx process actually starts on AWS infra.
        # time.sleep(conf.WAIT_SECONDS_FOR_SUB_PROCESS_START)

        while wait_for_process_start is None:
            time.sleep(conf.SLEEP_SECONDS_FOR_SUB_PROCESS_START)
            wait_times += 1  # advance the retry counter so the limit below can trip
            logging.debug(f"wait start tx process....")
            wait_for_process_start = tx_process.get_receive("status")

            if wait_for_process_start is None and wait_times > conf.WAIT_SUB_PROCESS_RETRY_TIMES:
                util.exit_and_msg("Tx Process start Fail!")

        logging.debug(f"Tx Process start({wait_for_process_start})")
        tx_process.send_to_process(
            (BroadcastProcess.MAKE_SELF_PEER_CONNECTION_COMMAND,
             inner_channel_info))

        return tx_process

    def __stop_tx_process(self):
        if self.__tx_process is not None:
            self.__tx_process.stop()
            self.__tx_process.wait()

    def reset_voter_count(self):
        """peer_list 의 활성화 상태(gRPC 응답)을 갱신하여 voter 수를 변경한다.

        :return:
        """
        # if self.__reset_voter_in_progress is not True:
        #     self.__reset_voter_in_progress = True
        #     logging.debug("reset voter count before: " +
        #                   str(ObjectManager().peer_service.peer_manager.get_peer_count()))
        #
        #     # TODO Iterate over peer_list and remove_audience for peers with gRPC errors.
        #     self.__channel_manager.get_peer_manager(
        #         conf.LOOPCHAIN_DEFAULT_CHANNEL).reset_peers(None, self.__common_service.remove_audience)
        #     logging.debug("reset voter count after: " +
        #                   str(ObjectManager().peer_service.peer_manager.get_peer_count()))
        #     self.__reset_voter_in_progress = False
        pass

    def set_chain_code(self, score):
        """Score를 패스로 전달하지 않고 (serve(...)의 score 는 score 의 파일 Path 이다.)
        Object 를 직접 할당하기 위한 인터페이스로 serve 호출전에 지정되어야 한다.

        :param score: score Object
        """
        # TODO 현재는 테스트를 위해서만 사용되고 있다. 검토후 제거 할 것
        self.__score = score

        # TODO The three lines below can probably be deleted; please review and remove them in the next merge. assign to @godong
        self.__score_info = dict()
        self.__score_info[
            message_code.MetaParams.ScoreInfo.score_id] = self.__score.id()
        self.__score_info[message_code.MetaParams.ScoreInfo.
                          score_version] = self.__score.version()

    def __port_init(self, port):
        # service initialization
        self.__peer_target = util.get_private_ip() + ":" + str(port)
        self.__inner_target = conf.IP_LOCAL + ":" + str(port)
        self.__peer_port = int(port)

        # Check whether the SCORE service port is already in use
        if util.check_port_using(conf.IP_PEER,
                                 int(port) + conf.PORT_DIFF_SCORE_CONTAINER):
            util.exit_and_msg('Score Service Port is in use: ' +
                              str(int(port) + conf.PORT_DIFF_SCORE_CONTAINER))

    def __run_inner_services(self, port):
        if conf.ENABLE_REST_SERVICE:
            if conf.USE_GUNICORN_HA_SERVER:
                # Run web app on gunicorn in another process.
                new_rest_port = int(
                    port) + conf.PORT_DIFF_REST_SERVICE_CONTAINER
                logging.debug(
                    f'Launch gunicorn proxy server. Port = {new_rest_port}')
                # Popen does not go through a shell, so '&' must not appear
                # in the argv list; Popen is already non-blocking.
                subprocess.Popen(
                    ['python3', './rest_proxy.py', '-p', str(port)])
            else:
                # Run web app as it is.
                logging.debug(f'Launch Flask RESTful server. Port = {port}')
                self.__rest_service = RestService(int(port))

    def __make_peer_id(self):
        """네트워크에서 Peer 를 식별하기 위한 UUID를 level db 에 생성한다.
        """
        try:
            uuid_bytes = bytes(
                self.__level_db.Get(conf.LEVEL_DB_KEY_FOR_PEER_ID))
            peer_id = uuid.UUID(bytes=uuid_bytes)
        except KeyError:  # it's the first run
            peer_id = None

        if peer_id is None:
            peer_id = uuid.uuid1()
            logging.info("make new peer_id: " + str(peer_id))
            self.__level_db.Put(conf.LEVEL_DB_KEY_FOR_PEER_ID, peer_id.bytes)

        self.__peer_id = str(peer_id)

    def timer_test_callback_function(self, message):
        logging.debug(f'timer test callback function :: ({message})')

    def __block_height_sync_channel(self, channel_name):
        # If we did not start as leader but our own info says we are the leader Peer,
        # do a block height sync to find the leader of the latest block.
        block_sync_target_stub = None
        peer_manager = self.__channel_manager.get_peer_manager(channel_name)
        peer_leader = peer_manager.get_leader_peer()
        self_peer_object = peer_manager.get_peer(self.__peer_id)
        is_delay_announce_new_leader = False
        peer_old_leader = None

        if peer_leader.target != self.__peer_target:
            block_sync_target_stub = StubManager.get_stub_manager_to_server(
                peer_leader.target,
                loopchain_pb2_grpc.PeerServiceStub,
                time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT)

            if block_sync_target_stub is None:
                logging.warning(
                    "You may be out of date with this network... or there is no leader in this network!"
                )

                # TODO In this situation, proceed with a leader complaint to the RS
                is_delay_announce_new_leader = True
                peer_old_leader = peer_leader
                peer_leader = self.__channel_manager.get_peer_manager(
                    channel_name).leader_complain_to_rs(
                        conf.ALL_GROUP_ID, is_announce_new_peer=False)

                if peer_leader is not None:
                    block_sync_target_stub = StubManager.get_stub_manager_to_server(
                        peer_leader.target,
                        loopchain_pb2_grpc.PeerServiceStub,
                        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT)

            if peer_leader is None or peer_leader.peer_id == self.__peer_id:
                peer_leader = self_peer_object
                self.__channel_manager.get_block_manager(
                    channel_name).set_peer_type(loopchain_pb2.BLOCK_GENERATOR)
            else:
                self.__channel_manager.get_block_manager(
                    channel_name).block_height_sync(block_sync_target_stub)
                # # TODO The logic that infers leader info from the last block should be re-validated after the leader complaint algorithm is revised.
                # last_block_peer_id = self.__channel_manager.get_block_manager().get_blockchain().last_block.peer_id
                #
                # if last_block_peer_id != "" and last_block_peer_id != self.__peer_list.get_leader_peer().peer_id:
                #     logging.debug("make leader stub after block height sync...")
                #     new_leader_peer = self.__peer_list.get_peer(last_block_peer_id)
                #
                #     if new_leader_peer is None:
                #         new_leader_peer = self.__peer_list.leader_complain_to_rs(conf.ALL_GROUP_ID)
                #
                #     self.__peer_list.set_leader_peer(new_leader_peer, None)
                #     # TODO If the leader equals next_leader_peer above, the stub gets re-created; check that this causes no problem.
                #     peer_leader = new_leader_peer
                # else:

                if block_sync_target_stub is None:
                    util.exit_and_msg("Fail connect to leader!!")

                self.show_peers(channel_name)

            if block_sync_target_stub is not None:
                self.__common_service.subscribe(channel_name,
                                                block_sync_target_stub,
                                                loopchain_pb2.BLOCK_GENERATOR)

            if is_delay_announce_new_leader:
                self.__channel_manager.get_peer_manager(
                    channel_name).announce_new_leader(peer_old_leader.peer_id,
                                                      peer_leader.peer_id)

    def __start_base_services(self, score):
        """start base services >> common_service, channel_manager, tx_process

        :param score:
        :return:
        """
        inner_service_port = conf.PORT_INNER_SERVICE or (
            self.__peer_port + conf.PORT_DIFF_INNER_SERVICE)

        self.__common_service = CommonService(loopchain_pb2,
                                              inner_service_port)

        self.__channel_manager = ChannelManager(
            common_service=self.__common_service,
            level_db_identity=self.__peer_target)

        self.__tx_process = self.__run_tx_process(
            inner_channel_info=conf.IP_LOCAL + ":" + str(inner_service_port))

    def serve(self, port, score=None):
        """start func of Peer Service ===================================================================

        :param port:
        :param score:
        """
        if score is None:
            score = conf.DEFAULT_SCORE_PACKAGE

        stopwatch_start = timeit.default_timer()
        peer_type = loopchain_pb2.PEER

        is_all_service_safe_start = True

        self.__port_init(port)
        self.__level_db, self.__level_db_path = util.init_level_db(
            self.__peer_target)
        self.__make_peer_id()
        self.__run_inner_services(port)
        self.__start_base_services(score=score)

        is_radiostation_connected = self.__connect_to_all_channel()

        if is_radiostation_connected is False:
            util.exit_and_msg(
                "There is no peer_list, initial network is not allowed without RS!"
            )

        # start timer service.
        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            self.__timer_service.start()

        # TODO LOOPCHAIN-61 load the certificate
        _cert = None
        # TODO LOOPCHAIN-61 load the certificate key
        _private_key = None
        # TODO request authentication info

        for channel in self.__channel_manager.get_channel_list():
            peer_leader = self.__channel_manager.get_peer_manager(
                channel).get_leader_peer(is_complain_to_rs=True)
            logging.debug(f"channel({channel}) peer_leader: " +
                          str(peer_leader))

            # TODO Instead of this conditional check, consider using leader complain to become leader.
            # Become the block generator when this peer is the only connected PEER in peer_list
            # or matches the leader info from the RS.
            if self.__peer_id == peer_leader.peer_id:
                if is_radiostation_connected is True or self.__channel_manager.get_peer_manager(
                        channel).get_connected_peer_count(None) == 1:
                    util.change_log_color_set(True)
                    logging.debug(f"Set Peer Type Leader! channel({channel})")
                    peer_type = loopchain_pb2.BLOCK_GENERATOR

            # load score must run after the score service has started and before block height sync begins.
            # is_all_service_safe_start &= self.__load_score(score)

            if peer_type == loopchain_pb2.BLOCK_GENERATOR:
                self.__channel_manager.get_block_manager(
                    channel).set_peer_type(peer_type)
            elif peer_type == loopchain_pb2.PEER:
                self.__block_height_sync_channel(channel)

            if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
                self.__common_service.update_audience(
                    self.channel_manager.get_peer_manager().dump())

        loopchain_pb2_grpc.add_PeerServiceServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)
        loopchain_pb2_grpc.add_InnerServiceServicer_to_server(
            self.__inner_service, self.__common_service.inner_server)
        logging.info("Start peer service at port: " + str(port))

        self.__channel_manager.start_block_managers()
        self.__common_service.start(port, self.__peer_id, self.__group_id)

        if self.stub_to_radiostation is not None:
            for channel in self.__channel_manager.get_channel_list():
                self.__common_service.subscribe(
                    channel=channel, subscribe_stub=self.stub_to_radiostation)

        for channel in self.__channel_manager.get_channel_list():
            channel_leader = self.__channel_manager.get_peer_manager(
                channel).get_leader_peer()
            if channel_leader is not None:
                util.logger.spam(
                    f"connnect to channel({channel}) leader({channel_leader.target})"
                )
                self.__tx_process_connect_to_leader(self.__tx_process,
                                                    channel_leader.target)

        self.__send_to_process_thread.set_process(self.__tx_process)
        self.__send_to_process_thread.start()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Start Peer Service start duration({stopwatch_duration})")

        # wait for the service to end
        if is_all_service_safe_start:
            self.__common_service.wait()
        else:
            self.service_stop()

        self.__send_to_process_thread.stop()
        self.__send_to_process_thread.wait()

        if self.__timer_service.is_run():
            self.__timer_service.stop()
            self.__timer_service.wait()

        logging.info("Peer Service Ended.")
        self.__channel_manager.stop_score_containers()
        if self.__rest_service is not None:
            self.__rest_service.stop()
        self.__stop_tx_process()
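
# A hedged, generic restatement of the poll-until-ready loop used by
# __run_tx_process above; wait_until and its probe/interval/retries names are
# illustrative helpers, not part of loopchain:
import time

def wait_until(probe, interval=0.5, retries=10):
    """Poll probe() until it returns a non-None value; raise after `retries` tries."""
    for _ in range(retries + 1):
        result = probe()
        if result is not None:
            return result
        time.sleep(interval)
    raise TimeoutError("subprocess did not report its status in time")

# usage sketch: wait_until(lambda: tx_process.get_receive("status"))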