Example #1
class RadioStationService:
    """Radiostation 의 main Class
    peer 를 위한 outer service 와 관리용 admin service 두개의 gRPC interface 를 가진다.
    """

    # Certificate handling
    __ca = None

    def __init__(self,
                 radio_station_ip=None,
                 cert_path=None,
                 cert_pass=None,
                 rand_seed=None):
        """RadioStation Init

        :param radio_station_ip: RadioStation IP
        :param cert_path: path to the RadioStation certificate directory
        :param cert_pass: RadioStation private key password
        """
        logger_preset = loggers.get_preset()
        logger_preset.peer_id = "RadioStation"
        logger_preset.update_logger()

        if radio_station_ip is None:
            radio_station_ip = conf.IP_RADIOSTATION
        logging.info("Set RadioStationService IP: " + radio_station_ip)
        if cert_path is not None:
            logging.info("CA Certificate Path : " + cert_path)

        self.__common_service = CommonService(loopchain_pb2)
        self.__admin_manager = AdminManager("station")
        self.__channel_manager = None
        self.__rest_service = None
        self.__timer_service = TimerService()

        # RS has two states (active, standby): active means the outer service is enabled;
        # standby means the outer service is stopped and this RS heartbeats to the other (active) RS.
        self.__is_active = False

        # Certification class
        self.__ca = CertificateAuthorization()

        if cert_path is not None:
            # Load the certificate
            self.__ca.load_pki(cert_path, cert_pass)

        logging.info("Current RadioStation SECURITY_MODE : " +
                     str(self.__ca.is_secure))

        # gRPC service for Radiostation
        self.__outer_service = OuterService()
        self.__admin_service = AdminService(self.__admin_manager)

        # dictionary structured as {group_id: [{peer_id: IP}]}
        self.peer_groups = {conf.ALL_GROUP_ID: []}

        # Handles peer security
        self.auth = {}

        ObjectManager().rs_service = self

    def __del__(self):
        pass

    def launch_block_generator(self):
        pass

    @property
    def admin_manager(self) -> AdminManager:
        return self.__admin_manager

    @property
    def channel_manager(self) -> ChannelManager:
        return self.__channel_manager

    @property
    def common_service(self) -> CommonService:
        return self.__common_service

    @property
    def timer_service(self) -> TimerService:
        return self.__timer_service

    def check_peer_status(self, channel):
        """service loop for status heartbeat check to peer list

        :return:
        """
        util.logger.spam(
            f"rs_service:check_peer_status(Heartbeat...{channel}) "
            f"for reset Leader and delete no response Peer")

        peer_manager = self.__channel_manager.get_peer_manager(channel)
        peer_manager.check_peer_status()

    def __create_random_table(self, rand_seed: int) -> list:
        """create random_table using random_seed
        table size define in conf.RANDOM_TABLE_SIZE

        :param rand_seed: random seed for create random table
        :return: random table
        """
        random.seed(rand_seed)
        random_table = []
        for i in range(conf.RANDOM_TABLE_SIZE):
            random_num: int = random.getrandbits(conf.RANDOM_SIZE)
            random_table.append(random_num)

        return random_table

    def register_peers(self):
        util.logger.spam(f"register_peers() : start register to peer_manager")

        logging.debug(
            f"register_peers() : channel_list = {self.admin_manager.get_channel_list()}"
        )
        for channel_name, channel_data in self.admin_manager.json_data.items():
            peer_manager = self.channel_manager.get_peer_manager(channel_name)

            for peer_data in channel_data['peers']:
                peer_info = {
                    "id": peer_data['id'],
                    "peer_target": peer_data['peer_target'],
                    "order": peer_data['order']
                }
                logging.debug(
                    f"register Peer : channel = {channel_name}, peer_info = {peer_info}"
                )
                peer_manager.add_peer(peer_info)

            if conf.ENABLE_RADIOSTATION_HEARTBEAT:
                timer_key = f"{TimerService.TIMER_KEY_RS_HEARTBEAT}_{channel_name}"
                if timer_key not in self.timer_service.timer_list:
                    self.timer_service.add_timer(
                        timer_key,
                        Timer(target=timer_key,
                              duration=conf.SLEEP_SECONDS_IN_RADIOSTATION_HEARTBEAT,
                              is_repeat=True,
                              callback=self.check_peer_status,
                              callback_kwargs={"channel": channel_name}))

    def serve(self, port=None, event_for_init: multiprocessing.Event = None):
        """Peer(BlockGenerator Peer) to RadioStation

        :param port: RadioStation Peer
        :param event_for_init:
        """
        if port is None:
            port = conf.PORT_RADIOSTATION
        stopwatch_start = timeit.default_timer()

        self.__channel_manager = ChannelManager(self.__common_service)

        self.register_peers()

        # TODO: Currently, some environments fail to start RestServiceRS without this sleep.
        # The sleep works around the issue on this node, but the root cause still needs investigation.
        time.sleep(1)

        if conf.ENABLE_REST_SERVICE:
            self.__rest_service = RestServiceRS(int(port))

        loopchain_pb2_grpc.add_RadioStationServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)
        loopchain_pb2_grpc.add_AdminServiceServicer_to_server(
            self.__admin_service, self.__common_service.inner_server)

        logging.info("Start Radio Station service at port: " + str(port))

        self.__common_service.start(port)
        self.__timer_service.start()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Start Radio Station service at port: {port} start duration({stopwatch_duration})"
        )

        if event_for_init is not None:
            event_for_init.set()

        signal.signal(signal.SIGINT, self.close)
        signal.signal(signal.SIGTERM, self.close)

        # Wait for the services to stop.
        self.__common_service.wait()
        self.__timer_service.wait()

        if self.__rest_service is not None:
            self.__rest_service.stop()

    def close(self, sig, frame):
        self.__common_service.stop()
        self.__timer_service.stop()
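
A minimal launch sketch for the service above (illustrative, not part of loopchain: it assumes RadioStationService is importable and that default construction is acceptable). serve() sets event_for_init once the gRPC servers and the timer service are up, and terminate() delivers SIGTERM, which is routed to close():

import multiprocessing

def _run_radio_station(ready_event):
    # Hypothetical default construction; real deployments may pass cert_path/cert_pass.
    service = RadioStationService()
    service.serve(event_for_init=ready_event)

if __name__ == "__main__":
    ready = multiprocessing.Event()
    process = multiprocessing.Process(target=_run_radio_station, args=(ready,))
    process.start()
    ready.wait()          # serve() sets this event after start-up completes
    # ... interact with the running RadioStation ...
    process.terminate()   # SIGTERM is handled by RadioStationService.close()
    process.join()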
Example #2
class ChannelService:
    def __init__(self, channel_name, amqp_target, amqp_key, rollback=False):
        self.__block_manager: BlockManager = None
        self.__score_container: CommonSubprocess = None
        self.__score_info: dict = None
        self.__peer_auth: Signer = None
        self.__broadcast_scheduler: BroadcastScheduler = None
        self.__rs_client: RestClient = None
        self.__timer_service = TimerService()
        self.__node_subscriber: NodeSubscriber = None
        self._rollback: bool = rollback

        loggers.get_preset().channel_name = channel_name
        loggers.get_preset().update_logger()

        channel_queue_name = conf.CHANNEL_QUEUE_NAME_FORMAT.format(channel_name=channel_name, amqp_key=amqp_key)
        self.__inner_service = ChannelInnerService(
            amqp_target, channel_queue_name, conf.AMQP_USERNAME, conf.AMQP_PASSWORD, channel_service=self)

        logging.info(f"ChannelService : {channel_name}, Queue : {channel_queue_name}")

        ChannelProperty().name = channel_name
        ChannelProperty().amqp_target = amqp_target
        ChannelProperty().crep_root_hash = Hash32.fromhex(conf.CHANNEL_OPTION[channel_name].get('crep_root_hash'))

        StubCollection().amqp_key = amqp_key
        StubCollection().amqp_target = amqp_target

        command_arguments.add_raw_command(command_arguments.Type.Channel, channel_name)
        command_arguments.add_raw_command(command_arguments.Type.AMQPTarget, amqp_target)
        command_arguments.add_raw_command(command_arguments.Type.AMQPKey, amqp_key)

        ObjectManager().channel_service = self
        self.__state_machine = ChannelStateMachine(self)

    @property
    def block_manager(self):
        return self.__block_manager

    @property
    def score_container(self):
        return self.__score_container

    @property
    def score_info(self):
        return self.__score_info

    @property
    def rs_client(self):
        return self.__rs_client

    @property
    def broadcast_scheduler(self):
        return self.__broadcast_scheduler

    @property
    def timer_service(self):
        return self.__timer_service

    @property
    def state_machine(self):
        return self.__state_machine

    @property
    def inner_service(self):
        return self.__inner_service

    @property
    def node_subscriber(self):
        return self.__node_subscriber

    def serve(self):
        async def _serve():
            await StubCollection().create_peer_stub()

            results = await StubCollection().peer_stub.async_task().get_node_info_detail()
            self._init_properties(**results)

            await self._init()
            self.__timer_service.start()
            self.__state_machine.complete_init_components()
            logging.info(f'channel_service: init complete channel: {ChannelProperty().name}, '
                         f'state({self.__state_machine.state})')

        loop = self.__inner_service.loop
        serve_coroutine = _serve() if not self._rollback else self._serve_manual_rollback()
        loop.create_task(serve_coroutine)
        loop.add_signal_handler(signal.SIGINT, self.close, signal.SIGINT)
        loop.add_signal_handler(signal.SIGTERM, self.close, signal.SIGTERM)

        try:
            loop.run_forever()
        except Exception as e:
            traceback.print_exception(type(e), e, e.__traceback__)
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            self._cancel_tasks(loop)
            self._cleanup()
            loop.close()

    async def _serve_manual_rollback(self):
        """Initialize minimum channel resources and manual rollback

        :return: None
        """
        await StubCollection().create_peer_stub()

        results = await StubCollection().peer_stub.async_task().get_node_info_detail()
        self._init_properties(**results)

        self.__init_block_manager()
        await self.__init_score_container()
        await self.__inner_service.connect(conf.AMQP_CONNECTION_ATTEMPTS, conf.AMQP_RETRY_DELAY, exclusive=True)
        await asyncio.sleep(0.01)   # sleep to complete peer service initialization

        message = self._manual_rollback()
        self.shutdown_peer(message=message)

    def _manual_rollback(self) -> str:
        logging.debug("_manual_rollback() start manual rollback")
        if self.block_manager.blockchain.block_height >= 0:
            self.block_manager.rebuild_block()

        if self.block_manager.request_rollback():
            message = "rollback finished"
        else:
            message = "rollback cancelled"

        logging.debug("_manual_rollback() end manual rollback")
        return message

    def close(self, signum=None):
        logging.info(f"close() signum = {repr(signum)}")
        if self.__inner_service:
            self.__inner_service.cleanup()

        self.__inner_service.loop.stop()

    @staticmethod
    def _cancel_tasks(loop):
        for task in asyncio.Task.all_tasks(loop):
            if task.done():
                continue
            task.cancel()
            try:
                loop.run_until_complete(task)
            except asyncio.CancelledError as e:
                logging.info(f"_cancel_tasks() task : {task}, error : {e}")

    def _cleanup(self):
        logging.info("_cleanup() Channel Resources.")

        if self.__timer_service.is_run():
            self.__timer_service.stop()
            self.__timer_service.wait()
            logging.info("_cleanup() TimerService.")

        if self.__score_container:
            self.__score_container.stop()
            self.__score_container.wait()
            self.__score_container = None
            logging.info("_cleanup() ScoreContainer.")

        if self.__broadcast_scheduler:
            self.__broadcast_scheduler.stop()
            self.__broadcast_scheduler.wait()
            self.__broadcast_scheduler = None
            logging.info("_cleanup() BroadcastScheduler.")

        if self.__block_manager:
            self.__block_manager.stop()
            self.__block_manager = None
            logging.info("_cleanup() BlockManager.")

    @staticmethod
    def _init_properties(**kwargs):
        """Initialize properties

        :param kwargs: expects peer_id, peer_port, peer_target and rest_target
        :return: None
        """
        loggers.get_preset().peer_id = kwargs.get('peer_id')
        loggers.get_preset().update_logger()

        ChannelProperty().peer_port = kwargs.get('peer_port')
        ChannelProperty().peer_target = kwargs.get('peer_target')
        ChannelProperty().rest_target = kwargs.get('rest_target')
        ChannelProperty().peer_id = kwargs.get('peer_id')
        ChannelProperty().peer_address = ExternalAddress.fromhex_address(ChannelProperty().peer_id)
        ChannelProperty().node_type = conf.NodeType.CitizenNode
        ChannelProperty().rs_target = None

    async def _init(self):
        """Initialize channel resources

        :return: None
        """
        await self.__init_peer_auth()
        self.__init_broadcast_scheduler()
        self.__init_block_manager()

        await self.__init_score_container()
        await self.__inner_service.connect(conf.AMQP_CONNECTION_ATTEMPTS, conf.AMQP_RETRY_DELAY, exclusive=True)
        await self.__init_sub_services()

    async def evaluate_network(self):
        await self._init_rs_client()
        self.__block_manager.blockchain.init_crep_reps()
        await self._select_node_type()
        self.__ready_to_height_sync()
        self.__state_machine.block_sync()

    async def subscribe_network(self):
        await self._select_node_type()

        if self.is_support_node_function(conf.NodeFunction.Vote):
            await self.set_peer_type_in_channel()
        else:
            await self._init_rs_target()
            if ChannelProperty().rs_target is None:
                return
            self.__init_node_subscriber()
            await self.subscribe_to_parent()

        self.__state_machine.complete_subscribe()

        if self.is_support_node_function(conf.NodeFunction.Vote):
            self.turn_on_leader_complain_timer()

    def update_nid(self):
        nid = self.__block_manager.blockchain.find_nid()
        self.__inner_service.update_sub_services_properties(nid=int(nid, 16))

    def _get_node_type_by_peer_list(self):
        epoch = self.block_manager.epoch
        if epoch:
            reps = self.__block_manager.blockchain.find_preps_addresses_by_roothash(
                epoch.reps_hash)
        else:
            reps = self.__block_manager.blockchain.find_preps_addresses_by_roothash(
                ChannelProperty().crep_root_hash)

        if ChannelProperty().peer_address in reps:
            return conf.NodeType.CommunityNode
        return conf.NodeType.CitizenNode

    def _is_role_switched(self) -> bool:
        new_node_type = self._get_node_type_by_peer_list()
        if new_node_type == ChannelProperty().node_type:
            utils.logger.debug(f"By peer manager, maintains the current node type({ChannelProperty().node_type})")
            return False

        return True

    async def _select_node_type(self):
        if self._is_role_switched():
            new_node_type = self._get_node_type_by_peer_list()
            utils.logger.info(f"Role switching to new node type: {new_node_type.name}")
            ChannelProperty().node_type = new_node_type
        self.__inner_service.update_sub_services_properties(node_type=ChannelProperty().node_type.value)

    def switch_role(self):
        self.__block_manager.blockchain.reset_leader_made_block_count(need_check_switched_role=True)
        if self._is_role_switched():
            self.__state_machine.switch_role()

    async def reset_network(self):
        utils.logger.info("Reset network")
        self.__timer_service.clean(except_key=TimerService.TIMER_KEY_BROADCAST_SEND_UNCONFIRMED_BLOCK)
        self.__rs_client = None
        self.__state_machine.evaluate_network()

    async def __init_peer_auth(self):
        try:
            node_key: bytes = await StubCollection().peer_stub.async_task().get_node_key()
            self.__peer_auth = Signer.from_prikey(node_key)
            ChannelProperty().peer_auth = self.__peer_auth
        except Exception as e:
            utils.exit_and_msg(f"peer auth init fail cause : {e}")

    def __init_block_manager(self):
        logging.debug(f"__init_block_manager() : channel({ChannelProperty().name})")

        channel_name = ChannelProperty().name
        develop = command_arguments.command_values.get(command_arguments.Type.Develop, False)
        store_id = f"{ChannelProperty().peer_port}_{channel_name}" if develop else channel_name
        try:
            self.__block_manager = BlockManager(
                channel_service=self,
                peer_id=ChannelProperty().peer_id,
                channel_name=channel_name,
                store_id=store_id
            )
        except KeyValueStoreError as e:
            utils.exit_and_msg("KeyValueStoreError(" + str(e) + ")")

    def __init_broadcast_scheduler(self):
        scheduler = BroadcastSchedulerFactory.new(channel=ChannelProperty().name,
                                                  self_target=ChannelProperty().peer_target)
        scheduler.start()
        self.__broadcast_scheduler = scheduler

    def _get_radiostations(self):
        radiostations: list = self.get_channel_option().get('radiostations')
        if not radiostations:
            logging.warning(f"no configurations for radiostations.")
            return None

        radiostations = utils.convert_local_ip_to_private_ip(radiostations)
        try:
            radiostations.remove(ChannelProperty().rest_target)
        except ValueError:
            pass

        return radiostations

    async def _init_rs_target(self, refresh_all: bool = False):
        if refresh_all:
            radiostations = self._get_radiostations()
            if radiostations is None:
                return
            await self.__rs_client.init(radiostations)
        else:
            try:
                self.__rs_client.init_next_target()
            except StopIteration:
                return await self._init_rs_target(refresh_all=True)

        ChannelProperty().rs_target = self.__rs_client.target
        self.__inner_service.update_sub_services_properties(relay_target=ChannelProperty().rs_target)

    async def _init_rs_client(self):
        self.__rs_client = RestClient(channel=ChannelProperty().name)
        await self._init_rs_target(refresh_all=True)

    async def __init_score_container(self):
        """create score container and save score_info and score_stub
        """
        try:
            self.__score_info = await self.__run_score_container()
        except BaseException as e:
            logging.error(e)
            traceback.print_exc()
            utils.exit_and_msg(f"run_score_container failed!!")

    async def __init_sub_services(self):
        self.__inner_service.init_sub_services()
        await StubCollection().create_channel_tx_creator_stub(ChannelProperty().name)
        await StubCollection().create_channel_tx_receiver_stub(ChannelProperty().name)

    def __init_node_subscriber(self):
        self.__node_subscriber = NodeSubscriber(
            channel=ChannelProperty().name,
            rs_target=ChannelProperty().rs_target
        )

    async def __run_score_container(self):
        if conf.RUN_ICON_IN_LAUNCHER:
            process_args = ['python3', '-m', 'loopchain', 'score',
                            '--channel', ChannelProperty().name]
            process_args += command_arguments.get_raw_commands_by_filter(
                command_arguments.Type.AMQPTarget,
                command_arguments.Type.AMQPKey,
                command_arguments.Type.Develop,
                command_arguments.Type.ConfigurationFilePath,
                command_arguments.Type.RadioStationTarget
            )
            self.__score_container = CommonSubprocess(process_args)

        await StubCollection().create_icon_score_stub(ChannelProperty().name)
        await StubCollection().icon_score_stubs[ChannelProperty().name].connect()
        await StubCollection().icon_score_stubs[ChannelProperty().name].async_task().hello()
        return None

    def is_support_node_function(self, node_function):
        return conf.NodeType.is_support_node_function(node_function, ChannelProperty().node_type)

    def get_channel_option(self) -> dict:
        return conf.CHANNEL_OPTION[ChannelProperty().name]

    def generate_genesis_block(self):
        if self.__block_manager.blockchain.block_height > -1:
            logging.debug("genesis block was already generated")
            return

        reps = self.block_manager.blockchain.find_preps_addresses_by_roothash(ChannelProperty().crep_root_hash)
        self.__block_manager.blockchain.generate_genesis_block(reps)

    async def subscribe_to_parent(self):
        def _handle_exception(future: asyncio.Future):
            exc = future.exception()
            logging.debug(f"error: {type(exc)}, {str(exc)}")

            if ChannelProperty().node_type != conf.NodeType.CitizenNode:
                logging.debug(f"This node is not Citizen anymore.")
                return

            if isinstance(exc, AnnounceNewBlockError):
                self.__state_machine.block_sync()
                return

            if exc:
                if (self.__state_machine.state != "SubscribeNetwork"
                        or isinstance(exc, UnregisteredException)):
                    self.__state_machine.subscribe_network()
                else:
                    logging.warning(f"Waiting for next subscribe request...")

        utils.logger.spam(f"try subscribe_call_by_citizen target({ChannelProperty().rest_target})")
        subscribe_event = asyncio.Event()
        # try websocket connection, and handle exception in callback
        task = asyncio.ensure_future(
            self.__node_subscriber.start(
                block_height=self.__block_manager.blockchain.block_height,
                event=subscribe_event
            ),
            loop=self.__inner_service.loop
        )
        task.add_done_callback(_handle_exception)

        await subscribe_event.wait()

    def shutdown_peer(self, **kwargs):
        logging.debug(f"shutdown_peer() kwargs = {kwargs}")
        StubCollection().peer_stub.sync_task().stop(message=kwargs['message'])

    def set_peer_type(self, peer_type):
        """Set peer type when peer init only

        :param peer_type:
        :return:
        """
        self.__block_manager.set_peer_type(peer_type)

    def save_peer_manager(self, peer_manager):
        """Save peer_list to leveldb

        :param peer_manager:
        """
        level_db_key_name = str.encode(conf.LEVEL_DB_KEY_FOR_PEER_LIST)

        try:
            dump = peer_manager.dump()
            key_value_store = self.__block_manager.blockchain.blockchain_store
            key_value_store.put(level_db_key_name, dump)
        except AttributeError as e:
            logging.warning("Fail Save Peer_list: " + str(e))

    async def set_peer_type_in_channel(self):
        peer_type = loopchain_pb2.PEER
        leader_id = self.__block_manager.get_next_leader()
        utils.logger.info(f"channel({ChannelProperty().name}) peer_leader: {leader_id}")

        logger_preset = loggers.get_preset()
        if ChannelProperty().peer_id == leader_id:
            logger_preset.is_leader = True
            utils.logger.info(f"Set Peer Type Leader! channel({ChannelProperty().name})")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
        else:
            logger_preset.is_leader = False
        logger_preset.update_logger()

        self.__block_manager.set_peer_type(peer_type)

    def _is_genesis_node(self):
        return ('genesis_data_path' in self.get_channel_option()
                and self.is_support_node_function(conf.NodeFunction.Vote))

    def __ready_to_height_sync(self):
        if self.block_manager.blockchain.block_height >= 0:
            self.block_manager.rebuild_block()
        else:
            if self._is_genesis_node():
                self.generate_genesis_block()

        if not self.is_support_node_function(conf.NodeFunction.Vote) and not ChannelProperty().rs_target:
            utils.exit_and_msg(f"There's no radiostation target to sync block.")

    def reset_leader(self, new_leader_id, block_height=0, complained=False):
        """

        :param new_leader_id:
        :param block_height:
        :param complained:
        :return:
        """

        blockchain = self.__block_manager.blockchain
        prep_targets = blockchain.find_preps_targets_by_roothash(self.__block_manager.epoch.reps_hash)
        if ChannelProperty().peer_id not in prep_targets:
            if self.is_support_node_function(conf.NodeFunction.Vote):
                utils.logger.warning(f"This peer needs to switch to citizen.")
            return

        leader_peer_target = prep_targets.get(new_leader_id, None)

        if block_height > 0 and block_height != self.block_manager.blockchain.last_block.header.height + 1:
            utils.logger.warning(f"height behind peer can not take leader role. block_height({block_height}), "
                                 f"last_block.header.height("
                                 f"{self.block_manager.blockchain.last_block.header.height})")
            return

        if leader_peer_target is None:
            logging.warning(f"in peer_service:reset_leader There is no peer by peer_id({new_leader_id})")
            return

        utils.logger.spam(f"reset_leader target({leader_peer_target}), complained={complained}")

        if complained:
            self.__block_manager.blockchain.reset_leader_made_block_count()
            self.__block_manager.epoch.new_round(new_leader_id)

        if ChannelProperty().peer_id == new_leader_id:
            utils.logger.debug("Set Peer Type Leader!")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
            self.state_machine.turn_to_leader()
        else:
            utils.logger.debug("Set Peer Type Peer!")
            peer_type = loopchain_pb2.PEER
            self.state_machine.turn_to_peer()

        self.__block_manager.set_peer_type(peer_type)
        self.turn_on_leader_complain_timer()

    def score_write_precommit_state(self, block: Block):
        logging.debug(f"call score commit {ChannelProperty().name} {block.header.height} {block.header.hash.hex()}")

        new_block_hash = block.header.hash
        try:
            old_block_hash = self.__block_manager.get_old_block_hash(block.header.height, new_block_hash)
        except KeyError:
            old_block_hash = new_block_hash

        logging.debug(f"Block Hash : {old_block_hash} -> {new_block_hash}")
        request = {
            "blockHeight": block.header.height,
            "oldBlockHash": old_block_hash.hex(),
            "newBlockHash": new_block_hash.hex()
        }
        request = convert_params(request, ParamType.write_precommit_state)

        stub = StubCollection().icon_score_stubs[ChannelProperty().name]
        precommit_result: dict = stub.sync_task().write_precommit_state(request)
        if "error" in precommit_result:
            raise WritePrecommitStateError(precommit_result['error'])

        self.__block_manager.pop_old_block_hashes(block.header.height)
        return True

    def callback_leader_complain_timeout(self):
        if self.state_machine.state == "BlockGenerate":
            _, new_leader_id = self.block_manager.get_leader_ids_for_complaint()
            if new_leader_id == ChannelProperty().peer_id:
                utils.logger.debug(
                    f"Cannot convert the state to the `LeaderComplain` from the `BlockGenerate`"
                    f", because I'm the BlockGenerate and the next leader candidate.")
                return

        self.state_machine.leader_complain()

    def turn_on_leader_complain_timer(self):
        """Turn on a leader complaint timer by the configuration name of `ALLOW_MAKE_EMPTY_BLOCK`.
        """
        if conf.ALLOW_MAKE_EMPTY_BLOCK:
            self.reset_leader_complain_timer()
        else:
            self.start_leader_complain_timer_if_tx_exists()

    def reset_leader_complain_timer(self):
        utils.logger.spam(f"reset_leader_complain_timer in channel service. ("
                          f"{self.__block_manager.epoch.round}/{self.__block_manager.epoch.complain_duration})")

        if self.__timer_service.get_timer(TimerService.TIMER_KEY_LEADER_COMPLAIN):
            utils.logger.spam(f"Try to stop leader complaint timer for reset.")
            self.stop_leader_complain_timer()

        self.start_leader_complain_timer()

    def start_leader_complain_timer_if_tx_exists(self):
        if not self.block_manager.get_tx_queue().is_empty_in_status(TransactionStatusInQueue.normal):
            utils.logger.debug("Start leader complain timer because unconfirmed tx exists.")
            self.start_leader_complain_timer()

    def start_leader_complain_timer(self, duration=None):
        if duration is None:
            duration = self.__block_manager.epoch.complain_duration
        utils.logger.spam(
            f"start_leader_complain_timer in channel service. ({self.block_manager.epoch.round}/{duration})")
        if self.state_machine.state in ("Vote", "LeaderComplain"):
            self.__timer_service.add_timer_convenient(timer_key=TimerService.TIMER_KEY_LEADER_COMPLAIN,
                                                      duration=duration,
                                                      is_repeat=True, callback=self.callback_leader_complain_timeout)

    def stop_leader_complain_timer(self):
        utils.logger.spam(f"stop_leader_complain_timer in channel service.")
        self.__timer_service.stop_timer(TimerService.TIMER_KEY_LEADER_COMPLAIN)

    def start_subscribe_timer(self):
        self.__timer_service.add_timer_convenient(timer_key=TimerService.TIMER_KEY_SUBSCRIBE,
                                                  duration=conf.SUBSCRIBE_RETRY_TIMER,
                                                  is_repeat=True, callback=self.subscribe_network)

    def stop_subscribe_timer(self):
        self.__timer_service.stop_timer(TimerService.TIMER_KEY_SUBSCRIBE)

    def start_shutdown_timer_when_fail_subscribe(self):
        error = f"Shutdown by Subscribe retry timeout({conf.SHUTDOWN_TIMER} sec)"
        self.__timer_service.add_timer_convenient(timer_key=TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE,
                                                  duration=conf.SHUTDOWN_TIMER, callback=self.shutdown_peer,
                                                  callback_kwargs={"message": error})

    def stop_shutdown_timer_when_fail_subscribe(self):
        self.__timer_service.stop_timer(TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE)

    def start_block_monitoring_timer(self):
        self.__timer_service.add_timer_convenient(timer_key=TimerService.TIMER_KEY_BLOCK_MONITOR,
                                                  duration=conf.TIMEOUT_FOR_BLOCK_MONITOR,
                                                  callback=self.state_machine.subscribe_network)

    def reset_block_monitoring_timer(self):
        if self.__timer_service.get_timer(TimerService.TIMER_KEY_BLOCK_MONITOR):
            self.__timer_service.reset_timer(TimerService.TIMER_KEY_BLOCK_MONITOR)

    def stop_block_monitoring_timer(self):
        self.__timer_service.stop_timer(TimerService.TIMER_KEY_BLOCK_MONITOR)

    def stop_ws_heartbeat_timer(self):
        self.__timer_service.stop_timer(TimerService.TIMER_KEY_WS_HEARTBEAT)
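
The serve()/close() pair above follows a common asyncio shutdown pattern: schedule the main coroutine on the inner service's loop, map SIGINT/SIGTERM to a handler that stops the loop, and once run_forever() returns, drain async generators, cancel pending tasks, and close the loop. A self-contained sketch of just that pattern (pure asyncio, no loopchain dependencies):

import asyncio
import signal
from contextlib import suppress

async def serve_forever():
    # Stand-in for the channel's long-running work.
    while True:
        await asyncio.sleep(1)

loop = asyncio.new_event_loop()
loop.create_task(serve_forever())
loop.add_signal_handler(signal.SIGINT, loop.stop)
loop.add_signal_handler(signal.SIGTERM, loop.stop)
try:
    loop.run_forever()    # returns once loop.stop() is called by a signal
finally:
    loop.run_until_complete(loop.shutdown_asyncgens())
    for task in asyncio.all_tasks(loop):    # cancel leftovers, as _cancel_tasks() does
        task.cancel()
        with suppress(asyncio.CancelledError):
            loop.run_until_complete(task)
    loop.close()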
Example #3
class RadioStationService:
    """Radiostation 의 main Class
    peer 를 위한 outer service 와 관리용 admin service 두개의 gRPC interface 를 가진다.
    """

    # Certificate handling
    __ca = None

    def __init__(self,
                 radio_station_ip=None,
                 cert_path=None,
                 cert_pass=None,
                 rand_seed=None):
        """RadioStation Init

        :param radio_station_ip: RadioStation IP
        :param cert_path: path to the RadioStation certificate directory
        :param cert_pass: RadioStation private key password
        """
        logger_preset = loggers.get_preset()
        logger_preset.peer_id = "RadioStation"
        logger_preset.update_logger()

        if radio_station_ip is None:
            radio_station_ip = conf.IP_RADIOSTATION
        logging.info("Set RadioStationService IP: " + radio_station_ip)
        if cert_path is not None:
            logging.info("CA Certificate Path : " + cert_path)

        self.__common_service = CommonService(loopchain_pb2)
        self.__admin_manager = AdminManager("station")
        self.__channel_manager = None
        self.__rest_service = None
        self.__timer_service = TimerService()

        # RS has two states (active, standby): active means the outer service is enabled;
        # standby means the outer service is stopped and this RS heartbeats to the other (active) RS.
        self.__is_active = False

        # Certification class
        self.__ca = CertificateAuthorization()

        if cert_path is not None:
            # Load the certificate
            self.__ca.load_pki(cert_path, cert_pass)

        logging.info("Current RadioStation SECURITY_MODE : " +
                     str(self.__ca.is_secure))

        # gRPC service for Radiostation
        self.__outer_service = OuterService()
        self.__admin_service = AdminService(self.__admin_manager)

        # dictionary structured as {group_id: [{peer_id: IP}]}
        self.peer_groups = {conf.ALL_GROUP_ID: []}

        # Handles peer security
        self.auth = {}

        ObjectManager().rs_service = self

    def __del__(self):
        pass

    def launch_block_generator(self):
        pass

    @property
    def admin_manager(self):
        return self.__admin_manager

    @property
    def channel_manager(self):
        return self.__channel_manager

    @property
    def common_service(self):
        return self.__common_service

    @property
    def timer_service(self):
        return self.__timer_service

    @property
    def random_table(self):
        return self.__random_table

    def __broadcast_new_peer(self, peer_request):
        """새로 들어온 peer 를 기존의 peer 들에게 announce 한다."""

        logging.debug("Broadcast New Peer.... " + str(peer_request))
        if self.__channel_manager is not None:
            self.__channel_manager.broadcast(peer_request.channel,
                                             "AnnounceNewPeer", peer_request)

    def check_peer_status(self, channel):
        """service loop for status heartbeat check to peer list

        :return:
        """
        util.logger.spam(
            f"rs_service:check_peer_status(Heartbeat...{channel}) "
            f"for reset Leader and delete no response Peer")

        peer_manager = self.__channel_manager.get_peer_manager(channel)
        delete_peer_list = peer_manager.check_peer_status()

        for delete_peer in delete_peer_list:
            logging.debug(f"delete peer {delete_peer.peer_id}")
            message = loopchain_pb2.PeerID(peer_id=delete_peer.peer_id,
                                           channel=channel,
                                           group_id=delete_peer.group_id)
            self.__channel_manager.broadcast(channel, "AnnounceDeletePeer",
                                             message)

        # Save the current peer_manager after the heartbeat to peers.
        ObjectManager().rs_service.admin_manager.save_peer_manager(
            channel, peer_manager)

    def __create_random_table(self, rand_seed: int) -> list:
        """create random_table using random_seed
        table size define in conf.RANDOM_TABLE_SIZE

        :param rand_seed: random seed for create random table
        :return: random table
        """
        random.seed(rand_seed)
        random_table = []
        for i in range(conf.RANDOM_TABLE_SIZE):
            random_num: int = random.getrandbits(conf.RANDOM_SIZE)
            random_table.append(random_num)

        return random_table

    def serve(self, port=None, event_for_init: multiprocessing.Event = None):
        """Peer(BlockGenerator Peer) to RadioStation

        :param port: RadioStation Peer
        """
        if port is None:
            port = conf.PORT_RADIOSTATION
        stopwatch_start = timeit.default_timer()

        self.__channel_manager = ChannelManager(self.__common_service)

        if conf.ENABLE_REST_SERVICE:
            self.__rest_service = RestServiceRS(int(port))

        loopchain_pb2_grpc.add_RadioStationServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)
        loopchain_pb2_grpc.add_AdminServiceServicer_to_server(
            self.__admin_service, self.__common_service.inner_server)

        logging.info("Start Radio Station service at port: " + str(port))

        self.__common_service.start(port)
        self.__timer_service.start()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Start Radio Station service at port: {port} start duration({stopwatch_duration})"
        )

        if event_for_init is not None:
            event_for_init.set()

        signal.signal(signal.SIGINT, self.close)
        signal.signal(signal.SIGTERM, self.close)

        # Wait for the services to stop.
        self.__common_service.wait()
        self.__timer_service.wait()

        if self.__rest_service is not None:
            self.__rest_service.stop()

    def close(self, sig, frame):
        self.__common_service.stop()
        self.__timer_service.stop()
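
A note on __create_random_table() above: seeding Python's PRNG makes the table fully deterministic, so any two parties that share rand_seed derive an identical table. A standalone sketch (table_size and bits are illustrative stand-ins for conf.RANDOM_TABLE_SIZE and conf.RANDOM_SIZE):

import random

def create_random_table(rand_seed: int, table_size: int = 4, bits: int = 256) -> list:
    random.seed(rand_seed)    # same seed -> same sequence of numbers
    return [random.getrandbits(bits) for _ in range(table_size)]

assert create_random_table(42) == create_random_table(42)   # reproducible
assert create_random_table(42) != create_random_table(43)   # seed-dependent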
Example #4
class _Broadcaster:
    """broadcast class for each channel"""

    THREAD_INFO_KEY = "thread_info"
    THREAD_VARIABLE_STUB_TO_SELF_PEER = "stub_to_self_peer"
    THREAD_VARIABLE_PEER_STATUS = "peer_status"

    SELF_PEER_TARGET_KEY = "self_peer_target"
    LEADER_PEER_TARGET_KEY = "leader_peer_target"

    def __init__(self, channel: str, self_target: str = None):
        self.__channel = channel
        self.__self_target = self_target

        self.__audience = {}  # self.__audience[peer_target] = stub_manager
        self.__thread_variables = dict()
        self.__thread_variables[
            self.THREAD_VARIABLE_PEER_STATUS] = PeerThreadStatus.normal

        if conf.IS_BROADCAST_ASYNC:
            self.__broadcast_run = self.__broadcast_run_async
        else:
            self.__broadcast_run = self.__broadcast_run_sync

        self.__handler_map = {
            BroadcastCommand.CREATE_TX:
            self.__handler_create_tx,
            BroadcastCommand.CONNECT_TO_LEADER:
            self.__handler_connect_to_leader,
            BroadcastCommand.SUBSCRIBE:
            self.__handler_subscribe,
            BroadcastCommand.UNSUBSCRIBE:
            self.__handler_unsubscribe,
            BroadcastCommand.BROADCAST:
            self.__handler_broadcast,
            BroadcastCommand.MAKE_SELF_PEER_CONNECTION:
            self.__handler_connect_to_self_peer,
        }

        self.__broadcast_with_self_target_methods = {
            "AddTx", "AddTxList", "BroadcastVote"
        }

        self.stored_tx = queue.Queue()

        self.__timer_service = TimerService()

    @property
    def is_running(self):
        return self.__timer_service.is_run()

    def start(self):
        self.__timer_service.start()

    def stop(self):
        if self.__timer_service.is_run():
            self.__timer_service.stop()
            self.__timer_service.wait()

    def handle_command(self, command, params):
        func = self.__handler_map[command]
        func(params)

    def __keep_grpc_connection(self, result, timeout,
                               stub_manager: StubManager):
        return isinstance(result, _Rendezvous) \
               and result.code() in (grpc.StatusCode.DEADLINE_EXCEEDED, grpc.StatusCode.UNAVAILABLE) \
               and stub_manager.elapsed_last_succeed_time() < timeout

    def __broadcast_retry_async(self, peer_target, method_name, method_param,
                                retry_times, timeout, stub, result):
        if isinstance(result,
                      _Rendezvous) and result.code() == grpc.StatusCode.OK:
            return
        if isinstance(result, futures.Future) and not result.exception():
            return

        logging.debug(f"try retry to : peer_target({peer_target})\n")
        if retry_times > 0:
            try:
                stub_manager: StubManager = self.__audience[peer_target]
                if stub_manager is None:
                    logging.warning(
                        f"broadcast_thread:__broadcast_retry_async Failed to connect to ({peer_target})."
                    )
                    return
                retry_times -= 1
                is_stub_reuse = stub_manager.stub != stub or self.__keep_grpc_connection(
                    result, timeout, stub_manager)
                self.__call_async_to_target(peer_target, method_name,
                                            method_param, is_stub_reuse,
                                            retry_times, timeout)
            except KeyError as e:
                logging.debug(
                    f"broadcast_thread:__broadcast_retry_async ({peer_target}) not in audience. ({e})"
                )
        else:
            exception = None
            if isinstance(result, _Rendezvous):
                exception = result.details()
            elif isinstance(result, futures.Future):
                exception = result.exception()

            logging.warning(f"__broadcast_run_async fail({result})\n"
                            f"cause by: {exception}\n"
                            f"peer_target({peer_target})\n"
                            f"method_name({method_name})\n"
                            f"retry_remains({retry_times})\n"
                            f"timeout({timeout})")

    def __call_async_to_target(self, peer_target, method_name, method_param,
                               is_stub_reuse, retry_times, timeout):
        try:
            stub_manager: StubManager = self.__audience[peer_target]
            if stub_manager is None:
                logging.debug(
                    f"broadcast_thread:__call_async_to_target Failed to connect to ({peer_target})."
                )
                return
            call_back_partial = partial(self.__broadcast_retry_async,
                                        peer_target, method_name, method_param,
                                        retry_times, timeout,
                                        stub_manager.stub)
            stub_manager.call_async(method_name=method_name,
                                    message=method_param,
                                    is_stub_reuse=is_stub_reuse,
                                    call_back=call_back_partial,
                                    timeout=timeout)
        except KeyError as e:
            logging.debug(
                f"broadcast_thread:__call_async_to_target ({peer_target}) not in audience. ({e})"
            )

    def __broadcast_run_async(self,
                              method_name,
                              method_param,
                              retry_times=None,
                              timeout=None):
        """call gRPC interface of audience

        :param method_name: gRPC interface
        :param method_param: gRPC message
        """

        if timeout is None:
            timeout = conf.GRPC_TIMEOUT_BROADCAST_RETRY

        retry_times = conf.BROADCAST_RETRY_TIMES if retry_times is None else retry_times
        # logging.debug(f"broadcast({method_name}) async... ({len(self.__audience)})")

        for target in self.__get_broadcast_targets(method_name):
            # util.logger.debug(f"method_name({method_name}), peer_target({target})")
            self.__call_async_to_target(target, method_name, method_param,
                                        True, retry_times, timeout)

    def __broadcast_run_sync(self,
                             method_name,
                             method_param,
                             retry_times=None,
                             timeout=None):
        """call gRPC interface of audience

        :param method_name: gRPC interface
        :param method_param: gRPC message
        """
        # logging.debug(f"broadcast({method_name}) sync... ({len(self.__audience)})")

        if timeout is None:
            timeout = conf.GRPC_TIMEOUT_BROADCAST_RETRY

        retry_times = conf.BROADCAST_RETRY_TIMES if retry_times is None else retry_times

        for target in self.__get_broadcast_targets(method_name):
            try:
                stub_manager: StubManager = self.__audience[target]
                if stub_manager is None:
                    logging.debug(
                        f"broadcast_thread:__broadcast_run_sync Failed to connect to ({target})."
                    )
                    continue

                response = stub_manager.call_in_times(method_name=method_name,
                                                      message=method_param,
                                                      timeout=timeout,
                                                      retry_times=retry_times)
                if response is None:
                    logging.warning(
                        f"broadcast_thread:__broadcast_run_sync fail ({method_name}) "
                        f"target({target}) ")
            except KeyError as e:
                logging.debug(
                    f"broadcast_thread:__broadcast_run_sync ({target}) not in audience. ({e})"
                )

    def __handler_subscribe(self, audience_target):
        logging.debug(
            "BroadcastThread received subscribe command peer_target: " +
            str(audience_target))
        if audience_target not in self.__audience:
            stub_manager = StubManager.get_stub_manager_to_server(
                audience_target,
                loopchain_pb2_grpc.PeerServiceStub,
                time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
                is_allow_null_stub=True,
                ssl_auth_type=conf.GRPC_SSL_TYPE)
            self.__audience[audience_target] = stub_manager

    def __handler_unsubscribe(self, audience_target):
        # logging.debug(f"BroadcastThread received unsubscribe command peer_target({unsubscribe_peer_target})")
        try:
            del self.__audience[audience_target]
        except KeyError:
            logging.warning(f"Already deleted peer: {audience_target}")

    def __handler_broadcast(self, broadcast_param):
        # logging.debug("BroadcastThread received broadcast command")
        broadcast_method_name = broadcast_param[0]
        broadcast_method_param = broadcast_param[1]
        broadcast_method_kwparam = broadcast_param[2]
        # logging.debug("BroadcastThread method name: " + broadcast_method_name)
        # logging.debug("BroadcastThread method param: " + str(broadcast_method_param))
        self.__broadcast_run(broadcast_method_name, broadcast_method_param,
                             **broadcast_method_kwparam)

    def __make_tx_list_message(self):
        tx_list = []
        tx_list_size = 0
        tx_list_count = 0
        remains = False
        while not self.stored_tx.empty():
            stored_tx_item = self.stored_tx.get()
            tx_list_size += len(stored_tx_item)
            tx_list_count += 1
            if tx_list_size >= conf.MAX_TX_SIZE_IN_BLOCK or tx_list_count >= conf.MAX_TX_COUNT_IN_ADDTX_LIST:
                self.stored_tx.put(stored_tx_item)
                remains = True
                break
            tx_list.append(stored_tx_item.get_tx_message())
        message = loopchain_pb2.TxSendList(channel=self.__channel,
                                           tx_list=tx_list)

        return remains, message

    def __send_tx_by_timer(self, **kwargs):
        # util.logger.spam(f"broadcast_scheduler:__send_tx_by_timer")
        if (self.__thread_variables[self.THREAD_VARIABLE_PEER_STATUS]
                == PeerThreadStatus.leader_complained):
            logging.warning(
                "Leader is complained; txs are stored in the queue temporarily: "
                + str(self.stored_tx.qsize()))
        else:
            # Send single tx for test
            # stored_tx_item = self.stored_tx.get()
            # self.__broadcast_run("AddTx", stored_tx_item.get_tx_message())

            # Send multiple tx
            remains, message = self.__make_tx_list_message()
            self.__broadcast_run("AddTxList", message)
            if remains:
                self.__send_tx_in_timer()

    def __send_tx_in_timer(self, tx_item=None):
        # util.logger.spam(f"broadcast_scheduler:__send_tx_in_timer")
        duration = 0
        if tx_item:
            self.stored_tx.put(tx_item)
            duration = conf.SEND_TX_LIST_DURATION

        if TimerService.TIMER_KEY_ADD_TX not in self.__timer_service.timer_list:
            self.__timer_service.add_timer(
                TimerService.TIMER_KEY_ADD_TX,
                Timer(target=TimerService.TIMER_KEY_ADD_TX,
                      duration=duration,
                      callback=self.__send_tx_by_timer,
                      callback_kwargs={}))

    def __handler_create_tx(self, create_tx_param):
        # logging.debug(f"Broadcast create_tx....")
        try:
            tx_item = TxItem.create_tx_item(create_tx_param, self.__channel)
        except Exception as e:
            logging.warning(f"tx in channel({self.__channel})")
            logging.warning(f"__handler_create_tx: meta({create_tx_param})")
            logging.warning(f"tx dumps fail ({e})")
            return

        self.__send_tx_in_timer(tx_item)

    def __handler_connect_to_leader(self, connect_to_leader_param):
        # logging.debug("(tx thread) try... connect to leader: " + str(connect_to_leader_param))
        self.__thread_variables[
            self.LEADER_PEER_TARGET_KEY] = connect_to_leader_param

        # stub_to_self_peer = __thread_variables[self.THREAD_VARIABLE_STUB_TO_SELF_PEER]

        self.__thread_variables[
            self.THREAD_VARIABLE_PEER_STATUS] = PeerThreadStatus.normal

    def __handler_connect_to_self_peer(self, connect_param):
        # Create a stub to connect to the parent peer that spawned this process.
        # Returning through a pipe can raise errors when sends and receives are mismatched,
        # so for a reliable connection we talk to the parent process over a gRPC stub as well.
        logging.debug("try connect to self peer: " + str(connect_param))

        stub_to_self_peer = StubManager.get_stub_manager_to_server(
            connect_param,
            loopchain_pb2_grpc.InnerServiceStub,
            time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
            is_allow_null_stub=True,
            ssl_auth_type=conf.SSLAuthType.none)
        self.__thread_variables[self.SELF_PEER_TARGET_KEY] = connect_param
        self.__thread_variables[
            self.THREAD_VARIABLE_STUB_TO_SELF_PEER] = stub_to_self_peer

    def __get_broadcast_targets(self, method_name):

        peer_targets = list(self.__audience)
        if ObjectManager().rs_service:
            return peer_targets
        else:
            if self.__self_target is not None and method_name not in self.__broadcast_with_self_target_methods:
                peer_targets.remove(self.__self_target)
            return peer_targets
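
__make_tx_list_message() above implements a simple batching rule: drain the queue into one message until a byte-size or count cap is hit, push the overflowing item back, and report whether anything remains so the caller can schedule another timer round. A self-contained sketch of that rule (the caps are illustrative, not the real conf values):

import queue

MAX_BATCH_BYTES = 1024    # stand-in for conf.MAX_TX_SIZE_IN_BLOCK
MAX_BATCH_COUNT = 100     # stand-in for conf.MAX_TX_COUNT_IN_ADDTX_LIST

def make_batch(pending: queue.Queue):
    batch, size, count = [], 0, 0
    remains = False
    while not pending.empty():
        item = pending.get()
        size += len(item)
        count += 1
        if size >= MAX_BATCH_BYTES or count >= MAX_BATCH_COUNT:
            pending.put(item)    # the overflowing item waits for the next round
            remains = True
            break
        batch.append(item)
    return remains, batch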
Example #5
class _Broadcaster:
    """broadcast class for each channel"""
    THREAD_VARIABLE_PEER_STATUS = "peer_status"

    def __init__(self, channel: str, self_target: str = None):
        self.__channel = channel
        self.__self_target = self_target

        self.__audience = {}  # self.__audience[peer_target] = stub_manager
        self.__thread_variables = dict()
        self.__thread_variables[
            self.THREAD_VARIABLE_PEER_STATUS] = PeerThreadStatus.normal

        if conf.IS_BROADCAST_ASYNC:
            self.__broadcast_run = self.__broadcast_run_async
        else:
            self.__broadcast_run = self.__broadcast_run_sync

        self.__handler_map = {
            BroadcastCommand.CREATE_TX:
            self.__handler_create_tx,
            BroadcastCommand.UPDATE_AUDIENCE:
            self.__handler_update_audience,
            BroadcastCommand.BROADCAST:
            self.__handler_broadcast,
            BroadcastCommand.SEND_TO_SINGLE_TARGET:
            self.__handler_send_to_single_target,
        }

        self.__broadcast_with_self_target_methods = {
            "AddTx", "AddTxList", "BroadcastVote"
        }

        self.stored_tx = queue.Queue()

        self.__timer_service = TimerService()

    @property
    def is_running(self):
        return self.__timer_service.is_run()

    def start(self):
        self.__timer_service.start()

    def stop(self):
        if self.__timer_service.is_run():
            self.__timer_service.stop()
            self.__timer_service.wait()

    def handle_command(self, command, params):
        func = self.__handler_map[command]
        func(params)

    def __keep_grpc_connection(self, result, timeout,
                               stub_manager: StubManager):
        return isinstance(result, _Rendezvous) \
               and result.code() in (grpc.StatusCode.DEADLINE_EXCEEDED, grpc.StatusCode.UNAVAILABLE) \
               and stub_manager.elapsed_last_succeed_time() < timeout

    def __broadcast_retry_async(self, peer_target, method_name, method_param,
                                retry_times, timeout, stub, result):
        if isinstance(result,
                      _Rendezvous) and result.code() == grpc.StatusCode.OK:
            return
        if isinstance(result, futures.Future) and not result.exception():
            return

        logging.debug(f"try retry to : peer_target({peer_target})\n")
        if retry_times > 0:
            try:
                stub_manager: StubManager = self.__audience[peer_target]
                if stub_manager is None:
                    logging.warning(
                        f"broadcast_thread:__broadcast_retry_async Failed to connect to ({peer_target})."
                    )
                    return
                retry_times -= 1
                is_stub_reuse = stub_manager.stub != stub or self.__keep_grpc_connection(
                    result, timeout, stub_manager)
                self.__call_async_to_target(peer_target, method_name,
                                            method_param, is_stub_reuse,
                                            retry_times, timeout)
            except KeyError as e:
                logging.debug(
                    f"broadcast_thread:__broadcast_retry_async ({peer_target}) not in audience. ({e})"
                )
        else:
            exception = None
            if isinstance(result, _Rendezvous):
                exception = result.details()
            elif isinstance(result, futures.Future):
                exception = result.exception()

            logging.warning(f"__broadcast_run_async fail({result})\n"
                            f"cause by: {exception}\n"
                            f"peer_target({peer_target})\n"
                            f"method_name({method_name})\n"
                            f"retry_remains({retry_times})\n"
                            f"timeout({timeout})")

    def __call_async_to_target(self, peer_target, method_name, method_param,
                               is_stub_reuse, retry_times, timeout):
        try:
            stub_manager: StubManager = self.__audience[peer_target]
            if stub_manager is None:
                logging.debug(
                    f"broadcast_thread:__call_async_to_target Failed to connect to ({peer_target})."
                )
                return
            call_back_partial = partial(self.__broadcast_retry_async,
                                        peer_target, method_name, method_param,
                                        retry_times, timeout,
                                        stub_manager.stub)
            stub_manager.call_async(method_name=method_name,
                                    message=method_param,
                                    is_stub_reuse=is_stub_reuse,
                                    call_back=call_back_partial,
                                    timeout=timeout)
        except KeyError as e:
            logging.debug(
                f"broadcast_thread:__call_async_to_target ({peer_target}) not in audience. ({e})"
            )

    def __broadcast_run_async(self,
                              method_name,
                              method_param,
                              retry_times=None,
                              timeout=None):
        """call gRPC interface of audience

        :param method_name: gRPC interface
        :param method_param: gRPC message
        """

        if timeout is None:
            timeout = conf.GRPC_TIMEOUT_BROADCAST_RETRY

        retry_times = conf.BROADCAST_RETRY_TIMES if retry_times is None else retry_times
        # logging.debug(f"broadcast({method_name}) async... ({len(self.__audience)})")

        for target in self.__get_broadcast_targets(method_name):
            # util.logger.debug(f"method_name({method_name}), peer_target({target})")
            self.__call_async_to_target(target, method_name, method_param,
                                        True, retry_times, timeout)

    def __broadcast_run_sync(self,
                             method_name,
                             method_param,
                             retry_times=None,
                             timeout=None):
        """call gRPC interface of audience

        :param method_name: gRPC interface
        :param method_param: gRPC message
        """
        # logging.debug(f"broadcast({method_name}) sync... ({len(self.__audience)})")

        if timeout is None:
            timeout = conf.GRPC_TIMEOUT_BROADCAST_RETRY

        retry_times = conf.BROADCAST_RETRY_TIMES if retry_times is None else retry_times

        for target in self.__get_broadcast_targets(method_name):
            try:
                stub_manager: StubManager = self.__audience[target]
                if stub_manager is None:
                    logging.debug(
                        f"broadcast_thread:__broadcast_run_sync Failed to connect to ({target})."
                    )
                    continue

                response = stub_manager.call_in_times(method_name=method_name,
                                                      message=method_param,
                                                      timeout=timeout,
                                                      retry_times=retry_times)
                if response is None:
                    logging.warning(
                        f"broadcast_thread:__broadcast_run_sync fail ({method_name}) "
                        f"target({target}) ")
            except KeyError as e:
                logging.debug(
                    f"broadcast_thread:__broadcast_run_sync ({target}) not in audience. ({e})"
                )

    def __handler_send_to_single_target(self, param):
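        # param is a 3-tuple: (gRPC method name, message, peer target); the call
        # goes to that single target once, with no retries (retry_times=0).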
        method_name = param[0]
        method_param = param[1]
        target = param[2]
        self.__call_async_to_target(target, method_name, method_param, True, 0,
                                    conf.GRPC_TIMEOUT_BROADCAST_RETRY)

    def __add_audience(self, audience_target):
        util.logger.debug(f"audience_target({audience_target})")
        if audience_target not in self.__audience:
            stub_manager = StubManager(audience_target,
                                       loopchain_pb2_grpc.PeerServiceStub,
                                       ssl_auth_type=conf.GRPC_SSL_TYPE)
            self.__audience[audience_target] = stub_manager

    def __handler_update_audience(self, audience_targets):
        old_audience = self.__audience.copy()

        for audience_target in audience_targets:
            self.__add_audience(audience_target)
            old_audience.pop(audience_target, None)

        for old_audience_target in old_audience:
            old_stubmanager: StubManager = self.__audience.pop(
                old_audience_target, None)
            # TODO If necessary, close grpc with old_stubmanager. If not necessary just remove this comment.
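            # A hedged sketch of the TODO above, assuming StubManager exposes its
            # grpc.Channel as an attribute (the name `channel` is an assumption):
            # if old_stubmanager is not None and getattr(old_stubmanager, "channel", None):
            #     old_stubmanager.channel.close()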

    def __handler_broadcast(self, broadcast_param):
        # util.logger.debug(f"BroadcastThread received broadcast command")
        broadcast_method_name = broadcast_param[0]
        broadcast_method_param = broadcast_param[1]
        broadcast_method_kwparam = broadcast_param[2]
        # util.logger.debug("BroadcastThread method name: " + broadcast_method_name)
        # util.logger.debug("BroadcastThread method param: " + str(broadcast_method_param))
        self.__broadcast_run(broadcast_method_name, broadcast_method_param,
                             **broadcast_method_kwparam)

    def __make_tx_list_message(self):
        tx_list = []
        tx_list_size = 0
        tx_list_count = 0
        remains = False
        while not self.stored_tx.empty():
            stored_tx_item = self.stored_tx.get()
            tx_list_size += len(stored_tx_item)
            tx_list_count += 1
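            # Cap the batch by total byte size and tx count; on overflow, requeue the
            # current item and report that txs remain for the next timer run.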
            if tx_list_size >= conf.MAX_TX_SIZE_IN_BLOCK or tx_list_count >= conf.MAX_TX_COUNT_IN_ADDTX_LIST:
                self.stored_tx.put(stored_tx_item)
                remains = True
                break
            tx_list.append(stored_tx_item.get_tx_message())
        message = loopchain_pb2.TxSendList(channel=self.__channel,
                                           tx_list=tx_list)

        return remains, message

    def __send_tx_by_timer(self, **kwargs):
        # util.logger.spam(f"broadcast_scheduler:__send_tx_by_timer")
        if self.__thread_variables[self.THREAD_VARIABLE_PEER_STATUS] \
                == PeerThreadStatus.leader_complained:
            logging.warning("Leader is complained; tx is kept in the queue temporarily: "
                            + str(self.stored_tx.qsize()))
        else:
            # Send single tx for test
            # stored_tx_item = self.stored_tx.get()
            # self.__broadcast_run("AddTx", stored_tx_item.get_tx_message())

            # Send multiple tx
            remains, message = self.__make_tx_list_message()
            self.__broadcast_run("AddTxList", message)
            if remains:
                self.__send_tx_in_timer()

    def __send_tx_in_timer(self, tx_item=None):
        # util.logger.spam(f"broadcast_scheduler:__send_tx_in_timer")
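        # With no tx_item (draining leftovers) the timer fires immediately (duration 0);
        # with a new tx_item, it is queued and sent after SEND_TX_LIST_DURATION.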
        duration = 0
        if tx_item:
            self.stored_tx.put(tx_item)
            duration = conf.SEND_TX_LIST_DURATION

        if TimerService.TIMER_KEY_ADD_TX not in self.__timer_service.timer_list:
            self.__timer_service.add_timer(
                TimerService.TIMER_KEY_ADD_TX,
                Timer(target=TimerService.TIMER_KEY_ADD_TX,
                      duration=duration,
                      callback=self.__send_tx_by_timer,
                      callback_kwargs={}))

    def __handler_create_tx(self, create_tx_param):
        # logging.debug(f"Broadcast create_tx....")
        try:
            tx_item = TxItem.create_tx_item(create_tx_param, self.__channel)
        except Exception as e:
            logging.warning(f"tx in channel({self.__channel})")
            logging.warning(f"__handler_create_tx: meta({create_tx_param})")
            logging.warning(f"tx dumps fail ({e})")
            return

        self.__send_tx_in_timer(tx_item)

    def __get_broadcast_targets(self, method_name):

        peer_targets = list(self.__audience)
        if self.__self_target is not None and method_name not in self.__broadcast_with_self_target_methods:
            peer_targets.remove(self.__self_target)
        return peer_targets
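
# A minimal usage sketch for the scheduler above (hedged): it mirrors how
# ChannelService.__init_broadcast_scheduler below drives it; the channel name
# and peer targets here are illustrative assumptions.
def _broadcast_scheduler_usage_sketch():
    scheduler = BroadcastScheduler(channel="my_channel",
                                   self_target="127.0.0.1:7100")
    scheduler.start()
    # enqueue a SUBSCRIBE command for another peer and wait for it to apply
    future = scheduler.schedule_job(BroadcastCommand.SUBSCRIBE, "127.0.0.1:7200")
    future.result(conf.TIMEOUT_FOR_FUTURE)
    scheduler.stop()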
Exemplo n.º 6
0
class ChannelService:
    def __init__(self, channel_name, amqp_target, amqp_key):
        self.__block_manager: BlockManager = None
        self.__score_container: CommonSubprocess = None
        self.__score_info: dict = None
        self.__peer_auth: Signer = None
        self.__peer_manager: PeerManager = None
        self.__broadcast_scheduler: BroadcastScheduler = None
        self.__radio_station_stub = None
        self.__consensus = None
        # self.__proposer: Proposer = None
        # self.__acceptor: Acceptor = None
        self.__timer_service = TimerService()
        self.__node_subscriber: NodeSubscriber = None

        loggers.get_preset().channel_name = channel_name
        loggers.get_preset().update_logger()

        channel_queue_name = conf.CHANNEL_QUEUE_NAME_FORMAT.format(
            channel_name=channel_name, amqp_key=amqp_key)
        self.__inner_service = ChannelInnerService(amqp_target,
                                                   channel_queue_name,
                                                   conf.AMQP_USERNAME,
                                                   conf.AMQP_PASSWORD,
                                                   channel_service=self)

        logging.info(
            f"ChannelService : {channel_name}, Queue : {channel_queue_name}")

        ChannelProperty().name = channel_name
        ChannelProperty().amqp_target = amqp_target

        StubCollection().amqp_key = amqp_key
        StubCollection().amqp_target = amqp_target

        command_arguments.add_raw_command(command_arguments.Type.Channel,
                                          channel_name)
        command_arguments.add_raw_command(command_arguments.Type.AMQPTarget,
                                          amqp_target)
        command_arguments.add_raw_command(command_arguments.Type.AMQPKey,
                                          amqp_key)

        ObjectManager().channel_service = self
        self.__state_machine = ChannelStateMachine(self)

    @property
    def block_manager(self):
        return self.__block_manager

    @property
    def score_container(self):
        return self.__score_container

    @property
    def score_info(self):
        return self.__score_info

    @property
    def radio_station_stub(self):
        return self.__radio_station_stub

    @property
    def peer_auth(self):
        return self.__peer_auth

    @property
    def peer_manager(self):
        return self.__peer_manager

    @property
    def broadcast_scheduler(self):
        return self.__broadcast_scheduler

    @property
    def consensus(self):
        return self.__consensus

    @property
    def acceptor(self):
        return self.__acceptor

    @property
    def timer_service(self):
        return self.__timer_service

    @property
    def state_machine(self):
        return self.__state_machine

    @property
    def inner_service(self):
        return self.__inner_service

    def serve(self):
        async def _serve():
            await StubCollection().create_peer_stub()
            results = await StubCollection().peer_stub.async_task() \
                .get_channel_info_detail(ChannelProperty().name)

            await self.init(*results)

            self.__timer_service.start()
            self.__state_machine.complete_init_components()
            logging.info(
                f'channel_service: init complete channel: {ChannelProperty().name}, '
                f'state({self.__state_machine.state})')

        loop = MessageQueueService.loop
        loop.create_task(_serve())
        loop.add_signal_handler(signal.SIGINT, self.close)
        loop.add_signal_handler(signal.SIGTERM, self.close)

        try:
            loop.run_forever()
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()

            self.cleanup()

    def close(self):
        MessageQueueService.loop.stop()

    def cleanup(self):
        logging.info("Cleanup Channel Resources.")

        if self.__block_manager:
            self.__block_manager.stop()
            self.__block_manager = None
            logging.info("Cleanup BlockManager.")

        if self.__score_container:
            self.__score_container.stop()
            self.__score_container.wait()
            self.__score_container = None
            logging.info("Cleanup ScoreContainer.")

        if self.__broadcast_scheduler:
            self.__broadcast_scheduler.stop()
            self.__broadcast_scheduler.wait()
            self.__broadcast_scheduler = None
            logging.info("Cleanup BroadcastScheduler.")

        if self.__consensus:
            self.__consensus.stop()
            self.__consensus.wait()
            logging.info("Cleanup Consensus.")

        if self.__timer_service.is_run():
            self.__timer_service.stop()
            self.__timer_service.wait()
            logging.info("Cleanup TimerService.")

    async def init(self, peer_port, peer_target, rest_target,
                   radio_station_target, peer_id, group_id, node_type,
                   score_package):
        loggers.get_preset().peer_id = peer_id
        loggers.get_preset().update_logger()

        ChannelProperty().peer_port = peer_port
        ChannelProperty().peer_target = peer_target
        ChannelProperty().rest_target = rest_target
        ChannelProperty().radio_station_target = radio_station_target
        ChannelProperty().peer_id = peer_id
        ChannelProperty().group_id = group_id
        ChannelProperty().node_type = conf.NodeType(node_type)
        ChannelProperty().score_package = score_package

        self.__peer_manager = PeerManager(ChannelProperty().name)
        self.__init_peer_auth()
        self.__init_broadcast_scheduler()
        self.__init_block_manager()
        self.__init_radio_station_stub()

        await self.__init_score_container()
        await self.__inner_service.connect(conf.AMQP_CONNECTION_ATTEMPS,
                                           conf.AMQP_RETRY_DELAY,
                                           exclusive=True)

        # if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
        #     util.logger.spam(f"init consensus !")
        #     # load consensus
        #     self.__init_consensus()
        #     # load proposer
        #     self.__init_proposer(peer_id=peer_id)
        #     # load acceptor
        #     self.__init_acceptor(peer_id=peer_id)

        if self.is_support_node_function(conf.NodeFunction.Vote):
            if conf.ENABLE_REP_RADIO_STATION:
                self.connect_to_radio_station()
            else:
                await self.__load_peers_from_file()
                # subscribe to other peers
                self.__subscribe_to_peer_list()
                # broadcast AnnounceNewPeer to other peers
                # If allow broadcast AnnounceNewPeer here, complained peer can be leader again.
        else:
            self.__init_node_subscriber()

        self.block_manager.init_epoch()

    async def evaluate_network(self):
        await self.set_peer_type_in_channel()
        if self.block_manager.peer_type == loopchain_pb2.BLOCK_GENERATOR:
            self.__state_machine.subscribe_network()
        else:
            self.__state_machine.block_sync()

    async def subscribe_network(self):
        # Subscribe to radiostation and block_sync_target_stub
        if self.is_support_node_function(conf.NodeFunction.Vote):
            if conf.ENABLE_REP_RADIO_STATION:
                await self.subscribe_to_radio_station()

            if self.block_manager.peer_type == loopchain_pb2.PEER:
                await self.__subscribe_call_to_stub(
                    peer_stub=self.block_manager.subscribe_target_peer_stub,
                    peer_type=loopchain_pb2.PEER)
        else:
            await self.subscribe_to_radio_station()

        self.generate_genesis_block()

        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            if not self.__consensus.is_run():
                self.__consensus.change_epoch(
                    precommit_block=self.__block_manager.get_blockchain(
                    ).last_block)
                self.__consensus.start()
        elif conf.ALLOW_MAKE_EMPTY_BLOCK:
            if not self.block_manager.block_generation_scheduler.is_run():
                self.block_manager.block_generation_scheduler.start()

        self.__state_machine.complete_sync()

    def __init_peer_auth(self):
        try:
            self.__peer_auth = Signer.from_channel(ChannelProperty().name)
        except Exception as e:
            logging.exception(f"peer auth init fail cause : {e}")
            util.exit_and_msg(f"peer auth init fail cause : {e}")

    def __init_block_manager(self):
        logging.debug(
            f"__load_block_manager_each channel({ChannelProperty().name})")
        try:
            self.__block_manager = BlockManager(
                name="loopchain.peer.BlockManager",
                channel_manager=self,
                peer_id=ChannelProperty().peer_id,
                channel_name=ChannelProperty().name,
                level_db_identity=ChannelProperty().peer_target)
        except leveldb.LevelDBError as e:
            util.exit_and_msg("LevelDBError(" + str(e) + ")")

    # def __init_consensus(self):
    #     consensus = Consensus(self, ChannelProperty().name)
    #     self.__consensus = consensus
    #     self.__block_manager.consensus = consensus
    #     consensus.register_subscriber(self.__block_manager)
    #
    # def __init_proposer(self, peer_id: str):
    #     proposer = Proposer(
    #         name="loopchain.consensus.Proposer",
    #         peer_id=peer_id,
    #         channel=ChannelProperty().name,
    #         channel_service=self
    #     )
    #     self.__consensus.register_subscriber(proposer)
    #     self.__proposer = proposer
    #
    # def __init_acceptor(self, peer_id: str):
    #     acceptor = Acceptor(
    #         name="loopchain.consensus.Acceptor",
    #         consensus=self.__consensus,
    #         peer_id=peer_id,
    #         channel=ChannelProperty().name,
    #         channel_service=self
    #     )
    #     self.__consensus.register_subscriber(acceptor)
    #     self.__acceptor = acceptor

    def __init_broadcast_scheduler(self):
        scheduler = BroadcastScheduler(
            channel=ChannelProperty().name,
            self_target=ChannelProperty().peer_target)
        scheduler.start()

        self.__broadcast_scheduler = scheduler

        future = scheduler.schedule_job(BroadcastCommand.SUBSCRIBE,
                                        ChannelProperty().peer_target)
        future.result(conf.TIMEOUT_FOR_FUTURE)

    def __init_radio_station_stub(self):
        if self.is_support_node_function(conf.NodeFunction.Vote):
            if conf.ENABLE_REP_RADIO_STATION:
                self.__radio_station_stub = StubManager.get_stub_manager_to_server(
                    ChannelProperty().radio_station_target,
                    loopchain_pb2_grpc.RadioStationStub,
                    conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
                    ssl_auth_type=conf.GRPC_SSL_TYPE)
        else:
            self.__radio_station_stub = RestStubManager(
                ChannelProperty().radio_station_target,
                ChannelProperty().name)

    async def __init_score_container(self):
        """create score container and save score_info and score_stub
        """
        for i in range(conf.SCORE_LOAD_RETRY_TIMES):
            try:
                self.__score_info = await self.__run_score_container()
            except BaseException as e:
                util.logger.spam(
                    f"channel_manager:load_score_container_each score_info load fail retry({i})"
                )
                logging.error(e)
                traceback.print_exc()
                time.sleep(conf.SCORE_LOAD_RETRY_INTERVAL)  # Blocking the main thread here is intentional.

            else:
                break

    def __init_node_subscriber(self):
        self.__node_subscriber = NodeSubscriber(
            channel=ChannelProperty().name,
            rs_target=ChannelProperty().radio_station_target)

    async def __run_score_container(self):
        if not conf.USE_EXTERNAL_SCORE or conf.EXTERNAL_SCORE_RUN_IN_LAUNCHER:
            process_args = [
                'python3', '-m', 'loopchain', 'score', '--channel',
                ChannelProperty().name, '--score_package',
                ChannelProperty().score_package
            ]
            process_args += command_arguments.get_raw_commands_by_filter(
                command_arguments.Type.AMQPTarget,
                command_arguments.Type.AMQPKey, command_arguments.Type.Develop,
                command_arguments.Type.ConfigurationFilePath,
                command_arguments.Type.RadioStationTarget)
            self.__score_container = CommonSubprocess(process_args)

        await StubCollection().create_icon_score_stub(ChannelProperty().name)
        icon_score_stub = StubCollection().icon_score_stubs[ChannelProperty().name]
        await icon_score_stub.connect()
        await icon_score_stub.async_task().hello()
        return None

    async def __load_score(self):
        channel_name = ChannelProperty().name
        score_package_name = ChannelProperty().score_package

        util.logger.spam(f"peer_service:__load_score --init--")
        logging.info("LOAD SCORE AND CONNECT TO SCORE SERVICE!")

        params = dict()
        params[message_code.MetaParams.ScoreLoad.repository_path] = conf.DEFAULT_SCORE_REPOSITORY_PATH
        params[message_code.MetaParams.ScoreLoad.score_package] = score_package_name
        params[message_code.MetaParams.ScoreLoad.base] = conf.DEFAULT_SCORE_BASE
        params[message_code.MetaParams.ScoreLoad.peer_id] = ChannelProperty().peer_id
        meta = json.dumps(params)
        logging.debug(f"load score params : {meta}")

        util.logger.spam(f"peer_service:__load_score --1--")
        score_stub = StubCollection().score_stubs[channel_name]
        response = await score_stub.async_task().score_load(meta)

        logging.debug("try score load on score service: " + str(response))
        if not response:
            return None

        if response.code != message_code.Response.success:
            util.exit_and_msg("Failed to get Score from Score Server...")
            return None

        logging.debug("Get Score from Score Server...")
        score_info = json.loads(response.meta)

        logging.info("LOAD SCORE DONE!")
        util.logger.spam(f"peer_service:__load_score --end--")

        return score_info

    async def __load_peers_from_file(self):
        channel_info = await StubCollection().peer_stub.async_task().get_channel_infos()
        for peer_info in channel_info[ChannelProperty().name]["peers"]:
            self.__peer_manager.add_peer(peer_info)
            self.__broadcast_scheduler.schedule_job(BroadcastCommand.SUBSCRIBE,
                                                    peer_info["peer_target"])
        self.show_peers()

    def is_support_node_function(self, node_function):
        return conf.NodeType.is_support_node_function(
            node_function,
            ChannelProperty().node_type)

    def get_channel_option(self) -> dict:
        channel_option = conf.CHANNEL_OPTION
        return channel_option[ChannelProperty().name]

    def generate_genesis_block(self):
        if self.block_manager.peer_type != loopchain_pb2.BLOCK_GENERATOR:
            return

        block_chain = self.block_manager.get_blockchain()
        if block_chain.block_height > -1:
            logging.debug("genesis block was already generated")
            return

        block_chain.generate_genesis_block()

    def connect_to_radio_station(self, is_reconnect=False):
        response = self.__radio_station_stub.call_in_times(
            method_name="ConnectPeer",
            message=loopchain_pb2.ConnectPeerRequest(
                channel=ChannelProperty().name,
                peer_object=b'',
                peer_id=ChannelProperty().peer_id,
                peer_target=ChannelProperty().peer_target,
                group_id=ChannelProperty().group_id),
            retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS,
            is_stub_reuse=True,
            timeout=conf.CONNECTION_TIMEOUT_TO_RS)

        # start next ConnectPeer timer
        self.__timer_service.add_timer_convenient(
            timer_key=TimerService.TIMER_KEY_CONNECT_PEER,
            duration=conf.CONNECTION_RETRY_TIMER,
            callback=self.connect_to_radio_station,
            callback_kwargs={"is_reconnect": True})

        if is_reconnect:
            return

        if response and response.status == message_code.Response.success:
            peer_list_data = pickle.loads(response.peer_list)
            self.__peer_manager.load(peer_list_data, False)
            peers, peer_list = self.__peer_manager.get_peers_for_debug()
            logging.debug("peer list update: " + peers)

            # add connected peers to the process's audience
            for each_peer in peer_list:
                util.logger.spam(
                    f"peer_service:connect_to_radio_station peer({each_peer.target}-{each_peer.status})"
                )
                if each_peer.status == PeerStatus.connected:
                    self.__broadcast_scheduler.schedule_job(
                        BroadcastCommand.SUBSCRIBE, each_peer.target)

    def __subscribe_to_peer_list(self):
        peer_object = self.peer_manager.get_peer(ChannelProperty().peer_id)
        peer_request = loopchain_pb2.PeerRequest(
            channel=ChannelProperty().name,
            peer_target=ChannelProperty().peer_target,
            peer_id=ChannelProperty().peer_id,
            group_id=ChannelProperty().group_id,
            node_type=ChannelProperty().node_type,
            peer_order=peer_object.order)
        self.__broadcast_scheduler.schedule_broadcast("Subscribe",
                                                      peer_request)

    async def subscribe_to_radio_station(self):
        await self.__subscribe_call_to_stub(self.__radio_station_stub,
                                            loopchain_pb2.PEER)

    async def subscribe_to_peer(self, peer_id, peer_type):
        peer = self.peer_manager.get_peer(peer_id)
        peer_stub = self.peer_manager.get_peer_stub_manager(peer)

        await self.__subscribe_call_to_stub(peer_stub, peer_type)
        self.__broadcast_scheduler.schedule_job(BroadcastCommand.SUBSCRIBE,
                                                peer_stub.target)

    async def __subscribe_call_to_stub(self, peer_stub, peer_type):
        if self.is_support_node_function(conf.NodeFunction.Vote):
            await peer_stub.call_async(
                "Subscribe",
                loopchain_pb2.PeerRequest(
                    channel=ChannelProperty().name,
                    peer_target=ChannelProperty().peer_target,
                    peer_type=peer_type,
                    peer_id=ChannelProperty().peer_id,
                    group_id=ChannelProperty().group_id,
                    node_type=ChannelProperty().node_type),
            )
        else:
            await self.__subscribe_call_from_citizen()

    async def __subscribe_call_from_citizen(self):
        def _handle_exception(future: asyncio.Future):
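            # NotImplementedError presumably means no websocket endpoint is available,
            # so fall back to the REST subscribe below; a ConnectionError schedules
            # another SubscribeNetwork round via the state machine.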
            logging.debug(
                f"error: {type(future.exception())}, {str(future.exception())}"
            )
            if isinstance(future.exception(), NotImplementedError):
                asyncio.ensure_future(
                    self.__subscribe_call_by_rest_stub(subscribe_event))

            elif isinstance(future.exception(), ConnectionError):
                logging.warning(f"Waiting for next subscribe request...")
                if self.__state_machine.state != "SubscribeNetwork":
                    self.__state_machine.subscribe_network()

        subscribe_event = asyncio.Event()
        util.logger.spam(
            f"try subscribe_call_by_citizen target({ChannelProperty().rest_target})"
        )

        # try websocket connection, and handle exception in callback
        asyncio.ensure_future(
            self.__node_subscriber.subscribe(
                block_height=self.block_manager.get_blockchain().block_height,
                event=subscribe_event)).add_done_callback(_handle_exception)
        await subscribe_event.wait()

    async def __subscribe_call_by_rest_stub(self, event):
        if conf.REST_SSL_TYPE == conf.SSLAuthType.none:
            peer_target = ChannelProperty().rest_target
        else:
            peer_target = f"https://{ChannelProperty().rest_target}"

        response = None
        try:
            response = await self.__radio_station_stub.call_async(
                "Subscribe", {
                    'channel': ChannelProperty().name,
                    'peer_target': peer_target
                })

        except Exception as e:
            logging.warning(f"Subscribe call to RadioStation(mother peer) failed; "
                            f"retrying automatically. ({e})")

        if response and response['response_code'] == message_code.Response.success:
            logging.debug("Subscription to RadioStation(mother peer) is successful.")
            event.set()
            self.start_check_last_block_rs_timer()

    def __check_last_block_to_rs(self):
        last_block = self.__radio_station_stub.call_async("GetLastBlock")
        if last_block['height'] <= self.__block_manager.get_blockchain().block_height:
            return

        # The RS peer didn't announce its new block; resubscribe via the state machine.
        self.stop_check_last_block_rs_timer()
        if self.__state_machine.state != "SubscribeNetwork":
            self.__state_machine.subscribe_network()

    def shutdown_peer(self, **kwargs):
        logging.debug(f"channel_service:shutdown_peer")
        StubCollection().peer_stub.sync_task().stop(message=kwargs['message'])

    def set_peer_type(self, peer_type):
        """Set peer type when peer init only

        :param peer_type:
        :return:
        """
        self.__block_manager.set_peer_type(peer_type)

    def save_peer_manager(self, peer_manager):
        """peer_list 를 leveldb 에 저장한다.

        :param peer_manager:
        """
        level_db_key_name = str.encode(conf.LEVEL_DB_KEY_FOR_PEER_LIST)

        try:
            dump = peer_manager.dump()
            level_db = self.__block_manager.get_level_db()
            level_db.Put(level_db_key_name, dump)
        except AttributeError as e:
            logging.warning("Fail Save Peer_list: " + str(e))

    async def set_peer_type_in_channel(self):
        peer_type = loopchain_pb2.PEER
        peer_leader = self.peer_manager.get_leader_peer(
            is_complain_to_rs=self.is_support_node_function(
                conf.NodeFunction.Vote))
        logging.debug(f"channel({ChannelProperty().name}) peer_leader: " +
                      str(peer_leader))

        logger_preset = loggers.get_preset()
        if self.is_support_node_function(conf.NodeFunction.Vote) \
                and ChannelProperty().peer_id == peer_leader.peer_id:
            logger_preset.is_leader = True
            logging.debug(
                f"Set Peer Type Leader! channel({ChannelProperty().name})")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
        else:
            logger_preset.is_leader = False
        logger_preset.update_logger()

        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            self.consensus.leader_id = peer_leader.peer_id

        if peer_type == loopchain_pb2.BLOCK_GENERATOR:
            self.block_manager.set_peer_type(peer_type)
            self.__ready_to_height_sync(True)
        elif peer_type == loopchain_pb2.PEER:
            self.__ready_to_height_sync(False)

    def __ready_to_height_sync(self, is_leader: bool = False):
        block_chain = self.block_manager.get_blockchain()

        block_chain.init_block_chain(is_leader)
        if block_chain.block_height > -1:
            self.block_manager.rebuild_block()

    async def block_height_sync_channel(self):
        # If this peer did not start as the leader but its own info is the leader
        # peer's info, run block height sync to find the leader of the latest block.
        peer_manager = self.peer_manager
        peer_leader = peer_manager.get_leader_peer()
        self_peer_object = peer_manager.get_peer(ChannelProperty().peer_id)
        is_delay_announce_new_leader = False
        peer_old_leader = None

        if peer_leader:
            block_sync_target = peer_leader.target
            block_sync_target_stub = StubManager.get_stub_manager_to_server(
                block_sync_target,
                loopchain_pb2_grpc.PeerServiceStub,
                time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
                ssl_auth_type=conf.GRPC_SSL_TYPE)
        else:
            block_sync_target = ChannelProperty().radio_station_target
            block_sync_target_stub = self.__radio_station_stub

        if block_sync_target != ChannelProperty().peer_target:
            if block_sync_target_stub is None:
                logging.warning(
                    "This peer may be out of date, or there is no leader in this network!"
                )

                is_delay_announce_new_leader = True
                peer_old_leader = peer_leader
                peer_leader = self.peer_manager.leader_complain_to_rs(
                    conf.ALL_GROUP_ID, is_announce_new_peer=False)

                if peer_leader is not None and \
                        ChannelProperty().node_type == conf.NodeType.CommunityNode:
                    block_sync_target_stub = StubManager.get_stub_manager_to_server(
                        peer_leader.target,
                        loopchain_pb2_grpc.PeerServiceStub,
                        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
                        ssl_auth_type=conf.GRPC_SSL_TYPE)

            if self.is_support_node_function(conf.NodeFunction.Vote) and \
                    (not peer_leader or peer_leader.peer_id == ChannelProperty().peer_id):
                peer_leader = self_peer_object
                self.block_manager.set_peer_type(loopchain_pb2.BLOCK_GENERATOR)
            else:
                _, future = self.block_manager.block_height_sync(
                    block_sync_target_stub)
                await future

                self.show_peers()

            if is_delay_announce_new_leader and \
                    ChannelProperty().node_type == conf.NodeType.CommunityNode:
                self.peer_manager.announce_new_leader(
                    peer_old_leader.peer_id,
                    peer_leader.peer_id,
                    self_peer_id=ChannelProperty().peer_id)

    def show_peers(self):
        logging.debug(f"peer_service:show_peers ({ChannelProperty().name}): ")
        for peer in self.peer_manager.get_IP_of_peers_in_group():
            logging.debug("peer_target: " + peer)

    async def reset_leader(self, new_leader_id, block_height=0):
        logging.info(
            f"RESET LEADER channel({ChannelProperty().name}) leader_id({new_leader_id})"
        )
        leader_peer = self.peer_manager.get_peer(new_leader_id, None)

        if block_height > 0 and \
                block_height != self.block_manager.get_blockchain().last_block.header.height + 1:
            util.logger.warning(
                f"A peer behind in height cannot take the leader role. block_height({block_height}), "
                f"last_block.header.height("
                f"{self.block_manager.get_blockchain().last_block.header.height})"
            )
            return

        if leader_peer is None:
            logging.warning(
                f"in peer_service:reset_leader There is no peer by peer_id({new_leader_id})"
            )
            return

        util.logger.spam(
            f"peer_service:reset_leader target({leader_peer.target})")

        self_peer_object = self.peer_manager.get_peer(
            ChannelProperty().peer_id)
        self.peer_manager.set_leader_peer(leader_peer, None)

        peer_leader = self.peer_manager.get_leader_peer()
        peer_type = loopchain_pb2.PEER

        if self_peer_object.target == peer_leader.target:
            logging.debug("Set Peer Type Leader!")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
            self.state_machine.turn_to_leader()

            if conf.CONSENSUS_ALGORITHM != conf.ConsensusAlgorithm.lft:
                if conf.ENABLE_REP_RADIO_STATION:
                    self.peer_manager.announce_new_leader(
                        self.peer_manager.get_leader_peer().peer_id,
                        new_leader_id,
                        is_broadcast=True,
                        self_peer_id=ChannelProperty().peer_id)
        else:
            logging.debug("Set Peer Type Peer!")
            self.state_machine.turn_to_peer()

            # subscribe to the new leader
            # await self.subscribe_to_radio_station()
            await self.subscribe_to_peer(peer_leader.peer_id,
                                         loopchain_pb2.BLOCK_GENERATOR)

        self.block_manager.set_peer_type(peer_type)
        self.block_manager.epoch.set_epoch_leader(peer_leader.peer_id)

    def set_new_leader(self, new_leader_id, block_height=0):
        logging.info(
            f"SET NEW LEADER channel({ChannelProperty().name}) leader_id({new_leader_id})"
        )

        # complained_leader = self.peer_manager.get_leader_peer()
        leader_peer = self.peer_manager.get_peer(new_leader_id, None)

        if block_height > 0 and \
                block_height != self.block_manager.get_blockchain().last_block.height + 1:
            logging.warning("A peer behind in height cannot take the leader role.")
            return

        if leader_peer is None:
            logging.warning(
                f"in channel_service:set_new_leader::There is no peer by peer_id({new_leader_id})"
            )
            return

        util.logger.spam(
            f"channel_service:set_new_leader::leader_target({leader_peer.target})"
        )

        self_peer_object = self.peer_manager.get_peer(
            ChannelProperty().peer_id)
        self.peer_manager.set_leader_peer(leader_peer, None)

        peer_leader = self.peer_manager.get_leader_peer()

        if self_peer_object.target == peer_leader.target:
            loggers.get_preset().is_leader = True
            loggers.get_preset().update_logger()

            logging.debug("I'm Leader Peer!")
        else:
            loggers.get_preset().is_leader = False
            loggers.get_preset().update_logger()

            logging.debug("I'm general Peer!")
            # subscribe to the new leader
            # await self.subscribe_to_radio_station()
            # await self.subscribe_to_peer(peer_leader.peer_id, loopchain_pb2.BLOCK_GENERATOR)

    def genesis_invoke(self, block: Block) -> ('Block', dict):
        method = "icx_sendTransaction"
        transactions = []
        for tx in block.body.transactions.values():
            tx_serializer = TransactionSerializer.new(
                tx.version,
                self.block_manager.get_blockchain().tx_versioner)
            transaction = {
                "method": method,
                "params": {
                    "txHash": tx.hash.hex()
                },
                "genesisData": tx_serializer.to_full_data(tx)
            }
            transactions.append(transaction)

        request = {
            'block': {
                'blockHeight': block.header.height,
                'blockHash': block.header.hash.hex(),
                'timestamp': block.header.timestamp
            },
            'transactions': transactions
        }
        request = convert_params(request, ParamType.invoke)
        stub = StubCollection().icon_score_stubs[ChannelProperty().name]
        response = stub.sync_task().invoke(request)
        response_to_json_query(response)
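        # The invoke response carries the SCORE state root hash; pinning it into
        # the rebuilt block's commit_state ties the block to the post-invoke state.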

        block_builder = BlockBuilder.from_new(
            block,
            self.block_manager.get_blockchain().tx_versioner)
        block_builder.commit_state = {
            ChannelProperty().name: response['stateRootHash']
        }
        new_block = block_builder.build()
        return new_block, response["txResults"]

    def score_invoke(self, _block: Block) -> ('Block', dict):
        method = "icx_sendTransaction"
        transactions = []
        for tx in _block.body.transactions.values():
            tx_serializer = TransactionSerializer.new(
                tx.version,
                self.block_manager.get_blockchain().tx_versioner)

            transaction = {
                "method": method,
                "params": tx_serializer.to_full_data(tx)
            }
            transactions.append(transaction)

        request = {
            'block': {
                'blockHeight': _block.header.height,
                'blockHash': _block.header.hash.hex(),
                'prevBlockHash': _block.header.prev_hash.hex() if _block.header.prev_hash else '',
                'timestamp': _block.header.timestamp
            },
            'transactions': transactions
        }
        request = convert_params(request, ParamType.invoke)
        stub = StubCollection().icon_score_stubs[ChannelProperty().name]
        response = stub.sync_task().invoke(request)
        response_to_json_query(response)

        block_builder = BlockBuilder.from_new(
            _block,
            self.__block_manager.get_blockchain().tx_versioner)
        block_builder.commit_state = {
            ChannelProperty().name: response['stateRootHash']
        }
        new_block = block_builder.build()

        return new_block, response["txResults"]

    def score_change_block_hash(self, block_height, old_block_hash,
                                new_block_hash):
        change_hash_info = json.dumps({
            "block_height": block_height,
            "old_block_hash": old_block_hash,
            "new_block_hash": new_block_hash
        })

        stub = StubCollection().score_stubs[ChannelProperty().name]
        stub.sync_task().change_block_hash(change_hash_info)

    def score_write_precommit_state(self, block: Block):
        logging.debug(
            f"call score commit {ChannelProperty().name} {block.header.height} {block.header.hash.hex()}"
        )

        request = {
            "blockHeight": block.header.height,
            "blockHash": block.header.hash.hex(),
        }
        request = convert_params(request, ParamType.write_precommit_state)

        stub = StubCollection().icon_score_stubs[ChannelProperty().name]
        stub.sync_task().write_precommit_state(request)
        return True

    def score_remove_precommit_state(self, block: Block):
        invoke_fail_info = json.dumps({
            "block_height": block.height,
            "block_hash": block.block_hash
        })
        stub = StubCollection().score_stubs[ChannelProperty().name]
        stub.sync_task().remove_precommit_state(invoke_fail_info)
        return True

    def get_object_has_queue_by_consensus(self):
        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            object_has_queue = self.__consensus
        else:
            object_has_queue = self.__block_manager

        self.start_leader_complain_timer()

        return object_has_queue

    def start_leader_complain_timer(self):
        # util.logger.debug(f"start_leader_complain_timer in channel service.")
        self.__timer_service.add_timer_convenient(
            timer_key=TimerService.TIMER_KEY_LEADER_COMPLAIN,
            duration=conf.TIMEOUT_FOR_LEADER_COMPLAIN,
            is_repeat=True,
            callback=self.state_machine.leader_complain)

    def stop_leader_complain_timer(self):
        # util.logger.debug(f"stop_leader_complain_timer in channel service.")
        self.__timer_service.stop_timer(TimerService.TIMER_KEY_LEADER_COMPLAIN)

    def start_subscribe_timer(self):
        self.__timer_service.add_timer_convenient(
            timer_key=TimerService.TIMER_KEY_SUBSCRIBE,
            duration=conf.SUBSCRIBE_RETRY_TIMER,
            is_repeat=True,
            callback=self.subscribe_network)

    def stop_subscribe_timer(self):
        self.__timer_service.stop_timer(TimerService.TIMER_KEY_SUBSCRIBE)

    def start_check_last_block_rs_timer(self):
        self.__timer_service.add_timer_convenient(
            timer_key=TimerService.TIMER_KEY_GET_LAST_BLOCK_KEEP_CITIZEN_SUBSCRIPTION,
            duration=conf.GET_LAST_BLOCK_TIMER,
            is_repeat=True,
            callback=self.__check_last_block_to_rs)

    def stop_check_last_block_rs_timer(self):
        self.__timer_service.stop_timer(
            TimerService.TIMER_KEY_GET_LAST_BLOCK_KEEP_CITIZEN_SUBSCRIPTION)

    def start_shutdown_timer(self):
        error = f"Shutdown by Subscribe retry timeout({conf.SHUTDOWN_TIMER} sec)"
        self.__timer_service.add_timer_convenient(
            timer_key=TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE,
            duration=conf.SHUTDOWN_TIMER,
            callback=self.shutdown_peer,
            callback_kwargs={"message": error})

    def stop_shutdown_timer(self):
        self.__timer_service.stop_timer(
            TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE)
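
# A minimal sketch of the add_timer_convenient / stop_timer pattern the timer
# helpers above share; the timer key is real, the lambda callback is an
# illustrative assumption.
def _timer_usage_sketch(timer_service: TimerService):
    timer_service.add_timer_convenient(
        timer_key=TimerService.TIMER_KEY_SUBSCRIBE,
        duration=conf.SUBSCRIBE_RETRY_TIMER,
        is_repeat=True,
        callback=lambda **kwargs: logging.debug("retry subscribe"))
    # ... later, once subscribed successfully:
    timer_service.stop_timer(TimerService.TIMER_KEY_SUBSCRIBE)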
Exemplo n.º 7
0
class ChannelService:
    def __init__(self, channel_name, amqp_target, amqp_key):
        self.__block_manager: BlockManager = None
        self.__score_container: CommonSubprocess = None
        self.__score_info: dict = None
        self.__peer_auth: Signer = None
        self.__peer_manager: PeerManager = None
        self.__broadcast_scheduler: BroadcastScheduler = None
        self.__radio_station_stub = None
        self.__consensus = None
        self.__timer_service = TimerService()
        self.__node_subscriber: NodeSubscriber = None
        self.__channel_infos: dict = None

        loggers.get_preset().channel_name = channel_name
        loggers.get_preset().update_logger()

        channel_queue_name = conf.CHANNEL_QUEUE_NAME_FORMAT.format(
            channel_name=channel_name, amqp_key=amqp_key)
        self.__inner_service = ChannelInnerService(amqp_target,
                                                   channel_queue_name,
                                                   conf.AMQP_USERNAME,
                                                   conf.AMQP_PASSWORD,
                                                   channel_service=self)

        logging.info(
            f"ChannelService : {channel_name}, Queue : {channel_queue_name}")

        ChannelProperty().name = channel_name
        ChannelProperty().amqp_target = amqp_target

        StubCollection().amqp_key = amqp_key
        StubCollection().amqp_target = amqp_target

        command_arguments.add_raw_command(command_arguments.Type.Channel,
                                          channel_name)
        command_arguments.add_raw_command(command_arguments.Type.AMQPTarget,
                                          amqp_target)
        command_arguments.add_raw_command(command_arguments.Type.AMQPKey,
                                          amqp_key)

        ObjectManager().channel_service = self
        self.__state_machine = ChannelStateMachine(self)

    @property
    def block_manager(self):
        return self.__block_manager

    @property
    def score_container(self):
        return self.__score_container

    @property
    def score_info(self):
        return self.__score_info

    @property
    def radio_station_stub(self):
        return self.__radio_station_stub

    @property
    def peer_auth(self):
        return self.__peer_auth

    @property
    def peer_manager(self):
        return self.__peer_manager

    @property
    def broadcast_scheduler(self):
        return self.__broadcast_scheduler

    @property
    def consensus(self):
        return self.__consensus

    @property
    def timer_service(self):
        return self.__timer_service

    @property
    def state_machine(self):
        return self.__state_machine

    @property
    def inner_service(self):
        return self.__inner_service

    def serve(self):
        async def _serve():
            await StubCollection().create_peer_stub()

            channel_name = ChannelProperty().name
            self.__channel_infos = (await StubCollection(
            ).peer_stub.async_task().get_channel_infos())[channel_name]
            results = await StubCollection().peer_stub.async_task(
            ).get_channel_info_detail(channel_name)

            await self.init(*results)

            self.__timer_service.start()
            self.__state_machine.complete_init_components()
            logging.info(
                f'channel_service: init complete channel: {ChannelProperty().name}, '
                f'state({self.__state_machine.state})')

        loop = MessageQueueService.loop
        # loop.set_debug(True)
        loop.create_task(_serve())
        loop.add_signal_handler(signal.SIGINT, self.close)
        loop.add_signal_handler(signal.SIGTERM, self.close)

        try:
            loop.run_forever()
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()

            self.cleanup()

    def close(self):
        if self.__inner_service:
            self.__inner_service.cleanup()
            logging.info("Cleanup ChannelInnerService.")

        MessageQueueService.loop.stop()

    def cleanup(self):
        logging.info("Cleanup Channel Resources.")

        if self.__block_manager:
            self.__block_manager.stop()
            self.__block_manager = None
            logging.info("Cleanup BlockManager.")

        if self.__score_container:
            self.__score_container.stop()
            self.__score_container.wait()
            self.__score_container = None
            logging.info("Cleanup ScoreContainer.")

        if self.__broadcast_scheduler:
            self.__broadcast_scheduler.stop()
            self.__broadcast_scheduler.wait()
            self.__broadcast_scheduler = None
            logging.info("Cleanup BroadcastScheduler.")

        if self.__consensus:
            self.__consensus.stop()
            self.__consensus.wait()
            logging.info("Cleanup Consensus.")

        if self.__timer_service.is_run():
            self.__timer_service.stop()
            self.__timer_service.wait()
            logging.info("Cleanup TimerService.")

    async def init(self, peer_port, peer_target, rest_target,
                   radio_station_target, peer_id, group_id, node_type,
                   score_package):
        loggers.get_preset().peer_id = peer_id
        loggers.get_preset().update_logger()

        ChannelProperty().peer_port = peer_port
        ChannelProperty().peer_target = peer_target
        ChannelProperty().rest_target = rest_target
        ChannelProperty().radio_station_target = radio_station_target
        ChannelProperty().peer_id = peer_id
        ChannelProperty().group_id = group_id
        ChannelProperty().node_type = conf.NodeType(node_type)
        ChannelProperty().score_package = score_package

        self.__peer_manager = PeerManager(ChannelProperty().name)
        await self.__init_peer_auth()
        self.__init_broadcast_scheduler()
        self.__init_block_manager()

        await self.__init_score_container()
        await self.__inner_service.connect(conf.AMQP_CONNECTION_ATTEMPS,
                                           conf.AMQP_RETRY_DELAY,
                                           exclusive=True)
        await self.__init_sub_services()

        self.block_manager.init_epoch()

    async def __init_network(self):
        self.__inner_service.update_sub_services_properties(
            node_type=ChannelProperty().node_type.value)

        self.__init_radio_station_stub()

        if self.is_support_node_function(conf.NodeFunction.Vote):
            if conf.ENABLE_REP_RADIO_STATION:
                self.connect_to_radio_station()
            else:
                await self.__load_peers_from_file()
        else:
            self.__init_node_subscriber()

    async def evaluate_network(self):
        self.__ready_to_height_sync()

        # Do not consider to change peer list by IISS this time.
        await self.__select_node_type()
        await self.__init_network()

        self.__state_machine.block_sync()

    async def subscribe_network(self):
        if self.is_support_node_function(conf.NodeFunction.Vote):
            await self.set_peer_type_in_channel()
        else:
            await self.subscribe_to_radio_station()

        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            if not self.__consensus.is_run():
                self.__consensus.change_epoch(
                    precommit_block=self.__block_manager.get_blockchain(
                    ).last_block)
                self.__consensus.start()
        elif conf.ALLOW_MAKE_EMPTY_BLOCK:
            if not self.block_manager.block_generation_scheduler.is_run():
                self.block_manager.block_generation_scheduler.start()
        self.__state_machine.complete_subscribe()

        self.start_leader_complain_timer_if_tx_exists()

    def update_sub_services_properties(self):
        nid = self.block_manager.get_blockchain().find_nid()
        self.__inner_service.update_sub_services_properties(nid=int(nid, 16))

    def __get_role_switch_block_height(self):
        # Currently, only a one-way role switch from Citizen to Rep is supported
        if ChannelProperty().node_type != conf.NodeType.CitizenNode:
            return -1
        return self.get_channel_option().get('role_switch_block_height', -1)

    def __get_node_type_by_peer_list(self):
        # FIXME: this is temporary code; get the peer list with the IISS API.
        #        The IISS peer list includes just a peer_id and a URL where a server provides peer details.
        channels = util.load_json_data(conf.CHANNEL_MANAGE_DATA_PATH)
        for peer_info in channels[ChannelProperty().name]["peers"]:
            if peer_info['id'] == ChannelProperty().peer_id:
                return conf.NodeType.CommunityNode
        return conf.NodeType.CitizenNode

    async def __clean_network(self):
        if self.__node_subscriber is not None:
            await self.__node_subscriber.close()
            self.__node_subscriber: NodeSubscriber = None

        self.__timer_service.clean()

        peer_ids = set()
        for peer_list in self.__peer_manager.peer_list.values():
            for peer_id in peer_list.keys():
                peer_ids.add(peer_id)
        for peer_id in peer_ids:
            self.__peer_manager.remove_peer(peer_id)

        self.__radio_station_stub = None

    async def __select_node_type(self):
        # If block height is below zero, this node has not synchronized any blocks yet.
        block_height = self.__block_manager.get_blockchain().block_height
        if block_height < 0:
            util.logger.debug(
                f"Can't select a node type without block height yet. block height={block_height}"
            )
            return

        switch_block_height = self.__get_role_switch_block_height()
        if switch_block_height < 0 or block_height < switch_block_height:
            util.logger.debug(
                f"No need to select a node type. role switch block height={switch_block_height}"
            )
            return

        node_type: conf.NodeType = self.__get_node_type_by_peer_list()
        if node_type == ChannelProperty().node_type:
            util.logger.info(
                f"Node type equals the previous node type ({node_type})."
            )
            return

        util.logger.info(f"Selected node type {node_type}")
        ChannelProperty().node_type = node_type

        await StubCollection().peer_stub.async_task().change_node_type(
            node_type.value)

    def reset_network_by_block_height(self, height):
        if height == self.__get_role_switch_block_height():
            self.__state_machine.switch_role()

    async def reset_network(self):
        util.logger.info("Reset network")
        await self.__clean_network()
        self.__state_machine.evaluate_network()

    async def __init_peer_auth(self):
        try:
            node_key: bytes = await StubCollection().peer_stub.async_task().get_node_key(
                ChannelProperty().name)
            self.__peer_auth = Signer.from_prikey(node_key)
        except KeyError:
            self.__peer_auth = Signer.from_channel(ChannelProperty().name)
        except Exception as e:
            logging.exception(f"peer auth init fail cause : {e}")
            util.exit_and_msg(f"peer auth init fail cause : {e}")

    def __init_block_manager(self):
        logging.debug(
            f"__load_block_manager_each channel({ChannelProperty().name})")
        try:
            self.__block_manager = BlockManager(
                name="loopchain.peer.BlockManager",
                channel_manager=self,
                peer_id=ChannelProperty().peer_id,
                channel_name=ChannelProperty().name,
                level_db_identity=ChannelProperty().peer_target)
        except leveldb.LevelDBError as e:
            util.exit_and_msg("LevelDBError(" + str(e) + ")")

    def __init_broadcast_scheduler(self):
        scheduler = BroadcastSchedulerFactory.new(
            channel=ChannelProperty().name,
            self_target=ChannelProperty().peer_target)
        scheduler.start()

        self.__broadcast_scheduler = scheduler

        scheduler.schedule_job(BroadcastCommand.SUBSCRIBE,
                               ChannelProperty().peer_target,
                               block=True,
                               block_timeout=conf.TIMEOUT_FOR_FUTURE)

    def __init_radio_station_stub(self):
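        # Vote-capable nodes talk to the RS over gRPC (when REP-RS is enabled);
        # citizen nodes fall back to the REST stub.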
        if self.is_support_node_function(conf.NodeFunction.Vote):
            if conf.ENABLE_REP_RADIO_STATION:
                self.__radio_station_stub = StubManager.get_stub_manager_to_server(
                    ChannelProperty().radio_station_target,
                    loopchain_pb2_grpc.RadioStationStub,
                    conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
                    ssl_auth_type=conf.GRPC_SSL_TYPE)
        else:
            self.__radio_station_stub = RestStubManager(
                ChannelProperty().radio_station_target,
                ChannelProperty().name)

    async def __init_score_container(self):
        """create score container and save score_info and score_stub
        """
        for i in range(conf.SCORE_LOAD_RETRY_TIMES):
            try:
                self.__score_info = await self.__run_score_container()
            except BaseException as e:
                util.logger.spam(
                    f"channel_manager:load_score_container_each score_info load fail retry({i})"
                )
                logging.error(e)
                traceback.print_exc()
                time.sleep(conf.SCORE_LOAD_RETRY_INTERVAL)  # Blocking the main thread here is intentional.

            else:
                break

    async def __init_sub_services(self):
        self.__inner_service.init_sub_services()
        await StubCollection().create_channel_tx_creator_stub(
            ChannelProperty().name)
        await StubCollection().create_channel_tx_receiver_stub(
            ChannelProperty().name)

    def __init_node_subscriber(self):
        self.__node_subscriber = NodeSubscriber(
            channel=ChannelProperty().name,
            rs_target=ChannelProperty().radio_station_target)

    async def __run_score_container(self):
        if conf.RUN_ICON_IN_LAUNCHER:
            process_args = [
                'python3', '-m', 'loopchain', 'score', '--channel',
                ChannelProperty().name, '--score_package',
                ChannelProperty().score_package
            ]
            process_args += command_arguments.get_raw_commands_by_filter(
                command_arguments.Type.AMQPTarget,
                command_arguments.Type.AMQPKey, command_arguments.Type.Develop,
                command_arguments.Type.ConfigurationFilePath,
                command_arguments.Type.RadioStationTarget)
            self.__score_container = CommonSubprocess(process_args)

        await StubCollection().create_icon_score_stub(ChannelProperty().name)
        icon_score_stub = StubCollection().icon_score_stubs[ChannelProperty().name]
        await icon_score_stub.connect()
        await icon_score_stub.async_task().hello()
        return None

    async def __load_score(self):
        channel_name = ChannelProperty().name
        score_package_name = ChannelProperty().score_package

        util.logger.spam(f"peer_service:__load_score --init--")
        logging.info("LOAD SCORE AND CONNECT TO SCORE SERVICE!")

        params = {
            message_code.MetaParams.ScoreLoad.repository_path: conf.DEFAULT_SCORE_REPOSITORY_PATH,
            message_code.MetaParams.ScoreLoad.score_package: score_package_name,
            message_code.MetaParams.ScoreLoad.base: conf.DEFAULT_SCORE_BASE,
            message_code.MetaParams.ScoreLoad.peer_id: ChannelProperty().peer_id
        }
        meta = json.dumps(params)
        logging.debug(f"load score params : {meta}")

        util.logger.spam(f"peer_service:__load_score --1--")
        score_stub = StubCollection().score_stubs[channel_name]
        response = await score_stub.async_task().score_load(meta)

        logging.debug("try score load on score service: " + str(response))
        if not response:
            return None

        if response.code != message_code.Response.success:
            util.exit_and_msg("Failed to get score from score server.")
            return None

        logging.debug("Got score from score server.")
        score_info = json.loads(response.meta)

        logging.info("LOAD SCORE DONE!")
        util.logger.spam(f"peer_service:__load_score --end--")

        return score_info

    async def __load_peers_from_file(self):
        channel_info = await StubCollection().peer_stub.async_task().get_channel_infos()
        for peer_info in channel_info[ChannelProperty().name]["peers"]:
            self.__peer_manager.add_peer(peer_info)
            self.__broadcast_scheduler.schedule_job(BroadcastCommand.SUBSCRIBE,
                                                    peer_info["peer_target"])
        self.show_peers()

    def is_support_node_function(self, node_function):
        return conf.NodeType.is_support_node_function(
            node_function,
            ChannelProperty().node_type)

    def get_channel_option(self) -> dict:
        channel_option = conf.CHANNEL_OPTION
        return channel_option[ChannelProperty().name]

    def get_channel_infos(self) -> dict:
        return self.__channel_infos

    def get_rep_ids(self) -> list:
        return [
            ExternalAddress.fromhex_address(peer.get('id'),
                                            allow_malformed=True)
            for peer in self.get_channel_infos()['peers']
        ]

    def generate_genesis_block(self):
        blockchain = self.block_manager.get_blockchain()
        if blockchain.block_height > -1:
            logging.debug("genesis block was already generated")
            return

        reps = self.get_rep_ids()
        blockchain.generate_genesis_block(reps)

    def connect_to_radio_station(self, is_reconnect=False):
        response = self.__radio_station_stub.call_in_times(
            method_name="ConnectPeer",
            message=loopchain_pb2.ConnectPeerRequest(
                channel=ChannelProperty().name,
                peer_object=b'',
                peer_id=ChannelProperty().peer_id,
                peer_target=ChannelProperty().peer_target,
                group_id=ChannelProperty().group_id),
            retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS,
            is_stub_reuse=True,
            timeout=conf.CONNECTION_TIMEOUT_TO_RS)

        # start next ConnectPeer timer
        self.__timer_service.add_timer_convenient(
            timer_key=TimerService.TIMER_KEY_CONNECT_PEER,
            duration=conf.CONNECTION_RETRY_TIMER,
            callback=self.connect_to_radio_station,
            callback_kwargs={"is_reconnect": True})

        if is_reconnect:
            return

        if response and response.status == message_code.Response.success:
            try:
                peer_list_data = PeerListData.load(response.peer_list)
            except Exception as e:
                traceback.print_exc()
                logging.error(
                    f"Invalid peer list. Check your Radio Station. exception={e}"
                )
                return

            self.__peer_manager.set_peer_list(peer_list_data)
            peers, peer_list = self.__peer_manager.get_peers_for_debug()
            logging.debug("peer list update: " + peers)

            # add connected peer to processes audience
            for each_peer in peer_list:
                util.logger.spam(
                    f"peer_service:connect_to_radio_station peer({each_peer.target}-{each_peer.status})"
                )
                if each_peer.status == PeerStatus.connected:
                    self.__broadcast_scheduler.schedule_job(
                        BroadcastCommand.SUBSCRIBE, each_peer.target)

    def __subscribe_to_peer_list(self):
        peer_object = self.peer_manager.get_peer(ChannelProperty().peer_id)
        peer_request = loopchain_pb2.PeerRequest(
            channel=ChannelProperty().name,
            peer_target=ChannelProperty().peer_target,
            peer_id=ChannelProperty().peer_id,
            group_id=ChannelProperty().group_id,
            node_type=ChannelProperty().node_type,
            peer_order=peer_object.order)
        self.__broadcast_scheduler.schedule_broadcast("Subscribe",
                                                      peer_request)

    async def subscribe_to_radio_station(self):
        await self.__subscribe_call_to_stub(self.__radio_station_stub,
                                            loopchain_pb2.PEER)

    async def subscribe_to_peer(self, peer_id, peer_type):
        peer = self.peer_manager.get_peer(peer_id)
        peer_stub = self.peer_manager.get_peer_stub_manager(peer)

        await self.__subscribe_call_to_stub(peer_stub, peer_type)
        self.__broadcast_scheduler.schedule_job(BroadcastCommand.SUBSCRIBE,
                                                peer_stub.target)

    async def __subscribe_call_to_stub(self, peer_stub, peer_type):
        if self.is_support_node_function(conf.NodeFunction.Vote):
            await peer_stub.call_async(
                "Subscribe",
                loopchain_pb2.PeerRequest(
                    channel=ChannelProperty().name,
                    peer_target=ChannelProperty().peer_target,
                    peer_type=peer_type,
                    peer_id=ChannelProperty().peer_id,
                    group_id=ChannelProperty().group_id,
                    node_type=ChannelProperty().node_type),
            )
        else:
            await self.__subscribe_call_from_citizen()

    async def __subscribe_call_from_citizen(self):
        def _handle_exception(future: asyncio.Future):
            logging.debug(
                f"error: {type(future.exception())}, {str(future.exception())}"
            )

            if ChannelProperty().node_type != conf.NodeType.CitizenNode:
                logging.debug("This node is no longer a Citizen node.")
                return

            elif isinstance(future.exception(), ConnectionError):
                logging.warning(f"Waiting for next subscribe request...")
                if self.__state_machine.state != "SubscribeNetwork":
                    self.__state_machine.subscribe_network()

        subscribe_event = asyncio.Event()
        util.logger.spam(
            f"try subscribe_call_by_citizen target({ChannelProperty().rest_target})"
        )

        # try websocket connection, and handle exception in callback
        asyncio.ensure_future(
            self.__node_subscriber.subscribe(
                block_height=self.block_manager.get_blockchain().block_height,
                event=subscribe_event)).add_done_callback(_handle_exception)
        await subscribe_event.wait()

    def shutdown_peer(self, **kwargs):
        logging.debug(f"channel_service:shutdown_peer")
        StubCollection().peer_stub.sync_task().stop(message=kwargs['message'])

    def set_peer_type(self, peer_type):
        """Set peer type when peer init only

        :param peer_type:
        :return:
        """
        self.__block_manager.set_peer_type(peer_type)

    def save_peer_manager(self, peer_manager):
        """peer_list 를 leveldb 에 저장한다.

        :param peer_manager:
        """
        level_db_key_name = str.encode(conf.LEVEL_DB_KEY_FOR_PEER_LIST)

        try:
            dump = peer_manager.dump()
            level_db = self.__block_manager.get_level_db()
            level_db.Put(level_db_key_name, dump)
        except AttributeError as e:
            logging.warning("Fail Save Peer_list: " + str(e))

    async def set_peer_type_in_channel(self):
        peer_type = loopchain_pb2.PEER
        blockchain = self.block_manager.get_blockchain()
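        # Prefer the last unconfirmed block, if any, so leader selection follows the newest proposal.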
        last_block = blockchain.last_unconfirmed_block or blockchain.last_block

        leader_id = None
        if last_block and last_block.header.next_leader is not None:
            leader_id = last_block.header.next_leader.hex_hx()
            peer = self.peer_manager.get_peer(leader_id)
            if peer is None:
                leader_id = None
            else:
                self.peer_manager.set_leader_peer(peer)
        if leader_id is None:
            leader_id = self.peer_manager.get_leader_peer().peer_id
        logging.debug(
            f"channel({ChannelProperty().name}) peer_leader: {leader_id}")

        logger_preset = loggers.get_preset()
        if ChannelProperty().peer_id == leader_id:
            logger_preset.is_leader = True
            logging.debug(
                f"Set Peer Type Leader! channel({ChannelProperty().name})")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
        else:
            logger_preset.is_leader = False
        logger_preset.update_logger()

        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            self.consensus.leader_id = leader_id

        self.block_manager.set_peer_type(peer_type)

    def __ready_to_height_sync(self):
        blockchain = self.block_manager.get_blockchain()

        blockchain.init_blockchain()
        if blockchain.block_height == -1 and 'genesis_data_path' in conf.CHANNEL_OPTION[
                ChannelProperty().name]:
            self.generate_genesis_block()
        elif blockchain.block_height > -1:
            self.block_manager.rebuild_block()

    def show_peers(self):
        logging.debug(f"peer_service:show_peers ({ChannelProperty().name}): ")
        for peer in self.peer_manager.get_IP_of_peers_in_group():
            logging.debug("peer_target: " + peer)

    def reset_leader(self, new_leader_id, block_height=0, complained=False):
        """

        :param new_leader_id:
        :param block_height:
        :param complained:
        :return:
        """
        if self.peer_manager.get_leader_id(conf.ALL_GROUP_ID) == new_leader_id:
            return

        util.logger.info(
            f"RESET LEADER channel({ChannelProperty().name}) leader_id({new_leader_id}), "
            f"complained={complained}")
        leader_peer = self.peer_manager.get_peer(new_leader_id, None)

        last_height = self.block_manager.get_blockchain().last_block.header.height
        if block_height > 0 and block_height != last_height + 1:
            util.logger.warning(
                f"A peer whose height is behind cannot take the leader role. "
                f"block_height({block_height}), last_block.header.height({last_height})"
            )
            return

        if leader_peer is None:
            logging.warning(
                f"in peer_service:reset_leader There is no peer by peer_id({new_leader_id})"
            )
            return

        util.logger.spam(
            f"peer_service:reset_leader target({leader_peer.target}), complained={complained}"
        )

        self_peer_object = self.peer_manager.get_peer(
            ChannelProperty().peer_id)
        self.peer_manager.set_leader_peer(leader_peer, None)
        if complained:
            self.block_manager.epoch.new_round(leader_peer.peer_id)
        else:
            self.block_manager.epoch = Epoch.new_epoch(leader_peer.peer_id)
        logging.info(
            f"Epoch height({self.block_manager.epoch.height}), leader ({self.block_manager.epoch.leader_id})"
        )

        if self_peer_object.peer_id == leader_peer.peer_id:
            logging.debug("Set Peer Type Leader!")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
            self.state_machine.turn_to_leader()
        else:
            logging.debug("Set Peer Type Peer!")
            peer_type = loopchain_pb2.PEER
            self.state_machine.turn_to_peer()

        self.block_manager.set_peer_type(peer_type)

    def set_new_leader(self, new_leader_id, block_height=0):
        logging.info(
            f"SET NEW LEADER channel({ChannelProperty().name}) leader_id({new_leader_id})"
        )

        # complained_leader = self.peer_manager.get_leader_peer()
        leader_peer = self.peer_manager.get_peer(new_leader_id, None)

        last_height = self.block_manager.get_blockchain().last_block.header.height
        if block_height > 0 and block_height != last_height + 1:
            logging.warning("A peer whose height is behind cannot take the leader role.")
            return

        if leader_peer is None:
            logging.warning(
                f"in channel_service:set_new_leader::There is no peer by peer_id({new_leader_id})"
            )
            return

        util.logger.spam(
            f"channel_service:set_new_leader::leader_target({leader_peer.target})"
        )

        self_peer_object = self.peer_manager.get_peer(
            ChannelProperty().peer_id)
        self.peer_manager.set_leader_peer(leader_peer, None)

        peer_leader = self.peer_manager.get_leader_peer()

        if self_peer_object.target == peer_leader.target:
            loggers.get_preset().is_leader = True
            loggers.get_preset().update_logger()

            logging.debug("I'm Leader Peer!")
        else:
            loggers.get_preset().is_leader = False
            loggers.get_preset().update_logger()

            logging.debug("I'm general Peer!")
            # Subscribe to the new leader
            # await self.subscribe_to_radio_station()
            # await self.subscribe_to_peer(peer_leader.peer_id, loopchain_pb2.BLOCK_GENERATOR)

    def genesis_invoke(self, block: Block) -> ('Block', dict):
        method = "icx_sendTransaction"
        transactions = []
        for tx in block.body.transactions.values():
            tx_serializer = TransactionSerializer.new(
                tx.version,
                self.block_manager.get_blockchain().tx_versioner)
            transaction = {
                "method": method,
                "params": {
                    "txHash": tx.hash.hex()
                },
                "genesisData": tx_serializer.to_full_data(tx)
            }
            transactions.append(transaction)

        request = {
            'block': {
                'blockHeight': block.header.height,
                'blockHash': block.header.hash.hex(),
                'timestamp': block.header.timestamp
            },
            'transactions': transactions
        }
        request = convert_params(request, ParamType.invoke)
        stub = StubCollection().icon_score_stubs[ChannelProperty().name]
        response = stub.sync_task().invoke(request)
        response_to_json_query(response)

        tx_receipts = response["txResults"]
        block_builder = BlockBuilder.from_new(
            block,
            self.block_manager.get_blockchain().tx_versioner)
        block_builder.reset_cache()
        block_builder.peer_id = block.header.peer_id
        block_builder.signature = block.header.signature

        block_builder.commit_state = {
            ChannelProperty().name: response['stateRootHash']
        }
        block_builder.state_hash = Hash32(
            bytes.fromhex(response['stateRootHash']))
        block_builder.receipts = tx_receipts
        block_builder.reps = self.get_rep_ids()
        new_block = block_builder.build()
        return new_block, tx_receipts

    def score_invoke(self, _block: Block) -> ('Block', dict):
        method = "icx_sendTransaction"
        transactions = []
        for tx in _block.body.transactions.values():
            tx_serializer = TransactionSerializer.new(
                tx.version,
                self.block_manager.get_blockchain().tx_versioner)

            transaction = {
                "method": method,
                "params": tx_serializer.to_full_data(tx)
            }
            transactions.append(transaction)

        request = {
            'block': {
                'blockHeight': _block.header.height,
                'blockHash': _block.header.hash.hex(),
                'prevBlockHash': _block.header.prev_hash.hex() if _block.header.prev_hash else '',
                'timestamp': _block.header.timestamp
            },
            'transactions': transactions
        }
        request = convert_params(request, ParamType.invoke)
        stub = StubCollection().icon_score_stubs[ChannelProperty().name]
        response = stub.sync_task().invoke(request)
        response_to_json_query(response)

        tx_receipts = response["txResults"]
        block_builder = BlockBuilder.from_new(
            _block,
            self.__block_manager.get_blockchain().tx_versioner)
        block_builder.reset_cache()
        block_builder.peer_id = _block.header.peer_id
        block_builder.signature = _block.header.signature

        block_builder.commit_state = {
            ChannelProperty().name: response['stateRootHash']
        }
        block_builder.state_hash = Hash32(
            bytes.fromhex(response['stateRootHash']))
        block_builder.receipts = tx_receipts
        block_builder.reps = self.get_rep_ids()
        new_block = block_builder.build()
        return new_block, tx_receipts

    def score_change_block_hash(self, block_height, old_block_hash,
                                new_block_hash):
        change_hash_info = json.dumps({
            "block_height": block_height,
            "old_block_hash": old_block_hash,
            "new_block_hash": new_block_hash
        })

        stub = StubCollection().score_stubs[ChannelProperty().name]
        stub.sync_task().change_block_hash(change_hash_info)

    def score_write_precommit_state(self, block: Block):
        logging.debug(
            f"call score commit {ChannelProperty().name} {block.header.height} {block.header.hash.hex()}"
        )

        request = {
            "blockHeight": block.header.height,
            "blockHash": block.header.hash.hex(),
        }
        request = convert_params(request, ParamType.write_precommit_state)

        stub = StubCollection().icon_score_stubs[ChannelProperty().name]
        stub.sync_task().write_precommit_state(request)
        return True

    def score_remove_precommit_state(self, block: Block):
        invoke_fail_info = json.dumps({
            "block_height": block.height,
            "block_hash": block.block_hash
        })
        stub = StubCollection().score_stubs[ChannelProperty().name]
        stub.sync_task().remove_precommit_state(invoke_fail_info)
        return True

    def reset_leader_complain_timer(self):
        if self.__timer_service.get_timer(
                TimerService.TIMER_KEY_LEADER_COMPLAIN):
            self.stop_leader_complain_timer()

        self.start_leader_complain_timer()

    def start_leader_complain_timer_if_tx_exists(self):
        if not self.block_manager.get_tx_queue().is_empty_in_status(
                TransactionStatusInQueue.normal):
            util.logger.debug(
                "Start leader complain timer because unconfirmed tx exists.")
            self.start_leader_complain_timer()

    def start_leader_complain_timer(self, duration=None):
        if not duration:
            duration = self.block_manager.epoch.complain_duration

        # util.logger.spam(
        #     f"start_leader_complain_timer in channel service. ({self.block_manager.epoch.round}/{duration})")
        if self.state_machine.state not in ("BlockGenerate", "BlockSync",
                                            "Watch"):
            self.__timer_service.add_timer_convenient(
                timer_key=TimerService.TIMER_KEY_LEADER_COMPLAIN,
                duration=duration,
                is_repeat=True,
                callback=self.state_machine.leader_complain)

    def stop_leader_complain_timer(self):
        util.logger.spam(f"stop_leader_complain_timer in channel service.")
        self.__timer_service.stop_timer(TimerService.TIMER_KEY_LEADER_COMPLAIN)

    def start_subscribe_timer(self):
        self.__timer_service.add_timer_convenient(
            timer_key=TimerService.TIMER_KEY_SUBSCRIBE,
            duration=conf.SUBSCRIBE_RETRY_TIMER,
            is_repeat=True,
            callback=self.subscribe_network)

    def stop_subscribe_timer(self):
        self.__timer_service.stop_timer(TimerService.TIMER_KEY_SUBSCRIBE)

    def start_shutdown_timer(self):
        error = f"Shutdown by Subscribe retry timeout({conf.SHUTDOWN_TIMER} sec)"
        self.__timer_service.add_timer_convenient(
            timer_key=TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE,
            duration=conf.SHUTDOWN_TIMER,
            callback=self.shutdown_peer,
            callback_kwargs={"message": error})

    def stop_shutdown_timer(self):
        self.__timer_service.stop_timer(
            TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE)
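
A small illustration of the complain-timer helpers above (a hedged sketch: `service` stands for an already-initialized instance of this class; the call sites are assumptions, not code from this repository):

    # Restart the repeating leader-complain timer when a new round begins...
    service.reset_leader_complain_timer()
    # ...and stop it once the round reaches consensus.
    service.stop_leader_complain_timer()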
Exemplo n.º 8
class BroadcastScheduler(CommonThread):
    """broadcast class for each channel"""

    THREAD_INFO_KEY = "thread_info"
    THREAD_VARIABLE_STUB_TO_SELF_PEER = "stub_to_self_peer"
    THREAD_VARIABLE_PEER_STATUS = "peer_status"

    SELF_PEER_TARGET_KEY = "self_peer_target"
    LEADER_PEER_TARGET_KEY = "leader_peer_target"

    def __init__(self, channel="", self_target=""):
        super().__init__()

        self.__channel = channel
        self.__self_target = self_target

        self.__audience = {}  # self.__audience[peer_target] = stub_manager
        self.__thread_variables = dict()
        self.__thread_variables[
            self.THREAD_VARIABLE_PEER_STATUS] = PeerThreadStatus.normal

        if conf.IS_BROADCAST_ASYNC:
            self.__broadcast_run = self.__broadcast_run_async
        else:
            self.__broadcast_run = self.__broadcast_run_sync

        self.__handler_map = {
            BroadcastCommand.CREATE_TX:
            self.__handler_create_tx,
            BroadcastCommand.CONNECT_TO_LEADER:
            self.__handler_connect_to_leader,
            BroadcastCommand.SUBSCRIBE:
            self.__handler_subscribe,
            BroadcastCommand.UNSUBSCRIBE:
            self.__handler_unsubscribe,
            BroadcastCommand.UPDATE_AUDIENCE:
            self.__handler_update_audience,
            BroadcastCommand.BROADCAST:
            self.__handler_broadcast,
            BroadcastCommand.MAKE_SELF_PEER_CONNECTION:
            self.__handler_connect_to_self_peer,
        }

        self.__broadcast_with_self_target_methods = {
            "AddTx", "AddTxList", "BroadcastVote"
        }

        self.stored_tx = queue.Queue()

        self.__broadcast_pool = futures.ThreadPoolExecutor(
            conf.MAX_BROADCAST_WORKERS, "BroadcastThread")
        self.__broadcast_queue = queue.PriorityQueue()

        self.__timer_service = TimerService()

    def stop(self):
        super().stop()
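        # Enqueue a sentinel so the run loop's blocking get() wakes up and exits.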
        self.__broadcast_queue.put((None, None, None, None))
        self.__broadcast_pool.shutdown(False)
        if self.__timer_service.is_run():
            self.__timer_service.stop()
            self.__timer_service.wait()

    def run(self, event: threading.Event):
        event.set()
        self.__timer_service.start()

        def _callback(curr_future: futures.Future,
                      executor_future: futures.Future):
            if executor_future.exception():
                curr_future.set_exception(executor_future.exception())
                logging.error(executor_future.exception())
            else:
                curr_future.set_result(executor_future.result())

        while self.is_run():
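            # Consume the next queued command and run its handler on the pool;
            # _callback relays the handler's result or exception to the caller's future.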
            priority, command, params, future = self.__broadcast_queue.get()
            if command is None:
                break

            func = self.__handler_map[command]
            return_future = self.__broadcast_pool.submit(func, params)
            return_future.add_done_callback(partial(_callback, future))

    def schedule_job(self, command, params):
        if command == BroadcastCommand.CREATE_TX:
            priority = (10, time.time())
        elif isinstance(params, tuple) and params[0] == "AddTx":
            priority = (10, time.time())
        else:
            priority = (0, time.time())
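        # PriorityQueue pops the smallest tuple first, so control commands at
        # priority 0 are handled before tx broadcasts at priority 10.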

        future = futures.Future()
        self.__broadcast_queue.put((priority, command, params, future))
        util.logger.spam(
            f"broadcast_scheduler:schedule_job qsize({self.__broadcast_queue.qsize()})"
        )
        return future

    def schedule_broadcast(self,
                           method_name,
                           method_param,
                           *,
                           retry_times=None,
                           timeout=None):
        """등록된 모든 Peer 의 동일한 gRPC method 를 같은 파라미터로 호출한다.
        """
        # logging.warning("broadcast in process ==========================")
        # logging.debug("pickle method_param: " + str(pickle.dumps(method_param)))

        kwargs = {}
        if retry_times is not None:
            kwargs['retry_times'] = retry_times

        if timeout is not None:
            kwargs['timeout'] = timeout

        self.schedule_job(BroadcastCommand.BROADCAST,
                          (method_name, method_param, kwargs))

    def __broadcast_retry_async(self, peer_target, method_name, method_param,
                                retry_times, timeout, result):
        if isinstance(result,
                      _Rendezvous) and result.code() == grpc.StatusCode.OK:
            return
        if isinstance(result, futures.Future) and not result.exception():
            return

        logging.debug(f"try retry to : peer_target({peer_target})\n")
        if retry_times > 0:
            retry_times -= 1
            self.__call_async_to_target(peer_target, method_name, method_param,
                                        False, retry_times, timeout)
        else:
            if isinstance(result, _Rendezvous):
                exception = result.details()
            elif isinstance(result, futures.Future):
                exception = result.exception()
            else:
                exception = None

            logging.warning(f"__broadcast_run_async fail({result})\n"
                            f"cause by: {exception}\n"
                            f"peer_target({peer_target})\n"
                            f"method_name({method_name})\n"
                            f"retry_remains({retry_times})\n"
                            f"timeout({timeout})")

    def __call_async_to_target(self, peer_target, method_name, method_param,
                               is_stub_reuse, retry_times, timeout):
        try:
            call_back_partial = None
            stub_item = None

            if peer_target in self.__audience.keys():
                call_back_partial = partial(self.__broadcast_retry_async,
                                            peer_target, method_name,
                                            method_param, retry_times, timeout)
                stub_item = self.__audience[peer_target]
        except KeyError as e:
            logging.debug(
                f"broadcast_thread:__call_async_to_target ({peer_target}) not in audience. ({e})"
            )
        else:
            if stub_item:
                stub_item.call_async(method_name=method_name,
                                     message=method_param,
                                     is_stub_reuse=is_stub_reuse,
                                     call_back=call_back_partial,
                                     timeout=timeout)

    def __broadcast_run_async(self,
                              method_name,
                              method_param,
                              retry_times=None,
                              timeout=None):
        """call gRPC interface of audience

        :param method_name: gRPC interface
        :param method_param: gRPC message
        """

        if timeout is None:
            timeout = conf.GRPC_TIMEOUT_BROADCAST_RETRY

        retry_times = conf.BROADCAST_RETRY_TIMES if retry_times is None else retry_times
        # logging.debug(f"broadcast({method_name}) async... ({len(self.__audience)})")

        for target in self.__get_broadcast_targets(method_name):
            # util.logger.debug(f"method_name({method_name}), peer_target({target})")
            self.__call_async_to_target(target, method_name, method_param,
                                        True, retry_times, timeout)

    def __broadcast_run_sync(self,
                             method_name,
                             method_param,
                             retry_times=None,
                             timeout=None):
        """call gRPC interface of audience

        :param method_name: gRPC interface
        :param method_param: gRPC message
        """
        # logging.debug(f"broadcast({method_name}) sync... ({len(self.__audience)})")

        if timeout is None:
            timeout = conf.GRPC_TIMEOUT_BROADCAST_RETRY

        retry_times = conf.BROADCAST_RETRY_TIMES if retry_times is None else retry_times

        for target in self.__get_broadcast_targets(method_name):
            if target not in self.__audience:
                continue
            stub_item = self.__audience[target]

            response = stub_item.call_in_times(method_name=method_name,
                                               message=method_param,
                                               timeout=timeout,
                                               retry_times=retry_times)

            if response is None:
                logging.warning(
                    f"broadcast_thread:__broadcast_run_sync fail ({method_name}) "
                    f"target({target}) ")

    def __handler_subscribe(self, audience_target):
        logging.debug(
            "BroadcastThread received subscribe command peer_target: " +
            str(audience_target))
        if audience_target not in self.__audience:
            stub_manager = StubManager.get_stub_manager_to_server(
                audience_target,
                loopchain_pb2_grpc.PeerServiceStub,
                time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
                is_allow_null_stub=True,
                ssl_auth_type=conf.GRPC_SSL_TYPE)
            self.__audience[audience_target] = stub_manager

    def __handler_unsubscribe(self, audience_target):
        # logging.debug(f"BroadcastThread received unsubscribe command peer_target({unsubscribe_peer_target})")
        try:
            del self.__audience[audience_target]
        except KeyError:
            logging.warning(f"Already deleted peer: {audience_target}")

    def __handler_update_audience(self, audience_param):
        util.logger.spam(
            f"broadcast_thread:__handler_update_audience audience_param({audience_param})"
        )
        peer_manager = PeerManager(ChannelProperty().name)
        peer_list_data = pickle.loads(audience_param)
        peer_manager.load(peer_list_data, False)

        for peer_id in list(peer_manager.peer_list[conf.ALL_GROUP_ID]):
            peer_each = peer_manager.peer_list[conf.ALL_GROUP_ID][peer_id]
            if peer_each.target != self.__self_target:
                logging.warning(
                    f"broadcast thread peer_targets({peer_each.target})")
                self.__handler_subscribe(peer_each.target)

    def __handler_broadcast(self, broadcast_param):
        # logging.debug("BroadcastThread received broadcast command")
        broadcast_method_name = broadcast_param[0]
        broadcast_method_param = broadcast_param[1]
        broadcast_method_kwparam = broadcast_param[2]
        # logging.debug("BroadcastThread method name: " + broadcast_method_name)
        # logging.debug("BroadcastThread method param: " + str(broadcast_method_param))
        self.__broadcast_run(broadcast_method_name, broadcast_method_param,
                             **broadcast_method_kwparam)

    def __make_tx_list_message(self):
        tx_list = []
        tx_list_size = 0
        tx_list_count = 0
        remains = False
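        # Drain queued txs until the size or count cap is reached; the item that
        # would overflow goes back on the queue and `remains` requests another round.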
        while not self.stored_tx.empty():
            stored_tx_item = self.stored_tx.get()
            tx_list_size += len(stored_tx_item)
            tx_list_count += 1
            if tx_list_size >= conf.MAX_TX_SIZE_IN_BLOCK or tx_list_count >= conf.MAX_TX_COUNT_IN_ADDTX_LIST:
                self.stored_tx.put(stored_tx_item)
                remains = True
                break
            tx_list.append(stored_tx_item.get_tx_message())
        message = loopchain_pb2.TxSendList(channel=self.__channel,
                                           tx_list=tx_list)

        return remains, message

    def __send_tx_by_timer(self, **kwargs):
        # util.logger.spam(f"broadcast_scheduler:__send_tx_by_timer")
        peer_status = self.__thread_variables[self.THREAD_VARIABLE_PEER_STATUS]
        if peer_status == PeerThreadStatus.leader_complained:
            logging.warning(
                "The leader has been complained about; txs are held in the queue temporarily: "
                + str(self.stored_tx.qsize()))
        else:
            # Send single tx for test
            # stored_tx_item = self.stored_tx.get()
            # self.__broadcast_run("AddTx", stored_tx_item.get_tx_message())

            # Send multiple tx
            remains, message = self.__make_tx_list_message()
            self.__broadcast_run("AddTxList", message)
            if remains:
                self.__send_tx_in_timer()

    def __send_tx_in_timer(self, tx_item=None):
        # util.logger.spam(f"broadcast_scheduler:__send_tx_in_timer")
        duration = 0
        if tx_item:
            self.stored_tx.put(tx_item)
            duration = conf.SEND_TX_LIST_DURATION

        if TimerService.TIMER_KEY_ADD_TX not in self.__timer_service.timer_list:
            self.__timer_service.add_timer(
                TimerService.TIMER_KEY_ADD_TX,
                Timer(target=TimerService.TIMER_KEY_ADD_TX,
                      duration=duration,
                      callback=self.__send_tx_by_timer,
                      callback_kwargs={}))

    def __handler_create_tx(self, create_tx_param):
        # logging.debug(f"Broadcast create_tx....")
        try:
            tx_item = TxItem.create_tx_item(create_tx_param, self.__channel)
        except Exception as e:
            logging.warning(f"tx in channel({self.__channel})")
            logging.warning(f"__handler_create_tx: meta({create_tx_param})")
            logging.warning(f"tx dump failed ({e})")
            return

        self.__send_tx_in_timer(tx_item)

    def __handler_connect_to_leader(self, connect_to_leader_param):
        # logging.debug("(tx thread) try... connect to leader: " + str(connect_to_leader_param))
        self.__thread_variables[
            self.LEADER_PEER_TARGET_KEY] = connect_to_leader_param

        # stub_to_self_peer = __thread_variables[self.THREAD_VARIABLE_STUB_TO_SELF_PEER]

        self.__thread_variables[
            self.THREAD_VARIABLE_PEER_STATUS] = PeerThreadStatus.normal

    def __handler_connect_to_self_peer(self, connect_param):
        # Create a stub for connecting to the parent peer that spawned this one.
        # Returning results through a pipe can raise errors when send/receive pairs
        # get out of sync, so communicate with the parent process over gRPC as well.
        logging.debug("try connect to self peer: " + str(connect_param))

        stub_to_self_peer = StubManager.get_stub_manager_to_server(
            connect_param,
            loopchain_pb2_grpc.InnerServiceStub,
            time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT_WHEN_INITIAL,
            is_allow_null_stub=True,
            ssl_auth_type=conf.SSLAuthType.none)
        self.__thread_variables[self.SELF_PEER_TARGET_KEY] = connect_param
        self.__thread_variables[
            self.THREAD_VARIABLE_STUB_TO_SELF_PEER] = stub_to_self_peer

    def __get_broadcast_targets(self, method_name):
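        # The RS service broadcasts to every target; a regular peer excludes its own
        # target except for methods (AddTx, AddTxList, BroadcastVote) that must loop back.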

        peer_targets = list(self.__audience)
        if ObjectManager().rs_service:
            return peer_targets
        else:
            if method_name not in self.__broadcast_with_self_target_methods:
                peer_targets.remove(ChannelProperty().peer_target)
            return peer_targets
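
A minimal usage sketch for the scheduler above (a hedged illustration: the channel name, targets, and `vote_message` are assumptions, not values from this codebase):

    scheduler = BroadcastScheduler(channel="my_channel", self_target="127.0.0.1:7100")
    scheduler.start()
    # Register one audience target, then fan the same gRPC call out to all targets.
    scheduler.schedule_job(BroadcastCommand.SUBSCRIBE, "127.0.0.1:7200")
    scheduler.schedule_broadcast("BroadcastVote", vote_message)  # vote_message: a prepared loopchain_pb2 message
    scheduler.stop()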
Exemplo n.º 9
class ChannelService:
    def __init__(self, channel_name, amqp_target, amqp_key):
        self.__block_manager: BlockManager = None
        self.__score_container: CommonSubprocess = None
        self.__score_info: dict = None
        self.__peer_auth: PeerAuthorization = None
        self.__peer_manager: PeerManager = None
        self.__broadcast_scheduler: BroadcastScheduler = None
        self.__radio_station_stub = None
        self.__consensus: Consensus = None
        self.__proposer: Proposer = None
        self.__acceptor: Acceptor = None
        self.__timer_service = TimerService()

        loggers.get_preset().channel_name = channel_name
        loggers.get_preset().update_logger()

        channel_queue_name = conf.CHANNEL_QUEUE_NAME_FORMAT.format(
            channel_name=channel_name, amqp_key=amqp_key)
        self.__inner_service = ChannelInnerService(amqp_target,
                                                   channel_queue_name,
                                                   conf.AMQP_USERNAME,
                                                   conf.AMQP_PASSWORD,
                                                   channel_service=self)

        logging.info(
            f"ChannelService : {channel_name}, Queue : {channel_queue_name}")

        ChannelProperty().name = channel_name
        ChannelProperty().amqp_target = amqp_target

        StubCollection().amqp_key = amqp_key
        StubCollection().amqp_target = amqp_target

        command_arguments.add_raw_command(command_arguments.Type.Channel,
                                          channel_name)
        command_arguments.add_raw_command(command_arguments.Type.AMQPTarget,
                                          amqp_target)
        command_arguments.add_raw_command(command_arguments.Type.AMQPKey,
                                          amqp_key)

        ObjectManager().channel_service = self

    @property
    def block_manager(self):
        return self.__block_manager

    @property
    def score_container(self):
        return self.__score_container

    @property
    def score_info(self):
        return self.__score_info

    @property
    def radio_station_stub(self):
        return self.__radio_station_stub

    @property
    def peer_auth(self):
        return self.__peer_auth

    @property
    def peer_manager(self):
        return self.__peer_manager

    @property
    def broadcast_scheduler(self):
        return self.__broadcast_scheduler

    @property
    def consensus(self):
        return self.__consensus

    @property
    def acceptor(self):
        return self.__acceptor

    @property
    def timer_service(self):
        return self.__timer_service

    def serve(self):
        async def _serve():
            await StubCollection().create_peer_stub()
            results = await StubCollection().peer_stub.async_task().get_channel_info_detail(
                ChannelProperty().name)

            await self.init(*results)

            self.__timer_service.start()
            logging.info(
                f'channel_service: init complete channel: {ChannelProperty().name}'
            )

        loop = MessageQueueService.loop
        loop.create_task(_serve())
        loop.add_signal_handler(signal.SIGINT, self.close)
        loop.add_signal_handler(signal.SIGTERM, self.close)

        try:
            loop.run_forever()
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.close()

            self.cleanup()

    def close(self):
        MessageQueueService.loop.stop()

    def cleanup(self):
        logging.info("Cleanup Channel Resources.")

        if self.__block_manager:
            self.__block_manager.stop()
            self.__block_manager.wait()
            self.__block_manager = None
            logging.info("Cleanup BlockManager.")

        if self.__score_container:
            self.__score_container.stop()
            self.__score_container.wait()
            self.__score_container = None
            logging.info("Cleanup ScoreContainer.")

        if self.__broadcast_scheduler:
            self.__broadcast_scheduler.stop()
            self.__broadcast_scheduler.wait()
            self.__broadcast_scheduler = None
            logging.info("Cleanup BroadcastSchuduler.")

        if self.__consensus:
            self.__consensus.stop()
            self.__consensus.wait()
            logging.info("Cleanup Consensus.")

        if self.__timer_service.is_run():
            self.__timer_service.stop()
            self.__timer_service.wait()
            logging.info("Cleanup TimerSerivce.")

    async def init(self, peer_port, peer_target, rest_target,
                   radio_station_target, peer_id, group_id, node_type,
                   score_package):
        loggers.get_preset().peer_id = peer_id
        loggers.get_preset().update_logger()

        ChannelProperty().peer_port = peer_port
        ChannelProperty().peer_target = peer_target
        ChannelProperty().rest_target = rest_target
        ChannelProperty().radio_station_target = radio_station_target
        ChannelProperty().peer_id = peer_id
        ChannelProperty().group_id = group_id
        ChannelProperty().node_type = conf.NodeType(node_type)
        ChannelProperty().score_package = score_package

        self.__init_peer_auth()
        self.__init_block_manager()
        self.__init_broadcast_scheduler()
        self.__init_radio_station_stub()

        await self.__init_score_container()
        await self.__inner_service.connect(conf.AMQP_CONNECTION_ATTEMPS,
                                           conf.AMQP_RETRY_DELAY,
                                           exclusive=True)

        self.__peer_manager = PeerManager(ChannelProperty().name)

        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            util.logger.spam(f"init consensus !")
            # load consensus
            self.__init_consensus()
            # load proposer
            self.__init_proposer(peer_id=peer_id)
            # load acceptor
            self.__init_acceptor(peer_id=peer_id)

        if self.is_support_node_function(conf.NodeFunction.Vote):
            self.connect_to_radio_station()
        await self.set_peer_type_in_channel()
        await self.subscribe_to_radio_station()

        self.generate_genesis_block()

        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            self.__consensus.change_epoch(
                precommit_block=self.__block_manager.get_blockchain().last_block)
            self.__consensus.start()
        elif conf.ALLOW_MAKE_EMPTY_BLOCK:
            self.block_manager.block_generation_scheduler.start()

    def __init_peer_auth(self):
        try:
            channel_use_icx = self.get_channel_option()["send_tx_type"] == conf.SendTxType.icx
            channel_authorization = IcxAuthorization if channel_use_icx else PeerAuthorization

            self.__peer_auth = channel_authorization(ChannelProperty().name)

        except Exception as e:
            logging.exception(f"peer auth init fail cause : {e}")
            util.exit_and_msg(f"peer auth init fail cause : {e}")

    def __init_block_manager(self):
        logging.debug(
            f"__load_block_manager_each channel({ChannelProperty().name})")
        try:
            self.__block_manager = BlockManager(
                channel_manager=self,
                peer_id=ChannelProperty().peer_id,
                channel_name=ChannelProperty().name,
                level_db_identity=ChannelProperty().peer_target)

            self.__block_manager.consensus_algorithm = self.__init_consensus_algorithm()

            if conf.CONSENSUS_ALGORITHM != conf.ConsensusAlgorithm.lft:
                self.__block_manager.start()

        except leveldb.LevelDBError as e:
            util.exit_and_msg("LevelDBError(" + str(e) + ")")

    def __init_consensus(self):
        consensus = Consensus(self, ChannelProperty().name)
        self.__consensus = consensus
        self.__block_manager.consensus = consensus
        consensus.multiple_register(self.__block_manager)

    def __init_proposer(self, peer_id: str):
        proposer = Proposer(name="loopchain.consensus.Proposer",
                            peer_id=peer_id,
                            channel=ChannelProperty().name,
                            channel_service=self)
        self.__consensus.multiple_register(proposer)
        self.__proposer = proposer

    def __init_acceptor(self, peer_id: str):
        acceptor = Acceptor(name="loopchain.consensus.Acceptor",
                            consensus=self.__consensus,
                            peer_id=peer_id,
                            channel=ChannelProperty().name,
                            channel_service=self)
        self.__consensus.multiple_register(acceptor)
        self.__acceptor = acceptor

    def __init_broadcast_scheduler(self):
        scheduler = BroadcastScheduler(
            channel=ChannelProperty().name,
            self_target=ChannelProperty().peer_target)
        scheduler.start()

        self.__broadcast_scheduler = scheduler

        future = scheduler.schedule_job(BroadcastCommand.SUBSCRIBE,
                                        ChannelProperty().peer_target)
        future.result(conf.TIMEOUT_FOR_FUTURE)

    def __init_radio_station_stub(self):
        if self.is_support_node_function(conf.NodeFunction.Vote):
            self.__radio_station_stub = StubManager.get_stub_manager_to_server(
                ChannelProperty().radio_station_target,
                loopchain_pb2_grpc.RadioStationStub,
                conf.CONNECTION_RETRY_TIMEOUT_TO_RS,
                ssl_auth_type=conf.GRPC_SSL_TYPE)
        else:
            self.__radio_station_stub = RestStubManager(
                ChannelProperty().radio_station_target)

    async def __init_score_container(self):
        """create score container and save score_info and score_stub
        """
        for i in range(conf.SCORE_LOAD_RETRY_TIMES):
            try:
                self.__score_info = await self.__run_score_container()
            except BaseException as e:
                util.logger.spam(
                    f"channel_manager:load_score_container_each score_info load fail retry({i})"
                )
                logging.error(e)
                traceback.print_exc()
                time.sleep(conf.SCORE_LOAD_RETRY_INTERVAL)  # Blocking the main thread here is intentional.

            else:
                break

    async def __run_score_container(self):
        if not conf.USE_EXTERNAL_SCORE or conf.EXTERNAL_SCORE_RUN_IN_LAUNCHER:
            process_args = [
                'python3', '-m', 'loopchain', 'score', '--channel',
                ChannelProperty().name, '--score_package',
                ChannelProperty().score_package
            ]
            process_args += command_arguments.get_raw_commands_by_filter(
                command_arguments.Type.AMQPTarget,
                command_arguments.Type.AMQPKey, command_arguments.Type.Develop,
                command_arguments.Type.ConfigurationFilePath)
            self.__score_container = CommonSubprocess(process_args)

        if conf.USE_EXTERNAL_SCORE:
            await StubCollection().create_icon_score_stub(ChannelProperty().name)
            icon_score_stub = StubCollection().icon_score_stubs[ChannelProperty().name]
            await icon_score_stub.connect()
            await icon_score_stub.async_task().hello()
            return None
        else:
            await StubCollection().create_score_stub(ChannelProperty().name,
                                                     ChannelProperty().score_package)
            score_stub = StubCollection().score_stubs[ChannelProperty().name]
            await score_stub.connect()
            await score_stub.async_task().hello()

            return await self.__load_score()

    def __init_consensus_algorithm(self):
        """initialize a consensus algorithm by configuration.
        """
        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.none:
            consensus_algorithm = ConsensusNone(self.__block_manager)
        elif conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.siever:
            consensus_algorithm = ConsensusSiever(self.__block_manager)
        elif conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            consensus_algorithm = ConsensusLFT(self.__block_manager)
        else:
            consensus_algorithm = ConsensusDefault(self.__block_manager)

        return consensus_algorithm

    async def __load_score(self):
        channel_name = ChannelProperty().name
        score_package_name = ChannelProperty().score_package

        util.logger.spam(f"peer_service:__load_score --init--")
        logging.info("LOAD SCORE AND CONNECT TO SCORE SERVICE!")

        params = {
            message_code.MetaParams.ScoreLoad.repository_path: conf.DEFAULT_SCORE_REPOSITORY_PATH,
            message_code.MetaParams.ScoreLoad.score_package: score_package_name,
            message_code.MetaParams.ScoreLoad.base: conf.DEFAULT_SCORE_BASE,
            message_code.MetaParams.ScoreLoad.peer_id: ChannelProperty().peer_id
        }
        meta = json.dumps(params)
        logging.debug(f"load score params : {meta}")

        util.logger.spam(f"peer_service:__load_score --1--")
        score_stub = StubCollection().score_stubs[channel_name]
        response = await score_stub.async_task().score_load(meta)

        logging.debug("try score load on score service: " + str(response))
        if not response:
            return None

        if response.code != message_code.Response.success:
            util.exit_and_msg("Failed to get score from score server.")
            return None

        logging.debug("Got score from score server.")
        score_info = json.loads(response.meta)

        logging.info("LOAD SCORE DONE!")
        util.logger.spam(f"peer_service:__load_score --end--")

        return score_info

    def is_support_node_function(self, node_function):
        return conf.NodeType.is_support_node_function(
            node_function,
            ChannelProperty().node_type)

    def get_channel_option(self) -> dict:
        channel_option = conf.CHANNEL_OPTION
        return channel_option[ChannelProperty().name]

    def generate_genesis_block(self):
        if self.block_manager.peer_type != loopchain_pb2.BLOCK_GENERATOR:
            return

        block_chain = self.block_manager.get_blockchain()
        if block_chain.block_height > -1:
            logging.debug("genesis block was already generated")
            return

        block_chain.generate_genesis_block()

    def connect_to_radio_station(self, is_reconnect=False):
        response = self.__radio_station_stub.call_in_times(
            method_name="ConnectPeer",
            message=loopchain_pb2.ConnectPeerRequest(
                channel=ChannelProperty().name,
                peer_object=b'',
                peer_id=ChannelProperty().peer_id,
                peer_target=ChannelProperty().peer_target,
                group_id=ChannelProperty().group_id,
                cert=self.peer_auth.peer_cert),
            retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS,
            is_stub_reuse=True,
            timeout=conf.CONNECTION_TIMEOUT_TO_RS)

        # start next ConnectPeer timer
        if TimerService.TIMER_KEY_CONNECT_PEER not in self.__timer_service.timer_list:
            self.__timer_service.add_timer(
                TimerService.TIMER_KEY_CONNECT_PEER,
                Timer(target=TimerService.TIMER_KEY_CONNECT_PEER,
                      duration=conf.CONNECTION_RETRY_TIMER,
                      callback=self.connect_to_radio_station,
                      callback_kwargs={"is_reconnect": True}))

        if is_reconnect:
            return

        if response and response.status == message_code.Response.success:
            peer_list_data = pickle.loads(response.peer_list)
            self.__peer_manager.load(peer_list_data, False)
            peers, peer_list = self.__peer_manager.get_peers_for_debug()
            logging.debug("peer list update: " + peers)

            # add connected peer to processes audience
            for each_peer in peer_list:
                util.logger.spam(
                    f"peer_service:connect_to_radio_station peer({each_peer.target}-{each_peer.status})"
                )
                if each_peer.status == PeerStatus.connected:
                    self.__broadcast_scheduler.schedule_job(
                        BroadcastCommand.SUBSCRIBE, each_peer.target)

    async def subscribe_to_radio_station(self):
        await self.__subscribe_call_to_stub_by_method(
            self.__radio_station_stub, loopchain_pb2.PEER)

    async def subscribe_to_peer(self, peer_id, peer_type):
        peer = self.peer_manager.get_peer(peer_id)
        peer_stub = self.peer_manager.get_peer_stub_manager(peer)

        await self.__subscribe_call_to_stub_by_method(peer_stub, peer_type)
        self.__broadcast_scheduler.schedule_job(BroadcastCommand.SUBSCRIBE,
                                                peer_stub.target)

    async def __subscribe_call_to_stub_by_method(self, peer_stub, peer_type):
        if self.is_support_node_function(conf.NodeFunction.Vote):
            await peer_stub.call_async(
                "Subscribe",
                loopchain_pb2.PeerRequest(
                    channel=ChannelProperty().name,
                    peer_target=ChannelProperty().peer_target,
                    peer_type=peer_type,
                    peer_id=ChannelProperty().peer_id,
                    group_id=ChannelProperty().group_id,
                    node_type=ChannelProperty().node_type),
            )
        else:
            util.logger.spam(
                f"channel_service:__subscribe_call_to_stub_by_method "
                f"peer_target({ChannelProperty().rest_target})")
            response = self.__subscribe_call_to_rs_stub(peer_stub)

            if response['response_code'] != message_code.Response.success:
                error = f"subscribe fail to peer_target({ChannelProperty().radio_station_target}) " \
                        f"reason({response['message']})"
                await StubCollection().peer_stub.async_task().stop(
                    message=error)

    def __subscribe_call_to_rs_stub(self, rs_rest_stub):
        response = {
            'response_code': message_code.Response.fail,
            'message':
            message_code.get_response_msg(message_code.Response.fail)
        }

        try:
            if conf.REST_SSL_TYPE == conf.SSLAuthType.none:
                peer_target = ChannelProperty().rest_target
            else:
                peer_target = f"https://{ChannelProperty().rest_target}"
            response = rs_rest_stub.call("Subscribe", {
                'channel': ChannelProperty().name,
                'peer_target': peer_target
            })

        except Exception as e:
            logging.warning(
                f"Subscribe call to RadioStation(mother peer) failed({e}); "
                f"retrying automatically")

        if response['response_code'] == message_code.Response.success:
            if TimerService.TIMER_KEY_SUBSCRIBE in self.__timer_service.timer_list:
                self.__timer_service.stop_timer(
                    TimerService.TIMER_KEY_SUBSCRIBE)
                self.radio_station_stub.update_methods_version()
                logging.debug(
                    f"Subscription to RadioStation(mother peer) is successful."
                )

            if (TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE
                    in self.__timer_service.timer_list):
                self.__timer_service.stop_timer(
                    TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE)

            # start next get_status timer
            timer_key = TimerService.TIMER_KEY_GET_LAST_BLOCK_KEEP_CITIZEN_SUBSCRIPTION
            if timer_key not in self.__timer_service.timer_list.keys():
                util.logger.spam(
                    f"add timer for check_block_height_call to radiostation..."
                )
                self.__timer_service.add_timer(
                    timer_key,
                    Timer(target=timer_key,
                          duration=conf.GET_LAST_BLOCK_TIMER,
                          is_repeat=True,
                          callback=self.__check_block_height_call_to_rs_stub,
                          callback_kwargs={"rs_rest_stub": rs_rest_stub}))
        else:
            timer_key = TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE
            if timer_key not in self.__timer_service.timer_list.keys():
                error = f"Shutdown by Subscribe retry timeout({conf.SHUTDOWN_TIMER})"
                self.__timer_service.add_timer(
                    timer_key,
                    Timer(target=timer_key,
                          duration=conf.SHUTDOWN_TIMER,
                          callback=self.__shutdown_peer,
                          callback_kwargs={"message": error}))

        return response
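
    # Timer state machine implemented by __subscribe_call_to_rs_stub above:
    #   on success -> stop TIMER_KEY_SUBSCRIBE and
    #       TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE, then arm the repeating
    #       TIMER_KEY_GET_LAST_BLOCK_KEEP_CITIZEN_SUBSCRIPTION heartbeat.
    #   on failure -> arm TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE so the peer
    #       shuts itself down after conf.SHUTDOWN_TIMER if retries keep failing.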

    def __check_block_height_call_to_rs_stub(self, **kwargs):
        rs_rest_stub = kwargs.get("rs_rest_stub", None)
        response = dict()
        try:
            response = rs_rest_stub.call("GetLastBlock")
        except Exception as e:
            logging.warning(f"GetLastBlock call to RadioStation failed({e})")
            response['response_code'] = message_code.Response.fail

        if response['response_code'] == message_code.Response.success:
            my_block_height = self.__block_manager.get_blockchain().block_height
            if response['block']['height'] <= my_block_height:
                # keep get last block timer, citizen subscription is still valid.
                return

        # citizen needs additional block or failed to connect to mother peer.
        timer_key = TimerService.TIMER_KEY_GET_LAST_BLOCK_KEEP_CITIZEN_SUBSCRIPTION
        if timer_key in self.__timer_service.timer_list.keys():
            util.logger.spam(
                f"stop timer for check_block_height_call to radiostation...")
            self.__timer_service.stop_timer(timer_key)

        timer_key = TimerService.TIMER_KEY_SUBSCRIBE
        if timer_key not in self.__timer_service.timer_list.keys():
            self.__timer_service.add_timer(
                timer_key,
                Timer(target=timer_key,
                      duration=conf.SUBSCRIBE_RETRY_TIMER,
                      is_repeat=True,
                      callback=self.__subscribe_call_to_rs_stub,
                      callback_kwargs={"rs_rest_stub": rs_rest_stub}))

    def __shutdown_peer(self, **kwargs):
        util.logger.spam(f"channel_service:__shutdown_peer")
        StubCollection().peer_stub.sync_task().stop(message=kwargs['message'])

    def set_peer_type(self, peer_type):
        """Set peer type when peer init only

        :param peer_type:
        :return:
        """
        self.__block_manager.set_peer_type(peer_type)

    def save_peer_manager(self, peer_manager):
        """peer_list 를 leveldb 에 저장한다.

        :param peer_manager:
        """
        level_db_key_name = str.encode(conf.LEVEL_DB_KEY_FOR_PEER_LIST)

        try:
            dump = peer_manager.dump()
            level_db = self.__block_manager.get_level_db()
            level_db.Put(level_db_key_name, dump)
        except AttributeError as e:
            logging.warning("Fail Save Peer_list: " + str(e))

    async def set_peer_type_in_channel(self):
        peer_type = loopchain_pb2.PEER
        peer_leader = self.peer_manager.get_leader_peer(
            is_complain_to_rs=self.is_support_node_function(
                conf.NodeFunction.Vote))
        logging.debug(f"channel({ChannelProperty().name}) peer_leader: " +
                      str(peer_leader))

        logger_preset = loggers.get_preset()
        if self.is_support_node_function(conf.NodeFunction.Vote) \
                and ChannelProperty().peer_id == peer_leader.peer_id:
            logger_preset.is_leader = True
            logging.debug(
                f"Set Peer Type Leader! channel({ChannelProperty().name})")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
        else:
            logger_preset.is_leader = False
        logger_preset.update_logger()

        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            self.consensus.leader_id = peer_leader.peer_id

        if peer_type == loopchain_pb2.BLOCK_GENERATOR:
            self.block_manager.set_peer_type(peer_type)
            self.__ready_to_height_sync(True)
        elif peer_type == loopchain_pb2.PEER:
            self.__ready_to_height_sync(False)
            await self.__block_height_sync_channel()

    def __ready_to_height_sync(self, is_leader: bool = False):
        block_chain = self.block_manager.get_blockchain()

        block_chain.init_block_chain(is_leader)
        if block_chain.block_height > -1:
            self.block_manager.rebuild_block()

    async def __block_height_sync_channel(self):
        # If this peer did not start as a leader but holds the leader peer info,
        # run a block height sync to find the leader of the latest block.
        peer_manager = self.peer_manager
        peer_leader = peer_manager.get_leader_peer()
        self_peer_object = peer_manager.get_peer(ChannelProperty().peer_id)
        is_delay_announce_new_leader = False
        peer_old_leader = None

        if peer_leader:
            block_sync_target = peer_leader.target
            block_sync_target_stub = StubManager.get_stub_manager_to_server(
                block_sync_target,
                loopchain_pb2_grpc.PeerServiceStub,
                time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
                ssl_auth_type=conf.GRPC_SSL_TYPE)
        else:
            block_sync_target = ChannelProperty().radio_station_target
            block_sync_target_stub = self.__radio_station_stub

        if block_sync_target != ChannelProperty().peer_target:
            if block_sync_target_stub is None:
                logging.warning(
                    "You may be behind this network... or this network has no leader!"
                )

                is_delay_announce_new_leader = True
                peer_old_leader = peer_leader
                peer_leader = self.peer_manager.leader_complain_to_rs(
                    conf.ALL_GROUP_ID, is_announce_new_peer=False)

                if peer_leader is not None \
                        and ChannelProperty().node_type == conf.NodeType.CommunityNode:
                    block_sync_target_stub = StubManager.get_stub_manager_to_server(
                        peer_leader.target,
                        loopchain_pb2_grpc.PeerServiceStub,
                        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
                        ssl_auth_type=conf.GRPC_SSL_TYPE)

            if self.is_support_node_function(conf.NodeFunction.Vote) and \
                    (not peer_leader or peer_leader.peer_id == ChannelProperty().peer_id):
                peer_leader = self_peer_object
                self.block_manager.set_peer_type(loopchain_pb2.BLOCK_GENERATOR)
            else:
                _, future = self.block_manager.block_height_sync(
                    block_sync_target_stub)
                await future

                if block_sync_target_stub is None:
                    util.exit_and_msg("Fail connect to leader!!")

                self.show_peers()

            if block_sync_target_stub is not None and self.is_support_node_function(
                    conf.NodeFunction.Vote):
                await self.__subscribe_call_to_stub_by_method(
                    block_sync_target_stub, loopchain_pb2.BLOCK_GENERATOR)

            if is_delay_announce_new_leader:
                self.peer_manager.announce_new_leader(
                    peer_old_leader.peer_id,
                    peer_leader.peer_id,
                    self_peer_id=ChannelProperty().peer_id)

    def show_peers(self):
        logging.debug(f"peer_service:show_peers ({ChannelProperty().name}): ")
        for peer in self.peer_manager.get_IP_of_peers_in_group():
            logging.debug("peer_target: " + peer)

    async def reset_leader(self, new_leader_id, block_height=0):
        logging.info(
            f"RESET LEADER channel({ChannelProperty().name}) leader_id({new_leader_id})"
        )

        complained_leader = self.peer_manager.get_leader_peer()
        leader_peer = self.peer_manager.get_peer(new_leader_id, None)

        last_block_height = self.block_manager.get_blockchain().last_block.height
        if block_height > 0 and block_height != last_block_height + 1:
            logging.warning("a peer behind in block height cannot take the leader role.")
            return

        if leader_peer is None:
            logging.warning(
                f"in peer_service:reset_leader there is no peer with peer_id({new_leader_id})"
            )
            return

        util.logger.spam(
            f"peer_service:reset_leader target({leader_peer.target})")

        self_peer_object = self.peer_manager.get_peer(
            ChannelProperty().peer_id)
        self.peer_manager.set_leader_peer(leader_peer, None)

        peer_leader = self.peer_manager.get_leader_peer()
        peer_type = loopchain_pb2.PEER

        if self_peer_object.target == peer_leader.target:
            loggers.get_preset().is_leader = True
            loggers.get_preset().update_logger()

            logging.debug("Set Peer Type Leader!")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
            self.block_manager.get_blockchain().reset_made_block_count()
            self.peer_manager.announce_new_leader(
                complained_leader.peer_id,
                new_leader_id,
                is_broadcast=True,
                self_peer_id=ChannelProperty().peer_id)
        else:
            loggers.get_preset().is_leader = False
            loggers.get_preset().update_logger()

            logging.debug("Set Peer Type Peer!")
            # subscribe to the new leader
            await self.subscribe_to_radio_station()
            await self.subscribe_to_peer(peer_leader.peer_id,
                                         loopchain_pb2.BLOCK_GENERATOR)

        # update candidate blocks
        self.block_manager.get_candidate_blocks().set_last_block(
            self.block_manager.get_blockchain().last_block)
        self.block_manager.set_peer_type(peer_type)

    def set_new_leader(self, new_leader_id, block_height=0):
        logging.info(
            f"SET NEW LEADER channel({ChannelProperty().name}) leader_id({new_leader_id})"
        )

        # complained_leader = self.peer_manager.get_leader_peer()
        leader_peer = self.peer_manager.get_peer(new_leader_id, None)

        last_block_height = self.block_manager.get_blockchain().last_block.height
        if block_height > 0 and block_height != last_block_height + 1:
            logging.warning("a peer behind in block height cannot take the leader role.")
            return

        if leader_peer is None:
            logging.warning(
                f"in channel_service:set_new_leader::there is no peer with peer_id({new_leader_id})"
            )
            return

        util.logger.spam(
            f"channel_service:set_new_leader::leader_target({leader_peer.target})"
        )

        self_peer_object = self.peer_manager.get_peer(
            ChannelProperty().peer_id)
        self.peer_manager.set_leader_peer(leader_peer, None)

        peer_leader = self.peer_manager.get_leader_peer()

        if self_peer_object.target == peer_leader.target:
            loggers.get_preset().is_leader = True
            loggers.get_preset().update_logger()

            logging.debug("I'm Leader Peer!")
        else:
            loggers.get_preset().is_leader = False
            loggers.get_preset().update_logger()

            logging.debug("I'm general Peer!")
            # subscribe to the new leader
            # await self.subscribe_to_radio_station()
            # await self.subscribe_to_peer(peer_leader.peer_id, loopchain_pb2.BLOCK_GENERATOR)

    def genesis_invoke(self, block: Block) -> dict or None:
        if conf.USE_EXTERNAL_SCORE:
            method = "icx_sendTransaction"
            transactions = []
            for tx in block.confirmed_transaction_list:
                transaction = {
                    "method": method,
                    "params": {
                        "txHash": tx.tx_hash
                    },
                    "genesisData": tx.genesis_origin_data
                }
                transactions.append(transaction)

            request = {
                'block': {
                    'blockHeight': block.height,
                    'blockHash': block.block_hash,
                    'timestamp': block.time_stamp
                },
                'transactions': transactions
            }
            request = convert_params(request, ParamType.invoke)
            stub = StubCollection().icon_score_stubs[ChannelProperty().name]
            response = stub.sync_task().invoke(request)
            response_to_json_query(response)
            block.commit_state[ChannelProperty().name] = response['stateRootHash']
            return response["txResults"]
        else:
            block_object = pickle.dumps(block)
            stub = StubCollection().score_stubs[ChannelProperty().name]
            response = stub.sync_task().genesis_invoke(block_object)
            if response.code == message_code.Response.success:
                return json.loads(response.meta)

        return None
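
    # Shape of the external-SCORE request assembled in genesis_invoke above,
    # before convert_params(request, ParamType.invoke) normalizes it (field
    # names are taken directly from the dict literals in this method):
    #
    #   {
    #       "block": {"blockHeight": ..., "blockHash": "...", "timestamp": ...},
    #       "transactions": [
    #           {"method": "icx_sendTransaction",
    #            "params": {"txHash": "..."},
    #            "genesisData": {...}},
    #           ...
    #       ]
    #   }
    #
    # score_invoke below builds the same envelope but sends each transaction's
    # icx_origin_data as "params" and adds "prevBlockHash" to the block part.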

    def score_invoke(self, _block: Block) -> dict or None:
        if conf.USE_EXTERNAL_SCORE:
            method = "icx_sendTransaction"
            transactions = []
            for tx in _block.confirmed_transaction_list:
                data = tx.icx_origin_data
                transaction = {"method": method, "params": data}
                transactions.append(transaction)

            request = {
                'block': {
                    'blockHeight': _block.height,
                    'blockHash': _block.block_hash,
                    'prevBlockHash': _block.prev_block_hash,
                    'timestamp': _block.time_stamp
                },
                'transactions': transactions
            }
            request = convert_params(request, ParamType.invoke)
            stub = StubCollection().icon_score_stubs[ChannelProperty().name]
            response = stub.sync_task().invoke(request)
            response_to_json_query(response)
            _block.commit_state[ChannelProperty().name] = response['stateRootHash']
            return response["txResults"]
        else:
            stub = StubCollection().score_stubs[ChannelProperty().name]
            response = stub.sync_task().score_invoke(_block)

            if response.code == message_code.Response.success:
                commit_state = pickle.loads(response.object)
                _block.commit_state = commit_state
                return json.loads(response.meta)

        return None

    def score_change_block_hash(self, block_height, old_block_hash,
                                new_block_hash):
        change_hash_info = json.dumps({
            "block_height": block_height,
            "old_block_hash": old_block_hash,
            "new_block_hash": new_block_hash
        })

        if not conf.USE_EXTERNAL_SCORE:
            stub = StubCollection().score_stubs[ChannelProperty().name]
            stub.sync_task().change_block_hash(change_hash_info)

    def score_write_precommit_state(self, block: Block):
        logging.debug(
            f"call score commit {ChannelProperty().name} {block.height} {block.block_hash}"
        )

        if conf.USE_EXTERNAL_SCORE:
            request = {
                "blockHeight": block.height,
                "blockHash": block.block_hash,
            }
            request = convert_params(request, ParamType.write_precommit_state)

            stub = StubCollection().icon_score_stubs[ChannelProperty().name]
            stub.sync_task().write_precommit_state(request)
            return True
        else:
            block_commit_info = json.dumps({
                "block_height": block.height,
                "block_hash": block.block_hash
            })
            stub = StubCollection().score_stubs[ChannelProperty().name]
            response = stub.sync_task().write_precommit_state(
                block_commit_info)

            if response.code == message_code.Response.success:
                return True
            else:
                logging.error(f"score db commit fail cause {response.message}")
                return False

    def score_remove_precommit_state(self, block: Block):
        if conf.USE_EXTERNAL_SCORE:
            request = {
                "blockHeight": block.height,
                "blockHash": block.block_hash,
            }
            request = convert_params(request, ParamType.remove_precommit_state)

            stub = StubCollection().icon_score_stubs[ChannelProperty().name]
            stub.sync_task().remove_precommit_state(request)

            return True
        else:
            invoke_fail_info = json.dumps({
                "block_height": block.height,
                "block_hash": block.block_hash
            })
            stub = StubCollection().score_stubs[ChannelProperty().name]
            stub.sync_task().remove_precommit_state(invoke_fail_info)
            return True

    def get_object_has_queue_by_consensus(self):
        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            object_has_queue = self.__consensus
        else:
            object_has_queue = self.__block_manager

        return object_has_queue
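
# A minimal sketch of driving the SCORE paths above from configuration.
# `channel_service` stands in for an instance of the class above (its name is
# outside this excerpt) and `block` for a loopchain Block -- both assumptions:
#
#   conf.USE_EXTERNAL_SCORE = True        # route invokes to icon_score_stubs
#   tx_results = channel_service.score_invoke(block)
#   if tx_results is not None:
#       channel_service.score_write_precommit_state(block)
#   else:
#       channel_service.score_remove_precommit_state(block)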
Exemplo n.º 10
0
class PeerService:
    """Peer Service 의 main Class
    outer 와 inner gRPC 인터페이스를 가진다.
    서비스 루프 및 공통 요소는 commonservice 를 통해서 처리한다.
    channel 관련 instance 는 channel manager 를 통해서 관리한다.
    """
    def __init__(self,
                 group_id=None,
                 radio_station_ip=None,
                 radio_station_port=None,
                 public_path=None,
                 private_path=None,
                 cert_pass=None):
        """Peer는 Radio Station 에 접속하여 leader 및 다른 Peer에 대한 접속 정보를 전달 받는다.

        :param group_id: Peer Group 을 구분하기 위한 ID, None 이면 Single Peer Group 이 된다. (peer_id is group_id)
        conf.PEER_GROUP_ID 를 사용하면 configure 파일에 저장된 값을 group_id 로 사용하게 된다.
        :param radio_station_ip: RS IP
        :param radio_station_port: RS Port
        :param public_path: Peer 인증서 디렉토리 경로
        :param private_path: Cert Private key
        :param cert_pass: Peer private key password
        :return:
        """
        if radio_station_ip is None:
            radio_station_ip = conf.IP_RADIOSTATION
        if radio_station_port is None:
            radio_station_port = conf.PORT_RADIOSTATION
        if public_path is None:
            public_path = conf.PUBLIC_PATH
        if private_path is None:
            private_path = conf.PRIVATE_PATH
        if cert_pass is None:
            cert_pass = conf.DEFAULT_PW

        util.logger.spam(f"Your Peer Service runs on debugging MODE!")
        util.logger.spam(
            f"You can see many terrible garbage logs just for debugging, R U Really want it?"
        )

        self.__send_to_process_thread = SendToProcess()

        self.__radio_station_target = radio_station_ip + ":" + str(
            radio_station_port)
        logging.info("Set Radio Station target is " +
                     self.__radio_station_target)

        self.__stub_to_radio_station = None

        self.__level_db = None
        self.__level_db_path = ""

        self.__peer_id = None
        self.__group_id = group_id
        if self.__group_id is None and conf.PEER_GROUP_ID != "":
            self.__group_id = conf.PEER_GROUP_ID

        self.__common_service = None
        self.__channel_manager: ChannelManager = None

        self.__rest_service = None
        self.__timer_service = TimerService()

        # TODO Remove .__score from the peer service; it is used only by the set chain code test. (remove after review)
        self.__score = None
        self.__peer_target = None
        self.__inner_target = None
        self.__peer_port = 0

        # For Send tx to leader
        self.__tx_process = None

        if conf.ENABLE_KMS:
            rand_table = self.__get_random_table()
            self.__auth = PeerAuthorization(rand_table=rand_table)
        else:
            self.__auth = PeerAuthorization(public_path, private_path,
                                            cert_pass)

        # gRPC service for Peer
        self.__inner_service = InnerService()
        self.__outer_service = OuterService()

        self.__reset_voter_in_progress = False

    @property
    def common_service(self):
        return self.__common_service

    @property
    def timer_service(self):
        return self.__timer_service

    @property
    def channel_manager(self):
        return self.__channel_manager

    @property
    def send_to_process_thread(self):
        return self.__send_to_process_thread

    @property
    def tx_process(self):
        return self.__tx_process

    @property
    def peer_target(self):
        return self.__peer_target

    @property
    def auth(self):
        return self.__auth

    @property
    def stub_to_radiostation(self) -> StubManager:
        if self.__stub_to_radio_station is None:
            self.__stub_to_radio_station = StubManager.get_stub_manager_to_server(
                self.__radio_station_target,
                loopchain_pb2_grpc.RadioStationStub,
                conf.CONNECTION_RETRY_TIMEOUT_TO_RS)

        return self.__stub_to_radio_station

    @property
    def peer_id(self):
        return self.__peer_id

    @property
    def group_id(self):
        if self.__group_id is None:
            self.__group_id = self.__peer_id
        return self.__group_id

    def __get_random_table(self) -> list:
        """request get rand_table to rs

        :return: rand_table from rs
        """
        try:
            response = self.stub_to_radiostation.call_in_time(
                "GetRandomTable", loopchain_pb2.CommonRequest(request=""))
            if response.response_code == message_code.Response.success:
                random_table = json.loads(response.message)
            else:
                util.exit_and_msg(f"get random table fail \n"
                                  f"cause by {response.message}")
            return random_table
        except Exception as e:
            util.exit_and_msg(f"get random table and init peer_auth fail \n"
                              f"cause by : {e}")

    def rotate_next_leader(self, channel_name):
        """Find Next Leader Id from peer_list and reset leader to that peer"""

        # logging.debug("rotate next leader...")
        util.logger.spam(f"peer_service:rotate_next_leader")
        peer_manager = self.__channel_manager.get_peer_manager(channel_name)
        next_leader = peer_manager.get_next_leader_peer(is_only_alive=True)

        # Check Next Leader is available...
        if next_leader is not None and next_leader.peer_id != self.peer_id:
            try:
                stub_manager = peer_manager.get_peer_stub_manager(next_leader)
                response = stub_manager.call(
                    "GetStatus",
                    loopchain_pb2.StatusRequest(request="get_leader_peer"),
                    is_stub_reuse=True)

                # It takes time for the peer to become the leader, so only check that it is reachable.
                # peer_status = json.loads(response.status)
                # if peer_status["peer_type"] != str(loopchain_pb2.BLOCK_GENERATOR):
                #     logging.warning("next rotate is not a leader")
                #     raise Exception

            except Exception as e:
                logging.warning(f"rotate next leader exceptions({e})")
                next_leader = peer_manager.leader_complain_to_rs(
                    conf.ALL_GROUP_ID)

        if next_leader is not None:
            self.reset_leader(next_leader.peer_id, channel_name)
        else:
            util.logger.warning(
                f"peer_service:rotate_next_leader next_leader is None({next_leader})"
            )

    def reset_leader(self, new_leader_id, channel: str):
        logging.info(
            f"RESET LEADER channel({channel}) leader_id({new_leader_id})")

        block_manager = self.__channel_manager.get_block_manager(channel)
        peer_manager = self.__channel_manager.get_peer_manager(channel)
        complained_leader = peer_manager.get_leader_peer()
        leader_peer = peer_manager.get_peer(new_leader_id, None)

        if leader_peer is None:
            logging.warning(
                f"in peer_service:reset_leader There is no peer by peer_id({new_leader_id})"
            )
            return

        util.logger.spam(
            f"peer_service:reset_leader target({leader_peer.target})")

        peer_manager.set_leader_peer(leader_peer, None)

        self_peer_object = peer_manager.get_peer(self.__peer_id)
        peer_leader = peer_manager.get_leader_peer()
        peer_type = loopchain_pb2.PEER

        if self_peer_object.target == peer_leader.target:
            util.change_log_color_set(True)
            logging.debug("Set Peer Type Leader!")
            peer_type = loopchain_pb2.BLOCK_GENERATOR
            block_manager.get_blockchain().reset_made_block_count()

            # TODO The code below is semantically redundant, but rewriting it to explicitly
            # wait for the leader change causes errors from delayed blockchain operation;
            # keep the version that has shown more stable test results for now.
            response = peer_manager.get_peer_stub_manager(
                self_peer_object).call("Request",
                                       loopchain_pb2.Message(
                                           code=message_code.Request.status,
                                           channel=channel),
                                       is_stub_reuse=True)

            peer_status = json.loads(response.meta)
            if peer_status['peer_type'] == str(loopchain_pb2.BLOCK_GENERATOR):
                is_broadcast = True
            else:
                is_broadcast = False

            peer_manager.announce_new_leader(complained_leader.peer_id,
                                             new_leader_id,
                                             is_broadcast=is_broadcast)
        else:
            util.change_log_color_set()
            logging.debug("Set Peer Type Peer!")
            # subscribe to the new leader
            self.__common_service.subscribe(
                channel=channel,
                subscribe_stub=peer_manager.get_peer_stub_manager(peer_leader),
                peer_type=loopchain_pb2.BLOCK_GENERATOR)

        # update candidate blocks
        block_manager.get_candidate_blocks().set_last_block(
            block_manager.get_blockchain().last_block)
        block_manager.set_peer_type(peer_type)

        if self.__tx_process is not None:
            # Drain the remaining jobs of peer_process (peer->leader case),
            # and update peer_process with the new leader info (peer->peer case).
            self.__tx_process_connect_to_leader(self.__tx_process,
                                                peer_leader.target)

    def show_peers(self, channel_name):
        logging.debug(f"peer_service:show_peers ({channel_name}): ")
        for peer in self.__channel_manager.get_peer_manager(
                channel_name).get_IP_of_peers_in_group():
            logging.debug("peer_target: " + peer)

    def service_stop(self):
        self.__channel_manager.stop_block_managers()
        self.__common_service.stop()

    def score_invoke(self, block, channel) -> dict:
        block_object = pickle.dumps(block)
        response = self.channel_manager.get_score_container_stub(channel).call(
            method_name="Request",
            message=loopchain_pb2.Message(
                code=message_code.Request.score_invoke, object=block_object),
            timeout=conf.SCORE_INVOKE_TIMEOUT,
            is_raise=True)
        # logging.debug("Score Server says: " + str(response))
        if response.code == message_code.Response.success:
            return json.loads(response.meta)

    def __connect_to_all_channel(self) -> bool:
        """connect to radiostation with all channel

        :return: is radiostation connected
        """
        response = self.__get_channel_infos()
        is_radiostation_connected = response is not None

        if is_radiostation_connected:
            logging.info(f"Connect to channels({response.channel_infos})")
            channels = json.loads(response.channel_infos)
            score_container_port_diff = 0

            for channel in list(channels.keys()):
                logging.debug(f"Try join channel({channel})")
                self.__channel_manager.load_block_manager(peer_id=self.peer_id,
                                                          channel=channel)
                self.__channel_manager.load_peer_manager(channel=channel)

                is_score_container_loaded = self.__channel_manager.load_score_container_each(
                    channel_name=channel,
                    score_package=channels[channel]["score_package"],
                    container_port=self.__peer_port +
                    conf.PORT_DIFF_SCORE_CONTAINER + score_container_port_diff,
                    peer_target=self.__peer_target)

                if is_score_container_loaded is False:
                    util.exit_and_msg(
                        f"peer_service:__connect_to_all_channel score container load Fail ({channel})"
                    )

                score_container_port_diff = score_container_port_diff + conf.PORT_DIFF_BETWEEN_SCORE_CONTAINER
                response = self.connect_to_radiostation(channel=channel)
                if response is not None:
                    self.__channel_manager.save_peer_manager(
                        self.__channel_manager.get_peer_manager(channel),
                        channel)

        return is_radiostation_connected

    def __get_channel_infos(self):
        response = self.stub_to_radiostation.call_in_times(
            method_name="GetChannelInfos",
            message=loopchain_pb2.GetChannelInfosRequest(
                peer_id=self.__peer_id,
                peer_target=self.__peer_target,
                group_id=self.group_id,
                cert=self.__auth.get_public_der()),
            retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS,
            is_stub_reuse=True,
            timeout=conf.CONNECTION_TIMEOUT_TO_RS)

        return response

    def connect_to_radiostation(
            self,
            channel: str,
            is_reconnect: bool = False) -> loopchain_pb2.ConnectPeerReply:
        """connect to radiostation with channel

        :return: connection info, or None on failure
        """
        logging.debug(f"try to connect to radiostation channel({channel})")

        if self.stub_to_radiostation is None:
            logging.warning("fail make stub to Radio Station!!")
            return None

        # common part
        response = self.stub_to_radiostation.call_in_times(
            method_name="ConnectPeer",
            message=loopchain_pb2.ConnectPeerRequest(
                channel=channel,
                peer_object=b'',
                peer_id=self.__peer_id,
                peer_target=self.__peer_target,
                group_id=self.group_id,
                cert=self.__auth.get_public_der()),
            retry_times=conf.CONNECTION_RETRY_TIMES_TO_RS,
            is_stub_reuse=True,
            timeout=conf.CONNECTION_TIMEOUT_TO_RS)

        if not is_reconnect:
            if response is not None and response.status == message_code.Response.success:
                peer_list_data = pickle.loads(response.peer_list)
                self.__channel_manager.get_peer_manager(channel).load(
                    peer_list_data, False)
                logging.debug("peer list update: " +
                              self.__channel_manager.get_peer_manager(
                                  channel).get_peers_for_debug())
            else:
                logging.debug("using local peer list: " +
                              self.__channel_manager.get_peer_manager(
                                  channel).get_peers_for_debug())

        return response

    def add_unconfirm_block(self, block_unloaded, channel_name=None):
        if channel_name is None:
            channel_name = conf.LOOPCHAIN_DEFAULT_CHANNEL

        block = pickle.loads(block_unloaded)
        block_hash = block.block_hash

        response_code, response_msg = message_code.get_response(
            message_code.Response.fail_validate_block)

        # validate the block
        block_is_validated = False
        try:
            block_is_validated = Block.validate(block)
        except Exception as e:
            logging.error(e)

        if block_is_validated:
            # On receiving a broadcast, validate the block; once validated,
            # register it as an unconfirmed block in the local blockchain.
            confirmed, reason = \
                self.__channel_manager.get_block_manager(channel_name).get_blockchain().add_unconfirm_block(block)

            if confirmed:
                response_code, response_msg = message_code.get_response(
                    message_code.Response.success_validate_block)
            elif reason == "block_height":
                # If the announced block height differs from ours, retry the block height sync.
                self.__channel_manager.get_block_manager(
                    channel_name).block_height_sync()

        return response_code, response_msg, block_hash
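
    # Return contract of add_unconfirm_block, as implemented above:
    #   valid and registered      -> Response.success_validate_block
    #   valid but height mismatch -> fail_validate_block (a height sync is kicked off)
    #   invalid                   -> fail_validate_block
    # block_hash is always returned so the caller can correlate the result.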

    def __tx_process_connect_to_leader(self, peer_process, leader_target):
        logging.debug("try... Peer Process connect_to_leader: " +
                      leader_target)
        logging.debug("peer_process: " + str(peer_process))
        peer_process.send_to_process(
            (BroadcastProcess.CONNECT_TO_LEADER_COMMAND, leader_target))
        peer_process.send_to_process(
            (BroadcastProcess.SUBSCRIBE_COMMAND, leader_target))

    def __run_tx_process(self, inner_channel_info):
        tx_process = BroadcastProcess("Tx Process")
        tx_process.start()
        tx_process.send_to_process(("status", ""))

        wait_times = 0
        wait_for_process_start = None

        # TODO The process wait loop was restored and its timing adjusted; verify that
        # the tx process still starts on AWS infra in this state.
        # time.sleep(conf.WAIT_SECONDS_FOR_SUB_PROCESS_START)

        while wait_for_process_start is None:
            time.sleep(conf.SLEEP_SECONDS_FOR_SUB_PROCESS_START)
            logging.debug(f"wait start tx process....")
            wait_for_process_start = tx_process.get_receive("status")
            wait_times += 1  # count retries so the loop cannot spin forever

            if wait_for_process_start is None and wait_times > conf.WAIT_SUB_PROCESS_RETRY_TIMES:
                util.exit_and_msg("Tx Process start failed!")

        logging.debug(f"Tx Process start({wait_for_process_start})")
        tx_process.send_to_process(
            (BroadcastProcess.MAKE_SELF_PEER_CONNECTION_COMMAND,
             inner_channel_info))

        return tx_process
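
    # Commands observed flowing into the tx BroadcastProcess in this class,
    # all sent as (command, payload) tuples through send_to_process:
    #
    #   ("status", "")                                        # liveness probe
    #   (BroadcastProcess.MAKE_SELF_PEER_CONNECTION_COMMAND, inner_channel_info)
    #   (BroadcastProcess.CONNECT_TO_LEADER_COMMAND, leader_target)
    #   (BroadcastProcess.SUBSCRIBE_COMMAND, leader_target)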

    def __stop_tx_process(self):
        if self.__tx_process is not None:
            self.__tx_process.stop()
            self.__tx_process.wait()

    def reset_voter_count(self):
        """peer_list 의 활성화 상태(gRPC 응답)을 갱신하여 voter 수를 변경한다.

        :return:
        """
        # if self.__reset_voter_in_progress is not True:
        #     self.__reset_voter_in_progress = True
        #     logging.debug("reset voter count before: " +
        #                   str(ObjectManager().peer_service.peer_manager.get_peer_count()))
        #
        #     # TODO Iterate over peer_list and remove_audience any peer that fails gRPC.
        #     self.__channel_manager.get_peer_manager(
        #         conf.LOOPCHAIN_DEFAULT_CHANNEL).reset_peers(None, self.__common_service.remove_audience)
        #     logging.debug("reset voter count after: " +
        #                   str(ObjectManager().peer_service.peer_manager.get_peer_count()))
        #     self.__reset_voter_in_progress = False
        pass

    def set_chain_code(self, score):
        """Score를 패스로 전달하지 않고 (serve(...)의 score 는 score 의 파일 Path 이다.)
        Object 를 직접 할당하기 위한 인터페이스로 serve 호출전에 지정되어야 한다.

        :param score: score Object
        """
        # TODO Currently used only for tests. Remove after review.
        self.__score = score

        # TODO The three lines below look removable; please review and delete them in the next merge. assign to @godong
        self.__score_info = dict()
        score_info = message_code.MetaParams.ScoreInfo
        self.__score_info[score_info.score_id] = self.__score.id()
        self.__score_info[score_info.score_version] = self.__score.version()

    def __port_init(self, port):
        # service initialization
        self.__peer_target = util.get_private_ip() + ":" + str(port)
        self.__inner_target = conf.IP_LOCAL + ":" + str(port)
        self.__peer_port = int(port)

        # SCORE Service check Using Port
        # check Port Using
        if util.check_port_using(conf.IP_PEER,
                                 int(port) + conf.PORT_DIFF_SCORE_CONTAINER):
            util.exit_and_msg('Score Service Port is Using ' +
                              str(int(port) + conf.PORT_DIFF_SCORE_CONTAINER))

    def __run_inner_services(self, port):
        if conf.ENABLE_REST_SERVICE:
            if conf.USE_GUNICORN_HA_SERVER:
                # Run web app on gunicorn in another process.
                new_rest_port = int(
                    port) + conf.PORT_DIFF_REST_SERVICE_CONTAINER
                logging.debug(
                    f'Launch gunicorn proxy server. Port = {new_rest_port}')
                subprocess.Popen(
                    ['python3', './rest_proxy.py', '-p', str(port)])
            else:
                # Run web app as it is.
                logging.debug(f'Launch Flask RESTful server. Port = {port}')
                self.__rest_service = RestService(int(port))

    def __make_peer_id(self):
        """네트워크에서 Peer 를 식별하기 위한 UUID를 level db 에 생성한다.
        """
        try:
            uuid_bytes = bytes(
                self.__level_db.Get(conf.LEVEL_DB_KEY_FOR_PEER_ID))
            peer_id = uuid.UUID(bytes=uuid_bytes)
        except KeyError:  # It's first Run
            peer_id = None

        if peer_id is None:
            peer_id = uuid.uuid1()
            logging.info("make new peer_id: " + str(peer_id))
            self.__level_db.Put(conf.LEVEL_DB_KEY_FOR_PEER_ID, peer_id.bytes)

        self.__peer_id = str(peer_id)
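
    # Round trip of the stored peer id, per the Get/Put calls above:
    # uuid.uuid1().bytes is written once, read back on later starts, and
    # parsed with uuid.UUID(bytes=...), so str(peer_id) stays stable across
    # restarts. A standalone sketch:
    #
    #   import uuid
    #   first = uuid.uuid1()
    #   again = uuid.UUID(bytes=first.bytes)
    #   assert str(first) == str(again)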

    def timer_test_callback_function(self, message):
        logging.debug(f'timer test callback function :: ({message})')

    def __block_height_sync_channel(self, channel_name):
        # If this peer did not start as a leader but holds the leader peer info,
        # run a block height sync to find the leader of the latest block.
        block_sync_target_stub = None
        peer_manager = self.__channel_manager.get_peer_manager(channel_name)
        peer_leader = peer_manager.get_leader_peer()
        self_peer_object = peer_manager.get_peer(self.__peer_id)
        is_delay_announce_new_leader = False
        peer_old_leader = None

        if peer_leader.target != self.__peer_target:
            block_sync_target_stub = StubManager.get_stub_manager_to_server(
                peer_leader.target,
                loopchain_pb2_grpc.PeerServiceStub,
                time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT)

            if block_sync_target_stub is None:
                logging.warning(
                    "You may be behind this network... or this network has no leader!"
                )

                # TODO In this situation, file a leader complaint to the RS
                is_delay_announce_new_leader = True
                peer_old_leader = peer_leader
                peer_leader = self.__channel_manager.get_peer_manager(
                    channel_name).leader_complain_to_rs(
                        conf.ALL_GROUP_ID, is_announce_new_peer=False)

                if peer_leader is not None:
                    block_sync_target_stub = StubManager.get_stub_manager_to_server(
                        peer_leader.target,
                        loopchain_pb2_grpc.PeerServiceStub,
                        time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT)

            if peer_leader is None or peer_leader.peer_id == self.__peer_id:
                peer_leader = self_peer_object
                self.__channel_manager.get_block_manager(
                    channel_name).set_peer_type(loopchain_pb2.BLOCK_GENERATOR)
            else:
                self.__channel_manager.get_block_manager(
                    channel_name).block_height_sync(block_sync_target_stub)
                # # TODO Re-evaluate the logic that infers the leader from the last block after the leader complain algorithm is revised.
                # last_block_peer_id = self.__channel_manager.get_block_manager().get_blockchain().last_block.peer_id
                #
                # if last_block_peer_id != "" and last_block_peer_id != self.__peer_list.get_leader_peer().peer_id:
                #     logging.debug("make leader stub after block height sync...")
                #     new_leader_peer = self.__peer_list.get_peer(last_block_peer_id)
                #
                #     if new_leader_peer is None:
                #         new_leader_peer = self.__peer_list.leader_complain_to_rs(conf.ALL_GROUP_ID)
                #
                #     self.__peer_list.set_leader_peer(new_leader_peer, None)
                #     # TODO If the leader equals next_leader_peer above, the stub gets reset; verify this causes no problems.
                #     peer_leader = new_leader_peer
                # else:

                if block_sync_target_stub is None:
                    util.exit_and_msg("Fail connect to leader!!")

                self.show_peers(channel_name)

            if block_sync_target_stub is not None:
                self.__common_service.subscribe(channel_name,
                                                block_sync_target_stub,
                                                loopchain_pb2.BLOCK_GENERATOR)

            if is_delay_announce_new_leader:
                self.__channel_manager.get_peer_manager(
                    channel_name).announce_new_leader(peer_old_leader.peer_id,
                                                      peer_leader.peer_id)

    def __start_base_services(self, score):
        """start base services >> common_service, channel_manager, tx_process

        :param score:
        :return:
        """
        inner_service_port = conf.PORT_INNER_SERVICE or (
            self.__peer_port + conf.PORT_DIFF_INNER_SERVICE)

        self.__common_service = CommonService(loopchain_pb2,
                                              inner_service_port)

        self.__channel_manager = ChannelManager(
            common_service=self.__common_service,
            level_db_identity=self.__peer_target)

        self.__tx_process = self.__run_tx_process(
            inner_channel_info=conf.IP_LOCAL + ":" + str(inner_service_port))

    def serve(self, port, score=None):
        """start func of Peer Service ===================================================================

        :param port:
        :param score:
        """
        if score is None:
            score = conf.DEFAULT_SCORE_PACKAGE

        stopwatch_start = timeit.default_timer()
        peer_type = loopchain_pb2.PEER

        is_all_service_safe_start = True

        self.__port_init(port)
        self.__level_db, self.__level_db_path = util.init_level_db(
            self.__peer_target)
        self.__make_peer_id()
        self.__run_inner_services(port)
        self.__start_base_services(score=score)

        is_radiostation_connected = self.__connect_to_all_channel()

        if is_radiostation_connected is False:
            util.exit_and_msg(
                "There is no peer_list, initial network is not allowed without RS!"
            )

        # start timer service.
        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            self.__timer_service.start()

        # TODO LOOPCHAIN-61 load the certificate
        _cert = None
        # TODO LOOPCHAIN-61 load the certificate key
        _private_key = None
        # TODO request authentication info

        for channel in self.__channel_manager.get_channel_list():
            peer_leader = self.__channel_manager.get_peer_manager(
                channel).get_leader_peer(is_complain_to_rs=True)
            logging.debug(f"channel({channel}) peer_leader: " +
                          str(peer_leader))

            # TODO Consider using leader complain, rather than this conditional check, to become leader.
            # This peer becomes the block generator when it is the only connected PEER in peer_list
            # or when it matches the leader info held by the RS.
            if self.__peer_id == peer_leader.peer_id:
                connected_peer_count = self.__channel_manager.get_peer_manager(
                    channel).get_connected_peer_count(None)
                if is_radiostation_connected or connected_peer_count == 1:
                    util.change_log_color_set(True)
                    logging.debug(f"Set Peer Type Leader! channel({channel})")
                    peer_type = loopchain_pb2.BLOCK_GENERATOR

            # load score must happen after the score service has started and before block height sync begins.
            # is_all_service_safe_start &= self.__load_score(score)

            if peer_type == loopchain_pb2.BLOCK_GENERATOR:
                self.__channel_manager.get_block_manager(
                    channel).set_peer_type(peer_type)
            elif peer_type == loopchain_pb2.PEER:
                self.__block_height_sync_channel(channel)

            if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
                self.__common_service.update_audience(
                    self.channel_manager.get_peer_manager().dump())

        loopchain_pb2_grpc.add_PeerServiceServicer_to_server(
            self.__outer_service, self.__common_service.outer_server)
        loopchain_pb2_grpc.add_InnerServiceServicer_to_server(
            self.__inner_service, self.__common_service.inner_server)
        logging.info("Start peer service at port: " + str(port))

        self.__channel_manager.start_block_managers()
        self.__common_service.start(port, self.__peer_id, self.__group_id)

        if self.stub_to_radiostation is not None:
            for channel in self.__channel_manager.get_channel_list():
                self.__common_service.subscribe(
                    channel=channel, subscribe_stub=self.stub_to_radiostation)

        for channel in self.__channel_manager.get_channel_list():
            channel_leader = self.__channel_manager.get_peer_manager(
                channel).get_leader_peer()
            if channel_leader is not None:
                util.logger.spam(
                    f"connnect to channel({channel}) leader({channel_leader.target})"
                )
                self.__tx_process_connect_to_leader(self.__tx_process,
                                                    channel_leader.target)

        self.__send_to_process_thread.set_process(self.__tx_process)
        self.__send_to_process_thread.start()

        stopwatch_duration = timeit.default_timer() - stopwatch_start
        logging.info(
            f"Start Peer Service start duration({stopwatch_duration})")

        # wait for the service to stop.
        if is_all_service_safe_start:
            self.__common_service.wait()
        else:
            self.service_stop()

        self.__send_to_process_thread.stop()
        self.__send_to_process_thread.wait()

        if self.__timer_service.is_run():
            self.__timer_service.stop()
            self.__timer_service.wait()

        logging.info("Peer Service Ended.")
        self.__channel_manager.stop_score_containers()
        if self.__rest_service is not None:
            self.__rest_service.stop()
        self.__stop_tx_process()
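
# A minimal usage sketch for the PeerService above. The port number and
# addresses are illustrative assumptions, not values from this listing:
#
#   service = PeerService(radio_station_ip="127.0.0.1", radio_station_port=7102)
#   service.serve(port=7100)   # blocks in common_service.wait() until stopped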