Example #1
    def __find_last_height_peer(self, group_id):
        # Apply list() to copy the values before iterating (to avoid errors when the dict changes mid-iteration).
        most_height = 0
        most_height_peer = None
        for peer_id in list(self.peer_list[group_id]):
            peer_each = self.peer_list[group_id][peer_id]
            stub_manager = self.get_peer_stub_manager(peer_each, group_id)
            try:
                response = stub_manager.call(
                    "GetStatus",
                    loopchain_pb2.StatusRequest(
                        request="reset peers in group"),
                    is_stub_reuse=True)

                peer_status = json.loads(response.status)
                if int(peer_status["block_height"]) >= most_height:
                    most_height = int(peer_status["block_height"])
                    most_height_peer = peer_each
            except Exception as e:
                logging.warning("gRPC Exception: " + str(e))

        if len(self.peer_list[group_id]) == 0 and group_id != conf.ALL_GROUP_ID:
            del self.peer_list[group_id]

        return most_height_peer
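A minimal, self-contained sketch of the selection rule above, with plain dicts standing in for the peer objects: because the comparison is >=, ties on block height resolve to the peer iterated last.

heights = {"peer_a": 10, "peer_b": 12, "peer_c": 12}  # illustrative peer_id -> block height
most_height, most_height_peer = 0, None
for peer_id, height in heights.items():
    if height >= most_height:  # ties go to the last peer seen
        most_height, most_height_peer = height, peer_id
assert most_height_peer == "peer_c"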
Example #2
def run_radio_station_as_process_and_stub(port):
    process = run_radio_station_as_process(port)
    channel = grpc.insecure_channel('localhost:' + str(port))
    stub = loopchain_pb2_grpc.RadioStationStub(channel)
    util.request_server_in_time(stub.GetStatus,
                                loopchain_pb2.StatusRequest(request=""))
    return process, stub
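A usage sketch for this test helper, assuming the returned process behaves like a multiprocessing.Process (terminate/join are assumptions about the helper's return type; the port is illustrative):

port = 7102  # illustrative port
process, stub = run_radio_station_as_process_and_stub(port)
try:
    response = stub.GetStatus(loopchain_pb2.StatusRequest(request=""))
    print(response.status)
finally:
    process.terminate()  # assumption: process-like object
    process.join()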
Example #3
    def complain_leader(self, group_id=conf.ALL_GROUP_ID, is_announce=False):
        """When current leader is offline, Find last height alive peer and set as a new leader.

        :param complain_peer:
        :param group_id:
        :param is_announce:
        :return:
        """
        leader_peer = self.get_leader_peer(group_id=group_id, is_peer=False)
        try:
            stub_manager = self.get_peer_stub_manager(leader_peer, group_id)
            response = stub_manager.call("GetStatus", loopchain_pb2.StatusRequest(request=""), is_stub_reuse=True)

            status_json = json.loads(response.status)
            logging.warning(f"stub_manager target({stub_manager.target}) type({status_json['peer_type']})")

            if status_json["peer_type"] == str(loopchain_pb2.BLOCK_GENERATOR):
                return leader_peer
            else:
                raise Exception
        except Exception as e:
            new_leader = self.__find_last_height_peer(group_id=group_id)
            if new_leader is not None:
                # The changed leader must be announced.
                logging.warning("Change leader to the peer that complained about the old leader.")
                self.set_leader_peer(new_leader, None)
                if is_announce is True:
                    self.announce_new_leader(
                        complained_leader_id=new_leader.peer_id, new_leader_id=new_leader.peer_id,
                        is_broadcast=True
                    )
        return new_leader
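A hedged usage sketch, assuming peer_manager is an instance of the class above; a None result means no alive peer could be promoted:

new_leader = peer_manager.complain_leader(group_id=conf.ALL_GROUP_ID, is_announce=True)
if new_leader is None:
    logging.error("no alive peer found to promote to leader")
else:
    logging.info(f"leader confirmed or elected: {new_leader.peer_id}")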
Example #4
    def rotate_next_leader(self):
        """Find Next Leader Id from peer_list and reset leader to that peer

        :return:
        """

        # logging.debug("rotate next leader...")
        next_leader = self.__peer_manager.get_next_leader_peer(
            is_only_alive=True)

        # Check whether the next leader is available...
        if next_leader is not None and next_leader.peer_id != self.peer_id:
            try:
                stub_manager = self.__peer_manager.get_peer_stub_manager(
                    next_leader)
                response = stub_manager.call(
                    "GetStatus",
                    loopchain_pb2.StatusRequest(request="get_leader_peer"),
                    is_stub_reuse=False)

                # A peer needs time to become the leader, so only check that it is reachable.
                # peer_status = json.loads(response.status)
                # if peer_status["peer_type"] != str(loopchain_pb2.BLOCK_GENERATOR):
                #     logging.warning("next rotate is not a leader")
                #     raise Exception

            except Exception as e:
                logging.warning(f"rotate next leader exceptions({e})")
                next_leader = self.__peer_manager.leader_complain_to_rs(
                    conf.ALL_GROUP_ID)

        if next_leader is not None:
            self.reset_leader(next_leader.peer_id)
Example #5
def menu4_1(params=None):
    admin_manager = AdminManager("demotool")

    print("\nInput Peer Target [IP]:[port] (default '' -> 127.0.0.1:7100, [port] -> 127.0.0.1:[port])")
    choice = input(" >>  ")
    if choice == "":
        choice = "127.0.0.1:7100"
    elif choice.find(':') == -1:
        choice = "127.0.0.1:" + choice

    channel_list = admin_manager.get_channel_list()
    select_channel_string = ", ".join(
        f"{index}: {channel}" for index, channel in enumerate(channel_list))

    print(f"Select Channel ({select_channel_string})")
    channel_choice = input(" >>  ")
    try:
        test_globals["channel_name"] = channel_list[int(channel_choice)]
    except Exception as e:
        print(f"wrong channel number! Now use default channel({channel_list[0]})\n")
        test_globals["channel_name"] = channel_list[0]

    print("your input: " + choice)
    channel = grpc.insecure_channel(choice)
    peer_stub = loopchain_pb2_grpc.PeerServiceStub(channel)
    response = peer_stub.GetStatus(loopchain_pb2.StatusRequest(request="hello"), conf.GRPC_TIMEOUT)
    print("Peer Status: " + str(response))
    menu4(peer_stub)
Example #6
def run_radio_station_as_process_and_stub_manager(port):
    process = run_radio_station_as_process(port)
    stub_manager = StubManager.get_stub_manager_to_server(
        'localhost:' + str(port), loopchain_pb2_grpc.RadioStationStub)
    util.request_server_in_time(stub_manager.stub.GetStatus,
                                loopchain_pb2.StatusRequest(request=""))
    return process, stub_manager
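A usage sketch contrasting this helper with the raw-stub variant in Example #2; the call pattern mirrors the stub_manager.call("GetStatus", ...) usage elsewhere on this page (the port is illustrative, and the process-like return type is an assumption):

process, stub_manager = run_radio_station_as_process_and_stub_manager(7102)
try:
    response = stub_manager.call("GetStatus", loopchain_pb2.StatusRequest(request=""))
    print(response.status)
finally:
    process.terminate()  # assumption: process-like object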
Example #7
    def reset_leader(self, new_leader_id):
        logging.warning("RESET LEADER: " + str(new_leader_id))

        complained_leader = self.__peer_manager.get_leader_peer()

        leader_peer = self.__peer_manager.get_peer(new_leader_id, None)
        if leader_peer is None:
            logging.warning(
                f"in peer_service::reset_leader There is no peer by peer_id({new_leader_id})"
            )
            return

        self.__peer_manager.set_leader_peer(leader_peer, None)

        self.__peer_object = self.__peer_manager.get_peer(self.peer_id)
        peer_leader = self.__peer_manager.get_leader_peer()

        if self.__peer_object.target == peer_leader.target:
            logging.debug("Set Peer Type Block Generator!")
            self.__peer_type = loopchain_pb2.BLOCK_GENERATOR
            self.__block_manager.get_blockchain().reset_made_block_count()

            # TODO The code below is semantically redundant. However, rewriting it explicitly as code
            # that waits for the leader change causes errors from delayed blockchain processing.
            # For now, keep the version that has shown more stable test results.
            response = self.peer_list.get_peer_stub_manager(
                self.__peer_object).call(
                    "GetStatus",
                    loopchain_pb2.StatusRequest(request="reset_leader"),
                    is_stub_reuse=True)

            status_json = json.loads(response.status)
            if status_json['peer_type'] == str(loopchain_pb2.BLOCK_GENERATOR):
                is_broadcast = True
            else:
                is_broadcast = False

            self.__peer_manager.announce_new_leader(complained_leader.peer_id,
                                                    new_leader_id,
                                                    is_broadcast=is_broadcast)
        else:
            logging.debug("Set Peer Type Peer!")
            self.__peer_type = loopchain_pb2.PEER
            self.__stub_to_blockgenerator = self.__peer_manager.get_peer_stub_manager(
                peer_leader)
            # Subscribe to the new leader
            self.__common_service.subscribe(self.__stub_to_blockgenerator,
                                            loopchain_pb2.BLOCK_GENERATOR)

        self.__common_service.set_peer_type(self.__peer_type)
        # update candidate blocks
        self.__block_manager.get_candidate_blocks().set_last_block(
            self.__block_manager.get_blockchain().last_block)
        self.__block_manager.set_peer_type(self.__peer_type)

        if self.__tx_process is not None:
            # Handle the remaining jobs of peer_process (the peer->leader case),
            # and update the leader info of peer_process (the peer->peer case).
            self.__tx_process_connect_to_leader(self.__tx_process,
                                                peer_leader.target)
Example #8
def run_radio_station_as_process_and_stub_manager(port, timeout=None):
    process = run_radio_station_as_process(port)
    stub_manager = StubManager(f"localhost:{port}",
                               loopchain_pb2_grpc.RadioStationStub,
                               conf.GRPC_SSL_TYPE)
    util.request_server_in_time(stub_manager.stub.GetStatus,
                                loopchain_pb2.StatusRequest(request=""))
    return process, stub_manager
Example #9
    def __get_peer_stub_list(self):
        """It updates peer list for block manager refer to peer list on the loopchain network.
        This peer list is not same to the peer list of the loopchain network.

        :return max_height: a height of current blockchain
        :return peer_stubs: current peer list on the loopchain network
        """
        max_height = -1  # current max height
        unconfirmed_block_height = -1
        peer_stubs = []  # peer stub list for block height synchronization

        if not ObjectManager().channel_service.is_support_node_function(
                conf.NodeFunction.Vote):
            rest_stub = ObjectManager().channel_service.radio_station_stub
            peer_stubs.append(rest_stub)
            last_block = rest_stub.call("GetLastBlock")
            max_height = self.__blockchain.block_versioner.get_height(
                last_block)

            return max_height, unconfirmed_block_height, peer_stubs

        # Make Peer Stub List [peer_stub, ...] and get max_height of network
        peer_target = ChannelProperty().peer_target
        peer_manager = ObjectManager().channel_service.peer_manager
        target_dict = peer_manager.get_IP_of_peers_dict()
        target_list = [
            peer_target for peer_id, peer_target in target_dict.items()
            if peer_id != ChannelProperty().peer_id
        ]

        for target in target_list:
            if target != peer_target:
                logging.debug(f"try to target({target})")
                channel = GRPCHelper().create_client_channel(target)
                stub = loopchain_pb2_grpc.PeerServiceStub(channel)
                try:
                    response = stub.GetStatus(
                        loopchain_pb2.StatusRequest(
                            request="",
                            channel=self.__channel_name,
                        ), conf.GRPC_TIMEOUT_SHORT)

                    response.block_height = max(
                        response.block_height,
                        response.unconfirmed_block_height)

                    if response.block_height > max_height:
                        # Add peer as higher than this
                        max_height = response.block_height
                        unconfirmed_block_height = response.unconfirmed_block_height
                        peer_stubs.append(stub)

                except Exception as e:
                    logging.warning(
                        f"This peer has already been removed from the block height target node. {e}"
                    )

        return max_height, unconfirmed_block_height, peer_stubs
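A sketch of how the returned triple might be consumed inside the same block manager class (the loop body is illustrative, not the project's actual sync code):

max_height, unconfirmed_height, peer_stubs = self.__get_peer_stub_list()
my_height = self.__blockchain.block_height
while my_height < max_height and peer_stubs:
    # request block my_height + 1 from one of peer_stubs and add it to the chain (illustrative)
    my_height += 1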
Example #10
    def __get_next_peer(self, peer, group_id=None, is_only_alive=False):
        if peer is None:
            return None

        if group_id is None:
            group_id = conf.ALL_GROUP_ID

        order_list = list(self.peer_order_list[group_id].keys())
        order_list.sort()

        # logging.debug("order list: " + str(order_list))
        # logging.debug("peer.order: " + str(peer.order))

        peer_order_position = order_list.index(peer.order)
        next_order_position = peer_order_position + 1
        peer_count = len(order_list)

        for i in range(peer_count):
            # Prevent out of range
            if next_order_position >= peer_count:
                next_order_position = 0

            # When 'is_only_alive' is False, it does not matter whether the peer status is connected.
            if not is_only_alive:
                break

            peer_order = order_list[next_order_position]
            peer_id = self.peer_order_list[group_id][peer_order]
            peer_each = self.peer_list[group_id][peer_id]

            # If 'is_only_alive' is True and the stored status is connected, check the peer's real-time status.
            if is_only_alive and peer_each.status == PeerStatus.connected:

                next_peer_id = self.peer_order_list[group_id][order_list[next_order_position]]
                leader_peer = self.peer_list[group_id][next_peer_id]
                stub_manager = self.get_peer_stub_manager(leader_peer)

                response = stub_manager.call_in_times(
                    "GetStatus",
                    loopchain_pb2.StatusRequest(request="peer_list.__get_next_peer"),
                    conf.GRPC_TIMEOUT
                )

                # If there is no response, advance 'next_order_position' to check the next peer.

                if response is not None and response.status != "":
                    break

            next_order_position += 1

        try:
            next_peer_id = self.peer_order_list[group_id][order_list[next_order_position]]
            logging.debug("next_leader_peer_id: " + str(next_peer_id))
            return self.peer_list[group_id][next_peer_id]
        except IndexError as e:
            logging.warning(f"there is no next peer ({e})")
            return None
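The wraparound above can also be written with modular arithmetic; a minimal sketch of the same ring traversal, with illustrative stand-ins for the peer structures:

order_list = [1, 2, 5, 8]       # sorted peer orders
alive = {2, 8}                  # orders of peers that answered GetStatus
position = order_list.index(5)  # current peer's position in the ring
next_order = None
for step in range(1, len(order_list) + 1):
    candidate = order_list[(position + step) % len(order_list)]
    if candidate in alive:      # stand-in for the real-time GetStatus check
        next_order = candidate
        break
assert next_order == 8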
Example #11
    def get_peer_status(self):
        """
        Score 에서 peer의 정보를 요청 한다

        :return: peer의 정보
        """
        response = self.__stub_to_peer_service.GetStatus(
            loopchain_pb2.StatusRequest(request="ScoreService.get_peer_status"), conf.GRPC_TIMEOUT)
        logging.debug("GET PEER STATUS IN Score Service %s", response)
        return response
Example #12
    def get_status(self, channel: str):
        response = self.call("GetStatus",
                             loopchain_pb2.StatusRequest(request="", channel=channel),
                             self.REST_GRPC_TIMEOUT)
        status_json_data = json.loads(response.status)
        status_json_data['block_height'] = response.block_height
        status_json_data['unconfirmed_block_height'] = response.unconfirmed_block_height
        status_json_data['total_tx'] = response.total_tx
        status_json_data['leader_complaint'] = response.is_leader_complaining

        return status_json_data
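A usage sketch, assuming stub is an instance of the class this method belongs to (the channel name is illustrative):

status = stub.get_status(channel="icon_dex")
print(status["block_height"], status["unconfirmed_block_height"])
print("leader complaint in progress:", status["leader_complaint"])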
Example #13
def run_peer_server_as_process_and_stub(
        port,
        radiostation_port=conf.PORT_RADIOSTATION,
        group_id=None,
        score=None):
    process = run_peer_server_as_process(port, radiostation_port, group_id,
                                         score)
    channel = grpc.insecure_channel('localhost:' + str(port))
    stub = loopchain_pb2_grpc.PeerServiceStub(channel)
    util.request_server_in_time(stub.GetStatus,
                                loopchain_pb2.StatusRequest(request=""))
    return process, stub
Example #14
def menu4_7(params):

    print("Input monitoring interval seconds (default: 1)")
    choice = input(" >>  ")
    if choice == "":
        choice = 1

    peer_stub = params[0]
    try:
        while True:
            response = peer_stub.GetStatus(loopchain_pb2.StatusRequest(request="GetStatus"), conf.GRPC_TIMEOUT)
            print("Peer Status: " + str(response))
            print("this is monitoring loop (if you want exit make KeyboardInterrupt(ctrl+c)...)")
            time.sleep(int(choice))
    except KeyboardInterrupt:
        menu4(peer_stub)
Example #15
    def check_peer_status(self, group_id=conf.ALL_GROUP_ID):
        delete_peer_list = []
        alive_peer_last = None
        check_leader_peer_count = 0
        for peer_id in list(self.peer_list[group_id]):
            peer_each = self.peer_list[group_id][peer_id]
            stub_manager = self.get_peer_stub_manager(peer_each, group_id)
            try:
                response = stub_manager.call(
                    "GetStatus",
                    loopchain_pb2.StatusRequest(request=""),
                    is_stub_reuse=True)
                if response is None:
                    raise Exception
                peer_each.status = PeerStatus.connected
                peer_status = json.loads(response.status)
                # logging.debug(f"Check Peer Status ({peer_status['peer_type']})")
                if peer_status["peer_type"] == "1":
                    check_leader_peer_count += 1
                alive_peer_last = peer_each
            except Exception as e:
                logging.warning("there is disconnected peer peer_id(" +
                                peer_each.peer_id + ") gRPC Exception: " +
                                str(e))
                peer_each.status = PeerStatus.disconnected
                logging.debug(
                    f"diff mins {util.datetime_diff_in_mins(peer_each.status_update_time)}"
                )
                # if util.datetime_diff_in_mins(peer_each.status_update_time) >= conf.TIMEOUT_PEER_REMOVE_IN_LIST:
                #     logging.debug(f"peer status update time: {peer_each.status_update_time}")
                #     logging.debug(f"this peer will remove {peer_each.peer_id}")
                #     self.remove_peer(peer_each.peer_id, peer_each.group_id)
                #     delete_peer_list.append(peer_each)

        logging.debug(f"Leader Peer Count: ({check_leader_peer_count})")
        if check_leader_peer_count != 1:
            # reset network leader by RS
            if alive_peer_last is not None:
                self.set_leader_peer(alive_peer_last, None)
                self.announce_new_leader(
                    complained_leader_id=alive_peer_last.peer_id,
                    new_leader_id=alive_peer_last.peer_id,
                    is_broadcast=True)
            else:
                logging.error("There is no leader in this network.")

        return delete_peer_list
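A minimal sketch of the invariant this check enforces: exactly one peer should report peer_type == "1" (BLOCK_GENERATOR). The data below is an illustrative stand-in for the GetStatus responses:

statuses = {"peer_a": "1", "peer_b": "0", "peer_c": "0"}  # peer_id -> peer_type
leader_count = sum(1 for peer_type in statuses.values() if peer_type == "1")
assert leader_count == 1, "the network must have exactly one leader"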
Example #16
def menu4_1(params=None):
    print(
        "Input Peer Target [IP]:[port] (default '' -> 127.0.0.1:7100, [port] -> 127.0.0.1:[port])"
    )
    choice = input(" >>  ")
    if choice == "":
        choice = "127.0.0.1:7100"
    elif choice.find(':') == -1:
        choice = "127.0.0.1:" + choice

    print("your input: " + choice)
    channel = grpc.insecure_channel(choice)
    peer_stub = loopchain_pb2_grpc.PeerServiceStub(channel)
    response = peer_stub.GetStatus(
        loopchain_pb2.StatusRequest(request="hello"), conf.GRPC_TIMEOUT)
    print("Peer Status: " + str(response))
    menu4(peer_stub)
Example #17
    def __reset_peers_in_group(self, group_id, reset_action):
        # Apply list() to copy the values before iterating (to avoid errors when the dict changes mid-iteration).
        for peer_id in list(self.peer_list[group_id]):
            peer_each = self.peer_list[group_id][peer_id]
            stub_manager = self.get_peer_stub_manager(peer_each, group_id)
            try:
                stub_manager.call("GetStatus", loopchain_pb2.StatusRequest(request="reset peers in group"),
                                  is_stub_reuse=True)
            except Exception as e:
                logging.warning("gRPC Exception: " + str(e))
                logging.debug("remove this peer(target): " + str(peer_each.target))
                self.remove_peer(peer_each.peer_id, group_id)

                if reset_action is not None:
                    reset_action(peer_each.peer_id, peer_each.target)

        if len(self.peer_list[group_id]) == 0 and group_id != conf.ALL_GROUP_ID:
            del self.peer_list[group_id]
Example #18
        def __handler_connect_to_self_peer(connect_param):
            # Create a stub for connecting to the parent peer that spawned this process.
            # Returning values through a pipe can raise errors when a receive is not paired with a send,
            # so for a reliable connection, communicate with the parent process over gRPC as well.
            logging.debug("try connect to self peer: " + str(connect_param))

            stub_to_self_peer = StubManager.get_stub_manager_to_server(
                connect_param,
                loopchain_pb2_grpc.InnerServiceStub,
                time_out_seconds=conf.CONNECTION_RETRY_TIMEOUT,
                is_allow_null_stub=True)
            __process_variables[
                self.PROCESS_VARIABLE_STUB_TO_SELF_PEER] = stub_to_self_peer

            response = util.request_server_wait_response(
                stub_to_self_peer.stub.GetStatus,
                loopchain_pb2.StatusRequest(
                    request="(tx process) connect to self peer"))
            logging.debug("connect to inner channel: " + str(response))
Example #19
    def get_score_status(self):
        return self.__stub_to_peer_service.GetScoreStatus(
            loopchain_pb2.StatusRequest(request=""), self.REST_GRPC_TIMEOUT)
Example #20
    def get_status(self):
        return self.__stub_to_peer_service.GetStatus(
            loopchain_pb2.StatusRequest(request=""), conf.GRPC_TIMEOUT)
Example #21
    def __get_peer_stub_list(self) -> Tuple[int, int, List[Tuple]]:
        """It updates peer list for block manager refer to peer list on the loopchain network.
        This peer list is not same to the peer list of the loopchain network.

        :return max_height: a height of current blockchain
        :return unconfirmed_block_height: unconfirmed_block_height on the network
        :return peer_stubs: current peer list on the network (target, peer_stub)
        """
        max_height = -1  # current max height
        unconfirmed_block_height = -1
        peer_stubs = []  # peer stub list for block height synchronization

        if not ObjectManager().channel_service.is_support_node_function(
                conf.NodeFunction.Vote):
            rs_client = ObjectManager().channel_service.rs_client
            status_response = rs_client.call(RestMethod.Status)
            max_height = status_response['block_height']
            peer_stubs.append((rs_client.target, rs_client))
            return max_height, unconfirmed_block_height, peer_stubs

        # Make Peer Stub List [peer_stub, ...] and get max_height of network
        self.__block_height_sync_bad_targets = {
            k: v
            for k, v in self.__block_height_sync_bad_targets.items()
            if v > self.blockchain.block_height
        }
        util.logger.info(
            f"Bad Block Sync Peer : {self.__block_height_sync_bad_targets}")
        peer_target = ChannelProperty().peer_target
        my_height = self.blockchain.block_height

        if self.blockchain.last_block:
            reps_hash = self.blockchain.get_reps_hash_by_header(
                self.blockchain.last_block.header)
        else:
            reps_hash = ChannelProperty().crep_root_hash
        rep_targets = self.blockchain.find_preps_targets_by_roothash(reps_hash)
        target_list = list(rep_targets.values())
        for target in target_list:
            if target == peer_target:
                continue
            if target in self.__block_height_sync_bad_targets:
                continue
            util.logger.debug(f"try to target({target})")
            channel = GRPCHelper().create_client_channel(target)
            stub = loopchain_pb2_grpc.PeerServiceStub(channel)
            try:
                response = stub.GetStatus(
                    loopchain_pb2.StatusRequest(
                        request='block_sync',
                        channel=self.__channel_name,
                    ), conf.GRPC_TIMEOUT_SHORT)
                target_block_height = max(response.block_height,
                                          response.unconfirmed_block_height)

                if target_block_height > my_height:
                    peer_stubs.append((target, stub))
                    max_height = max(max_height, target_block_height)
                    unconfirmed_block_height = max(
                        unconfirmed_block_height,
                        response.unconfirmed_block_height)

            except Exception as e:
                util.logger.warning(
                    f"This peer has already been removed from the block height target node. {e}"
                )

        return max_height, unconfirmed_block_height, peer_stubs
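The dict comprehension above implements an expiring blacklist: a bad target is skipped only while its recorded height is still ahead of the local chain. A minimal sketch of the same pattern:

bad_targets = {"1.2.3.4:7100": 120, "5.6.7.8:7100": 90}  # illustrative target -> height when marked bad
my_block_height = 100
# Drop entries the local chain has already caught up to
bad_targets = {target: height for target, height in bad_targets.items() if height > my_block_height}
assert bad_targets == {"1.2.3.4:7100": 120}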
Example #22
def menu4_4(params):
    peer_stub = params[0]
    response = peer_stub.GetStatus(loopchain_pb2.StatusRequest(request="GetStatus"), conf.GRPC_TIMEOUT)
    print("Peer Status: " + str(response))
    menu4(peer_stub)
Example #23
    def block_height_sync(self, target_peer_stub=None):
        """Peer간의 데이타 동기화
        """
        if self.__block_height_sync_lock is True:
            # ***** This correction process also runs when a peer fails to handle an AnnounceConfirmBlock
            # message because of a block height gap, so ignore requests while a sync is already running.
            logging.warning("block height sync is already running...")
            return

        self.__block_height_sync_lock = True
        if target_peer_stub is None:
            target_peer_stub = self.__stub_to_blockgenerator

        ### Block height correction: the peer data synchronization process ###
        ### Love&Hate Algorithm ###
        logging.info("try block height sync...with love&hate")

        # Make Peer Stub List [peer_stub, ...] and get max_height of network
        max_height = 0
        peer_stubs = []
        for peer_target in self.__peer_manager.get_IP_of_peers_in_group():
            target = ":".join(peer_target.split(":")[1:])
            if target != self.__peer_target:
                logging.debug(f"try to target({target})")
                channel = grpc.insecure_channel(target)
                stub = loopchain_pb2_grpc.PeerServiceStub(channel)
                try:
                    response = stub.GetStatus(
                        loopchain_pb2.StatusRequest(request=""))
                    if response.block_height > max_height:
                        # Add peer as higher than this
                        max_height = response.block_height
                        peer_stubs.append(stub)
                except Exception as e:
                    logging.warning("Already bad.... I don't love you" +
                                    str(e))

        my_height = self.__block_manager.get_blockchain().block_height

        if max_height > my_height:  # TODO handle the case where this peer has the highest block
            logging.info(
                f"You need block height sync to: {max_height} yours: {my_height}"
            )
            # If this peer's chain is behind, iterate over the peer list and request blocks in reverse,
            # from the last block down to this peer's height.
            # (Because of the blockchain's block lookup logic, querying in height order is inefficient.)

            preload_blocks = {}  # height : block dictionary

            # Start from the target peer's last block hash.
            response = target_peer_stub.call(
                "GetLastBlockHash", loopchain_pb2.StatusRequest(request=""))
            logging.debug(response)
            request_hash = response.block_hash

            max_try = max_height - my_height
            while (self.__block_manager.get_blockchain().last_block.block_hash != request_hash
                   and max_try > 0):

                for peer_stub in peer_stubs:
                    response = None
                    try:
                        # The responding peer sends its current height along with the block.
                        # TODO if any peer is higher than the target peer's last block, finish up to
                        # TODO the current target height and then run the height sync again.
                        response = peer_stub.BlockSync(
                            loopchain_pb2.BlockSyncRequest(
                                block_hash=request_hash), conf.GRPC_TIMEOUT)
                    except Exception as e:
                        logging.warning("There is a bad peer, I hate you: " +
                                        str(e))

                    if response is not None and response.response_code == message_code.Response.success:
                        dump = response.block
                        block = pickle.loads(dump)

                        # Collect blocks in reverse order, starting from the last block.
                        request_hash = block.prev_block_hash

                        # add block to preload_blocks
                        logging.debug("Add preload_blocks Height: " +
                                      str(block.height))
                        preload_blocks[block.height] = block

                        if response.max_block_height > max_height:
                            max_height = response.max_block_height

                        if (my_height + 1) == block.height:
                            max_try = 0  # stop making further requests
                            logging.info("Block Height Sync Complete.")
                            break
                        max_try -= 1
                    else:
                        # A peer that fails to respond during this loop is not asked again within the loop.
                        # (TODO: a reporting strategy for bad peers will be designed separately.)
                        peer_stubs.remove(peer_stub)
                        logging.warning(
                            "Marking this peer as bad (error above or no response): "
                            + str(peer_stub))

            if len(preload_blocks) > 0:
                while my_height < max_height:
                    add_height = my_height + 1
                    logging.debug("try add block height: " + str(add_height))
                    try:
                        self.__block_manager.add_block(
                            preload_blocks[add_height])
                        my_height = add_height
                    except KeyError as e:
                        logging.error("fail block height sync: " + str(e))
                        break
                    except exception.BlockError as e:
                        logging.error(
                            "Block Error Clear all block and restart peer.")
                        self.__block_manager.clear_all_blocks()
                        util.exit_and_msg(
                            "Block Error Clear all block and restart peer.")

            if my_height < max_height:
                # If the block height sync did not complete, try again.
                logging.warning(
                    "fail block height sync in one time... try again...")
                self.__block_height_sync_lock = False
                self.block_height_sync(target_peer_stub)

        self.__block_height_sync_lock = False
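A minimal sketch of the reverse-hash-walk strategy used above, assuming each block records its prev_block_hash: walk backwards from the target's last hash collecting blocks by height, then replay them forwards. The block store is an illustrative dict, not the project's storage API:

blocks_by_hash = {
    "h3": {"height": 3, "prev_block_hash": "h2"},
    "h2": {"height": 2, "prev_block_hash": "h1"},
}
my_height, request_hash = 1, "h3"  # local chain tip and the target peer's last block hash
preload_blocks = {}
while request_hash in blocks_by_hash:
    block = blocks_by_hash[request_hash]
    preload_blocks[block["height"]] = block
    if block["height"] == my_height + 1:
        break  # reached the block right above the local tip
    request_hash = block["prev_block_hash"]
for height in sorted(preload_blocks):  # replay forwards: heights 2, then 3
    my_height = height  # stand-in for block_manager.add_block(...)
assert my_height == 3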
Example #24
    def get_status(self, channel):
        return self.__stub_to_peer_service.GetStatus(
            loopchain_pb2.StatusRequest(request="", channel=channel),
            self.REST_GRPC_TIMEOUT)