Example #1
    def add_block(self,
                  block: Block,
                  confirm_info=None,
                  need_to_write_tx_info=True,
                  need_to_score_invoke=True) -> bool:
        """

        :param block:
        :param confirm_info: additional info for this block, but It came from next block of this block.
        :param need_to_write_tx_info:
        :param need_to_score_invoke:
        :return:
        """
        with self.__add_block_lock:
            if need_to_write_tx_info and need_to_score_invoke and \
                    not self.prevent_next_block_mismatch(block.header.height):
                return True

            peer_id = ChannelProperty().peer_id
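            # Report a 'TotalTx' APM event with the block hash and the running total of transactions.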
            util.apm_event(
                peer_id, {
                    'event_type': 'TotalTx',
                    'peer_id': peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': self.__channel_name,
                    'data': {
                        'block_hash': block.header.hash.hex(),
                        'total_tx': self.total_tx
                    }
                })

            return self.__add_block(block, confirm_info, need_to_write_tx_info,
                                    need_to_score_invoke)
Example #2
    def add_tx(self, request) -> None:
        tx_json = request.tx_json

        tx_versioner = self._channel_service.block_manager.get_blockchain().tx_versioner
        tx_version = tx_versioner.get_version(tx_json)

        ts = TransactionSerializer.new(tx_version, tx_versioner)
        tx = ts.from_(tx_json)

        tv = TransactionVerifier.new(tx_version, tx_versioner)
        tv.verify(tx)

        object_has_queue = self._channel_service.get_object_has_queue_by_consensus()
        if tx is not None:
            object_has_queue.add_tx_obj(tx)
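            # Record the queued transaction as an 'AddTx' APM event.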
            util.apm_event(
                ChannelProperty().peer_id, {
                    'event_type': 'AddTx',
                    'peer_id': ChannelProperty().peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': ChannelProperty().name,
                    'data': {
                        'tx_hash': tx.tx_hash
                    }
                })
Example #3
    def __add_tx_list(self, tx_list):
        for tx in tx_list:
            if tx.hash.hex() in self._block_manager.get_tx_queue():
                util.logger.debug(
                    f"tx hash {tx.hash.hex_0x()} already exists in transaction queue."
                )
                continue
            if self._blockchain.find_tx_by_key(tx.hash.hex()):
                util.logger.debug(
                    f"tx hash {tx.hash.hex_0x()} already exists in blockchain."
                )
                continue

            self._block_manager.add_tx_obj(tx)
            util.apm_event(
                ChannelProperty().peer_id, {
                    'event_type': 'AddTx',
                    'peer_id': ChannelProperty().peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': ChannelProperty().name,
                    'data': {
                        'tx_hash': tx.hash.hex()
                    }
                })

        if not conf.ALLOW_MAKE_EMPTY_BLOCK:
            self._channel_service.start_leader_complain_timer_if_tx_exists()
Example #4
    def add_block(self, block_: Block, vote_: Vote = None) -> bool:
        """

        :param block_: block to add
        :param vote_: additional info for this block, but It came from next block
        :return:
        """
        result = self.__blockchain.add_block(block_, vote_)

        last_block = self.__blockchain.last_block

        peer_id = ChannelProperty().peer_id
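        # Report a 'TotalTx' APM event with the block hash and the blockchain's total transaction count.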
        util.apm_event(
            peer_id, {
                'event_type': 'TotalTx',
                'peer_id': peer_id,
                'peer_name': conf.PEER_NAME,
                'channel_name': self.__channel_name,
                'data': {
                    'block_hash': block_.header.hash.hex(),
                    'total_tx': self.__blockchain.total_tx
                }
            })

        return result
Example #5
    def add_tx(self, request) -> None:
        tx_json = request.tx_json

        tx_versioner = self._blockchain.tx_versioner
        tx_version, tx_type = tx_versioner.get_version(tx_json)

        ts = TransactionSerializer.new(tx_version, tx_type, tx_versioner)
        tx = ts.from_(tx_json)

        tv = TransactionVerifier.new(tx_version, tx_type, tx_versioner)
        tv.verify(tx)

        if tx is not None:
            self._block_manager.add_tx_obj(tx)
            util.apm_event(
                ChannelProperty().peer_id, {
                    'event_type': 'AddTx',
                    'peer_id': ChannelProperty().peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': ChannelProperty().name,
                    'data': {
                        'tx_hash': tx.tx_hash
                    }
                })

        if not conf.ALLOW_MAKE_EMPTY_BLOCK:
            self._channel_service.start_leader_complain_timer_if_tx_exists()
Example #6
    def create_tx(self, data):
        tx = Transaction()
        score_id = ""
        score_version = ""

        try:
            score_info = self._channel_service.score_info
            score_id = score_info[message_code.MetaParams.ScoreInfo.score_id]
            score_version = score_info[message_code.MetaParams.ScoreInfo.score_version]
        except KeyError as e:
            logging.debug(f"CreateTX : load score info fail\n"
                          f"cause : {e}")

        send_tx_type = self._channel_service.get_channel_option()["send_tx_type"]
        tx.init_meta(ChannelProperty().peer_id, score_id, score_version, ChannelProperty().name, send_tx_type)
        tx.put_data(data)
        tx.sign_hash(self._channel_service.peer_auth)

        self._channel_service.broadcast_scheduler.schedule_job(BroadcastCommand.CREATE_TX, tx)

        try:
            data_log = json.loads(data)
        except Exception as e:
            data_log = {'tx_hash': tx.tx_hash}

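        # Report a 'CreateTx' APM event; data_log falls back to the tx hash when the payload is not valid JSON.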
        util.apm_event(ChannelProperty().peer_id, {
            'event_type': 'CreateTx',
            'peer_id': ChannelProperty().peer_id,
            'peer_name': conf.PEER_NAME,
            'channel_name': ChannelProperty().name,
            'tx_hash': tx.tx_hash,
            'data': data_log})

        return tx.tx_hash
Example #7
    def __add_tx_list(self, tx_list):
        block_manager = self._channel_service.block_manager
        blockchain = block_manager.get_blockchain()

        for tx in tx_list:
            if tx.hash.hex() in block_manager.get_tx_queue():
                util.logger.warning(
                    f"hash {tx.hash.hex()} already exists in transaction queue. tx({tx})"
                )
                continue
            if blockchain.find_tx_by_key(tx.hash.hex()):
                util.logger.warning(
                    f"hash {tx.hash.hex()} already exists in blockchain. tx({tx})"
                )
                continue

            block_manager.add_tx_obj(tx)
            util.apm_event(
                ChannelProperty().peer_id, {
                    'event_type': 'AddTx',
                    'peer_id': ChannelProperty().peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': ChannelProperty().name,
                    'data': {
                        'tx_hash': tx.hash.hex()
                    }
                })

        self._channel_service.start_leader_complain_timer_if_tx_exists()
Example #8
    def add_tx_list(self, request) -> tuple:
        tx_validate_count = 0
        tx_validator = get_tx_validator(ChannelProperty().name)

        for tx_item in request.tx_list:
            tx_dumped = tx_validator.load_dumped_tx(tx_item)
            tx = tx_validator.validate_dumped_tx_message(tx_dumped)
            # util.logger.spam(f"channel_inner_service:add_tx tx({tx.get_data_string()})")

            object_has_queue = self._channel_service.get_object_has_queue_by_consensus()
            if tx is not None:
                object_has_queue.add_tx_obj(tx)
                tx_validate_count += 1
                util.apm_event(
                    ChannelProperty().peer_id, {
                        'event_type': 'AddTx',
                        'peer_id': ChannelProperty().peer_id,
                        'peer_name': conf.PEER_NAME,
                        'channel_name': ChannelProperty().name,
                        'data': {
                            'tx_hash': tx.tx_hash
                        }
                    })

        if tx_validate_count == 0:
            response_code = message_code.Response.fail
            message = "fail tx validate while AddTxList"
        else:
            response_code = message_code.Response.success
            message = f"success ({tx_validate_count})/({len(request.tx_list)})"

        return response_code, message
Example #9
    def get_invoke_result(self, tx_hash):
        try:
            invoke_result = self._channel_service.block_manager.get_invoke_result(tx_hash)
            invoke_result_str = json.dumps(invoke_result)
            response_code = message_code.Response.success
            logging.debug('invoke_result : ' + invoke_result_str)

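            # Record the fetched result as a 'GetInvokeResult' APM event before checking its response code.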
            util.apm_event(ChannelProperty().peer_id, {
                'event_type': 'GetInvokeResult',
                'peer_id': ChannelProperty().peer_id,
                'peer_name': conf.PEER_NAME,
                'channel_name': ChannelProperty().name,
                'data': {'invoke_result': invoke_result, 'tx_hash': tx_hash}})

            if 'code' in invoke_result:
                if invoke_result['code'] == ScoreResponse.NOT_EXIST:
                    logging.debug(f"get invoke result NOT_EXIST tx_hash({tx_hash})")
                    response_code = message_code.Response.fail_invalid_key_error
                elif invoke_result['code'] == ScoreResponse.NOT_INVOKED:
                    logging.info(f"get invoke result NOT_INVOKED tx_hash({tx_hash})")
                    response_code = message_code.Response.fail_tx_not_invoked

            return response_code, invoke_result_str
        except BaseException as e:
            logging.error(f"get invoke result error : {e}")
            util.apm_event(ChannelProperty().peer_id, {
                'event_type': 'Error',
                'peer_id': ChannelProperty().peer_id,
                'peer_name': conf.PEER_NAME,
                'channel_name': ChannelProperty().name,
                'data': {
                    'error_type': 'InvokeResultError',
                    'code': message_code.Response.fail,
                    'message': f"get invoke result error : {e}"}})
            return message_code.Response.fail, None
Example #10
    def __add_block(self,
                    block: Block,
                    confirm_info,
                    need_to_write_tx_info=True,
                    need_to_score_invoke=True):
        with self.__add_block_lock:
            invoke_results = self.__invoke_results.get(block.header.hash.hex(),
                                                       None)
            if invoke_results is None and need_to_score_invoke:
                if block.header.height == 0:
                    block, invoke_results = \
                        ObjectManager().channel_service.genesis_invoke(block)
                else:
                    block, invoke_results = \
                        ObjectManager().channel_service.score_invoke(block)

            try:
                if need_to_write_tx_info:
                    self.__add_tx_to_block_db(block, invoke_results)
                if need_to_score_invoke:
                    ObjectManager().channel_service.score_write_precommit_state(block)
            except Exception as e:
                logging.warning(f"blockchain:add_block FAIL "
                                f"channel_service.score_write_precommit_state")
                raise e
            finally:
                self.__invoke_results.pop(block.header.hash.hex(), None)
            next_total_tx = self.__write_block_data(block, confirm_info)

            self.__last_block = block
            self.__block_height = self.__last_block.header.height
            self.__total_tx = next_total_tx
            logging.debug(
                f"blockchain add_block set block_height({self.__block_height}), "
                f"last_block({self.__last_block.header.hash.hex()})")
            logging.info(f"ADD BLOCK HEIGHT : {block.header.height} , "
                         f"HASH : {block.header.hash.hex()} , "
                         f"CHANNEL : {self.__channel_name}")
            logging.debug(f"ADDED BLOCK HEADER : {block.header}")

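            # Emit an 'AddBlock' APM event with the updated block height.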
            util.apm_event(
                self.__peer_id, {
                    'event_type': 'AddBlock',
                    'peer_id': self.__peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': self.__channel_name,
                    'data': {
                        'block_height': self.__block_height
                    }
                })

            # notify new block
            ObjectManager().channel_service.inner_service.notify_new_block()
            # reset_network_by_block_height is called in critical section by self.__add_block_lock.
            # Other Blocks must not be added until reset_network_by_block_height function finishes.
            ObjectManager().channel_service.reset_network_by_block_height(
                self.__last_block.header.height)

            return True
Example #11
    def __add_block(self, block: Block, vote: Vote = None):
        with self.__add_block_lock:
            invoke_results = self.__invoke_results.get(block.header.hash.hex(),
                                                       None)
            if invoke_results is None:
                if block.header.height == 0:
                    block, invoke_results = \
                        ObjectManager().channel_service.genesis_invoke(block)
                else:
                    block, invoke_results = \
                        ObjectManager().channel_service.score_invoke(block)

            try:
                self.__add_tx_to_block_db(block, invoke_results)
                ObjectManager().channel_service.score_write_precommit_state(
                    block)
            except Exception as e:
                logging.warning(f"blockchain:add_block FAIL "
                                f"channel_service.score_write_precommit_state")
                raise e
            finally:
                self.__invoke_results.pop(block.header.hash.hex(), None)

            next_total_tx = self.__write_block_data(block, vote)

            self.__last_block = block
            self.__block_height = self.__last_block.header.height
            self.__total_tx = next_total_tx
            logging.debug(
                f"blockchain add_block set block_height({self.__block_height}), "
                f"last_block({self.__last_block.header.hash.hex()})")
            logging.info(f"ADD BLOCK HEIGHT : {block.header.height} , "
                         f"HASH : {block.header.hash.hex()} , "
                         f"CHANNEL : {self.__channel_name}")
            logging.debug(f"ADDED BLOCK HEADER : {block.header}")

            util.apm_event(
                self.__peer_id, {
                    'event_type': 'AddBlock',
                    'peer_id': self.__peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': self.__channel_name,
                    'data': {
                        'block_height': self.__block_height
                    }
                })

            # stop leader complain timer
            ObjectManager().channel_service.stop_leader_complain_timer()

            # start new epoch
            ObjectManager().channel_service.block_manager.epoch = \
                Epoch.new_epoch(block.header.height + 1)

            # notify new block
            ObjectManager().channel_service.inner_service.notify_new_block()

            return True
Example #12
    def check_peer_status(self, group_id=None):
        if group_id is None:
            group_id = conf.ALL_GROUP_ID
        nonresponse_peer_list = []
        check_leader_peer_count = 0

        for peer_id in list(self.__peer_object_list[group_id]):
            peer_info: PeerInfo = self.peer_list[group_id][peer_id]
            stub_manager = self.get_peer_stub_manager(peer_info, group_id)
            peer_object: PeerObject = self.__peer_object_list[group_id][peer_id]

            try:
                response = stub_manager.call(
                    "Request", loopchain_pb2.Message(
                        code=message_code.Request.status,
                        channel=self.__channel_name,
                        message="check peer status by rs",
                        meta=json.dumps({"highest_block_height": self.__highest_block_height})
                    ), is_stub_reuse=True)
                if response.code != message_code.Response.success:
                    raise Exception

                peer_object.no_response_count_reset()
                peer_info.status = PeerStatus.connected
                peer_status = json.loads(response.meta)

                if peer_status["state"] == "BlockGenerate":
                    check_leader_peer_count += 1

                if peer_status["block_height"] >= self.__highest_block_height:
                    self.__highest_block_height = peer_status["block_height"]
            except Exception as e:
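                # Report the unreachable peer as a 'DisconnectedPeer' APM event.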
                util.apm_event(conf.RADIO_STATION_NAME, {
                    'event_type': 'DisconnectedPeer',
                    'peer_name': conf.PEER_NAME,
                    'channel_name': self.__channel_name,
                    'data': {
                        'message': 'there is disconnected peer gRPC Exception: ' + str(e),
                        'peer_id': peer_info.peer_id}})

                logging.warning("there is disconnected peer peer_id(" + peer_info.peer_id +
                                ") gRPC Exception: " + str(e))
                peer_object.no_response_count_up()

                util.logger.spam(
                    f"peer_manager::check_peer_status "
                    f"peer_id({peer_object.peer_info.peer_id}) "
                    f"no response count up({peer_object.no_response_count})")

                if peer_object.no_response_count >= conf.NO_RESPONSE_COUNT_ALLOW_BY_HEARTBEAT:
                    peer_info.status = PeerStatus.disconnected
                    logging.debug(f"peer status update time: {peer_info.status_update_time}")
                    logging.debug(f"this peer not respond {peer_info.peer_id}")
                    nonresponse_peer_list.append(peer_info)

        logging.info(f"non response peer list : {nonresponse_peer_list}")
Example #13
    async def query(self, params):
        if not util.check_is_json_string(params):
            return message_code.Response.fail_validate_params, ""

        logging.debug(f'Query request with {params}')

        try:
            if self._score_service.score is None:
                logging.error("There is no score!!")
                ret = json.dumps({
                    'code': ScoreResponse.EXCEPTION,
                    'message': 'There is no score'
                })
            else:
                try:
                    plugin_result = self._score_service.score_plugin.query(
                        query=params)
                    if plugin_result == PluginReturns.CONTINUE:
                        plugin_result = self._score_service.score.query(params)
                    ret = plugin_result
                except Exception as e:
                    logging.error(f'query {params} raise exception {e}')
                    exception_response = {
                        'code': ScoreResponse.EXCEPTION,
                        'message': f'Query Raise Exception : {e}'
                    }
                    ret = json.dumps(exception_response)
                    return message_code.Response.success, ret

            response = ret

            peer_id = self._score_service.peer_id
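            # Record the executed query as a 'Query' APM event.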
            util.apm_event(
                peer_id, {
                    'event_type': 'Query',
                    'peer_id': peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': self._score_service.channel_name,
                    'data': {
                        'score_query': json.loads(params)
                    }
                })

        except Exception as e:
            logging.error(f'Execute Query Error : {e}')
            return message_code.Response.fail, ""

        if util.check_is_json_string(response):
            response_code = message_code.Response.success
        else:
            response_code = message_code.Response.fail

        return response_code, response
Example #14
    def __add_tx_list(self, tx_list):
        for tx in tx_list:
            # util.logger.spam(f"channel_inner_service:add_tx tx({tx.get_data_string()})")

            object_has_queue = self._channel_service.get_object_has_queue_by_consensus()
            object_has_queue.add_tx_obj(tx)
            util.apm_event(
                ChannelProperty().peer_id, {
                    'event_type': 'AddTx',
                    'peer_id': ChannelProperty().peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': ChannelProperty().name,
                    'data': {
                        'tx_hash': tx.hash.hex()
                    }
                })
Example #15
    def add_tx_list(self, request) -> tuple:
        tx_validate_count = 0

        for tx_item in request.tx_list:
            tx_json = json.loads(tx_item.tx_json)

            tx_versions = TransactionVersions()
            tx_version = tx_versions.get_version(tx_json)
            tx_hash_version = \
                self._channel_service.get_channel_option()["tx_hash_version"]

            ts = TransactionSerializer.new(tx_version, tx_hash_version)
            tx = ts.from_(tx_json)

            tv = TransactionVerifier.new(tx_version, tx_hash_version)
            tv.verify(tx)

            # util.logger.spam(f"channel_inner_service:add_tx tx({tx.get_data_string()})")

            object_has_queue = self._channel_service.get_object_has_queue_by_consensus()
            if tx is not None:
                object_has_queue.add_tx_obj(tx)
                tx_validate_count += 1
                util.apm_event(
                    ChannelProperty().peer_id, {
                        'event_type': 'AddTx',
                        'peer_id': ChannelProperty().peer_id,
                        'peer_name': conf.PEER_NAME,
                        'channel_name': ChannelProperty().name,
                        'data': {
                            'tx_hash': tx.hash.hex()
                        }
                    })

        if tx_validate_count == 0:
            response_code = message_code.Response.fail
            message = "fail tx validate while AddTxList"
        else:
            response_code = message_code.Response.success
            message = f"success ({tx_validate_count})/({len(request.tx_list)})"

        return response_code, message
Example #16
    def add_tx(self, request) -> None:
        tx_validator = get_tx_validator(ChannelProperty().name)
        tx_dumped = tx_validator.load_dumped_tx(request)
        tx = tx_validator.validate_dumped_tx_message(tx_dumped)
        # util.logger.spam(f"channel_inner_service:add_tx tx({tx.get_data_string()})")

        object_has_queue = self._channel_service.get_object_has_queue_by_consensus()
        if tx is not None:
            object_has_queue.add_tx_obj(tx)
            util.apm_event(
                ChannelProperty().peer_id, {
                    'event_type': 'AddTx',
                    'peer_id': ChannelProperty().peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': ChannelProperty().name,
                    'data': {
                        'tx_hash': tx.tx_hash
                    }
                })
Example #17
    def get_confirmed_block(self, block_hash=None):
        """검증에 성공한 block 을 얻는다.
        해당 블럭은 CandidateBlocks 에서 제거된다.

        :return: 검증에 성공한 block(이 block 은 BlockChain 에 추가되어야 한다.),
                 해당 block 이 검증되지 않았을때에는 Exception(해당블럭이 없다, 해당블럭이 아직 검증되지 않았다.) 을 발생한다.
        """
        if block_hash is None:
            candidate_block = self.get_candidate_block()
            if candidate_block is None:
                return None
            block_hash = candidate_block.block_hash

        if block_hash not in self.__unconfirmed_blocks.keys():
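            # The block is not among the candidate blocks: report it and raise.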
            util.apm_event(self.__peer_id, {
                'event_type': 'NoExistBlock',
                'peer_id': self.__peer_id,
                'data': {
                    'message': 'No Exist block in candidate blocks by hash',
                    'block_hash': block_hash}})
            raise NoExistBlock("No Exist block in candidate blocks by hash: " + block_hash)

        if self.__unconfirmed_blocks[block_hash][0].get_result(block_hash, conf.VOTING_RATIO):
            logging.info("Confirmed block pop from candidate blocks hash: " + block_hash)
            return self.__unconfirmed_blocks.pop(block_hash)[1]
        else:
            if self.__unconfirmed_blocks[block_hash][0].is_failed_vote(block_hash, conf.VOTING_RATIO):
                logging.warning("This block fail to validate!!")
                self.remove_broken_block(block_hash)
                util.apm_event(self.__peer_id, {
                    'event_type': 'InvalidatedBlock',
                    'peer_id': self.__peer_id,
                    'data': {
                        'message': 'This block fail to validate',
                        'block_hash': block_hash}})
                raise InvalidatedBlock("This block fail to validate", candidate_block)
            else:
                logging.warning("There is Not Complete Validation.")
                util.apm_event(self.__peer_id, {
                    'event_type': 'NotCompleteValidation',
                    'peer_id': self.__peer_id,
                    'data': {
                        'message': 'There is Not Complete Validation.',
                        'block_hash': block_hash}})
                raise NotCompleteValidation("Not Complete Validation", candidate_block)
Example #18
    def __add_block(self, block: Block):
        with self.__add_block_lock:
            need_to_commit = True

            invoke_results = self.__invoke_results.get(block.header.hash.hex(),
                                                       None)
            if invoke_results is None:
                if block.header.height == 0:
                    block, invoke_results = \
                        ObjectManager().channel_service.genesis_invoke(block)
                else:
                    block, invoke_results = \
                        ObjectManager().channel_service.score_invoke(block)

            try:
                if need_to_commit:
                    self.__add_tx_to_block_db(block, invoke_results)
                    ObjectManager().channel_service.score_write_precommit_state(block)
            except Exception as e:
                logging.warning(f"blockchain:add_block FAIL "
                                f"channel_service.score_write_precommit_state")
                raise e
            finally:
                self.__invoke_results.pop(block.header.hash.hex(), None)

            # a condition for the exception case of genesis block.
            next_total_tx = self.__total_tx
            if block.header.height > 0:
                next_total_tx += len(block.body.transactions)

            bit_length = next_total_tx.bit_length()
            byte_length = (bit_length + 7) // 8
            next_total_tx_bytes = next_total_tx.to_bytes(byte_length,
                                                         byteorder='big')

            block_serializer = BlockSerializer.new("0.1a")
            block_serialized = json.dumps(block_serializer.serialize(block))
            block_hash_encoded = block.header.hash.hex().encode(
                encoding='UTF-8')

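            # Persist the block, the last-block pointer, the total tx count and the height index in one write batch.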
            batch = leveldb.WriteBatch()
            batch.Put(block_hash_encoded, block_serialized.encode("utf-8"))
            batch.Put(BlockChain.LAST_BLOCK_KEY, block_hash_encoded)
            batch.Put(BlockChain.TRANSACTION_COUNT_KEY, next_total_tx_bytes)
            batch.Put(
                BlockChain.BLOCK_HEIGHT_KEY + block.header.height.to_bytes(
                    conf.BLOCK_HEIGHT_BYTES_LEN, byteorder='big'),
                block_hash_encoded)
            self.__confirmed_block_db.Write(batch)

            self.__last_block = block
            self.__block_height = self.__last_block.header.height
            self.__total_tx = next_total_tx
            logging.debug(
                f"blockchain add_block set block_height({self.__block_height}), "
                f"last_block({self.__last_block.header.hash.hex()})")
            logging.info(f"ADD BLOCK HEIGHT : {block.header.height} , "
                         f"HASH : {block.header.hash.hex()} , "
                         f"CHANNEL : {self.__channel_name}")

            util.apm_event(
                self.__peer_id, {
                    'event_type': 'AddBlock',
                    'peer_id': self.__peer_id,
                    'peer_name': conf.PEER_NAME,
                    'channel_name': self.__channel_name,
                    'data': {
                        'block_height': self.__block_height
                    }
                })

            # notify new block
            ObjectManager().channel_service.inner_service.notify_new_block()

            return True
Example #19
    def add_block(self, block: Block):
        """인증된 블럭만 추가합니다.

        :param block: 인증완료된 추가하고자 하는 블럭
        :return:
        """
        # util.logger.spam(f"blockchain:add_block --start--")
        if block.block_status is not BlockStatus.confirmed:
            raise BlockInValidError("unverified block")
        elif self.__last_block is not None and self.__last_block.height > 0:
            if self.__last_block.block_hash != block.prev_block_hash:
                # If the last block's hash differs from the new block's prev_hash,
                # do not add the block and raise an exception instead.
                logging.debug("self.last_block.block_hash: " +
                              self.__last_block.block_hash)
                logging.debug("block.prev_block_hash: " +
                              block.prev_block_hash)
                raise BlockError("hash does not match the last block")

        # util.logger.spam(f"blockchain:add_block --1-- {block.prev_block_hash}, {block.height}")
        if block.height == 0 or ObjectManager().peer_service is None:
            # all results to success
            success_result = {'code': int(message_code.Response.success)}
            invoke_results = self.__create_invoke_result_specific_case(
                block.confirmed_transaction_list, success_result)
        else:
            try:
                invoke_results = ObjectManager().peer_service.score_invoke(
                    block, self.__channel_name)

            except Exception as e:
                # When Grpc Connection Raise Exception
                # save all result{'code': ScoreResponse.SCORE_CONTAINER_EXCEPTION, 'message': str(e)}
                logging.error(f'Error While Invoke Score fail add block : {e}')
                score_container_exception_result = {
                    'code': ScoreResponse.SCORE_CONTAINER_EXCEPTION,
                    'message': str(e)
                }
                invoke_results = self.__create_invoke_result_specific_case(
                    block.confirmed_transaction_list,
                    score_container_exception_result)

        # util.logger.spam(f"blockchain:add_block --2--")
        self.__add_tx_to_block_db(block, invoke_results)

        block_hash_encoded = block.block_hash.encode(encoding='UTF-8')

        batch = leveldb.WriteBatch()
        batch.Put(block_hash_encoded, block.serialize_block())
        batch.Put(BlockChain.LAST_BLOCK_KEY, block_hash_encoded)
        batch.Put(
            BlockChain.BLOCK_HEIGHT_KEY + block.height.to_bytes(
                conf.BLOCK_HEIGHT_BYTES_LEN, byteorder='big'),
            block_hash_encoded)
        self.__confirmed_block_db.Write(batch)

        self.__last_block = block
        self.__block_height = self.__last_block.height

        # logging.debug("ADD BLOCK Height : %i", block.height)
        # logging.debug("ADD BLOCK Hash : %s", block.block_hash)
        # logging.debug("ADD BLOCK MERKLE TREE Hash : %s", block.merkle_tree_root_hash)
        # logging.debug("ADD BLOCK Prev Hash : %s ", block.prev_block_hash)
        logging.info("ADD BLOCK HEIGHT : %i , HASH : %s", block.height,
                     block.block_hash)
        # Store the data of the block's transactions.
        # The peer executes the chaincode by passing the SCORE as a parameter.

        # util.logger.spam(f"blockchain:add_block --end--")

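        # Emit an 'AddBlock' APM event for the newly stored block.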
        util.apm_event(
            self.__peer_id, {
                'event_type': 'AddBlock',
                'peer_id': self.__peer_id,
                'data': {
                    'block_height': self.__block_height,
                    'block_type': block.block_type.name
                }
            })

        return True
Example #20
    def check_peer_status(self, group_id=None):
        if group_id is None:
            group_id = conf.ALL_GROUP_ID
        delete_peer_list = []
        alive_peer_last = None
        check_leader_peer_count = 0
        for peer_id in list(self.__peer_object_list[group_id]):
            peer_each = self.peer_list[group_id][peer_id]
            stub_manager = self.get_peer_stub_manager(peer_each, group_id)
            peer_object_each = self.__peer_object_list[group_id][peer_id]

            try:
                response = stub_manager.call(
                    "Request",
                    loopchain_pb2.Message(code=message_code.Request.status,
                                          channel=self.__channel_name),
                    is_stub_reuse=True)
                if response is None:
                    raise Exception

                peer_object_each.no_response_count_reset()
                peer_each.status = PeerStatus.connected
                peer_status = json.loads(response.meta)

                # logging.debug(f"Check Peer Status ({peer_status['peer_type']})")
                if peer_status["peer_type"] == loopchain_pb2.BLOCK_GENERATOR:
                    check_leader_peer_count += 1

                alive_peer_last = peer_each

            except Exception as e:
                util.apm_event(
                    self.__peer_id, {
                        'event_type': 'DisconnectedPeer',
                        'peer_id': self.__peer_id,
                        'data': {
                            'message':
                            'there is disconnected peer gRPC Exception: ' +
                            str(e),
                            'peer_id':
                            peer_each.peer_id
                        }
                    })

                logging.warning("there is disconnected peer peer_id(" +
                                peer_each.peer_id + ") gRPC Exception: " +
                                str(e))
                peer_object_each.no_response_count_up()

                util.logger.spam(
                    f"peer_manager::check_peer_status "
                    f"peer_id({peer_object_each.peer_info.peer_id}) "
                    f"no response count up({peer_object_each.no_response_count})"
                )

                if peer_object_each.no_response_count >= conf.NO_RESPONSE_COUNT_ALLOW_BY_HEARTBEAT:
                    peer_each.status = PeerStatus.disconnected
                    logging.debug(
                        f"peer status update time: {peer_each.status_update_time}"
                    )
                    logging.debug(f"this peer will remove {peer_each.peer_id}")
                    self.remove_peer(peer_each.peer_id, peer_each.group_id)
                    delete_peer_list.append(peer_each)

                # logging.debug(f"diff mins {util.datetime_diff_in_mins(peer_each.status_update_time)}")
                # if util.datetime_diff_in_mins(peer_each.status_update_time) >= conf.TIMEOUT_PEER_REMOVE_IN_LIST:
                #     logging.debug(f"peer status update time: {peer_each.status_update_time}")
                #     logging.debug(f"this peer will remove {peer_each.peer_id}")
                #     self.remove_peer(peer_each.peer_id, peer_each.group_id)
                #     delete_peer_list.append(peer_each)

        logging.debug(
            f"({self.__channel_name}) Leader Peer Count: ({check_leader_peer_count})"
        )
        if len(delete_peer_list) > 0 and check_leader_peer_count != 1:
            if alive_peer_last is not None:
                logging.warning(
                    f"reset network({self.__channel_name}) "
                    f"leader by RS new leader({alive_peer_last.peer_id}) "
                    f"target({alive_peer_last.target})")

                self.set_leader_peer(alive_peer_last, None)
                self.announce_new_leader(
                    complained_leader_id=alive_peer_last.peer_id,
                    new_leader_id=alive_peer_last.peer_id,
                    is_broadcast=True)
            else:
                logging.error("There is no leader in this network.")

        return delete_peer_list
Example #21
    def add_block(self, block: Block, is_commit_state_validation=False) -> bool:
        """add committed block

        :param block: a block after confirmation
        'STORE_VALID_TRANSACTION_ONLY'가 True로 설정 된 경우, 필수 parameter.
        :param is_commit_state_validation: if True: add only commit state validate pass
        :return: to add block is success or not
        """

        with self.__add_block_lock:
            util.logger.spam(f"ENGINE-308 blockchain:add_block is_commit_state_validation({is_commit_state_validation})")

            if block.height != 0 and block.height != (self.last_block.height + 1) and not is_commit_state_validation:
                logging.warning(f"blockchain:add_block invalid block height({block.height})")
                return False

            try:
                self.__verify_block_connection(block)
            except Exception as e:
                logging.error(f"add_block error! caused by : {e}")
                return False

            if conf.CHANNEL_OPTION[self.__channel_name]["send_tx_type"] == conf.SendTxType.icx and block.height == 0:
                invoke_results = ObjectManager().channel_service.genesis_invoke(block)
                ObjectManager().channel_service.score_write_precommit_state(block)
                self.__add_tx_to_block_db(block, invoke_results)
            elif not conf.CHANNEL_OPTION[self.__channel_name]['store_valid_transaction_only']:
                # STORE_VALID_TRANSACTION_ONLY
                if block.height == 0 or ObjectManager().channel_service is None:
                    # all results to success
                    success_result = {'code': int(message_code.Response.success)}
                    invoke_results = util.create_invoke_result_specific_case(
                        block.confirmed_transaction_list, success_result)
                    self.__add_tx_to_block_db(block, invoke_results)
                else:
                    try:
                        invoke_results = self.__score_invoke_with_state_integrity(block, is_commit_state_validation)
                    except Exception as e:
                        # When Grpc Connection Raise Exception
                        # save all result{'code': ScoreResponse.SCORE_CONTAINER_EXCEPTION, 'message': str(e)}
                        logging.error(f'Error While Invoke Score fail add block : {e}')
                        score_container_exception_result = {
                            'code': ScoreResponse.SCORE_CONTAINER_EXCEPTION, 'message': str(e)}
                        invoke_results = \
                            util.create_invoke_result_specific_case(
                                block.confirmed_transaction_list,
                                score_container_exception_result
                            )
                    self.__add_tx_to_block_db(block, invoke_results)
                    ObjectManager().channel_service.score_write_precommit_state(block)
            else:
                need_to_commit = True
                invoke_results = self.__invoke_results.get(block.block_hash, None)
                if not invoke_results:
                    need_to_commit = self.__prevent_next_block_mismatch(block, is_commit_state_validation)
                    if need_to_commit:
                        invoke_results = self.__score_invoke_with_state_integrity(block, is_commit_state_validation)

                try:
                    if need_to_commit:
                        self.__add_tx_to_block_db(block, invoke_results)
                        ObjectManager().channel_service.score_write_precommit_state(block)
                        # invoke_results = self.__invoke_results[block.block_hash]
                except Exception as e:
                    logging.warning(f"blockchain:add_block FAIL "
                                    f"channel_service.score_write_precommit_state")
                    raise e
                finally:
                    self.__invoke_results.pop(block.block_hash, None)

            # a condition for the exception case of genesis block.
            next_total_tx = self.__total_tx
            if block.height > 0:
                next_total_tx += block.confirmed_tx_len

            bit_length = next_total_tx.bit_length()
            byte_length = (bit_length + 7) // 8
            next_total_tx_bytes = next_total_tx.to_bytes(byte_length, byteorder='big')

            block_hash_encoded = block.block_hash.encode(encoding='UTF-8')

            batch = leveldb.WriteBatch()
            batch.Put(block_hash_encoded, block.serialize_block())
            batch.Put(BlockChain.LAST_BLOCK_KEY, block_hash_encoded)
            batch.Put(BlockChain.TRANSACTION_COUNT_KEY, next_total_tx_bytes)
            batch.Put(
                BlockChain.BLOCK_HEIGHT_KEY +
                block.height.to_bytes(conf.BLOCK_HEIGHT_BYTES_LEN, byteorder='big'),
                block_hash_encoded)
            self.__confirmed_block_db.Write(batch)

            self.__last_block = block
            self.__block_height = self.__last_block.height
            self.__total_tx = next_total_tx
            logging.debug(f"blockchain add_block set block_height({self.__block_height}), "
                          f"last_block({self.__last_block.block_hash})")
            logging.info(
                f"ADD BLOCK HEIGHT : {block.height} , HASH : {block.block_hash} , CHANNEL : {self.__channel_name}")

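            # Emit an 'AddBlock' APM event with the new block height and block type.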
            util.apm_event(self.__peer_id, {
                'event_type': 'AddBlock',
                'peer_id': self.__peer_id,
                'peer_name': conf.PEER_NAME,
                'channel_name': self.__channel_name,
                'data': {
                    'block_height': self.__block_height,
                    'block_type': block.block_type.name}})

            return True
Example #22
    def check_peer_status(self, group_id=None):
        if group_id is None:
            group_id = conf.ALL_GROUP_ID
        delete_peer_list = []
        delete_doubt_peers = []
        check_leader_peer_count = 0
        highest_peer = None

        for peer_id in list(self.__peer_object_list[group_id]):
            peer_each = self.peer_list[group_id][peer_id]
            stub_manager = self.get_peer_stub_manager(peer_each, group_id)
            peer_object_each = self.__peer_object_list[group_id][peer_id]

            try:
                response = stub_manager.call(
                    "Request",
                    loopchain_pb2.Message(code=message_code.Request.status,
                                          channel=self.__channel_name,
                                          message="check peer status by rs",
                                          meta=json.dumps({
                                              "highest_block_height":
                                              self.__highest_block_height
                                          })),
                    is_stub_reuse=True)
                if response.code != message_code.Response.success:
                    raise Exception

                peer_object_each.no_response_count_reset()
                peer_each.status = PeerStatus.connected
                peer_status = json.loads(response.meta)

                if peer_status["state"] == "BlockGenerate":
                    check_leader_peer_count += 1

                if peer_status["block_height"] >= self.__highest_block_height:
                    self.__highest_block_height = peer_status["block_height"]
                    highest_peer = peer_each
            except Exception as e:
                util.apm_event(
                    conf.RADIO_STATION_NAME, {
                        'event_type': 'DisconnectedPeer',
                        'peer_name': conf.PEER_NAME,
                        'channel_name': self.__channel_name,
                        'data': {
                            'message':
                            'there is disconnected peer gRPC Exception: ' +
                            str(e),
                            'peer_id':
                            peer_each.peer_id
                        }
                    })

                logging.warning("there is disconnected peer peer_id(" +
                                peer_each.peer_id + ") gRPC Exception: " +
                                str(e))
                peer_object_each.no_response_count_up()

                util.logger.spam(
                    f"peer_manager::check_peer_status "
                    f"peer_id({peer_object_each.peer_info.peer_id}) "
                    f"no response count up({peer_object_each.no_response_count})"
                )

                delete_doubt_peers.append(peer_each)

                if peer_object_each.no_response_count >= conf.NO_RESPONSE_COUNT_ALLOW_BY_HEARTBEAT:
                    peer_each.status = PeerStatus.disconnected
                    logging.debug(
                        f"peer status update time: {peer_each.status_update_time}"
                    )
                    logging.debug(f"this peer will remove {peer_each.peer_id}")
                    self.remove_peer(peer_each.peer_id, peer_each.group_id)
                    delete_peer_list.append(peer_each)

                # logging.debug(f"diff mins {util.datetime_diff_in_mins(peer_each.status_update_time)}")
                # if util.datetime_diff_in_mins(peer_each.status_update_time) >= conf.TIMEOUT_PEER_REMOVE_IN_LIST:
                #     logging.debug(f"peer status update time: {peer_each.status_update_time}")
                #     logging.debug(f"this peer will remove {peer_each.peer_id}")
                #     self.remove_peer(peer_each.peer_id, peer_each.group_id)
                #     delete_peer_list.append(peer_each)

        # if len(delete_peer_list) > 0 and check_leader_peer_count != 1:
        if check_leader_peer_count != 1:
            logging.warning(
                f"({self.__channel_name}) Leader Peer Count: ({check_leader_peer_count}) "
                f"remain heartbeat count("
                f"{conf.NO_RESPONSE_COUNT_ALLOW_BY_HEARTBEAT - self.__leader_complain_count}"
                f") before leader complain by rs")
            self.__leader_complain_count += 1

            if self.__leader_complain_count > conf.NO_RESPONSE_COUNT_ALLOW_BY_HEARTBEAT_LEADER:
                if highest_peer is not None:
                    logging.warning(
                        f"reset network({self.__channel_name}) "
                        f"leader by RS new leader({highest_peer.peer_id}) "
                        f"target({highest_peer.target})")

                    self.set_leader_peer(highest_peer, None)
                    self.announce_new_leader(
                        complained_leader_id=highest_peer.peer_id,
                        new_leader_id=highest_peer.peer_id,
                        is_broadcast=True)

                    # return all delete doubt peers when leader is down! (for network recover immediately)
                    self.__leader_complain_count = 0
                    return delete_doubt_peers
                else:
                    logging.error("There is no leader in this network.")
        else:
            self.__leader_complain_count = 0
            logging.debug(
                f"({self.__channel_name}) Leader Peer Count: ({check_leader_peer_count})"
            )

        # return delete confirmed peers only when leader is alive (while network is working).
        return delete_peer_list
Example #23
    def __handler_score_invoke(self, request, context):
        logging.debug("ScoreService handler invoke...")
        results = {}
        # dict key

        # TODO: if the SCORE invoke related codes and messages get defined in a
        #  separate file, move the defines below there as well!
        code_key = 'code'
        error_message_key = 'message'

        if self.__score is None:
            logging.error("There is no score!!")
            return loopchain_pb2.Message(code=message_code.Response.fail)
        else:
            block = pickle.loads(request.object)
            logging.debug('tx_list_length : %d ',
                          len(block.confirmed_transaction_list))
            for transaction in block.confirmed_transaction_list:
                if isinstance(transaction, Transaction) and \
                        transaction.get_tx_hash() is not None:
                    tx_hash = transaction.get_tx_hash()
                    results[tx_hash] = {}
                    # put score invoke result to results[tx_hash]
                    try:
                        invoke_result = self.__score.invoke(transaction, block)
                        if invoke_result is None:
                            results[tx_hash] = {
                                'code': message_code.Response.success
                            }
                            # logging.debug(f"handler_score_invoke: ({invoke_result})")
                        else:
                            if code_key not in invoke_result:
                                code_not_return = "Score not return code"
                                if error_message_key in invoke_result:
                                    raise ScoreInvokeError(
                                        code_not_return + ": " +
                                        invoke_result[error_message_key])
                                raise ScoreInvokeError(code_not_return)
                            elif error_message_key in invoke_result:
                                results[tx_hash][error_message_key] = \
                                    invoke_result[error_message_key]
                            results[tx_hash][code_key] = invoke_result[code_key]

                    # if score raise exception result to fail and put error message
                    except Exception as e:
                        logging.exception("tx %s score invoke is fail!! : %s ",
                                          str(tx_hash), e)
                        results[tx_hash][code_key] = ScoreResponse.EXCEPTION
                        results[tx_hash][error_message_key] = str(e)
                        continue

            # logging.debug('results : %s', str(results))
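            # Report the last transaction's invoke result as an 'InvokeResult' APM event.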
            util.apm_event(
                self.__peer_id, {
                    'event_type': 'InvokeResult',
                    'peer_id': self.__peer_id,
                    'data': {
                        'invoke_result': invoke_result
                    }
                })

            meta = json.dumps(results)
            return loopchain_pb2.Message(code=message_code.Response.success,
                                         meta=meta)
Example #24
    def score_invoke(self, block: Block):
        logging.debug("ScoreService handler invoke...")

        invoke_result_list = {}
        code_key = 'code'
        error_message_key = 'message'

        with self.__precommit_usage_lock:
            if not self._score_service.score:
                logging.error("There is no score!!")
                return loopchain_pb2.Message(code=message_code.Response.fail)
            else:
                # reuse the saved invoke results if this block was invoked before
                saved_results = self.__temp_invoke_results[block.height].get(
                    block.block_hash)
                logging.debug(
                    f"saved invoke result {block.height}, {block.block_hash} : {saved_results}"
                )
                if saved_results:
                    commit_state = ScoreHelper().get_block_commit_state(
                        block.height, block.block_hash)
                    return loopchain_pb2.Message(
                        code=message_code.Response.success,
                        meta=json.dumps(saved_results),
                        object=pickle.dumps(commit_state))

                logging.debug('tx_list_length : %d ', block.confirmed_tx_len)
                ScoreHelper().init_invoke(block)
                for transaction in block.confirmed_transaction_list:
                    if isinstance(transaction, Transaction) and \
                            transaction.tx_hash is not None:
                        tx_hash = transaction.tx_hash
                        invoke_result_list[tx_hash] = {}
                        # put score invoke result to results[tx_hash]
                        invoke_result = {}
                        try:
                            plugin_result = self._score_service.score_plugin.invoke(
                                transaction=transaction, block=block)
                            if plugin_result == PluginReturns.CONTINUE:
                                plugin_result = self._score_service.score.invoke(
                                    transaction, block)
                            invoke_result = plugin_result
                            if invoke_result is None:
                                invoke_result_list[tx_hash] = {
                                    code_key: message_code.Response.success
                                }
                                ScoreHelper().commit_tx_state()
                            else:
                                if code_key not in invoke_result:
                                    code_not_return = "Score not return code"
                                    if error_message_key in invoke_result:
                                        raise ScoreInvokeError(
                                            code_not_return + ": " +
                                            invoke_result[error_message_key])
                                    raise ScoreInvokeError(code_not_return)
                                elif (invoke_result[code_key] ==
                                      message_code.Response.success):
                                    ScoreHelper().commit_tx_state()
                                elif error_message_key in invoke_result:
                                    invoke_result_list[tx_hash][error_message_key] = \
                                        invoke_result[error_message_key]
                                    ScoreHelper().reset_tx_state()
                                invoke_result_list[tx_hash][code_key] = \
                                    invoke_result[code_key]

                        # if score raise exception result to fail and put error message
                        except Exception as e:
                            logging.exception(
                                "tx %s score invoke is fail!! : %s ",
                                str(tx_hash), e)
                            ScoreHelper().reset_tx_state()
                            invoke_result[code_key] = ScoreResponse.EXCEPTION
                            invoke_result[error_message_key] = str(e)
                            invoke_result_list[tx_hash] = invoke_result

                        peer_id = transaction.meta[Transaction.PEER_ID_KEY]

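                        # Report this transaction's invoke result as a 'ScoreInvoke' APM event.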
                        util.apm_event(
                            self._score_service.peer_id, {
                                'event_type': 'ScoreInvoke',
                                'peer_id': self._score_service.peer_id,
                                'peer_name': conf.PEER_NAME,
                                'channel_name':
                                self._score_service.channel_name,
                                'data': {
                                    'request_peer_id': peer_id,
                                    'tx_data': transaction.get_data_string(),
                                    'invoke_result': invoke_result
                                }
                            })

                try:
                    self._score_service.iiss_plugin.after_invoke(
                        invoke_result_list=invoke_result_list, block=block)
                except Exception as e:
                    logging.error(f"IISS Plugin Exception({e})")
                    util.exit_and_msg(
                        f"Shutdown Peer by IISS Plugin Exception({e})")

                ScoreHelper().precommit_state()

                self.__temp_invoke_results[block.height][
                    block.block_hash] = invoke_result_list

                if block.confirmed_tx_len > 0:
                    commit_state = ScoreHelper().get_block_commit_state(
                        block.height, block.block_hash)
                else:
                    commit_state = {}

                meta = json.dumps(invoke_result_list)
                return loopchain_pb2.Message(
                    code=message_code.Response.success,
                    meta=meta,
                    object=pickle.dumps(commit_state))
Example #25
    def genesis_invoke(self, block_pickled):
        logging.debug("ScoreService handler genesis invoke...")
        results = {}
        # dict key

        code_key = 'code'
        error_message_key = 'message'

        if not self._score_service.score:
            logging.error("There is no score!!")
            return loopchain_pb2.Message(code=message_code.Response.fail)
        else:
            block = pickle.loads(block_pickled)
            logging.debug('tx_list_length : %d ', block.confirmed_tx_len)
            ScoreHelper().init_invoke(block)
            for transaction in block.confirmed_transaction_list:
                if isinstance(transaction,
                              Transaction) and transaction.tx_hash is not None:
                    tx_hash = transaction.tx_hash
                    results[tx_hash] = {}
                    # put score invoke result to results[tx_hash]
                    try:
                        plugin_result = self._score_service.score_plugin.genesis_invoke(
                            transaction=transaction, block=block)
                        if plugin_result == PluginReturns.CONTINUE:
                            plugin_result = self._score_service.score.genesis_invoke(
                                transaction, block)
                        invoke_result = plugin_result
                        if invoke_result is None:
                            results[tx_hash] = {
                                code_key: message_code.Response.success
                            }
                            ScoreHelper().commit_tx_state()
                        else:
                            if code_key not in invoke_result:
                                code_not_return = "Score not return code"
                                if error_message_key in invoke_result:
                                    raise ScoreInvokeError(
                                        code_not_return + ": " +
                                        invoke_result[error_message_key])
                                raise ScoreInvokeError(code_not_return)
                            elif (invoke_result[code_key] ==
                                  message_code.Response.success):
                                ScoreHelper().commit_tx_state()
                            elif error_message_key in invoke_result:
                                results[tx_hash][error_message_key] = \
                                    invoke_result[error_message_key]
                                ScoreHelper().reset_tx_state()
                            results[tx_hash][code_key] = invoke_result[code_key]

                    # if score raise exception result to fail and put error message
                    except Exception as e:
                        logging.exception("tx %s score invoke is fail!! : %s ",
                                          str(tx_hash), e)
                        ScoreHelper().reset_tx_state()
                        results[tx_hash][code_key] = ScoreResponse.EXCEPTION
                        results[tx_hash][error_message_key] = str(e)
                        continue

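                    # Report this genesis transaction's invoke result as a 'GenesisInvoke' APM event.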
                    util.apm_event(
                        self._score_service.peer_id, {
                            'event_type': 'GenesisInvoke',
                            'peer_id': self._score_service.peer_id,
                            'peer_name': conf.PEER_NAME,
                            'channel_name': self._score_service.channel_name,
                            'data': {
                                'request_peer_id': None,
                                'tx_data': transaction.get_genesis_tx_data(),
                                'invoke_result': invoke_result
                            }
                        })

            logging.debug('results : %s', str(results))
            ScoreHelper().precommit_state()
            meta = json.dumps(results)
            return loopchain_pb2.Message(code=message_code.Response.success,
                                         meta=meta)