async def node_ws_PublishNewBlock(self, **kwargs):
    """Handle a 'PublishNewBlock' websocket notification from the parent node.

    Deserializes the announced block and its confirm votes, verifies the block
    against the local chain, and adds it as a confirmed block on success.

    :param kwargs: expects 'block' (serialized block dict) and optionally
                   'confirm_info' (JSON-dumped votes; may be an empty string).
    """
    block_dict, votes_dumped = kwargs.get('block'), kwargs.get('confirm_info', '')
    try:
        votes_serialized = json.loads(votes_dumped)
        vote = BlockVotes.deserialize_votes(votes_serialized)
    except json.JSONDecodeError:
        # Not JSON (e.g. empty string or a legacy raw format): pass it through as-is.
        vote = votes_dumped
    blockchain = ObjectManager().channel_service.block_manager.blockchain
    new_block_height = blockchain.block_versioner.get_height(block_dict)
    # Only process blocks that are ahead of our current chain height.
    if new_block_height > blockchain.block_height:
        block_version = blockchain.block_versioner.get_version(new_block_height)
        block_serializer = BlockSerializer.new(block_version, blockchain.tx_versioner)
        confirmed_block = block_serializer.deserialize(block_dict)
        block_verifier = BlockVerifier.new(block_version, blockchain.tx_versioner)
        block_verifier.invoke_func = blockchain.score_invoke
        reps_getter = blockchain.find_preps_addresses_by_roothash
        try:
            block_verifier.verify(confirmed_block,
                                  blockchain.last_block,
                                  blockchain,
                                  generator=blockchain.get_expected_generator(confirmed_block),
                                  reps_getter=reps_getter)
        except Exception as e:
            # Stored for the subscriber loop to pick up and raise; not raised here.
            self._exception = AnnounceNewBlockError(f"error: {type(e)}, message: {str(e)}")
        else:
            logging.debug(f"add_confirmed_block height({confirmed_block.header.height}), "
                          f"hash({confirmed_block.header.hash.hex()}), votes_dumped({votes_dumped})")
            ObjectManager().channel_service.block_manager.add_confirmed_block(
                confirmed_block=confirmed_block, confirm_info=vote)
        finally:
            # Any announcement (even a failed one) proves the node link is alive,
            # so push back the block-monitoring watchdog.
            ObjectManager().channel_service.reset_block_monitoring_timer()
def __add_block(self, block: Block, confirm_info, need_to_write_tx_info=True, need_to_score_invoke=True):
    """Persist a confirmed block: invoke SCORE if needed, write tx info, commit state, and save block data.

    Runs entirely under ``__add_block_lock`` so that only one block is added at a time.

    :param block: confirmed block to store.
    :param confirm_info: votes that confirm this block (written alongside block data).
    :param need_to_write_tx_info: if True, write per-tx invoke results to the block DB.
    :param need_to_score_invoke: if True, invoke SCORE and write its precommit state.
    :return: True on success.
    :raises Exception: re-raises any failure from tx-info write or precommit-state write.
    """
    with self.__add_block_lock:
        # Reuse invoke results cached by an earlier invocation, if present.
        invoke_results = self.__invoke_results.get(block.header.hash.hex(), None)
        if invoke_results is None and need_to_score_invoke:
            if block.header.height == 0:
                block, invoke_results = ObjectManager().channel_service.genesis_invoke(block)
            else:
                block, invoke_results = ObjectManager().channel_service.score_invoke(block)

        try:
            if need_to_write_tx_info:
                self.__add_tx_to_block_db(block, invoke_results)
            if need_to_score_invoke:
                ObjectManager().channel_service.score_write_precommit_state(block)
        except Exception as e:
            # Include the actual error; re-raise with the original traceback intact.
            logging.warning(f"blockchain:add_block FAIL "
                            f"channel_service.score_write_precommit_state ({e!r})")
            raise
        finally:
            # Cached invoke results are single-use; drop them whether we succeeded or not.
            self.__invoke_results.pop(block.header.hash.hex(), None)

        next_total_tx = self.__write_block_data(block, confirm_info)

        self.__last_block = block
        self.__block_height = self.__last_block.header.height
        self.__total_tx = next_total_tx

        logging.debug(f"blockchain add_block set block_height({self.__block_height}), "
                      f"last_block({self.__last_block.header.hash.hex()})")
        logging.info(f"ADD BLOCK HEIGHT : {block.header.height} , "
                     f"HASH : {block.header.hash.hex()} , "
                     f"CHANNEL : {self.__channel_name}")
        logging.debug(f"ADDED BLOCK HEADER : {block.header}")

        util.apm_event(self.__peer_id, {
            'event_type': 'AddBlock',
            'peer_id': self.__peer_id,
            'peer_name': conf.PEER_NAME,
            'channel_name': self.__channel_name,
            'data': {
                'block_height': self.__block_height
            }})

        # notify new block
        ObjectManager().channel_service.inner_service.notify_new_block()

        # reset_network_by_block_height is called in critical section by self.__add_block_lock.
        # Other Blocks must not be added until reset_network_by_block_height function finishes.
        ObjectManager().channel_service.reset_network_by_block_height(
            self.__last_block.header.height)

        return True
def AnnounceNewLeader(self, request, context):
    """Handle a new-leader announcement from a peer.

    Looks up the announced leader in the channel's peer manager; if the peer
    is unknown (e.g. RS restarted and lost its peer table), reply with a
    failure code so peers re-connect. Otherwise record it as the leader.

    :param request: ComplainLeaderRequest with channel and new_leader_id.
    :param context: gRPC context (unused).
    :return: CommonReply with success or fail_no_peer_info_in_rs.
    """
    channel_name = request.channel if request.channel != '' else conf.LOOPCHAIN_DEFAULT_CHANNEL
    peer_manager = ObjectManager().rs_service.channel_manager.get_peer_manager(channel_name)
    leader_candidate = peer_manager.get_peer(request.new_leader_id, None)

    if leader_candidate is None:
        # Unknown peer — most likely the RS restarted and has no live connections.
        logging.warning("RadioStation Has No live Peer Connection(candidate reason is RS's restart)")
        logging.warning("RadioStation Request to Peers make Re-Connection")
        fail_code = message_code.Response.fail_no_peer_info_in_rs
        return loopchain_pb2.CommonReply(
            response_code=fail_code,
            message=message_code.get_response_msg(fail_code))

    logging.debug(f"AnnounceNewLeader({channel_name}) "
                  f"id({request.new_leader_id}) "
                  f"target({leader_candidate.target}): " + request.message)
    peer_manager.set_leader_peer(peer=leader_candidate, group_id=None)
    return loopchain_pb2.CommonReply(
        response_code=message_code.Response.success, message="success")
def __init__(self, blockchain_db=None, channel_name=None):
    """Initialize the blockchain state for one channel.

    :param blockchain_db: pre-opened level DB handle; when None a LevelDB is
                          opened at conf.DEFAULT_LEVEL_DB_PATH.
    :param channel_name: channel this chain belongs to; defaults to
                         conf.LOOPCHAIN_DEFAULT_CHANNEL.
    :raises leveldb.LevelDBError: if the default DB cannot be created.
    """
    if channel_name is None:
        channel_name = conf.LOOPCHAIN_DEFAULT_CHANNEL
    # -1 means "no block yet"; set properly once a last block is loaded.
    self.__block_height = -1
    self.__last_block = None
    self.__save_tx_by_address_strategy = None
    self.__channel_name = channel_name
    self.__set_send_tx_type(conf.CHANNEL_OPTION[channel_name]["send_tx_type"])
    self.__peer_id = None
    if ObjectManager().peer_service is not None:
        self.__peer_id = ObjectManager().peer_service.peer_id

    # block db has [ block_hash - block | block_height - block_hash | BlockChain.LAST_BLOCK_KEY - block_hash ]
    self.__confirmed_block_db = blockchain_db
    # logging.debug(f"BlockChain::init confirmed_block_db({self.__confirmed_block_db})")

    if self.__confirmed_block_db is None:
        try:
            self.__confirmed_block_db = leveldb.LevelDB(conf.DEFAULT_LEVEL_DB_PATH)
        except leveldb.LevelDBError:
            raise leveldb.LevelDBError("Fail To Create Level DB(path): " + conf.DEFAULT_LEVEL_DB_PATH)

    # made block count as a leader
    self.__made_block_count = 0
    # cache of invoke results keyed by block hash hex (consumed by __add_block)
    self.__invoke_results = {}

    self.last_commit_state_height = 0
    self.last_commit_state = {}

    # serializes block addition / confirmation across threads
    self.__add_block_lock = threading.Lock()
    self.__confirmed_block_lock = threading.Lock()

    self.__total_tx = 0
def __block_request(self, peer_stub, block_height):
    """request block by gRPC or REST

    :param peer_stub: gRPC stub of the peer to sync from.
    :param block_height: height of the block to request.
    :return: (block, max_block_height, unconfirmed_block_height, confirm_info, response_code)
    """
    if ObjectManager().channel_service.is_support_node_function(conf.NodeFunction.Vote):
        # Voting node: pull the block over gRPC from a peer.
        response = peer_stub.BlockSync(
            loopchain_pb2.BlockSyncRequest(block_height=block_height,
                                           channel=self.__channel_name),
            conf.GRPC_TIMEOUT)
        try:
            block = self.__blockchain.block_loads(response.block)
        except Exception as e:
            traceback.print_exc()
            raise exception.BlockError(f"Received block is invalid: original exception={e}")
        return block, response.max_block_height, response.unconfirmed_block_height,\
            response.confirm_info, response.response_code
    else:
        # request REST(json-rpc) way to RS peer
        # NOTE(review): __block_request_by_citizen is called here with the RS stub
        # as a second argument — confirm its signature actually accepts it.
        return self.__block_request_by_citizen(
            block_height,
            ObjectManager().channel_service.radio_station_stub)
def Subscribe(self, request, context):
    """Register a peer on the channel that the RadioStation broadcasts to.

    Adds the peer to the broadcast audience, marks it connected in the peer
    manager, and announces the new peer to the network.

    :param request: SubscribeRequest
    :param context: gRPC context (unused)
    :return: CommonReply (success, or fail if the peer cannot be dumped)
    """
    channel = conf.LOOPCHAIN_DEFAULT_CHANNEL if request.channel == '' else request.channel
    logging.debug("Radio Station Subscription peer_id: " + str(request))
    ObjectManager().rs_service.channel_manager.add_audience(channel, request.peer_target)
    # Mark the peer connected; returns the updated PeerInfo.
    peer: PeerInfo = ObjectManager().rs_service.channel_manager.get_peer_manager(channel).update_peer_status(
        peer_id=request.peer_id, peer_status=PeerStatus.connected)
    try:
        peer_dumped = peer.dump()
        request.peer_order = peer.order
        request.peer_object = peer_dumped
        # Broadcast the enriched request so other peers learn about this one.
        ObjectManager().rs_service.channel_manager.get_peer_manager(channel).announce_new_peer(request)
        return loopchain_pb2.CommonReply(
            response_code=message_code.get_response_code(message_code.Response.success),
            message=message_code.get_response_msg(message_code.Response.success))
    except Exception as e:
        # Serialization of the peer failed; report failure to the subscriber.
        logging.warning("Fail Peer Dump: " + str(e))
        return loopchain_pb2.CommonReply(response_code=message_code.get_response_code(message_code.Response.fail),
                                         message=message_code.get_response_msg(message_code.Response.fail))
def AnnounceUnconfirmedBlock(self, request, context):
    """Receive an unconfirmed block and deliberately vote "fail" on it.

    This is fault-injection ("black peer") behavior: the block is not actually
    validated; a fail_validate_block vote is always broadcast.

    :param request: announcement carrying the pickled block.
    :param context: gRPC context (unused).
    :return: CommonReply(success)
    """
    channel_name = conf.LOOPCHAIN_DEFAULT_CHANNEL if request.channel == '' else request.channel
    logging.debug(f"peer_outer_service::AnnounceUnconfirmedBlock channel({channel_name})")
    # SECURITY NOTE(review): pickle.loads on a network-supplied payload can execute
    # arbitrary code; acceptable only because this is test/fault-injection code.
    unconfirmed_block = pickle.loads(request.block)

    logging.warning("Black Peer makes Fail validate Message by intention!")
    vote_code, message = message_code.get_response(message_code.Response.fail_validate_block)

    block_vote = loopchain_pb2.BlockVote(
        vote_code=vote_code,
        channel=channel_name,
        message=message,
        block_hash=unconfirmed_block.block_hash,
        peer_id=ObjectManager().peer_service.peer_id,
        group_id=ObjectManager().peer_service.group_id)

    # Broadcast the (intentionally failing) vote to the network.
    self.peer_service.common_service.broadcast("VoteUnconfirmedBlock", block_vote)

    return loopchain_pb2.CommonReply(response_code=message_code.Response.success, message="success")
def announce_new_leader(self, complained_leader_id, new_leader_id, is_broadcast=True):
    """Announce New Leader Id to Network

    :param complained_leader_id: peer id of the leader being complained about.
    :param new_leader_id: peer id of the newly elected leader.
    :param is_broadcast: False(notify to RS only), True(broadcast to network include RS)
    :return:
    """
    announce_message = loopchain_pb2.ComplainLeaderRequest(
        complained_leader_id=complained_leader_id,
        new_leader_id=new_leader_id,
        message="Announce New Leader"
    )

    # new_leader_peer = self.get_peer(new_leader_id)

    # Announce New Leader to Radio station
    try:
        if ObjectManager().peer_service.stub_to_radiostation is not None:
            ObjectManager().peer_service.stub_to_radiostation.call("AnnounceNewLeader", announce_message)
    except Exception as e:
        # Best-effort: when running inside RS there is no peer_service to notify.
        logging.debug("in RS there is no peer_service....")

    if is_broadcast is True:
        # Fan the announcement out to every known peer; failures are logged and skipped.
        for peer_id in list(self.peer_list[conf.ALL_GROUP_ID]):
            peer_each = self.peer_list[conf.ALL_GROUP_ID][peer_id]
            stub_manager = self.get_peer_stub_manager(peer_each, conf.ALL_GROUP_ID)
            try:
                stub_manager.call("AnnounceNewLeader", announce_message, is_stub_reuse=True)
            except Exception as e:
                logging.warning("gRPC Exception: " + str(e))
                logging.debug("No response target: " + str(peer_each.target))
def reset_all_peers(self, reps_hash, reps, update_now=True):
    """Replace the whole peer list with the given representatives.

    :param reps_hash: hex root hash of the new reps set.
    :param reps: list of rep dicts with 'id' and 'p2pEndpoint'.
    :param update_now: when False, stash the data and apply later.
    """
    util.logger.debug(f"reset_all_peers."
                      f"\nresult roothash({reps_hash})"
                      f"\npeer_list roothash({self.reps_hash().hex()})"
                      f"\nupdate now({update_now})")

    if not update_now:
        # Defer: remember the new reps and apply them on a later call.
        self._reps_reset_data = (reps_hash, reps)
        return

    blockchain = ObjectManager().channel_service.block_manager.blockchain

    if reps_hash == self.reps_hash().hex():
        # Same roothash — nothing to change.
        util.logger.debug(f"There is no change in load_peers_from_iiss.")
        return

    self._peer_list_data.peer_list.clear()
    self._prepared_reps_hash = None
    # Rebuild the peer list; order is 1-based.
    for order, rep_info in enumerate(reps, 1):
        peer = Peer(rep_info["id"], rep_info["p2pEndpoint"], order=order)
        self.add_peer(peer)

    new_reps = blockchain.find_preps_addresses_by_roothash(
        Hash32.fromhex(reps_hash, ignore_prefix=True))
    # This node's role may change depending on whether it is in the new reps.
    new_node_type = NodeType.CommunityNode if ChannelProperty().peer_address in new_reps \
        else NodeType.CitizenNode
    is_switched_role = new_node_type != ChannelProperty().node_type
    blockchain.reset_leader_made_block_count(is_switched_role)
def GetPeerStatus(self, request, context):
    """Query a peer's status on behalf of the RadioStation.

    Resolves the peer and its stub manager, forwards a GetStatus call, and
    falls back to an empty StatusReply when the peer is unknown or the call
    fails.

    :param request: carries peer_id, group_id and optional channel.
    :param context: gRPC context (unused).
    :return: the peer's StatusReply, or an empty one on failure.
    """
    # request parsing
    channel_name = request.channel or conf.LOOPCHAIN_DEFAULT_CHANNEL
    logging.debug(f"rs service GetPeerStatus peer_id({request.peer_id}) group_id({request.group_id})")

    empty_reply = loopchain_pb2.StatusReply(status="", block_height=0, total_tx=0)

    # get stub of target peer
    peer_manager = ObjectManager().rs_service.channel_manager.get_peer_manager(channel_name)
    target_peer = peer_manager.get_peer(request.peer_id)
    if target_peer is None:
        return empty_reply

    stub_manager = peer_manager.get_peer_stub_manager(target_peer)
    if stub_manager is None:
        return empty_reply

    status_request = loopchain_pb2.StatusRequest(
        request="get peer status from rs", channel=channel_name)
    try:
        reply = stub_manager.call_in_times("GetStatus", status_request)
    except Exception as e:
        logging.warning(f"fail GetStatus... ({e})")
    else:
        if reply is not None:
            return reply

    return empty_reply
def remove_peer(self, peer_id, group_id=None):
    """Remove a peer from the peer/order tables and drop it from the audience.

    :param peer_id: id of the peer to remove.
    :param group_id: group to remove from; defaults to ALL_GROUP_ID.
    :return: True if the peer was found and removed, False otherwise.
    """
    logging.debug(f"remove peer : {peer_id}")
    if group_id is None:
        group_id = conf.ALL_GROUP_ID
    try:
        remove_peer = self.peer_list[group_id].pop(peer_id)
        self.__peer_object_list[group_id].pop(peer_id)
        # Removing from a specific group also removes from the ALL group mirror.
        if group_id != conf.ALL_GROUP_ID:
            self.peer_list[conf.ALL_GROUP_ID].pop(peer_id)
            self.__peer_object_list[conf.ALL_GROUP_ID].pop(peer_id)

        logging.debug("remove_peer: " + str(remove_peer.order))
        # Keep the order index tables in sync with the peer tables.
        if remove_peer.order in self.peer_order_list[group_id].keys():
            del self.peer_order_list[group_id][remove_peer.order]
        if remove_peer.order in self.peer_order_list[conf.ALL_GROUP_ID].keys():
            del self.peer_order_list[conf.ALL_GROUP_ID][remove_peer.order]

        if ObjectManager().peer_service is not None:
            util.logger.spam(f"peer_manager:remove_peer try remove audience in sub processes")
            ObjectManager().peer_service.common_service.remove_audience(peer_id, remove_peer.target)
        return True
    except KeyError as e:
        # Unknown peer id (or group) — treated as a no-op failure, not an error.
        logging.debug(f"peer_manager:remove_peer there is no peer({e})")
        return False
def __block_request_by_citizen(self, block_height):
    """Request a block from the RadioStation via REST (citizen node path).

    :param block_height: height of the block to fetch.
    :return: (block, max_height, -1, votes, response_code); -1 marks
             "unconfirmed block height unknown" for the REST path.
    :raises exception.InvalidBlockSyncTarget: when the RS returns no last block.
    """
    rs_client = ObjectManager().channel_service.rs_client
    get_block_result = rs_client.call(
        RestMethod.GetBlockByHeight,
        RestMethod.GetBlockByHeight.value.params(height=str(block_height)))

    last_block = rs_client.call(RestMethod.GetLastBlock)
    if not last_block:
        raise exception.InvalidBlockSyncTarget(
            "The Radiostation may not be ready. It will retry after a while.")

    max_height = self.blockchain.block_versioner.get_height(last_block)
    block_version = self.blockchain.block_versioner.get_version(block_height)
    block_serializer = BlockSerializer.new(block_version, self.blockchain.tx_versioner)
    block = block_serializer.deserialize(get_block_result['block'])

    votes_dumped: str = get_block_result.get('confirm_info', '')
    try:
        votes_serialized = json.loads(votes_dumped)
        version = self.blockchain.block_versioner.get_version(block_height)
        votes = Votes.get_block_votes_class(version).deserialize_votes(votes_serialized)
    except json.JSONDecodeError:
        # Not JSON (e.g. empty string): fall back to the raw value.
        votes = votes_dumped
    return block, max_height, -1, votes, message_code.Response.success
def find_invoke_result_by_tx_hash(self, tx_hash: Union[str, Hash32]):
    """find invoke result matching tx_hash and return result if not in blockchain return code delay

    :param tx_hash: tx hash as hex string or Hash32.
    :return: {"code" : "code", "error_message" : "error_message if not fail this is not exist"}
    """
    hash_hex = tx_hash.hex() if isinstance(tx_hash, Hash32) else tx_hash

    try:
        stored_info = self.find_tx_info(hash_hex)
    except KeyError as e:
        block_manager = ObjectManager().channel_service.block_manager
        if hash_hex in block_manager.get_tx_queue():
            # Still queued: not yet invoked, caller should retry later.
            logging.debug(f"blockchain:find_invoke_result_by_tx_hash pending tx({hash_hex})")
            return {'code': ScoreResponse.NOT_INVOKED}
        logging.debug("blockchain::find invoke_result KeyError: " + str(e))
        # This transaction is considered a failure.
        return {'code': ScoreResponse.NOT_EXIST}

    return stored_info['result']
def AnnounceUnconfirmedBlock(self, request, context): """수집된 tx 로 생성한 Block 을 각 peer 에 전송하여 검증을 요청한다. :param request: :param context: :return: """ # self.__block_manager.add_unconfirmed_block(request.block) unconfirmed_block = pickle.loads(request.block) logging.warning("Black Peer makes Fail validate Message by intention!") vote_code, message = message_code.get_response( message_code.Response.fail_validate_block) self.peer_service.stub_to_blockgenerator.call( "VoteUnconfirmedBlock", loopchain_pb2.BlockVote( vote_code=vote_code, message=message, block_hash=unconfirmed_block.block_hash, peer_id=ObjectManager().peer_service.peer_id, group_id=ObjectManager().peer_service.group_id)) return loopchain_pb2.CommonReply( response_code=message_code.Response.success, message="success")
def __prevent_next_block_mismatch(self, next_block: Block) -> bool:
    """Reconcile the SCORE's last invoked height with the chain before adding next_block.

    Queries the SCORE for its last block height and:
    - equal heights: the block was already invoked, nothing to do;
    - SCORE behind: re-invokes every missing block up to (but excluding) next_block;
    - SCORE exactly one ahead: checks whether the invoke result is already saved;
    - more than one ahead: unrecoverable — the peer is shut down.

    :param next_block: the block about to be added to the chain.
    :return: True if next_block still needs to be invoked, False if its
             invocation (or result) already exists in the SCORE/DB.
    :raises RuntimeError: when a block needed for re-invocation cannot be found.
    """
    logging.debug(f"prevent_block_mismatch...")
    score_stub = StubCollection().icon_score_stubs[self.__channel_name]
    request = {
        "method": "ise_getStatus",
        "params": {"filter": ["lastBlock"]}
    }
    response = score_stub.sync_task().query(request)
    score_last_block_height = int(response['lastBlock']['blockHeight'], 16)

    if score_last_block_height == next_block.header.height:
        # SCORE already invoked this block.
        logging.debug(f"already invoked block in score...")
        return False

    if score_last_block_height < next_block.header.height:
        # SCORE is behind: replay every block between its height and next_block.
        for invoke_block_height in range(score_last_block_height + 1, next_block.header.height):
            logging.debug(f"mismatch invoke_block_height({invoke_block_height}) "
                          f"score_last_block_height({score_last_block_height}) "
                          f"next_block_height({next_block.header.height})")

            invoke_block = self.find_block_by_height(invoke_block_height)
            if invoke_block is None:
                # BUGFIX: message previously lacked the closing parenthesis.
                raise RuntimeError("Error raised during prevent mismatch block, "
                                   f"Cannot find block({invoke_block_height})")

            invoke_block, invoke_block_result = ObjectManager().channel_service.score_invoke(invoke_block)

            self.__add_tx_to_block_db(invoke_block, invoke_block_result)
            ObjectManager().channel_service.score_write_precommit_state(invoke_block)

        return True

    if score_last_block_height == next_block.header.height + 1:
        # SCORE is one block ahead; check whether we already saved its invoke result.
        try:
            invoke_result_block_height_bytes = \
                self.__confirmed_block_db.Get(BlockChain.INVOKE_RESULT_BLOCK_HEIGHT_KEY)
            invoke_result_block_height = int.from_bytes(invoke_result_block_height_bytes, byteorder='big')

            if invoke_result_block_height == next_block.header.height:
                logging.debug(f"already saved invoke result...")
                return False
        except KeyError:
            logging.debug(f"There is no invoke result height in db.")
    else:
        # SCORE is 2+ blocks ahead of loopchain — unrecoverable divergence.
        util.exit_and_msg("Too many different(over 2) of block height between the loopchain and score. "
                          "Peer will be down. : "
                          f"loopchain({next_block.header.height})/score({score_last_block_height})")

    return True
def _load_peers_from_db() -> list:
    """Load the representative (prep) list from the local blockchain DB.

    Uses the last block's reps hash when a last block exists; otherwise falls
    back to the channel's configured crep_root_hash.

    :return: list of preps found under the resolved root hash.
    """
    blockchain = ObjectManager().channel_service.block_manager.blockchain
    last_block = blockchain.last_block
    if last_block:
        roothash = last_block.header.reps_hash
    else:
        # No chain yet: fall back to the configured genesis crep root hash.
        configured = conf.CHANNEL_OPTION[ChannelProperty().name].get('crep_root_hash')
        roothash = Hash32.fromhex(configured)
    return blockchain.find_preps_by_roothash(roothash)
def announce_new_leader(self, complained_leader_id, new_leader_id, is_broadcast=True, self_peer_id=None):
    """Announce New Leader Id to Network

    :param complained_leader_id: peer id of the leader being complained about.
    :param new_leader_id: peer id of the newly elected leader.
    :param is_broadcast: False(notify to RS only), True(broadcast to network include RS)
    :param self_peer_id:
    :return:
    """
    util.logger.spam(f"peer_manager:announce_new_leader channel({self.__channel_name}), "
                     f"complained_leader_id({complained_leader_id}), "
                     f"new_leader_id({new_leader_id}), "
                     f"is_broadcast({is_broadcast})")
    # is_rs stays True when running inside the RS (or when RS notification fails),
    # which disables the "prevent reset leader loop" skip below.
    is_rs = ObjectManager().rs_service is not None

    announce_message = loopchain_pb2.ComplainLeaderRequest(
        complained_leader_id=complained_leader_id,
        channel=self.__channel_name,
        new_leader_id=new_leader_id,
        message="Announce New Leader"
    )

    # new_leader_peer = self.get_peer(new_leader_id)

    # Announce New Leader to Radio station
    try:
        channel_service = ObjectManager().channel_service
        if channel_service:
            response = channel_service.radio_station_stub.call("AnnounceNewLeader", announce_message)
            if response.response_code == message_code.Response.fail_no_peer_info_in_rs:
                # RS lost its peer table (likely restarted): reconnect and ask
                # peers to reconnect to RS as well.
                util.logger.spam(f"peer_manager:announce_new_leader fail no peer info in rs! "
                                 f"is_broadcast({is_broadcast})")
                announce_message.message = message_code.get_response_msg(
                    message_code.Response.fail_no_peer_info_in_rs)
                ObjectManager().channel_service.connect_to_radio_station(is_reconnect=True)
                ObjectManager().channel_service.broadcast_scheduler.schedule_broadcast(
                    "Request",
                    loopchain_pb2.Message(
                        code=message_code.Request.peer_reconnect_to_rs,
                        channel=self.__channel_name))
    except Exception as e:
        # RS notification failed; behave as if running in RS mode (no loop-prevention skip).
        # logging.debug("in RS there is no peer_service....")
        is_rs = True

    if is_broadcast is True:
        for peer_id in list(self.peer_list[conf.ALL_GROUP_ID]):
            # Skip notifying the new leader itself to avoid a reset-leader loop.
            if new_leader_id == peer_id and is_rs is not True:
                util.logger.spam(f"Prevent reset leader loop in AnnounceNewLeader message")
                continue
            peer_each = self.peer_list[conf.ALL_GROUP_ID][peer_id]
            stub_manager = self.get_peer_stub_manager(peer_each, conf.ALL_GROUP_ID)
            try:
                stub_manager.call_async("AnnounceNewLeader", announce_message, is_stub_reuse=True)
            except Exception as e:
                logging.warning("gRPC Exception: " + str(e))
                logging.debug("No response target: " + str(peer_each.target))
def verify_through_score_invoke(self, is_leader: bool=False):
    """Verify this block by invoking its transactions through the SCORE.

    Leaders simply record the commit state produced by the invoke; followers
    compare the resulting commit state against the leader's and, for non-icx
    channels, failed transactions may trigger a block rebuild.

    :param is_leader: True when this peer produced the block.
    :return: (is_verified, need_rebuild, invoke_results)
    """
    # Iterate over the txs contained in the block and invoke them.
    is_verified = True
    invoke_results = {}
    if ObjectManager().channel_service is None:
        # No channel service (e.g. tests): mark every tx result as success.
        # all results to success
        success_result = dict(code=int(message_code.Response.success))
        invoke_results = util.create_invoke_result_specific_case(
            self.confirmed_transaction_list, success_result)
    else:
        try:
            origin_commit_state = copy.deepcopy(self.commit_state)
            invoke_results = ObjectManager().channel_service.score_invoke(self)
            if is_leader:
                # set commit state as a leader while do nothing, block commit_state set by score_invoke
                util.logger.spam(f"verify_through_score_invoke commit_state({self.commit_state})")
            else:
                # verify commit state with leader's(origin_commit_state)
                # this block must have leader's commit state
                if origin_commit_state != self.commit_state:
                    logging.warning(f"block:verify_through_score_invoke fail commit state integrity!!")
                    is_verified = False
                else:
                    util.logger.spam(f"verify_through_score_invoke commit state verified.")

                # peer have to restore origin_commit_state.
                # And when receive block confirm message check again origin and peer's commit state.
                self.commit_state = copy.deepcopy(origin_commit_state)
        except Exception as e:
            # When Grpc Connection Raise Exception
            # save all result{'code': ScoreResponse.SCORE_CONTAINER_EXCEPTION, 'message': str(e)}
            logging.error(f'This error occurred while Score_invoke has failed in verify block : {e}')
            invoke_results = {}

    # util.logger.spam(f'Block::verify_through_score_invoke >>>>> invoke_results :: {invoke_results}')

    need_rebuild = False
    if not util.channel_use_icx(self.__channel_name):
        # Collect txs whose invoke result is not success.
        fail_list = [tx_hash for tx_hash, invoke_result in invoke_results.items()
                     if invoke_result["code"] != message_code.Response.success]
        need_rebuild = len(fail_list) > 0
        if is_leader:
            if need_rebuild:
                # Leader drops failed txs and rebuilds; the block is only valid
                # if at least one tx survives.
                for tx_hash in fail_list:
                    tx = self.find_tx_by_hash(tx_hash)
                    self.confirmed_transaction_list.discard(tx)
                is_verified = self.confirmed_tx_len > 0
            elif conf.ALLOW_MAKE_EMPTY_BLOCK and not need_rebuild:
                is_verified = True
        else:
            is_verified = not need_rebuild

    return is_verified, need_rebuild, invoke_results
def add_audience(self, peer_info):
    """Register a peer that should receive broadcasts.

    Sends a SUBSCRIBE command for the peer's target both to the peer
    service's tx process (when one exists) and to this service's own
    broadcast process.

    :param peer_info: SubscribeRequest carrying peer_target.
    """
    logging.debug("Try add audience: " + str(peer_info))
    subscribe_command = (BroadcastProcess.SUBSCRIBE_COMMAND, peer_info.peer_target)
    peer_service = ObjectManager().peer_service
    if peer_service is not None:
        peer_service.tx_process.send_to_process(subscribe_command)
    self.__broadcast_process.send_to_process(subscribe_command)
def set_mock(test):
    """Install mocked peer/channel services into ObjectManager for tests.

    Creates a default peer auth, attaches it to the test case, and wires up
    a PeerServiceMock (with peer manager and channel service mocks) as the
    global peer/channel services.

    :param test: test case instance; receives the created peer_auth.
    """
    auth = test_util.create_default_peer_auth()
    test.peer_auth = auth

    service_mock = PeerServiceMock()
    service_mock.peer_manager = PeerManagerMock(auth)
    service_mock.channel_service = ChannelServiceMock(conf.LOOPCHAIN_DEFAULT_CHANNEL)

    manager = ObjectManager()
    manager.peer_service = service_mock
    manager.channel_service = service_mock.channel_service
def set_mock(test):
    """Install mocked peer/channel services into ObjectManager for tests.

    Builds a throwaway Signer from random key material, attaches it to the
    test case, and wires up a PeerServiceMock (with peer manager and channel
    service mocks) as the global peer/channel services.

    :param test: test case instance; receives the created peer_auth.
    """
    auth = Signer.from_prikey(os.urandom(32))
    test.peer_auth = auth

    service_mock = PeerServiceMock()
    service_mock.peer_manager = PeerManagerMock(auth)
    service_mock.channel_service = ChannelServiceMock(conf.LOOPCHAIN_DEFAULT_CHANNEL)

    manager = ObjectManager()
    manager.peer_service = service_mock
    manager.channel_service = service_mock.channel_service
def confirm_prev_block(self, current_block: Block): """confirm prev unconfirmed block by votes in current block :param current_block: Next unconfirmed block what has votes for prev unconfirmed block. :return: confirm_Block """ # util.logger.debug(f"-------------------confirm_prev_block---current_block is " # f"tx count({len(current_block.body.transactions)}), " # f"height({current_block.header.height})") candidate_blocks = ObjectManager( ).channel_service.block_manager.candidate_blocks with self.__confirmed_block_lock: logging.debug( f"BlockChain:confirm_block channel({self.__channel_name})") try: unconfirmed_block = candidate_blocks.blocks[ current_block.header.prev_hash].block logging.debug("confirmed_block_hash: " + current_block.header.prev_hash.hex()) if unconfirmed_block: logging.debug("unconfirmed_block.block_hash: " + unconfirmed_block.header.hash.hex()) logging.debug("unconfirmed_block.prev_block_hash: " + unconfirmed_block.header.prev_hash.hex()) else: logging.warning( "There is no unconfirmed_block in candidate_blocks") return except KeyError: if self.last_block.header.hash == current_block.header.prev_hash: logging.warning( f"Already added block hash({current_block.header.prev_hash.hex()})" ) return else: except_msg = ( "there is no unconfirmed block in this peer " f"block_hash({current_block.header.prev_hash.hex()})") logging.warning(except_msg) raise BlockchainError(except_msg) if unconfirmed_block.header.hash != current_block.header.prev_hash: logging.warning( "It's not possible to add block while check block hash is fail-" ) raise BlockchainError('확인하는 블럭 해쉬 값이 다릅니다.') # util.logger.debug(f"-------------------confirm_prev_block---before add block," # f"height({unconfirmed_block.header.height})") self.add_block(unconfirmed_block) self.last_unconfirmed_block = current_block candidate_blocks.remove_block(current_block.header.prev_hash) return unconfirmed_block
def __start_broadcast_send_unconfirmed_block_timer(broadcast_func):
    """Start the repeating timer that re-broadcasts the unconfirmed block.

    Registers a repeating timer (fires immediately, then every
    INTERVAL_BROADCAST_SEND_UNCONFIRMED_BLOCK) whose callback performs the
    broadcast.

    :param broadcast_func: callable invoked on every timer tick.
    """
    key = TimerService.TIMER_KEY_BROADCAST_SEND_UNCONFIRMED_BLOCK
    repeat_timer = Timer(
        target=key,
        duration=conf.INTERVAL_BROADCAST_SEND_UNCONFIRMED_BLOCK,
        is_repeat=True,
        is_run_at_start=True,
        callback=broadcast_func)
    ObjectManager().channel_service.timer_service.add_timer(key, repeat_timer)
def _load_peers_from_rest_call():
    """Fetch the representatives list from the RadioStation over REST.

    Calls GetReps with the channel's configured crep_root_hash and maps the
    response to the internal {'id', 'p2pEndpoint'} shape.

    :return: list of dicts with 'id' and 'p2pEndpoint' keys.
    """
    rs_client = ObjectManager().channel_service.rs_client
    crep_root_hash = conf.CHANNEL_OPTION[ChannelProperty().name].get('crep_root_hash')
    request_params = RestMethod.GetReps.value.params(crep_root_hash)
    reps = rs_client.call(RestMethod.GetReps, request_params)
    peers = []
    for rep in reps:
        peers.append({"id": rep["address"], "p2pEndpoint": rep["p2pEndpoint"]})
    return peers
def main(argv):
    """Entry point for the peer service: parse CLI options and start serving.

    Supported options: -d (debug logging), -h/--help, -r/--radio_station_ip,
    --radio_station_port, -p/--port, -c/--score, --cert.

    :param argv: command-line arguments (without the program name).
    """
    logging.info("Peer main got argv(list): " + str(argv))

    try:
        opts, args = getopt.getopt(argv, "dhr:p:c:",
                                   ["help",
                                    "radio_station_ip=",
                                    "radio_station_port=",
                                    "port=",
                                    "score=",
                                    "cert="
                                    ])
    except getopt.GetoptError as e:
        logging.error(e)
        usage()
        sys.exit(1)

    # default option values
    port = conf.PORT_PEER
    radio_station_ip = conf.IP_RADIOSTATION
    radio_station_port = conf.PORT_RADIOSTATION
    score = conf.DEFAULT_SCORE_PACKAGE
    cert = None
    pw = None

    # apply option values
    for opt, arg in opts:
        if (opt == "-r") or (opt == "--radio_station_ip"):
            radio_station_ip = arg
        elif opt == "--radio_station_port":
            # BUGFIX: this long option was declared in getopt but never applied.
            radio_station_port = arg
        elif (opt == "-p") or (opt == "--port"):
            port = arg
        elif (opt == "-c") or (opt == "--score"):
            score = arg
        elif opt == "--cert":
            cert = arg
        elif opt == "-d":
            util.set_log_level_debug()
        elif (opt == "-h") or (opt == "--help"):
            usage()
            return

    # run peer service with parameters
    logging.info("\nTry Peer Service run with: \nport(" +
                 str(port) + ") \nRadio Station(" +
                 radio_station_ip + ":" +
                 str(radio_station_port) + ") \nScore(" +
                 score + ") \n")

    # check Port Using
    if util.check_port_using(conf.IP_PEER, int(port)):
        logging.error('Peer Service Port is Using ' + str(port))
        return

    ObjectManager().peer_service = PeerService(None, radio_station_ip, radio_station_port, cert, pw)
    logging.info("loopchain peer_service is: " + str(ObjectManager().peer_service))
    ObjectManager().peer_service.serve(port, score)
def __do_vote(self):
    """Vote on an announced unconfirmed block (if one is queued).

    Pops one block from the unconfirmed-block queue, validates it, registers
    it as this peer's unconfirmed block on success, and broadcasts a vote.
    Sleeps briefly and returns when the queue is empty.
    """
    if not self.__unconfirmedBlockQueue.empty():
        unconfirmed_block = self.__unconfirmedBlockQueue.get()
        logging.debug("we got unconfirmed block ....")
    else:
        time.sleep(conf.SLEEP_SECONDS_IN_SERVICE_LOOP)
        # logging.debug("No unconfirmed block ....")
        return

    logging.info("PeerService received unconfirmed block: " + unconfirmed_block.block_hash)
    if unconfirmed_block.confirmed_transaction_list.__len__() == 0 and \
            unconfirmed_block.block_type is not BlockType.peer_list:
        # A siever "vote block" carries no txs, so no validation/vote is needed.
        # To observe the siever vote-block send rate, log as warning; otherwise
        # keep the log line below commented out.
        # logging.warning("This is vote block by siever")
        pass
    else:
        # Validate the block.
        block_is_validated = False
        try:
            block_is_validated = Block.validate(unconfirmed_block, self.__txQueue)
        except Exception as e:
            logging.error(e)
        if block_is_validated:
            # After a broadcast block passes validation, register it as this
            # peer's unconfirmed block on the blockchain.
            confirmed, reason = self.__blockchain.add_unconfirm_block(unconfirmed_block)
            if confirmed:
                # block is confirmed
                # Open question in original code: vote on validated or on confirmed?
                # Currently only "validated" is checked.
                pass
            elif reason == "block_height":
                # The announced block's height differs from ours: re-run block height sync.
                self.block_height_sync()

        self.__common_service.vote_unconfirmed_block(
            unconfirmed_block.block_hash, block_is_validated, self.__channel_name)

        if conf.CONSENSUS_ALGORITHM == conf.ConsensusAlgorithm.lft:
            # turn on timer when peer type is general after vote
            # TODO: set appropriate callback function and parameters
            timer = Timer(
                unconfirmed_block.block_hash,
                conf.TIMEOUT_FOR_PEER_VOTE,
                ObjectManager().peer_service.timer_test_callback_function,
                ["test after vote by block_manager"])
            ObjectManager().peer_service.timer_service.add_timer(unconfirmed_block.block_hash, timer)
async def __start_shutdown_timer(self):
    """Arm the shutdown timer used when Subscribe keeps failing.

    If no shutdown timer is registered yet, adds a one-shot timer that shuts
    this peer down after conf.SHUTDOWN_TIMER seconds.
    """
    key = TimerService.TIMER_KEY_SHUTDOWN_WHEN_FAIL_SUBSCRIBE
    timer_service = ObjectManager().channel_service.timer_service
    if key in timer_service.timer_list:
        # Timer already armed; don't register it twice.
        return
    error = f"Shutdown by Subscribe retry timeout({conf.SHUTDOWN_TIMER} sec)"
    shutdown_timer = Timer(
        target=key,
        duration=conf.SHUTDOWN_TIMER,
        callback=self.__shutdown_peer,
        callback_kwargs={"message": error})
    timer_service.add_timer(key, shutdown_timer)
def tx_validate_hash_unique(self, confirmed_tx_list):
    """Check that none of the given tx hashes already exist on the chain.

    :param confirmed_tx_list: iterable of tx hashes to check.
    :return: True when every hash is new; False on the first duplicate.
    """
    block_manager = ObjectManager().channel_service.block_manager

    for confirmed_tx_hash in confirmed_tx_list:
        # A non-None lookup means the tx is already stored → duplicate.
        if block_manager.get_tx(confirmed_tx_hash) is not None:
            logging.warning(f"block:tx_validate_hash_unique There is duplicated tx_hash({confirmed_tx_hash})")
            return False

    return True
def load_peers(self) -> None:
    """Load peers into this manager and persist the preps set if missing.

    Delegates loading to PeerLoader, then checks whether the resulting reps
    hash already has preps stored in the blockchain DB; if not, serializes
    and writes them.
    """
    PeerLoader.load(peer_manager=self)

    blockchain = ObjectManager().channel_service.block_manager.blockchain
    roothash = self.reps_hash()
    if blockchain.find_preps_by_roothash(roothash):
        # Preps for this roothash are already persisted.
        return

    preps = self.serialize_as_preps()
    util.logger.spam(f"in _load_peers serialize_as_preps({preps})")
    blockchain.write_preps(roothash, preps)
def _start_consensus_timer(self, delay):
    """Schedule a one-shot timer that triggers the next consensus round.

    :param delay: seconds until consensus fires; negative values are clamped to 0.
    """
    key = TimerService.TIMER_KEY_BLOCK_GENERATE
    consensus_timer = Timer(
        target=key,
        duration=max(delay, 0),
        is_repeat=False,
        callback=self.consensus)
    ObjectManager().channel_service.timer_service.add_timer(key, consensus_timer)