async def create_icon_score_stub(self, channel_name):
    Logger.debug("create_icon_score_stub")
    queue_name = ICON_SCORE_QUEUE_NAME_FORMAT.format(channel_name=channel_name,
                                                     amqp_key=self.amqp_key)
    stub = IconScoreInnerStub(self.amqp_target, queue_name)
    await stub.connect()
    self.icon_score_stubs[channel_name] = stub
    return stub
@classmethod
def _remove_backup_file(cls, path: str):
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
    except BaseException as e:
        Logger.debug(tag=TAG, msg=str(e))
def _load_address_from_storage(self,
                               context: Optional['IconScoreContext'],
                               storage: IcxStorage,
                               db_key: str) -> None:
    """Load address info from the state db according to db_key

    :param context: current context
    :param storage: state db manager
    :param db_key: db key info
    """
    Logger.debug(f'_load_address_from_storage() start(address type: {db_key})', ICX_LOG_TAG)

    text = storage.get_text(context, db_key)
    if text:
        obj = json.loads(text)

        # Support loading a MainNet 1.0 db, where addresses were stored
        # as 40-char hex strings without the 'hx' prefix
        address: str = obj['address']
        if len(address) == 40:
            address = f'hx{address}'
        address: Address = Address.from_string(address)

        if db_key == self._GENESIS_DB_KEY:
            self._genesis_address = address
        elif db_key == self._TREASURY_DB_KEY:
            self._fee_treasury_address = address
        Logger.info(f'{db_key}: {address}', ICX_LOG_TAG)

    Logger.debug(f'_load_address_from_storage() end(address type: {db_key})', ICX_LOG_TAG)
def stop(self):
    Logger.debug(tag=_TAG, msg="stop() start")

    self._stop_message_queue()
    self._ipc_server.stop()

    Logger.debug(tag=_TAG, msg="stop() end")
async def ready_tasks():
    Logger.debug('rest_server:initialize')

    if self.conf.get(ConfigKey.TBEARS_MODE, False):
        channel_name = self.conf.get(ConfigKey.CHANNEL, 'loopchain_default')
        await StubCollection().create_channel_stub(channel_name)
        await StubCollection().create_icon_score_stub(channel_name)
        RestProperty().node_type = NodeType.CommunityNode
        RestProperty().rs_target = None
    else:
        await StubCollection().create_peer_stub()
        channels_info = await StubCollection().peer_stub.async_task().get_channel_infos()

        channel_name = None
        for channel_name, channel_info in channels_info.items():
            await StubCollection().create_channel_stub(channel_name)
            await StubCollection().create_icon_score_stub(channel_name)

        results = await StubCollection().peer_stub.async_task().get_channel_info_detail(channel_name)
        RestProperty().node_type = NodeType(results[6])
        RestProperty().rs_target = results[3]

    Logger.debug(f'rest_server:initialize complete. '
                 f'node_type({RestProperty().node_type}), rs_target({RestProperty().rs_target})')
def commit_claim(self, success: bool, address: 'Address',
                 block_height: int, block_hash: bytes,
                 tx_index: int, tx_hash: bytes):
    Logger.debug(
        tag=_TAG,
        msg=f"commit_claim() start: "
            f"success={success} "
            f"address={address} "
            f"block_height={block_height} "
            f"block_hash={bytes_to_hex(block_hash)} "
            f"tx_index={tx_index} "
            f"tx_hash={bytes_to_hex(tx_hash)}")

    future: concurrent.futures.Future = asyncio.run_coroutine_threadsafe(
        self._commit_claim(success, address, block_height, block_hash, tx_index, tx_hash),
        self._loop)

    try:
        future.result(self._ipc_timeout)
    except asyncio.TimeoutError:
        future.cancel()
        raise TimeoutException("COMMIT_CLAIM message to RewardCalculator has timed-out")

    Logger.debug(tag=_TAG, msg="commit_claim() end")
def commit_block(self, success: bool, block_height: int, block_hash: bytes) -> tuple:
    """Notify the reward calculator of block confirmation

    It is called on the invoke thread

    :param success: true for success, false for failure
    :param block_height: the height of the block
    :param block_hash: the hash of the block
    :return: [success(bool), block_height(int), block_hash(bytes)]
    :exception TimeoutException: The operation has timed-out
    """
    Logger.debug(tag=_TAG, msg=f"commit_block() start: success={success}, "
                               f"block_height={block_height}, "
                               f"block_hash={bytes_to_hex(block_hash)}")

    future: concurrent.futures.Future = asyncio.run_coroutine_threadsafe(
        self._commit_block(success, block_height, block_hash), self._loop)

    try:
        response: 'CommitBlockResponse' = future.result(self._ipc_timeout)
    except asyncio.TimeoutError:
        future.cancel()
        raise TimeoutException("commit_block message to RewardCalculator has timed-out")

    Logger.debug(tag=_TAG, msg=f"commit_block() end. response: {response}")
    return response.success, response.block_height, response.block_hash
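# The run_coroutine_threadsafe/future.result(timeout) pattern above recurs in
# every proxy method of this IPC layer (commit_claim, query_iscore, rollback,
# claim_iscore). Below is a minimal self-contained sketch of the same pattern;
# all names in it are illustrative, not part of the iconservice API.
import asyncio
import concurrent.futures
import threading

def _demo_threadsafe_bridge() -> int:
    # Background event loop running in its own thread, as the proxy assumes.
    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    async def _echo(value: int) -> int:
        # Stand-in for an async IPC round trip such as _commit_block().
        await asyncio.sleep(0.1)
        return value

    # Submit the coroutine to the background loop from the calling thread...
    future: concurrent.futures.Future = asyncio.run_coroutine_threadsafe(_echo(42), loop)
    try:
        # ...and block the caller until the response arrives or the timeout
        # expires. Note that Future.result() raises concurrent.futures.TimeoutError.
        return future.result(timeout=1.0)  # -> 42
    except concurrent.futures.TimeoutError:
        future.cancel()
        raise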
async def _run(conf: 'IconConfig'):
    Logger.print_config(conf, ICON_RPCSERVER_CLI)

    ServerComponents.conf = conf
    ServerComponents().set_resource()
    Logger.debug(f"Run gunicorn webserver for HA. Port = {conf[ConfigKey.PORT]}")

    # Configure SSL.
    ssl_context = ServerComponents().ssl_context
    certfile = ''
    keyfile = ''
    if ssl_context is not None:
        certfile = ssl_context[0]
        keyfile = ssl_context[1]

    options = conf.get(ConfigKey.GUNICORN_CONFIG, {})
    options.update({
        'bind': f'{conf[ConfigKey.HOST]}:{conf[ConfigKey.PORT]}',
        'certfile': certfile,
        'keyfile': keyfile,
        'SERVER_SOFTWARE': gunicorn.SERVER_SOFTWARE,
        'capture_output': False
    })

    # Launch the gunicorn web server.
    ServerComponents().ready()
    StandaloneApplication(ServerComponents().app, options).run()
async def relay_tx_request(relay_target, message, path, version=ApiVersion.v3.name):
    method_name = "icx_sendTransaction"

    relay_uri = f"{relay_target}/{path}"
    Logger.debug(f'relay_uri: {relay_uri}')

    async with aiohttp.ClientSession() as session:
        Logger.info(f"relay_tx_request : "
                    f"message[{message}], "
                    f"relay_target[{relay_target}], "
                    f"version[{version}], "
                    f"method[{method_name}]")
        try:
            response = await NewAiohttpClient(session, relay_uri, timeout=10).request(method_name, **message)
        except exceptions.ReceivedNon2xxResponseError as e:
            raise GenericJsonRpcServerError(
                code=JsonError.INTERNAL_ERROR,
                message=str(e),
                http_status=status.HTTP_BAD_REQUEST) from e

        if isinstance(response.data, list):
            raise NotImplementedError(f"Received batch response. Data: {response.data}")
        else:
            result = response.data.result

    Logger.debug(f"relay_tx_request result[{result}]")
    return result
def __init__(self):
    super().__init__()
    Logger.debug(tag=_TAG, msg="PRepEngine.__init__() start")

    self._invoke_handlers: dict = {
        "registerPRep": self.handle_register_prep,
        "setPRep": self.handle_set_prep,
        "setGovernanceVariables": self.handle_set_governance_variables,
        "unregisterPRep": self.handle_unregister_prep
    }

    self._query_handler: dict = {
        "getPRep": self.handle_get_prep,
        "getMainPReps": self.handle_get_main_preps,
        "getSubPReps": self.handle_get_sub_preps,
        "getPReps": self.handle_get_preps,
        "getPRepTerm": self.handle_get_prep_term,
        "getInactivePReps": self.handle_get_inactive_preps
    }

    self.preps = PRepContainer()
    # self.term should be None before decentralization
    self.term: Optional['Term'] = None
    self._initial_irep: Optional[int] = None
    self._penalty_imposer: Optional['PenaltyImposer'] = None

    Logger.debug(tag=_TAG, msg="PRepEngine.__init__() end")
def handle_claim_iscore(self, context: 'IconScoreContext', _params: dict):
    """Handles a claimIScore JSON-RPC request

    :param context: current context
    :param _params: unused
    """
    Logger.debug(tag=_TAG, msg="handle_claim_iscore() start")

    iscore, block_height = self._claim_iscore(context)
    if iscore > 0:
        self._commit_claim(context, iscore)
    else:
        Logger.info(tag=_TAG, msg="I-Score is zero")

    EventLogEmitter.emit_event_log(
        context,
        score_address=ZERO_SCORE_ADDRESS,
        event_signature="IScoreClaimed(int,int)",
        arguments=[iscore, self._iscore_to_icx(iscore)],
        indexed_args_count=0
    )

    Logger.debug(tag=_TAG, msg="handle_claim_iscore() end")
async def publish_new_block(ws, channel_name, height, peer_id):
    exception = None
    error_code = None
    channel_stub = get_channel_stub_by_channel_name(channel_name)
    try:
        while ws.open:
            new_block_dumped, confirm_info_bytes = await channel_stub.async_task().announce_new_block(
                subscriber_block_height=height, subscriber_id=peer_id)
            new_block: dict = json.loads(new_block_dumped)
            confirm_info = confirm_info_bytes.decode('utf-8')

            request = Request("node_ws_PublishNewBlock",
                              block=new_block, confirm_info=confirm_info)
            Logger.debug(f"node_ws_PublishNewBlock: {request}")

            await ws.send(json.dumps(request))
            height += 1
    except exceptions.ConnectionClosed as e:
        exception = e
        error_code = message_code.Response.fail_connection_closed
    except Exception as e:
        exception = e
        error_code = message_code.Response.fail_announce_block
        traceback.print_exc()

    if not exception:
        exception = ConnectionError("Connection closed.")

    await WSDispatcher.send_and_raise_exception(ws, "node_ws_PublishNewBlock", exception, error_code)
def set_block_to_batch(self, revision: int):
    Logger.debug(tag="DB", msg=f"set_block_to_batch() block={self.block}")
    block_key: bytes = IcxStorage.LAST_BLOCK_KEY
    block_value: tuple = TransactionBatchValue(self.block.to_bytes(revision), False)
    super().__setitem__(block_key, block_value)
async def dispatch(request: 'SanicRequest'):
    req = request.json
    url = request.url
    context = {"url": url}

    response: Union[Response, DictResponse, BatchResponse]
    try:
        client_ip = request.remote_addr if request.remote_addr else request.ip
        Logger.info(f'rest_server_v2 request with {req}', DISPATCH_V2_TAG)
        Logger.info(f"{client_ip} requested {req} on {url}")

        validate_jsonschema_v2(request=req)
    except GenericJsonRpcServerError as e:
        Logger.debug(f'dispatch() validate exception = {e}')
        response = ExceptionResponse(e, id=req.get('id', 0), debug=False)
    else:
        response = await async_dispatch(request.body, methods, context=context)

    Logger.info(f'rest_server_v2 response with {response}', DISPATCH_V2_TAG)
    return sanic_response.json(response.deserialized(), status=response.http_status, dumps=json.dumps)
async def create_icx_tx(self, kwargs: dict) -> Tuple[int, Optional[str]]:
    """Handler of the 'create_icx_tx' message, which is generated by 'icx_sendTransaction'.

    Validate the transaction and enqueue it to the transaction queue

    :param kwargs: transaction data
    :return: message code and transaction hash
    """
    Logger.debug(f'Get create_icx_tx message!! {kwargs}', "create_icx_tx")
    block_manager = self._block_manager

    # generate the tx hash
    tx_hash = create_hash(json.dumps(kwargs).encode())

    # check for duplicates in the tx queue and in confirmed blocks
    duplicated_tx = False
    for tx in block_manager.tx_queue:
        if tx_hash == tx['txHash']:
            duplicated_tx = True
            break

    if duplicated_tx is False and block_manager._block.get_transaction(tx_hash=tx_hash):
        duplicated_tx = True

    if duplicated_tx:
        return message_code.Response.fail_tx_invalid_duplicated_hash, None

    # append to the transaction queue
    block_manager.add_tx(tx_hash=tx_hash, tx=kwargs)

    Logger.debug('Response create_icx_tx!!', "create_icx_tx")
    return message_code.Response.success, f"0x{tx_hash}"
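# The duplicate check above scans the whole queue for every incoming
# transaction. A set-based index, sketched below, is a common O(1)
# alternative. This is illustrative only: tbears keeps tx_queue as a plain
# list of dicts, and this TxQueue class is hypothetical.
class TxQueue:
    def __init__(self):
        self._queue: list = []
        self._hashes: set = set()

    def add(self, tx_hash: str, tx: dict) -> bool:
        """Append a tx; return False if tx_hash is already queued."""
        if tx_hash in self._hashes:
            return False
        self._hashes.add(tx_hash)
        self._queue.append({'txHash': tx_hash, 'tx': tx})
        return True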
def query_iscore(self, address: 'Address', block: Optional[Block], tx_hash: Optional[bytes]) -> Tuple[int, int]:
    """Returns the I-Score of a given address

    It should be called on the query thread

    :param address: the address to query
    :param block: the block at which the query is executed
    :param tx_hash: the hash of the transaction where this query is called;
        it should be None under query mode
    :return: [i-score(int), block_height(int)]
    :exception TimeoutException: The operation has timed-out
    """
    assert isinstance(address, Address)

    Logger.debug(tag=_TAG, msg="query_iscore() start")

    future: concurrent.futures.Future = asyncio.run_coroutine_threadsafe(
        self._query_iscore(address, block, tx_hash), self._loop)

    try:
        response: 'QueryResponse' = future.result(self._ipc_timeout)
    except asyncio.TimeoutError:
        future.cancel()
        raise TimeoutException("query_iscore message to RewardCalculator has timed-out")

    Logger.debug(tag=_TAG, msg="query_iscore() end")

    return response.iscore, response.block_height
def write_server_conf(conf: dict):
    write_conf = {
        "hostAddress": conf['hostAddress'],
        "port": conf['port'],
        "scoreRootPath": conf['scoreRootPath'],
        "stateDbRootPath": conf['stateDbRootPath'],
        # the following keys are needed to stop iconservice
        TConfigKey.CHANNEL: conf.get(TConfigKey.CHANNEL, None),
        TConfigKey.AMQP_TARGET: conf.get(TConfigKey.AMQP_TARGET, None),
        TConfigKey.AMQP_KEY: conf.get(TConfigKey.AMQP_KEY, None)
    }
    Logger.debug(f"Write server Info.({conf}) to {TBEARS_CLI_ENV}", TBEARS_CLI_TAG)

    file_path = TBEARS_CLI_ENV
    file_name = file_path[file_path.rfind('/') + 1:]
    parent_directory = file_path[:file_path.rfind('/')]
    try:
        write_file(parent_directory=parent_directory, file_name=file_name,
                   contents=json.dumps(write_conf), overwrite=True)
    except TBearsWriteFileException as e:
        print(f"{e}")
    except Exception as e:
        print(f"Can't write conf to file. {e}")
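# write_file() is a tbears helper not shown in this section. A plausible
# sketch of its behavior, inferred from the call site above (an assumption,
# not the actual tbears implementation):
import os

def write_file(parent_directory: str, file_name: str, contents: str, overwrite: bool = False):
    os.makedirs(parent_directory, exist_ok=True)
    path = os.path.join(parent_directory, file_name)
    if not overwrite and os.path.exists(path):
        # assumed failure mode; mirrors the TBearsWriteFileException handler above
        raise TBearsWriteFileException(f"'{path}' already exists.")
    with open(path, 'w') as f:
        f.write(contents)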
def rollback(self, block_height: int, block_hash: bytes) -> Tuple[bool, int, bytes]:
    """Request the reward calculator to roll back its DB to a specific block height.

    The reward calculator DOES NOT process other messages while processing a ROLLBACK message

    :param block_height: the height of the block to roll back to
    :param block_hash: the hash of the block to roll back to
    :return: [success(bool), block_height(int), block_hash(bytes)]
    """
    Logger.debug(tag=_TAG,
                 msg=f"rollback() start: block_height={block_height}, "
                     f"block_hash={bytes_to_hex(block_hash)}")

    future: concurrent.futures.Future = asyncio.run_coroutine_threadsafe(
        self._rollback(block_height, block_hash), self._loop)

    try:
        response: 'RollbackResponse' = future.result(self._ipc_timeout)
    except asyncio.TimeoutError:
        future.cancel()
        raise TimeoutException("rollback message to RewardCalculator has timed-out")

    Logger.debug(tag=_TAG, msg=f"rollback() end. response: {response}")

    return response.success, response.block_height, response.block_hash
def query_iscore(self, address: 'Address') -> tuple:
    """Returns the I-Score of a given address

    It should be called on the query thread

    :param address: the address to query
    :return: [i-score(int), block_height(int)]
    :exception TimeoutException: The operation has timed-out
    """
    assert isinstance(address, Address)

    Logger.debug(tag=_TAG, msg="query_iscore() start")

    future: concurrent.futures.Future = asyncio.run_coroutine_threadsafe(
        self._query_iscore(address), self._loop)

    try:
        response: 'QueryResponse' = future.result(self._ipc_timeout)
    except asyncio.TimeoutError:
        future.cancel()
        raise TimeoutException("query_iscore message to RewardCalculator has timed-out")

    Logger.debug(tag=_TAG, msg="query_iscore() end")

    return response.iscore, response.block_height
def _on_term_ended(self, context: 'IconScoreContext') -> Tuple[dict, 'Term']:
    """Called in IconServiceEngine.invoke() whenever a term ends

    Updates P-Rep grades according to PRep.delegated
    """
    self._put_last_term_info(context, self.term)

    if self.term:
        main_preps: List['Address'] = [prep.address for prep in self.term.main_preps]
    else:
        # first term
        new_preps: List['PRep'] = context.preps.get_preps(start_index=0, size=context.main_prep_count)
        main_preps: List['Address'] = [prep.address for prep in new_preps]

    context.storage.meta.put_last_main_preps(context, main_preps)

    # All block validation penalties are released
    self._reset_block_validation_penalty(context)

    # Create a term with context.preps whose grades are up-to-date
    new_term: 'Term' = self._create_next_term(context, self.term)
    next_preps: dict = self._get_updated_main_preps(
        context=context,
        term=new_term,
        state=PRepResultState.NORMAL
    )

    Logger.debug(tag=_TAG, msg=f"{new_term}")

    return next_preps, new_term
def ready_handler(self, response: 'Response'):
    Logger.debug(tag=_TAG, msg=f"ready_handler() start {response}")

    if self._ready_callback is not None:
        self._ready_callback(response)

    self._ready_future.set_result(RCStatus.READY)
def __init__(self):
    super().__init__()
    Logger.debug(tag=_TAG, msg="PRepEngine.__init__() start")

    self._invoke_handlers: dict = {
        Method.REGISTER: self.handle_register_prep,
        Method.UNREGISTER: self.handle_unregister_prep,
        Method.SET_PREP: self.handle_set_prep,
        Method.SET_GOVERNANCE_VARIABLES: self.handle_set_governance_variables,
        Method.GET_PREP: self.handle_get_prep,
        Method.GET_MAIN_PREPS: self.handle_get_main_preps,
        Method.GET_SUB_PREPS: self.handle_get_sub_preps,
        Method.GET_PREPS: self.handle_get_preps,
        Method.GET_PREP_TERM: self.handle_get_prep_term,
        Method.GET_INACTIVE_PREPS: self.handle_get_inactive_preps
    }

    self._query_handler: dict = {
        Method.GET_PREP: self.handle_get_prep,
        Method.GET_MAIN_PREPS: self.handle_get_main_preps,
        Method.GET_SUB_PREPS: self.handle_get_sub_preps,
        Method.GET_PREPS: self.handle_get_preps,
        Method.GET_PREP_TERM: self.handle_get_prep_term,
        Method.GET_INACTIVE_PREPS: self.handle_get_inactive_preps
    }

    self.preps: 'PRepContainer' = PRepContainer()
    # self.term should be None before decentralization
    self.term: Optional['Term'] = None
    self._initial_irep: Optional[int] = None
    self._penalty_imposer: Optional['PenaltyImposer'] = None
    self.prep_address_converter: Optional['PRepAddressConverter'] = None

    Logger.debug(tag=_TAG, msg="PRepEngine.__init__() end")
def run_on_init(self, current_block_height: int) -> int:
    """Clean up all stale backup files on iconservice startup

    :param current_block_height: the current block height at startup
    :return: the number of backup files removed
    """
    Logger.debug(tag=_TAG, msg="run_on_init() start")

    ret = 0
    start_block_height = max(0, current_block_height - self._backup_files)

    with os.scandir(self._backup_root_path) as it:
        for entry in it:
            # backup filename example: "0000012345.bak"
            if entry.is_file() and self._is_backup_filename_valid(entry.name):
                block_height: int = self._get_block_height_from_filename(entry.name)
                if block_height < 0:
                    continue

                # Do nothing for the latest backup files
                if start_block_height <= block_height < current_block_height:
                    continue

                # Remove stale backup files
                if self._remove_file(entry.path):
                    ret += 1

    Logger.debug(tag=_TAG, msg=f"run_on_init() end: ret={ret}")
    return ret
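# The two filename helpers used above are not shown in this section. A
# plausible sketch, assuming the "0000012345.bak" naming scheme from the
# comment (the real methods may differ):
import re

_BACKUP_FILENAME_PATTERN = re.compile(r"^\d{10}\.bak$")

def _is_backup_filename_valid(filename: str) -> bool:
    # ex) "0000012345.bak" -> True, "state.db" -> False
    return _BACKUP_FILENAME_PATTERN.match(filename) is not None

def _get_block_height_from_filename(filename: str) -> int:
    # ex) "0000012345.bak" -> 12345; -1 for a malformed name
    try:
        return int(filename[:-len(".bak")], 10)
    except ValueError:
        return -1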
def claim_iscore(self, address: 'Address', block_height: int, block_hash: bytes) -> tuple:
    """Claim the I-Score of a given address

    It is called on the invoke thread

    :param address: the address to claim
    :param block_height: the height of the block which contains this claim tx
    :param block_hash: the hash of the block which contains this claim tx
    :return: [i-score(int), block_height(int)]
    :exception TimeoutException: The operation has timed-out
    """
    Logger.debug(tag=_TAG,
                 msg=f"claim_iscore() start: address({address}) "
                     f"block_height({block_height}) block_hash({block_hash.hex()})")

    future: concurrent.futures.Future = asyncio.run_coroutine_threadsafe(
        self._claim_iscore(address, block_height, block_hash), self._loop)

    try:
        response: 'ClaimResponse' = future.result(self.IPC_TIMEOUT)
    except asyncio.TimeoutError:
        future.cancel()
        raise TimeoutException("claim_iscore message to RewardCalculator has timed-out")

    Logger.debug(tag=_TAG, msg=f"claim_iscore() end: iscore({response.iscore})")
    return response.iscore, response.block_height
async def publish_new_block(ws, channel_name, height, peer_id):
    call_method = WSDispatcher.PUBLISH_NEW_BLOCK
    channel_stub = get_channel_stub_by_channel_name(channel_name)
    try:
        while True:
            new_block_dumped, confirm_info_bytes = await channel_stub.async_task().announce_new_block(
                subscriber_block_height=height, subscriber_id=peer_id
            )
            new_block: dict = json.loads(new_block_dumped)
            if "error" in new_block:
                Logger.error(f"announce_new_block error: {new_block}, to citizen({peer_id})")
                break

            confirm_info = confirm_info_bytes.decode('utf-8')
            request = Request(call_method, block=new_block, confirm_info=confirm_info)

            Logger.debug(f"{call_method}: {request}")
            await ws.send(json.dumps(request))
            height += 1
    except exceptions.ConnectionClosed:
        Logger.debug("Connection closed by child.")  # TODO: Useful message needed.
    except Exception as e:
        traceback.print_exc()  # TODO: Keep this traceback?
        await WSDispatcher.send_exception(
            ws, call_method,
            exception=e,
            error_code=message_code.Response.fail_announce_block
        )
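# A minimal citizen-side counterpart to publish_new_block(), sketched with
# the third-party websockets package. The endpoint URI and the JSON-RPC
# notification layout ({"method": ..., "params": {...}}) are assumptions
# based on the Request objects built above, not a documented protocol.
import json
import websockets  # pip install websockets

async def subscribe_new_blocks(uri: str):
    # usage sketch: asyncio.run(subscribe_new_blocks("ws://<node>/<ws-path>"))
    async with websockets.connect(uri) as ws:
        while True:
            request = json.loads(await ws.recv())
            if request.get("method") == "node_ws_PublishNewBlock":
                block: dict = request["params"]["block"]
                print(f"received block at height {block.get('height')}")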
async def process_block_data(self):
    """Process block data: invoke a block and save the transactions,
    transaction results and block, then update the block height and
    previous block hash.
    """
    Logger.debug('process_block_data started!!', TBEARS_BLOCK_MANAGER)

    # clear tx_queue
    tx_list = self.clear_tx()

    if len(tx_list) == 0:
        if self._conf[TConfigKey.BLOCK_CONFIRM_EMPTY]:
            Logger.debug('Confirm empty block', TBEARS_BLOCK_MANAGER)
        else:
            Logger.debug('There are no transactions for block confirm. Bye~', TBEARS_BLOCK_MANAGER)
            return

    # make the block hash. tbears block_manager is a dev util
    block_timestamp_us = int(time.time() * 10 ** 6)
    block_hash = create_hash(block_timestamp_us.to_bytes(DEFAULT_BYTE_SIZE, DATA_BYTE_ORDER))

    # send an invoke message to ICON
    prev_block_timestamp = block_timestamp_us - self._conf.get(TConfigKey.BLOCK_CONFIRM_INTERVAL, 0)
    self._prep_manager.set_prev_votes(self.block.block_height,
                                      self.block.prev_block_hash,
                                      prev_block_timestamp)
    response = await self._invoke_block(tx_list=tx_list,
                                        block_hash=block_hash,
                                        block_timestamp=block_timestamp_us)
    if response is None:
        Logger.debug('iconservice returned None for the invoke request.', TBEARS_BLOCK_MANAGER)
        return

    # send the write precommit message and confirm the block
    await self._confirm_block(tx_list=tx_list,
                              invoke_response=response,
                              block_hash=block_hash,
                              timestamp=block_timestamp_us)

    Logger.debug('process_block_data done!!', TBEARS_BLOCK_MANAGER)
def start_process(conf: 'IconConfig'):
    Logger.debug('start_process() start')

    python_module_string = 'iconrpcserver.icon_rpcserver_app'

    converted_params = {
        '-p': conf[ConfigKey.PORT],
        '-c': conf.get(ConfigKey.CONFIG),
        '-at': conf[ConfigKey.AMQP_TARGET],
        '-ak': conf[ConfigKey.AMQP_KEY],
        '-ch': conf[ConfigKey.CHANNEL]
    }

    custom_argv = []
    for k, v in converted_params.items():
        if v is None:
            continue
        custom_argv.append(k)
        custom_argv.append(str(v))
    if conf[ConfigKey.TBEARS_MODE]:
        custom_argv.append('-tbears')

    is_foreground = conf.get('foreground', False)
    if is_foreground:
        from iconrpcserver.icon_rpcserver_app import run_in_foreground
        del conf['foreground']
        run_in_foreground(conf)
    else:
        subprocess.Popen([sys.executable, '-m', python_module_string, *custom_argv], close_fds=True)

    Logger.debug('start_process() end')
def _rename_iiss_db_to_current_db(self, calc_end_block_height: int):
    """Rename iiss_db to current_db
    """
    Logger.debug(tag=TAG,
                 msg=f"_rename_iiss_db_to_current_db() start: calc_end_block_height={calc_end_block_height}")

    filename = RewardCalcStorage.get_iiss_rc_db_name(calc_end_block_height)
    src_path = os.path.join(self._rc_data_path, filename)
    dst_path = os.path.join(self._rc_data_path, RewardCalcStorage.CURRENT_IISS_DB_NAME)
    Logger.info(tag=TAG, msg=f"rename_iiss_db: src_path={src_path} dst_path={dst_path}")

    # Consider the case that renaming iiss_db to current_db has already been done
    if os.path.isdir(src_path):
        # Remove a new current_db
        shutil.rmtree(dst_path, ignore_errors=True)

        # Rename iiss_rc_db_{BH} to current_db
        shutil.move(src_path, dst_path)

    Logger.debug(tag=TAG, msg="_rename_iiss_db_to_current_db() end")
def save(self, path: str):
    Logger.debug(tag=TAG, msg=f"save() start: {path}")

    with open(path, "wb") as f:
        f.write(self.to_bytes())

    Logger.debug(tag=TAG, msg="save() end")
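# A plausible load() counterpart to save() above, assuming a matching
# from_bytes() factory exists on the same class (hypothetical, shown for
# symmetry rather than taken from the source):
@classmethod
def load(cls, path: str):
    Logger.debug(tag=TAG, msg=f"load() start: {path}")
    with open(path, "rb") as f:
        obj = cls.from_bytes(f.read())
    Logger.debug(tag=TAG, msg="load() end")
    return obj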
def start_reward_calc(self, log_dir: str, sock_path: str, iiss_db_path: str):
    """Start the reward calculator process

    :param log_dir: log directory
    :param sock_path: unix domain socket path for IPC
    :param iiss_db_path: IISS data DB path
    :return: void
    """
    Logger.debug(tag=_TAG, msg='run reward calc')

    iscore_db_path, _ = os.path.split(iiss_db_path)
    iscore_db_path = os.path.join(iscore_db_path, 'rc')
    log_path = os.path.join(log_dir, 'rc.log')
    reward_calculator_path: str = self._get_reward_calculator_path(self._icon_rc_path)

    if self._reward_calc is None:
        args = [
            reward_calculator_path,
            "-client",
            "-monitor",
            "-db-count", "16",
            "-db", f"{iscore_db_path}",
            "-iissdata", f"{iiss_db_path}",
            "-ipc-addr", f"{sock_path}",
            "-log-file", f"{log_path}",
        ]

        Logger.info(tag=_TAG, msg=f"cmd={' '.join(args)}")
        self._reward_calc = Popen(args)

    Logger.debug(tag=_TAG, msg="start_reward_calc() end")