def run_container(type, schain, env, volume_config=None,
                  cpu_limit=None, mem_limit=None, dutils=None):
    if not dutils:
        dutils = docker_utils
    schain_name = schain['name']
    image_name, container_name, run_args, custom_args = get_container_info(
        type, schain_name)
    # mount the shared node config volume into every container
    add_config_volume(run_args)
    if custom_args.get('logs'):
        run_args['log_config'] = get_logs_config(custom_args['logs'])
    if custom_args.get('ulimits_list'):
        run_args['ulimits'] = get_ulimits_config(custom_args['ulimits_list'])
    if volume_config:
        run_args['volumes'].update(volume_config)
    # optional resource caps, passed straight through to docker-py
    if cpu_limit:
        run_args['nano_cpus'] = cpu_limit
    if mem_limit:
        run_args['mem_limit'] = mem_limit
    run_args['environment'] = env
    logger.info(arguments_list_string(
        {'Container name': container_name, 'Image name': image_name,
         'Args': run_args},
        'Running container...'))
    cont = dutils.client.containers.run(image_name, name=container_name,
                                        detach=True, **run_args)
    logger.info(arguments_list_string(
        {'Container name': container_name, 'Container id': cont.id},
        'Container created', 'success'))
    return cont

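# A minimal usage sketch for run_container. SCHAIN_CONTAINER stands in for
# whatever container-type constant the repo defines; the volume name and the
# limit values below are illustrative assumptions, not defaults. docker-py's
# nano_cpus is measured in units of 1e-9 CPUs, so 2 * 10 ** 9 caps the
# container at two cores.
def _example_run_schain_container(schain, env, dutils=None):
    return run_container(
        SCHAIN_CONTAINER, schain, env,
        volume_config={f"{schain['name']}-data": {'bind': '/data', 'mode': 'rw'}},
        cpu_limit=2 * 10 ** 9,
        mem_limit='4g',
        dutils=dutils,
    )
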
def monitor_schains(self, opts):
    skale = spawn_skale_lib(self.skale)
    schains = skale.schains_data.get_schains_for_node(self.node_id)
    schains_on_node = sum(map(lambda schain: schain['active'], schains))
    schains_holes = len(schains) - schains_on_node
    logger.info(arguments_list_string(
        {'Node ID': self.node_id, 'sChains on node': schains_on_node,
         'Empty sChain structs': schains_holes},
        'Monitoring sChains'))
    threads = []
    for schain in schains:
        if not schain['active']:
            continue
        schain_thread = CustomThread(
            f'sChain monitor: {schain["name"]}', self.monitor_schain,
            opts=schain, once=True)
        schain_thread.start()
        threads.append(schain_thread)
    for thread in threads:
        thread.join()

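# CustomThread is used here and in wait_for_node_id below with the signature
# CustomThread(name, func, opts=None, interval=..., once=...). The real class
# lives elsewhere in the repo; a minimal sketch of such a wrapper, assuming
# it simply re-runs func every `interval` seconds unless `once` is set:
import threading
import time


class CustomThreadSketch(threading.Thread):
    def __init__(self, name, func, opts=None, interval=60, once=False):
        super().__init__(name=name, daemon=True)
        self.func = func
        self.opts = opts
        self.interval = interval
        self.once = once

    def run(self):
        while True:
            self.func(self.opts)
            if self.once:
                break
            time.sleep(self.interval)
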
def register(self, ip, public_ip, port, name):
    """
    Main node registration function.

    Parameters:
    ip (str): P2P IP address that will be assigned to the node
    public_ip (str): Public IP address that will be assigned to the node
    port (int): Base port that will be used for sChains on the node
    name (str): Node name

    Returns:
    dict: Execution status and node config
    """
    self._log_node_info('Node create started', ip, public_ip, port, name)
    if self.config.id is not None:
        return self._node_already_exist()
    if not check_required_balance(self.skale):
        return self._insufficient_funds()
    try:
        tx_res = self.skale.manager.create_node(ip, int(port), name,
                                                public_ip, wait_for=True)
    except TransactionFailedError as err:
        # tx_res is never bound if create_node raises, so report the
        # exception itself rather than tx_res.hash / tx_res.receipt
        logger.error(arguments_list_string(
            {'Error': str(err)}, 'Node creation failed', 'error'))
        return {'status': 0, 'errors': [str(err)]}
    self._log_node_info('Node successfully created', ip, public_ip, port, name)
    self.config.id = self.skale.nodes_data.node_name_to_index(name)
    run_filebeat_service(public_ip, self.config.id, self.skale)
    return {'status': 1, 'data': self.config.all()}

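# Sketch of how a caller might consume register()'s status dict. `node` is
# assumed to be an instance of the class this method belongs to, and the
# addresses, port, and name are placeholder values:
def _example_register(node):
    res = node.register(ip='10.0.0.5', public_ip='203.0.113.7',
                        port=10000, name='my-node')
    if res['status']:
        logger.info(f"Node registered: {res['data']}")
    else:
        logger.error(f"Node registration failed: {res['errors']}")
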
def _log_node_info(self, title, ip, public_ip, port, name):
    log_params = {
        'IP': ip,
        'Public IP': public_ip,
        'Port': port,
        'Name': name
    }
    logger.info(arguments_list_string(log_params, title))

def wait_for_node_id(self, opts):
    while self.node_config.id is None:
        logger.debug('Waiting for the node_id in sChains Cleaner...')
        sleep(MONITOR_INTERVAL)
    self.node_id = self.node_config.id
    self.monitor = CustomThread('sChains cleaner monitor',
                                self.schains_cleaner,
                                interval=CLEANER_INTERVAL)
    self.monitor.start()
    logger.info(arguments_list_string({'Node ID': self.node_config.id},
                                      'sChains cleaner started'))

def generate_sgx_key(config):
    if not SGX_SERVER_URL:
        raise SGXConnecionError('SGX server URL is not provided')
    if not config.sgx_key_name:
        sgx = SgxClient(SGX_SERVER_URL, SGX_CERTIFICATES_FOLDER)
        key_info = sgx.generate_key()
        logger.info(arguments_list_string(
            {'Name': key_info.name, 'Address': key_info.address},
            'Generated new SGX key'))
        config.sgx_key_name = key_info.name

def log_health_check(self):
    checks = self.get_all()
    logger.info(f'sChain {self.name} checks: {checks}')
    failed_checks = [name for name, passed in checks.items() if not passed]
    if failed_checks:
        failed_checks_str = ', '.join(failed_checks)
        logger.info(arguments_list_string(
            {'sChain name': self.name, 'Failed checks': failed_checks_str},
            'Failed sChain checks', 'error'))

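# log_health_check expects get_all() to return a flat mapping of check name
# to boolean result. The check names below are illustrative only, not the
# repo's actual check set:
_example_checks = {
    'data_dir': True,
    'config': True,
    'container': False,
    'rpc': False,
}
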
def schains_cleaner(self, opts):
    schains_on_node = self.get_schains_on_node()
    schain_ids = self.schain_names_to_ids(schains_on_node)
    schain_names_on_contracts = self.get_schain_names_from_contract()
    event_filter = SkaleFilter(
        self.skale_events.schains.contract.events.SchainDeleted,
        from_block=0,
        argument_filters={'schainId': schain_ids}
    )
    events = event_filter.get_events()
    for event in events:
        name = event['args']['name']
        if name in schains_on_node and name not in schain_names_on_contracts:
            logger.info(arguments_list_string({'sChain name': name},
                                              'sChain deleted event found'))
            self.run_cleanup(name)

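# Each event yielded by SkaleFilter.get_events() is expected to follow the
# web3.py event-log shape; an illustrative (not real) example of what the
# loop above consumes:
_example_schain_deleted_event = {
    'event': 'SchainDeleted',
    'blockNumber': 1234567,
    'args': {'name': 'my-schain', 'schainId': b'\x00' * 32},
}
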
def run_filebeat_service(node_ip, node_id, skale):
    contract_address = skale.manager.address
    template_data = {
        'ip': node_ip,
        'id': node_id,
        'contract_address': contract_address
    }
    logger.info(arguments_list_string(
        {'Node ID': node_id, 'Node IP': node_ip,
         'Manager contract address': contract_address},
        'Processing Filebeat template'))
    process_template(FILEBEAT_TEMPLATE_PATH, FILEBEAT_CONFIG_PATH,
                     template_data)
    filebeat_container = dutils.client.containers.get(FILEBEAT_CONTAINER_NAME)
    filebeat_container.restart()

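# process_template is not shown in this section; a minimal sketch of what
# such a helper usually does, assuming a Jinja2 template on disk (the real
# implementation may differ):
from jinja2 import Template


def process_template_sketch(template_path, config_path, data):
    # render the template with the given variables and write the result
    with open(template_path) as template_file:
        template = Template(template_file.read())
    with open(config_path, 'w') as config_file:
        config_file.write(template.render(**data))
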
# assumes a matching before_request hook opened g.db earlier in this module
@app.after_request
def after_request(response):
    g.db.close()
    return response


def create_tables():
    if not SChainRecord.table_exists():
        SChainRecord.create_table()


if __name__ == '__main__':
    logger.info(arguments_list_string(
        {'Endpoint': ENDPOINT, 'Transaction manager': TM_URL,
         'SGX Server': sgx_server_text()},
        'Starting Flask server'))
    from tools.configs.db import MYSQL_DB_PORT
    # debug output of the configured MySQL port
    logger.info(f'{MYSQL_DB_PORT}')
    create_tables()
    generate_sgx_key(node_config)
    app.secret_key = FLASK_SECRET_KEY_FILE
    app.run(debug=FLASK_DEBUG_MODE, port=FLASK_APP_PORT,
            host=FLASK_APP_HOST, use_reloader=False)