def generate_config(url: str, path_to_cdf: str) -> None:
    provider = ConfStoreProvider(url)
    utils = Utils(provider)
    conf_dir = get_config_dir(url)
    path = os.getenv('PATH')
    if path:
        path += os.pathsep + '/opt/seagate/cortx/hare/bin/'
    python_path = os.pathsep.join(sys.path)
    transport_type = utils.get_transport_type()
    cmd = ['configure', '-c', conf_dir, path_to_cdf,
           '--transport', transport_type,
           '--log-dir', get_log_dir(url),
           '--log-file', LOG_FILE,
           '--uuid', provider.get_machine_id()]
    locale_info = execute(['locale', '-a'])
    env = {'PYTHONPATH': python_path, 'PATH': path}
    if 'en_US.utf-8' in locale_info or 'en_US.utf8' in locale_info:
        env.update({'LC_ALL': "en_US.utf-8", 'LANG': "en_US.utf-8"})
    execute(cmd, env)
    utils.copy_conf_files(conf_dir)
    utils.copy_consul_files(conf_dir, mode='client')
    # consul-kv.json contains key-values for the entire cluster, so it
    # is sufficient to import consul-kv just once. We fetch one of the
    # consul KV entries to check whether the key-values were already
    # imported during startup of one of the nodes in the cluster; this
    # avoids duplicate imports and thus a possible overwrite of the
    # updated cluster state.
    if not is_kv_imported(utils):
        utils.import_kv(conf_dir)

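# A minimal sketch of the is_kv_imported check described in the comment
# above. The key name and the Utils accessor are assumptions made for
# illustration (the real Utils API may expose a different helper): we
# probe a single well-known key and treat its presence as proof that
# consul-kv.json was already imported by another node.
def is_kv_imported(utils) -> bool:
    # Hypothetical accessor: returns the value stored under a key, or
    # None if the key is absent from Consul KV.
    return utils.kv_get('cluster_info') is not None
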
def test_is_cluster_first_node(self):
    conf = ConfStoreProvider(URL)
    hostname = socket.gethostname()
    machine_id = conf.get_machine_id()
    update_machine(machine_id, hostname)
    validator = Validator(
        ConfStoreProvider("json:///tmp/temp-test-conf-store.json"))
    self.assertEqual(True, validator.is_first_node_in_cluster())

def test_invalid_machine_id(self):
    conf = ConfStoreProvider(URL)
    hostname = 'invalid-hostname'
    machine_id = conf.get_machine_id()
    update_machine(machine_id, hostname)
    validator = Validator(
        ConfStoreProvider("json:///tmp/temp-test-conf-store.json"))
    with self.assertRaises(RuntimeError):
        validator._get_machine_id()

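# A plausible sketch of the update_machine helper used by both tests
# above, assuming it rewrites the temporary ConfStore JSON so that the
# given machine id maps to the given hostname. The file path matches
# the one the tests pass to ConfStoreProvider; the key layout is an
# assumption and may differ from the real test fixture.
import json

def update_machine(machine_id: str, hostname: str) -> None:
    path = '/tmp/temp-test-conf-store.json'
    with open(path) as f:
        conf = json.load(f)
    # Point the node entry for this machine id at the desired hostname.
    node = conf.setdefault('server_node', {}).setdefault(machine_id, {})
    node['hostname'] = hostname
    with open(path, 'w') as f:
        json.dump(conf, f)
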
def is_mkfs_required(url: str) -> bool:
    try:
        conf = ConfStoreProvider(url)
        utils = Utils(conf)
        machine_id = conf.get_machine_id()
        return utils.is_motr_io_present(machine_id)
    except Exception as error:
        logging.warning('Failed to get pod type (%s). The current stage '
                        'is assumed to be not required by default', error)
        return False

def get_server_type(url: str) -> str:
    try:
        provider = ConfStoreProvider(url)
        machine_id = provider.get_machine_id()
        server_type = provider.get(f'server_node>{machine_id}>type')
        if server_type == 'VM':
            return 'virtual'
        else:
            return 'physical'
    except Exception as error:
        logging.error('Cannot get server type (%s)', error)
        return 'unknown'

def test_template_sane(self):
    _, path = tempfile.mkstemp()
    try:
        with open(path, 'w') as f:
            f.write(self._get_confstore_template())
        store = ConfStoreProvider(f'json://{path}')
        store.get_machine_id = Mock(
            return_value='1114a50a6bf6f9c93ebd3c49d07d3fd4')
        # The method will raise an exception if either Dhall is
        # unhappy or some values are not found in ConfStore.
        cdf = CdfGenerator(provider=store, motr_provider=Mock())
        cdf._get_m0d_per_cvg = Mock(return_value=1)
        cdf.generate()
    finally:
        os.unlink(path)

def setup_logging(url: str) -> None:
    provider = ConfStoreProvider(url)
    machine_id = provider.get_machine_id()
    log_path = provider.get('cortx>common>storage>log')
    log_dir = log_path + LOG_DIR_EXT + machine_id + '/hare_deployment/'
    log_file = log_dir + LOG_FILE
    create_logger_directory(log_dir)
    console = logging.StreamHandler(stream=sys.stdout)
    fhandler = logging.handlers.RotatingFileHandler(log_file,
                                                    maxBytes=LOG_FILE_SIZE,
                                                    mode='a',
                                                    backupCount=5,
                                                    encoding=None,
                                                    delay=False)
    logging.basicConfig(level=logging.INFO,
                        handlers=[console, fhandler],
                        format='%(asctime)s [%(levelname)s] %(message)s')

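# For illustration, with assumed values log_path='/var/log/cortx',
# LOG_DIR_EXT='/hare/' and machine_id='abc123' (all hypothetical), the
# rotating handler above would write to
# '/var/log/cortx/hare/abc123/hare_deployment/<LOG_FILE>', rolling over
# at LOG_FILE_SIZE bytes and keeping 5 backups.
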
def get_config_dir(url: str) -> str:
    provider = ConfStoreProvider(url)
    machine_id = provider.get_machine_id()
    config_path = provider.get('cortx>common>storage>local')
    return config_path + CONF_DIR_EXT + '/' + machine_id

def get_log_dir(url: str) -> str:
    provider = ConfStoreProvider(url)
    machine_id = provider.get_machine_id()
    log_path = provider.get('cortx>common>storage>log')
    return log_path + LOG_DIR_EXT + machine_id

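# Example usage with an assumed ConfStore URL: both helpers resolve the
# machine id through the same ConfStoreProvider, so the per-node config
# and log directories stay consistent with each other.
url = 'json:///etc/cortx/cluster.conf'  # hypothetical URL
conf_dir = get_config_dir(url)  # <storage.local> + CONF_DIR_EXT + '/' + <machine-id>
log_dir = get_log_dir(url)      # <storage.log> + LOG_DIR_EXT + <machine-id>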