def generate_config(url: str, path_to_cdf: str) -> None:
    provider = ConfStoreProvider(url)
    utils = Utils(provider)
    conf_dir = get_config_dir(url)
    path = os.getenv('PATH')
    if path:
        path += os.pathsep + '/opt/seagate/cortx/hare/bin/'
    python_path = os.pathsep.join(sys.path)
    transport_type = utils.get_transport_type()
    cmd = ['configure', '-c', conf_dir, path_to_cdf,
           '--transport', transport_type,
           '--log-dir', get_log_dir(url),
           '--log-file', LOG_FILE,
           '--uuid', provider.get_machine_id()]
    locale_info = execute(['locale', '-a'])
    env = {'PYTHONPATH': python_path, 'PATH': path}
    if 'en_US.utf-8' in locale_info or 'en_US.utf8' in locale_info:
        env.update({'LC_ALL': 'en_US.utf-8', 'LANG': 'en_US.utf-8'})
    execute(cmd, env)
    utils.copy_conf_files(conf_dir)
    utils.copy_consul_files(conf_dir, mode='client')
    # consul-kv.json contains key-values for the entire cluster, so it is
    # sufficient to import it just once. We fetch one of the Consul keys to
    # check whether the key-values were already imported during start-up of
    # another node in the cluster; this avoids duplicate imports and thus a
    # possible overwriting of the updated cluster state.
    if not is_kv_imported(utils):
        utils.import_kv(conf_dir)

def generate_support_bundle(args):
    try:
        # Default target directory is /tmp/hare
        cmd = ['hctl', 'reportbug']
        if args.b:
            cmd.append('-b')
            cmd.append(args.b[0])
        if args.t:
            cmd.append('-t')
            cmd.append(args.t[0])
        if args.duration:
            logging.info('Time-bound log collection for %s', args.duration)
        if args.size_limit:
            logging.info('Collecting size-limited logs: %s', args.size_limit)
        if args.services:
            logging.info('Limiting log collection to the specified'
                         ' service(s): %s', args.services)
        if args.binlogs:
            logging.info('Include the binary logs? %s', args.binlogs)
        if args.coredumps:
            logging.info('Include core dumps? %s', args.coredumps)
        if args.stacktrace:
            logging.info('Include stacktraces? %s', args.stacktrace)
        url = args.config[0]
        log_dir = get_log_dir(url)
        conf_dir = get_config_dir(url)
        cmd.append('-l')
        cmd.append(log_dir)
        cmd.append('-c')
        cmd.append(conf_dir)
        execute(cmd)
    except Exception as error:
        raise RuntimeError(f'Error while generating support bundle: {error}')

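# Illustrative sketch only (an assumption, not the actual CLI wiring of this
# module): a minimal argparse setup compatible with the attributes that
# generate_support_bundle() reads above. The option names, help strings and
# the '_example' prefix are hypothetical.
def _example_reportbug_parser():
    import argparse
    p = argparse.ArgumentParser(description='Generate a support bundle')
    p.add_argument('--config', nargs=1, required=True,
                   help='URL of the ConfStore configuration')
    p.add_argument('-b', nargs=1, help='bundle id')
    p.add_argument('-t', nargs=1, help='target directory')
    p.add_argument('--duration', help='time bound for log collection')
    p.add_argument('--size_limit', help='size limit for collected logs')
    p.add_argument('--services', help='collect logs of specific services only')
    p.add_argument('--binlogs', action='store_true')
    p.add_argument('--coredumps', action='store_true')
    p.add_argument('--stacktrace', action='store_true')
    return p
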
def start_mkfs(proc_to_start: ProcessStartInfo) -> int:
    try:
        logging.info('Starting mkfs process [fid=%s] at hostname=%s',
                     proc_to_start.fid, proc_to_start.hostname)
        command = proc_to_start.cmd
        execute(command)
        logging.info('Started mkfs process [fid=%s]', proc_to_start.fid)
        rc = 0
    except Exception as error:
        logging.error('Error launching mkfs [fid=%s] at hostname=%s: %s',
                      proc_to_start.fid, proc_to_start.hostname, error)
        rc = -1
    return rc

def check_cluster_status(path_to_cdf: str):
    cluster_desc = None
    with open(path_to_cdf, 'r') as stream:
        cluster_desc = yaml.safe_load(stream)
    cmd = ['hctl', 'status', '--json']
    cluster_info = json.loads(execute(cmd))
    nodes_data_hctl = cluster_info['nodes']
    node_info_dict = list2dict(nodes_data_hctl)
    for node in cluster_desc['nodes']:
        s3_cnt = int(node['m0_clients']['s3'])
        m0ds = node.get('m0_servers', [])
        ios_cnt = 0
        for m0d in m0ds:
            if ('runs_confd' in m0d.keys() and
                    node_info_dict[node['hostname']]['confd'][0] != 'started'):
                return -1
            if m0d['io_disks']['data']:
                if node_info_dict[
                        node['hostname']]['ioservice'][ios_cnt] != 'started':
                    return -1
                ios_cnt += 1
        if s3_cnt and len(
                node_info_dict[node['hostname']]['s3server']) != s3_cnt:
            return -1
    return 0

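# Note (assumed shape, inferred from the lookups above rather than from the
# hctl documentation): list2dict() is expected to map each hostname reported
# by 'hctl status --json' to per-service status lists, e.g.
#
#   {'node-1': {'confd': ['started'],
#               'ioservice': ['started', 'started'],
#               's3server': ['started', 'started']}}
#
# check_cluster_status() returns 0 only when every confd, ioservice and
# s3server instance described in the CDF is reported as 'started'.
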
def nr_services() -> int:
    cmd = ['hctl', 'status', '--json']
    cluster_info = json.loads(execute(cmd))
    # Don't include hax here; it is counted just once below.
    services = {'confd', 'ioservice', 's3server'}
    svcs_nr = 0
    for node in cluster_info['nodes']:
        for svc in node['svcs']:
            if svc['name'] in services:
                svcs_nr += 1
    return svcs_nr + 1

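# Note (assumed shape, inferred from the loop above): each node entry in the
# 'hctl status --json' output is expected to carry a 'svcs' list whose items
# have a 'name' key, e.g.
#
#   {'nodes': [{'svcs': [{'name': 'confd'},
#                        {'name': 'ioservice'},
#                        {'name': 's3server'}]}]}
#
# Any other fields shown here are hypothetical. nr_services() counts the
# confd/ioservice/s3server entries across all nodes and adds 1 for hax.
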
def start_crond():
    cmd = ['/usr/sbin/crond', 'start']
    execute(cmd)

def start_hax_with_systemd():
    cmd = ['systemctl', 'start', 'hare-hax']
    execute(cmd)

def disable_hare_consul_agent() -> None:
    cmd = ['systemctl', 'disable', 'hare-consul-agent']
    execute(cmd)