def use(mg_path, cwd):
    """
    Handle `omg use`.

    With no args, print the currently selected must-gather and project.
    With --cwd, select the current working directory as the must-gather.
    With a path, locate the extracted must-gather under it and persist it.

    :param mg_path: Path to the (extracted) must-gather, or None
    :param cwd: True when --cwd was passed on the command line
    """
    if mg_path is None:
        if cwd is True:  # was `cwd == True`; identity check for the flag
            # If --cwd is set we will blindly assume current working directory
            # to be the must-gather to use
            c = Config(fail_if_no_path=False)
            c.save(path='.')
            print("Using your current working directory")
        else:
            # If no args are passed after `omg use`
            # we print the info about currently selected must-gather
            path = Config().path
            project = Config().project
            print('Current must-gather: %s' % path)
            print(' Current Project: %s' % project)
            try:
                # Local import: avoids a circular import at module load time
                from omg.cmd.get_main import get_resources
                infra = get_resources('Infrastructure')
                apiServerURL = [
                    i['res']['status']['apiServerURL'] for i in infra
                ]
                platform = [i['res']['status']['platform'] for i in infra]
                print(' Cluster API URL: %s' % str(apiServerURL))
                print(' Cluster Platform: %s' % str(platform))
            except Exception:
                # Best-effort info only; narrowed from a bare `except:` so
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                print(
                    '[ERROR] Unable to determine cluster API URL and Platform.'
                )
    else:
        c = Config(fail_if_no_path=False)
        p = mg_path
        # We traverse up to 3 levels to find the must-gather.
        # At each level, if it has only one dir we check inside it.
        # When we see the dirs /namespaces and /cluster-scoped-resources,
        # we assume it is the must-gather root.
        for _ in range(3):
            if os.path.isdir(p):
                if (os.path.isdir(os.path.join(p, 'namespaces'))
                        and os.path.isdir(
                            os.path.join(p, 'cluster-scoped-resources'))):
                    full_path = os.path.abspath(p)
                    c.save(path=full_path)
                    print('Using: ', p)
                    break
                elif len(os.listdir(p)) == 1:
                    # Exactly one entry: descend one level and retry
                    p = os.path.join(p, os.listdir(p)[0])
                else:
                    print(
                        '[ERROR] Invalid must-gather path. Please point to the extracted must-gather directory'
                    )
                    break
            else:
                print(
                    '[ERROR] Invalid path. Please give path to the extracted must-gather'
                )
                break
def complete_get(ctx: Context, args, incomplete):
    """
    Pull out objects args from Click context and return completions.
    """
    config = Config()
    requested_objects = ctx.params.get("objects")
    return generate_completions(config, requested_objects, incomplete)
def complete_containers(ctx: Context, args, incomplete):
    """
    Callback for container name (within a pod and ns) autocompletion
    :return: List of matching container names or empty list
    """
    config = Config()
    # A pod must already be on the command line before we can suggest
    # container names for it.
    if len(ctx.args) != 1:
        return []
    namespace = ctx.params.get("namespace") or config.project
    pod_name = ctx.args[0]
    pod_dir = os.path.join(
        config.path, "namespaces", namespace, "pods", pod_name
    )
    # Container dirs live inside the pod dir; skip the pod's .yaml files.
    return [
        entry
        for entry in os.listdir(pod_dir)
        if incomplete in entry and not entry.endswith(".yaml")
    ]
def list_projects(ctx, args, incomplete):
    """
    Callback for project name autocompletion
    :return: List of matching namespace names or empty list.
    """
    config = Config()
    if incomplete is None:
        return []
    namespaces_dir = os.path.join(config.path, "namespaces")
    return [
        namespace
        for namespace in os.listdir(namespaces_dir)
        if incomplete in namespace
    ]
def list_pods(ctx: Context, args, incomplete):
    """
    Callback for pod name (within an ns) autocompletion
    :return: List of matching pod names or empty list.
    """
    config = Config()
    # -n/--namespace takes precedence over the currently selected project
    namespace = ctx.params.get("namespace") or config.project
    pods_dir = os.path.join(config.path, "namespaces", namespace, "pods")
    return [name for name in os.listdir(pods_dir) if incomplete in name]
def project(name):
    """
    Handle `omg project` — show or switch the selected project (namespace).

    :param name: Project name to switch to, or None to print the current one
    """
    c = Config()
    ns_dir = os.path.join(c.path, "namespaces")
    if name is None:
        # No argument: just report the currently selected project
        if c.project is None:
            print("No project selected")
            return
        print('Using project "%s" on must-gather "%s"' % (c.project, c.path))
        return
    # Argument given: it must exist as a namespace dir in the must-gather
    if not os.path.isdir(os.path.join(ns_dir, name)):
        print("[ERROR] Project %s not found in %s" % (name, ns_dir))
        return
    if name == c.project:
        print('Already on project "%s" on server "%s"' % (c.project, c.path))
        return
    c.save(project=name)
    print(
        'Now using project "%s" on must-gather "%s"' % (c.project, c.path)
    )
def use(a):
    """
    Handle `omg use <path>` (argparse variant): locate the extracted
    must-gather under a.mg_path and persist it in the config.
    """
    c = Config(fail_if_no_path=False)
    candidate = a.mg_path
    # Traverse up to 3 levels looking for the must-gather root.
    # At each level, descend when there is exactly one entry; the root is
    # the dir containing both /namespaces and /cluster-scoped-resources.
    for _ in [1, 2, 3]:
        if not os.path.isdir(candidate):
            print('[ERROR] Invalid path. Please give path to the extracted must-gather')
            break
        has_namespaces = os.path.isdir(os.path.join(candidate, 'namespaces'))
        has_cluster_scoped = os.path.isdir(
            os.path.join(candidate, 'cluster-scoped-resources'))
        if has_namespaces and has_cluster_scoped:
            c.save(path=os.path.abspath(candidate))
            print('Using: ', candidate)
            break
        entries = os.listdir(candidate)
        if len(entries) == 1:
            candidate = os.path.join(candidate, entries[0])
        else:
            print('[ERROR] Invalid must-gather path. Please point to the extracted must-gather directory')
            break
def projects():
    """
    Handle `omg projects` — list all projects (namespaces) in the
    must-gather, marking the currently selected one with '*'.
    """
    c = Config()
    ns_dir = os.path.join(c.path, "namespaces")
    available = [
        entry
        for entry in os.listdir(ns_dir)
        if os.path.isdir(os.path.join(ns_dir, entry))
    ]
    print(
        "You have access to the following projects and can switch between them with 'omg project <projectname>':"
    )
    print()
    for name in available:
        if name == c.project:
            print(" * ", name)
        else:
            print(" ", name)
    print()
    # Finish with the current-project summary line
    project(None)
def complete_get(ctx: Context, args, incomplete):
    """
    Pull out objects args from Click context and return completions.

    :return: List of completion candidates, or [] on any failure —
             a completion callback must never raise.
    """
    try:
        c = Config()
        objects = ctx.params.get("objects")
        # If user has set namespace ( with -n), we use that
        # else we use namespace set by `omg project`
        namespace = ctx.params.get("namespace") or c.project
        return generate_completions(objects, incomplete, namespace)
    except Exception:
        # Swallow any exception so a broken config degrades to "no
        # completions". Narrowed from a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit.
        return []
def file_reader(path):
    """
    Read a file to be parsed and return raw buffer.

    :param path: File path relative to the configured must-gather path
    :return: Tuple (contents, error_flag) — (text, False) on success,
             ("", True) on any failure (a message is printed).
    """
    try:
        full_path = os.path.join(Config().path, path)
        with open(full_path, 'r') as f:
            return f.read(), False
    except IsADirectoryError:
        # Fixed typo in the message: "WANING" -> "WARNING"
        print("WARNING: ignoring file reader; Is a directory")
        return "", True
    except FileNotFoundError:
        print(f"ERROR: file [{path}] not found")
        return "", True
    except Exception:
        # Fixed typo in the message: "Unknow" -> "Unknown"
        print(f"ERROR: Unknown error opening file {path}")
        return "", True
def get_project(rt, ns, names, yaml_loc, need_ns):
    """
    Collect project/namespace resources from the must-gather yamls.

    :param rt: Resource type (unused here; kept for get_func signature)
    :param ns: Namespace (always overridden to '_all' below)
    :param names: Resource names to match, or ['_all'] for everything
    :param yaml_loc: Glob pattern (relative to must-gather) of the yamls
    :param need_ns: Whether the type is namespaced (unused here)
    :return: List of dicts {'res': resource, 'gen_ts': file mtime}
    """
    import glob
    mg_path = Config().path
    yaml_path = os.path.join(mg_path, yaml_loc)
    # we need all namespaces regardless if -A is set or not
    ns = "_all"
    yamls = glob.glob(yaml_path)
    # Collect the resources
    collected = []
    for yp in yamls:
        try:
            # record when was this yaml generated (to calc age)
            gen_ts = os.path.getmtime(yp)
            res = load_yaml_file(yp)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still works
            print("[ERROR] Could not read file:", yp)
            sys.exit(1)
        # add objects to collected if name matches
        # or if we want to get all the objects (e.g `get pods`)
        if 'items' in res:
            # we got a list
            if res['items'] is not None and len(res['items']) > 0:
                collected.extend(
                    [{
                        'res': r,
                        'gen_ts': gen_ts
                    } for r in res['items']
                     if r['metadata']['name'] in names or '_all' in names])
            # else the list was empty/none, we dont' add anything to collected
        elif 'metadata' in res:
            # we got a single item
            collected.extend([{
                'res': res,
                'gen_ts': gen_ts
            }] if res['metadata']['name'] in names or '_all' in names else [])
    return collected
def get_project(ns, names, yaml_loc, need_ns, print_warnings=True):
    """
    Collect project/namespace resources from the must-gather yamls.

    :param ns: Namespace (projects are cluster-wide; all yamls are globbed)
    :param names: Resource names to match, or ['_all'] for everything
    :param yaml_loc: Glob pattern (relative to must-gather) of the yamls
    :param need_ns: Whether the type is namespaced (unused here)
    :param print_warnings: Forwarded to load_yaml_file
    :return: List of dicts {'res': resource, 'gen_ts': file mtime}
    """
    mg_path = Config().path
    yaml_path = os.path.join(mg_path, yaml_loc)
    # we need all namespaces regardless if -A is set or not
    yamls = glob.glob(yaml_path)
    # Collect the resources
    collected = []
    for yp in yamls:
        try:
            # record when was this yaml generated (to calc age)
            gen_ts = os.path.getmtime(yp)
            res = load_yaml_file(yp, print_warnings)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still works
            print("[ERROR] Could not read file:", yp)
            sys.exit(1)
        # add objects to collected if name matches
        # or if we want to get all the objects (e.g `get pods`)
        if "items" in res:
            # we got a list
            if res["items"] is not None and len(res["items"]) > 0:
                collected.extend(
                    [{
                        "res": r,
                        "gen_ts": gen_ts
                    } for r in res["items"]
                     if r["metadata"]["name"] in names or "_all" in names])
            # else the list was empty/none, we dont' add anything to collected
        elif "metadata" in res:
            # we got a single item
            collected.extend([{
                "res": res,
                "gen_ts": gen_ts
            }] if res["metadata"]["name"] in names or "_all" in names else [])
    return collected
def get_main(a):
    """
    Entry point for `omg get` (argparse variant).

    Parses the requested resource types/names from the cli args, fetches
    each via its get function, and prints in the requested output format.

    :param a: argparse Namespace with all_namespaces, namespace, objects,
              and output attributes
    """
    # a = args passed from cli
    # Check if -A/--all-namespaces is set
    # else, Set the namespace
    # -n/--namespace takes precedence over current project
    if a.all_namespaces is True:
        ns = '_all'
    else:
        if a.namespace is not None:
            ns = a.namespace
        elif Config().project is not None:
            ns = Config().project
        else:
            ns = None
    # We collect the resources types/names in this dict
    # e.g for `get pod httpd1 httpd2` this will look like:
    # objects = { 'pod': ['httpd1', 'httpd2'] }
    # e.g, for `get pod,svc` this will look like:
    # objects = { 'pod': ['_all'], 'service': ['_all'] }
    objects = {}
    last_object = []
    for o in a.objects:
        # Case where we have a '/'
        # e.g omg get pod/httpd
        if '/' in o:
            if not last_object:
                pre = o.split('/')[0]
                # NOTE(review): if map_res(pre) returns None this raises
                # TypeError before the r_type None-check below — confirm
                r_type = map_res(pre)['type']
                r_name = o.split('/')[1]
                # If its a valid resource type, apppend it to objects
                if r_type is not None:
                    if r_type in objects:
                        objects[r_type].append(r_name)
                    else:
                        objects[r_type] = [r_name]
                else:
                    print("[ERROR] Invalid object type: ", pre)
                    sys.exit(1)
            else:
                # last_object was set, meaning this should be object name
                print(
                    "[ERROR] There is no need to specify a resource type as a separate argument when passing arguments in resource/name form"
                )
                sys.exit(1)
        # Case where we have a ',' e.g `get dc,svc,pod httpd`
        # These all will be resource_types, not names,
        # resource_name will come it next iteration (if any)
        elif ',' in o:
            if not last_object:
                r_types = o.split(',')
                for rt in r_types:
                    check_rt = map_res(rt)
                    if check_rt is None:
                        print("[ERROR] Invalid object type: ", rt)
                        sys.exit(1)
                    else:
                        last_object.append(check_rt['type'])
            else:
                # last_object was set, meaning this should be object name
                print("[ERROR] Invalid resources to get: ", a.objects)
                sys.exit(1)
        # Simple word (without , or /)
        # If last_object was not set, means this is a resource_type
        elif not last_object:
            check_rt = map_res(o)
            if check_rt is not None:
                last_object = [check_rt['type']]
            else:
                print("[ERROR] Invalid resource type to get: ", o)
                # NOTE(review): unlike the other error branches there is no
                # sys.exit(1) here, so processing continues — confirm intended
        # Simple word (without , or /)
        # If the last_object was set, means we got resource_type last time,
        # and this should be a resource_name.
        elif last_object:
            for rt in last_object:
                if rt in objects:
                    objects[rt].append(o)
                else:
                    objects[rt] = [o]
            last_object = []
        else:
            # Should never happen
            print("[ERROR] Invalid resources to get: ", o)
            sys.exit(1)
    # If after going through all the args, we have last_object set
    # means we didn't get a resource_name for these resource_type.
    # i.e, we need to get all names
    if last_object:
        for rt in last_object:
            check_rt = map_res(rt)
            objects[check_rt['type']] = ['_all']
    # Debug
    # print(objects)
    # Object based routing
    # i.e, call the get function for all the requested types
    # then call the output function or simply print if its yaml/json
    for rt in objects.keys():
        rt_info = map_res(rt)
        get_func = rt_info['get_func']
        getout_func = rt_info['getout_func']
        yaml_loc = rt_info['yaml_loc']
        need_ns = rt_info['need_ns']
        # Call the get function to get the resoruces
        res = get_func(rt, ns, objects[rt], yaml_loc, need_ns)
        # Error out if no objects/resources were collected
        # NOTE(review): message reads 'for type ... found in namespace' —
        # doubled "found" looks like a wording bug
        if len(res) == 0:
            print('No resources found for type "%s" found in namespace "%s" '
                  % (rt, ns))
        # Yaml dump if -o yaml
        elif a.output == 'yaml':
            if len(res) == 1:
                print(yaml.dump(res[0]['res']))
            elif len(res) > 1:
                print(
                    yaml.dump({
                        'apiVersion': 'v1',
                        'items': [cp['res'] for cp in res]
                    }))
        # Json dump if -o json
        elif a.output == 'json':
            if len(res) == 1:
                print(json.dumps(res[0]['res'], indent=4))
            elif len(res) > 1:
                print(
                    json.dumps(
                        {
                            'apiVersion': 'v1',
                            'items': [cp['res'] for cp in res]
                        },
                        indent=4))
        # Call the respective output fucntion if -o is not set or -o wide
        elif a.output in [None, 'wide']:
            # If we displaying more than one resource_type,
            # we need to display resource_type with the name (type/name)
            if len(objects) > 1:
                show_type = True
            else:
                show_type = False
            getout_func(rt, ns, res, a.output, show_type)
def get_main(objects, output, namespace, all_namespaces, show_labels):
    """
    Entry point for `omg get`.

    Resolves the namespace, parses the requested resources, fetches each
    via get_resources, and prints in the requested output format.

    :param objects: Raw resource args from the cli (e.g. ['pod', 'httpd'])
    :param output: Output format: None, 'wide', 'yaml' or 'json'
    :param namespace: -n/--namespace value, or None
    :param all_namespaces: True when -A/--all-namespaces was passed
    :param show_labels: Forwarded to the per-type output function
    """
    # Check if -A/--all-namespaces is set
    # else, Set the namespace
    # -n/--namespace takes precedence over current project
    if all_namespaces is True:
        ns = "_all"
    else:
        if namespace is not None:
            ns = namespace
        elif Config().project is not None:
            ns = Config().project
        else:
            ns = None
    # Parse get args and get normalized resources to get
    try:
        _, resource_list = parse.parse_get_resources(objects)
    except parse.ResourceParseError as e:
        print(e)
        return
    # Call the get function for all the requested types
    # then call the output function or simply print if its yaml/json
    # Flag to mark if we have already printed something
    printed_something = False
    for r_type, r_name in resource_list:
        res = get_resources(r_type, r_name, ns)
        if len(res) > 0:
            # If printing multiple objects, add a blank line between each
            if printed_something:
                print("")
            # Yaml dump if -o yaml
            if output == "yaml":
                if len(res) == 1:
                    print(yaml.dump(res[0]["res"]))
                elif len(res) > 1:
                    print(
                        yaml.dump({
                            "apiVersion": "v1",
                            "items": [cp["res"] for cp in res]
                        }))
            # Json dump if -o json
            elif output == "json":
                if len(res) == 1:
                    print(json.dumps(res[0]["res"], indent=4))
                elif len(res) > 1:
                    print(
                        json.dumps(
                            {
                                "apiVersion": "v1",
                                "items": [cp["res"] for cp in res]
                            },
                            indent=4,
                        ))
            # Call the respective output function if -o is not set or -o wide
            elif output in [None, "wide"]:
                # If we displaying more than one resource_type,
                # we need to display resource_type with the name (type/name)
                if len(resource_list) > 1:
                    show_type = True
                else:
                    show_type = False
                getout_func = map_res(r_type)["getout_func"]
                getout_func(r_type, ns, res, output, show_type, show_labels)
            # We printed something
            printed_something = True
    # Error out if nothing was printed
    if not printed_something:
        print("No resources found in %s namespace" % ns)
def extract(m):
    """
    Extract embedded machine-config content (storage files, systemd units,
    passwd users) into <must-gather>/extracted-machine-configs/<mc-name>/.

    :param m: Machine-config name selector passed to get_mc
    """
    mg_path = Config().path
    emc_dir = 'extracted-machine-configs'
    emc_path = os.path.join(mg_path, emc_dir)
    os.makedirs(emc_path, exist_ok=True)
    mcs = get_mc(m)
    for mc in mcs:
        if 'metadata' in mc and 'name' in mc['metadata']:
            mc_name = mc['metadata']['name']
        else:
            print('[WARNING] Skipping machine-config. Name not found')
            continue
        # One output dir per machine-config
        mc_path = os.path.join(emc_path, mc_name)
        os.makedirs(mc_path, exist_ok=True)
        if 'spec' in mc and 'config' in mc['spec']:
            # storage
            if 'storage' in mc['spec']['config']:
                storage_path = os.path.join(mc_path, 'storage')
                storage = mc['spec']['config']['storage']
                if 'files' in storage:
                    for fi in storage['files']:
                        path = fi['path']
                        # strip leading '/' to make the path relative
                        rel_fil = path[1:]
                        rel_dir = os.path.dirname(rel_fil)
                        abs_dir = os.path.join(storage_path, rel_dir)
                        abs_fil = os.path.join(storage_path, rel_fil)
                        os.makedirs(abs_dir,exist_ok=True)
                        with open(abs_fil, 'w') as fh:
                            print(abs_fil)
                            fh.write(
                                decode_content(fi['contents']['source'])
                            )
                # TODO directories, links, disks, raid, filesystems
            # systemd
            if 'systemd' in mc['spec']['config']:
                systemd_path = os.path.join(mc_path, 'systemd')
                systemd = mc['spec']['config']['systemd']
                if 'units' in systemd:
                    for unit in systemd['units']:
                        if 'dropins' in unit:
                            # dropins go in a <unit>.d subdirectory
                            systemd_path = os.path.join(
                                mc_path, 'systemd/' + unit['name'] + '.d')
                            # NOTE(review): the inner loop reuses the name
                            # `unit`, shadowing the outer loop variable —
                            # confirm this is intended
                            for unit in unit['dropins']:
                                write_unit(systemd_path, unit)
                        else:
                            write_unit(systemd_path, unit)
            # passwd
            if 'passwd' in mc['spec']['config']:
                passwd = mc['spec']['config']['passwd']
                passwd_path = os.path.join(mc_path, 'passwd')
                if 'users' in passwd:
                    for user in passwd['users']:
                        os.makedirs(passwd_path,exist_ok=True)
                        name = user['name']
                        abs_fil = os.path.join(passwd_path, name)
                        with open(abs_fil, 'w') as fh:
                            print(abs_fil)
                            fh.write(
                                yaml.dump(user)
                                # TODO groups
                            )
def get_main(objects, output, namespace, all_namespaces):
    """
    Entry point for `omg get`.

    Resolves the namespace, parses the requested resources, fetches each
    type via its get function, and prints in the requested output format.

    :param objects: Raw resource args from the cli (e.g. ['pod', 'httpd'])
    :param output: Output format: None, 'wide', 'yaml' or 'json'
    :param namespace: -n/--namespace value, or None
    :param all_namespaces: True when -A/--all-namespaces was passed
    """
    # a = args passed from cli
    # Check if -A/--all-namespaces is set
    # else, Set the namespace
    # -n/--namespace takes precedence over current project
    if all_namespaces is True:
        ns = '_all'
    else:
        if namespace is not None:
            ns = namespace
        elif Config().project is not None:
            ns = Config().project
        else:
            ns = None
    try:
        get_method, resource_list = parse.parse_get_resources(objects)
    except parse.ResourceParseError as e:
        print(e)
        return
    # Object based routing
    # i.e, call the get function for all the requested types
    # then call the output function or simply print if its yaml/json
    # If printing multiple objects, add a blank line between each
    mult_objs_blank_line = False
    for r_type, res_set in resource_list:
        rt_info = map_res(r_type)
        get_func = rt_info['get_func']
        getout_func = rt_info['getout_func']
        yaml_loc = rt_info['yaml_loc']
        need_ns = rt_info['need_ns']
        # Call the get function to get the resoruces
        res = get_func(r_type, ns, res_set, yaml_loc, need_ns)
        # Error out if no objects/resources were collected
        if len(res) == 0 and get_method is not parse.Method.ALL:
            print('No resources found for type "%s" in %s namespace' %
                  (r_type, ns))
        elif len(res) > 0:
            # If printing multiple objects, add a blank line between each
            if mult_objs_blank_line:
                print('')
            # Yaml dump if -o yaml
            if output == 'yaml':
                if len(res) == 1:
                    print(yaml.dump(res[0]['res']))
                elif len(res) > 1:
                    print(
                        yaml.dump({
                            'apiVersion': 'v1',
                            'items': [cp['res'] for cp in res]
                        }))
            # Json dump if -o json
            elif output == 'json':
                if len(res) == 1:
                    print(json.dumps(res[0]['res'], indent=4))
                elif len(res) > 1:
                    print(
                        json.dumps(
                            {
                                'apiVersion': 'v1',
                                'items': [cp['res'] for cp in res]
                            },
                            indent=4))
            # Call the respective output function if -o is not set or -o wide
            elif output in [None, 'wide']:
                # If we displaying more than one resource_type,
                # we need to display resource_type with the name (type/name)
                if len(resource_list) > 1:
                    show_type = True
                else:
                    show_type = False
                getout_func(r_type, ns, res, output, show_type)
            # Flag to print multiple objects
            if not mult_objs_blank_line:
                mult_objs_blank_line = True
    # Error out once if multiple objects/resources requested and none collected
    if not mult_objs_blank_line and get_method == parse.Method.ALL:
        print('No resources found in %s namespace' % ns)
def extract(m):
    """
    Extract embedded machine-config content (storage files, systemd units,
    passwd users) into <must-gather>/extracted-machine-configs/<mc-name>/.

    :param m: Machine-config name selector passed to get_resources
    """
    mg_path = Config().path
    emc_dir = "extracted-machine-configs"
    emc_path = os.path.join(mg_path, emc_dir)
    os.makedirs(emc_path, exist_ok=True)
    mcs_res = get_resources("machineconfig", m, None)
    # unwrap the {'res': ..., 'gen_ts': ...} records
    mcs = [mc["res"] for mc in mcs_res]
    for mc in mcs:
        if "metadata" in mc and "name" in mc["metadata"]:
            mc_name = mc["metadata"]["name"]
        else:
            print("[WARNING] Skipping machine-config. Name not found")
            continue
        # One output dir per machine-config
        mc_path = os.path.join(emc_path, mc_name)
        os.makedirs(mc_path, exist_ok=True)
        if "spec" in mc and "config" in mc["spec"]:
            # storage
            if "storage" in mc["spec"]["config"]:
                storage_path = os.path.join(mc_path, "storage")
                storage = mc["spec"]["config"]["storage"]
                if "files" in storage:
                    for fi in storage["files"]:
                        path = fi["path"]
                        # strip leading '/' to make the path relative
                        rel_fil = path[1:]
                        rel_dir = os.path.dirname(rel_fil)
                        abs_dir = os.path.join(storage_path, rel_dir)
                        abs_fil = os.path.join(storage_path, rel_fil)
                        os.makedirs(abs_dir, exist_ok=True)
                        with open(abs_fil, "w") as fh:
                            print(abs_fil)
                            fh.write(decode_content(fi["contents"]["source"]))
                # TODO directories, links, disks, raid, filesystems
            # systemd
            if "systemd" in mc["spec"]["config"]:
                systemd_path = os.path.join(mc_path, "systemd")
                systemd = mc["spec"]["config"]["systemd"]
                if "units" in systemd:
                    for unit in systemd["units"]:
                        if "dropins" in unit:
                            # dropins go in a <unit>.d subdirectory
                            systemd_path = os.path.join(
                                mc_path, "systemd/" + unit["name"] + ".d")
                            # NOTE(review): the inner loop reuses the name
                            # `unit`, shadowing the outer loop variable —
                            # confirm this is intended
                            for unit in unit["dropins"]:
                                write_unit(systemd_path, unit)
                        else:
                            write_unit(systemd_path, unit)
            # passwd
            if "passwd" in mc["spec"]["config"]:
                passwd = mc["spec"]["config"]["passwd"]
                passwd_path = os.path.join(mc_path, "passwd")
                if "users" in passwd:
                    for user in passwd["users"]:
                        os.makedirs(passwd_path, exist_ok=True)
                        name = user["name"]
                        abs_fil = os.path.join(passwd_path, name)
                        with open(abs_fil, "w") as fh:
                            print(abs_fil)
                            fh.write(yaml.dump(user))
def map_res(t):
    """
    Map a resource type string (or alias) to its resource definition dict
    (keys: type, aliases, need_ns, get_func, getout_func, yaml_loc).

    Falls back to building the definition dynamically from the CRDs found
    in the must-gather when the static map has no entry.

    :param t: Resource type or alias (case-insensitive), or None
    :return: The definition dict, or None when no match is found
    """
    if t is not None:
        for x in map:
            # match the input with type: or alias (without case)
            if t.lower() == x["type"].lower() or t.lower() in [
                    y.lower() for y in x["aliases"]
            ]:
                return x
        # If we didn't find the resource definition in the map dictionary
        # We "try" to build the definition dynamically from the crd definitions
        # Idea credit goes to bostrt: https://github.com/kxr/o-must-gather/issues/34
        # This is just fall back and expensive to compute,
        # ideally these definitions should be added to the map dict for better lookup performance
        try:
            crds = from_yaml(
                "_all",
                "_all",
                "cluster-scoped-resources/apiextensions.k8s.io/customresourcedefinitions",
                False,
            )
            for c in crds:
                singular = c["res"]["spec"]["names"]["singular"]
                plural = c["res"]["spec"]["names"]["plural"]
                if "shortNames" in c["res"]["spec"]["names"]:
                    short = c["res"]["spec"]["names"]["shortNames"]
                else:
                    short = []
                aliases = [plural] + short
                if t.lower() == singular or t.lower() in aliases:
                    # We found the resource type in crd
                    res_dict = {
                        "type": singular,
                        "aliases": aliases,
                        "need_ns": None,
                        "get_func": from_yaml,
                        "getout_func": simple_out,
                        "yaml_loc": None,
                    }
                    # need_ns
                    if c["res"]["spec"]["scope"] == "Namespaced":
                        res_dict["need_ns"] = True
                        yaml_pre = "namespaces/%s/"
                    elif c["res"]["spec"]["scope"] == "Cluster":
                        res_dict["need_ns"] = False
                        yaml_pre = "cluster-scoped-resources/"
                    else:
                        # Unhandled situation
                        return None
                    # yaml_loc
                    group = c["res"]["spec"]["group"]
                    mg_path = Config().path
                    res_dict["yaml_loc"] = os.path.join(
                        mg_path, yaml_pre, group, plural
                    )
                    return res_dict
        except Exception:
            # The CRD fallback is best-effort; narrowed from a bare
            # `except:` so KeyboardInterrupt/SystemExit still propagate.
            pass
    return None
def use(mg_path, cwd):
    """
    Handle `omg use`.

    With no args, print info about the currently selected must-gather.
    With --cwd, blindly select the current working directory.
    With a path (a directory, or a compressed file to inflate first),
    locate the extracted must-gather root and persist it in the config.

    :param mg_path: Path to the must-gather (dir or archive), or None
    :param cwd: True when --cwd was passed on the command line
    """
    if mg_path is None:
        if cwd is True:
            # If --cwd is set we will blindly assume current working directory
            # to be the must-gather to use
            c = Config(fail_if_no_path=False)
            c.save(path=".")
            print("Using your current working directory")
        else:
            # If no args are passed after `omg use`
            # we print the info about currently selected must-gather
            path = Config().path
            project = Config().project
            print("Current must-gather: %s" % path)
            print(" Current Project: %s" % project)
            try:
                # Local import: avoids a circular import at module load time
                from omg.cmd.get_main import get_resources
                infra = get_resources("Infrastructure")
                network = get_resources("Network")
                apiServerURL = [
                    i["res"]["status"]["apiServerURL"] for i in infra
                ]
                platform = [i["res"]["status"]["platform"] for i in infra]
                sdn = [n["res"]["status"]["networkType"] for n in network]
                print(" Cluster API URL: %s" % str(apiServerURL))
                print(" Cluster Platform: %s" % str(platform))
                print(" Cluster SDN Plugin: %s" % str(sdn))
            except Exception:
                # Best-effort info only; narrowed from a bare `except:` so
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                print(
                    "[ERROR] Unable to determine cluster API URL and Platform."
                )
    else:
        c = Config(fail_if_no_path=False)
        p = mg_path
        # Check if the 'path' is a file:
        if os.path.isfile(p):
            # So, if is a file, try to inflate it:
            real_p = inflate_file(p)
            if not real_p:
                return
            # If all is ok, we now set the uncompressed directory as the
            # new 'p' and we let the "use" flow continue:
            p = real_p
        # We traverse up to 3 levels to find the must-gather.
        # At each level, if it has only one dir we check inside it.
        # When we see the dirs /namespaces and /cluster-scoped-resources,
        # we assume it is the must-gather root.
        for _ in range(3):
            if os.path.isdir(p):
                dirs = [
                    d for d in os.listdir(p)
                    if os.path.isdir(os.path.join(p, d))
                ]
                if "namespaces" in dirs or "cluster-scoped-resources" in dirs:
                    full_path = os.path.abspath(p)
                    c.save(path=full_path)
                    print("Using: ", p)
                    break
                elif len(dirs) == 1:
                    # Exactly one subdir: descend one level and retry
                    p = os.path.join(p, dirs[0])
                elif len(dirs) > 1:
                    print("[ERROR] Multiple directories found:", dirs)
                    break
                else:
                    print(
                        "[ERROR] Invalid must-gather path. Please point to the extracted must-gather directory"
                    )
                    break
            else:
                print(
                    "[ERROR] Invalid path. Please give path to the extracted must-gather"
                )
                break
def from_yaml(ns, names, yaml_loc, need_ns, print_warnings=True):
    """
    Generic resource collector: read resources from must-gather yamls.

    :param ns: Namespace, '_all' for every namespace, or None
    :param names: Resource names to match, or ['_all'] for everything
    :param yaml_loc: Yaml location relative to the must-gather; contains
                     a '%s' placeholder for the namespace when need_ns
    :param need_ns: Whether the resource type is namespaced
    :param print_warnings: Whether to print errors/warnings
    :return: List of dicts {'res': resource, 'gen_ts': file mtime}
    """
    mg_path = Config().path
    yaml_path = os.path.join(mg_path, yaml_loc)
    if need_ns:
        # Error out if it needs ns and its not set.
        if ns is None:
            if print_warnings:
                print(
                    "[ERROR] Namespace not set. Select a project (omg project) or specify a namespace (-n)"
                )
            sys.exit(1)
        # Get all namespace names if we need all
        elif ns == '_all':
            nses = os.listdir(os.path.join(mg_path, 'namespaces'))
            yaml_paths = [yaml_path % (n) for n in nses]
        else:
            yaml_paths = [yaml_path % (ns)]
    else:
        yaml_paths = [yaml_path]
    yamls = []
    for ym in yaml_paths:
        # if yaml_paths is a dir, we will read all yamls from this dir
        if os.path.isdir(ym):
            yamls.extend([
                os.path.join(ym, y) for y in os.listdir(ym)
                if y.endswith('.yaml')
            ])
        elif os.path.isfile(ym) and ym.endswith('.yaml'):
            yamls.append(ym)
    # Debug
    # print(yamls)
    # Collect the resources
    collected = []
    for yp in yamls:
        try:
            # record when was this yaml generated (to calc age)
            gen_ts = os.path.getmtime(yp)
            res = load_yaml_file(yp, print_warnings)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still works
            if print_warnings:
                print("[ERROR] Could not read file:", yp)
            sys.exit(1)
        # add objects to collected if name matches
        # or if we want to get all the objects (e.g `get pods`)
        if 'items' in res:
            # we got a list
            if res['items'] is not None and len(res['items']) > 0:
                collected.extend(
                    [{
                        'res': r,
                        'gen_ts': gen_ts
                    } for r in res['items']
                     if r['metadata']['name'] in names or '_all' in names])
            # else the list was empty/none, we dont' add anything to collected
        elif 'metadata' in res:
            # we got a single item
            collected.extend([{
                'res': res,
                'gen_ts': gen_ts
            }] if res['metadata']['name'] in names or '_all' in names else [])
    return collected
def log(resource, container, previous, namespace, all_namespaces):
    """
    Handle `omg logs` — print the captured log of a pod's container.

    :param resource: Pod name, or 'pod/<name>' form
    :param container: -c/--container value, or None
    :param previous: True to print previous.log instead of current.log
    :param namespace: -n/--namespace value, or None
    :param all_namespaces: -A flag; not supported here (errors out)
    """
    if all_namespaces is True:
        print('[ERROR] All Namespaces is not supported with log')
        sys.exit(1)
    else:
        # -n/--namespace takes precedence over the selected project
        if namespace is not None:
            ns = namespace
        elif Config().project is not None:
            ns = Config().project
        else:
            # ns not set
            print('[ERROR] Namespaces/project not set')
            sys.exit(1)
    ns_dir = os.path.join(Config().path, 'namespaces', ns)
    # check if ns directory exists
    if not os.path.isdir(ns_dir):
        print('[ERROR] Namespace not found:', ns)
        sys.exit(1)
    # pod
    if '/' in resource:
        r_type = resource.split('/')[0]
        pod = resource.split('/')[1]
        if r_type not in ['pod', 'pods']:
            print('[ERROR] Can not print logs of type:', r_type)
            sys.exit(1)
    else:
        pod = resource
    # check if top pod directory exits
    pod_dir = os.path.join(ns_dir, 'pods', pod)
    if not os.path.isdir(pod_dir):
        print('[ERROR] Pod directory not found:', pod_dir)
        sys.exit(1)
    # Containers are dirs in pod_dir
    containers = [
        c for c in os.listdir(pod_dir)
        if os.path.isdir(os.path.join(pod_dir, c))
    ]
    # If we have > 1 containers and -c/--container is not specified, error out
    if len(containers) == 0:
        # Fixed double negative in the message ("No ... not found")
        print('[ERROR] No container directory found in pod directory:',
              pod_dir)
        sys.exit(1)
    elif len(containers) > 1:
        if container is None:
            print('[ERROR] This pod has more than one containers:')
            print(' ', str(containers))
            print(' Use -c/--container to specify the container')
            sys.exit(1)
        else:
            con_to_log = container
    else:
        # len(containers) == 1
        con_to_log = containers[0]
    if previous:
        log_f = 'previous.log'
    else:
        log_f = 'current.log'
    file_to_cat = os.path.join(pod_dir, con_to_log, con_to_log, 'logs', log_f)
    if not os.path.isfile(file_to_cat):
        print('[ERROR] Log file missing: ', file_to_cat)
        sys.exit(1)
    # ouput the log file
    print(file_to_cat)
    with open(file_to_cat, 'r') as ol:
        print(ol.read())