def crd_out(t, ns, res, output, show_type, show_labels):
    """Print CustomResourceDefinitions as a plain-text table.

    Rows are sorted by the first column (name); the header is
    prepended only after sorting.
    """
    output_res = []
    # header (appended after sorting the data rows)
    header = ['NAME', 'CREATED AT']
    if show_labels:
        header.append('LABELS')
    # resources
    for r in res:
        p = r['res']
        row = []
        # name, optionally prefixed with the resource type
        if show_type:
            row.append(t + '/' + p['metadata']['name'])
        else:
            row.append(p['metadata']['name'])
        # creation timestamp; tolerate resources missing the field
        # (narrowed from a bare except so real errors still surface)
        try:
            row.append(p['metadata']['creationTimestamp'])
        except (KeyError, TypeError):
            row.append('Unknown')
        # show-labels
        if show_labels:
            row.append(extract_labels(p))
        output_res.append(row)
    # sort by 1st column
    sorted_output = sorted(output_res)
    sorted_output.insert(0, header)
    print(tabulate(sorted_output, tablefmt="plain"))
def opsub_out(*args):
    """ Operator Subscriptions parser. """
    ns, resources, show_labels = args[1], args[2], args[5]
    output_res = _build_output_res(
        *args, fields=["NAME", "PACKAGE", "SOURCE", "CHANNEL"])
    for item in resources:
        sub = item["res"]
        # prefix the namespace column when listing across all namespaces
        row = [sub["metadata"]["namespace"]] if ns == "_all" else []
        row += [
            sub["metadata"]["name"],
            sub["spec"]["name"],
            sub["spec"]["source"],
            sub["spec"]["channel"],
        ]
        if show_labels:
            row.append(extract_labels(sub))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def opgrp_out(*args):
    """ Operator OperatorGroups parser. """
    ns, resources, show_labels = args[1], args[2], args[5]
    output_res = _build_output_res(*args, fields=["NAME", "AGE"])
    for item in resources:
        grp = item["res"]
        # prefix the namespace column when listing across all namespaces
        row = [grp["metadata"]["namespace"]] if ns == "_all" else []
        row.append(grp["metadata"]["name"])
        row.append(age(grp["metadata"]["creationTimestamp"], item["gen_ts"]))
        if show_labels:
            row.append(extract_labels(grp))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def project_out(t, ns, res, output, show_type, show_labels):
    """Print OpenShift Projects as a plain-text table."""
    output_projects = [[]]
    # header
    output_projects[0].extend(['NAME', 'DISPLAY NAME', 'STATUS'])
    if show_labels:
        output_projects[0].append('LABELS')
    # project
    for project in res:
        p = project['res']
        row = []
        # name
        row.append(p['metadata']['name'])
        # display name: a project may have no annotations at all,
        # which previously raised KeyError
        annotations = p['metadata'].get('annotations', {})
        row.append(annotations.get('openshift.io/display-name', ''))
        # status
        row.append(p['status']['phase'])
        # show-labels
        if show_labels:
            row.append(extract_labels(p))
        output_projects.append(row)
    print(tabulate(output_projects, tablefmt="plain"))
def opcsv_out(*args):
    """ Operator ClusterServiceVersions parser. """
    ns, resources, show_labels = args[1], args[2], args[5]
    output_res = _build_output_res(
        *args, fields=["NAME", "DISPLAY", "VERSION", "REPLACES", "PHASE"])
    for item in resources:
        csv = item["res"]
        # prefix the namespace column when listing across all namespaces
        row = [csv["metadata"]["namespace"]] if ns == "_all" else []
        row.append(csv["metadata"]["name"])
        row.append(csv["spec"]["displayName"])
        row.append(csv["spec"]["version"])
        # "replaces" is optional on a CSV
        row.append(csv["spec"].get("replaces", ''))
        row.append(csv["status"]["phase"])
        if show_labels:
            row.append(extract_labels(csv))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def project_out(t, ns, res, output, show_type, show_labels):
    """Print OpenShift Projects as a plain-text table."""
    output_projects = [[]]
    # header
    output_projects[0].extend(["NAME", "DISPLAY NAME", "STATUS"])
    if show_labels:
        output_projects[0].append("LABELS")
    # project
    for project in res:
        p = project["res"]
        row = []
        # name
        row.append(p["metadata"]["name"])
        # display name: a project may have no annotations at all,
        # which previously raised KeyError
        annotations = p["metadata"].get("annotations", {})
        row.append(annotations.get("openshift.io/display-name", ""))
        # status
        row.append(p["status"]["phase"])
        # show-labels
        if show_labels:
            row.append(extract_labels(p))
        output_projects.append(row)
    print(tabulate(output_projects, tablefmt="plain"))
def opip_out(*args):
    """ Operator InstallPlans parser. """
    ns, resources, show_labels = args[1], args[2], args[5]
    output_res = _build_output_res(
        *args, fields=["NAME", "CSV", "APPROVAL", "APPROVED"])
    for item in resources:
        ip = item["res"]
        # An InstallPlan may carry several CSVs: emit one row per CSV name.
        # TODO check if that is same behavior of CLI.
        for csv_name in ip["spec"]["clusterServiceVersionNames"]:
            row = [ip["metadata"]["namespace"]] if ns == "_all" else []
            row += [
                ip["metadata"]["name"],
                csv_name,
                ip["spec"]["approval"],
                ip["spec"]["approved"],
            ]
            if show_labels:
                row.append(extract_labels(ip))
            output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def opcatsrc_out(*args):
    """ Operator CatalogSources parser. """
    ns, resources, show_labels = args[1], args[2], args[5]
    output_res = _build_output_res(
        *args, fields=["NAME", "DISPLAY", "TYPE", "PUBLISHER", "AGE"])
    for item in resources:
        src = item["res"]
        # prefix the namespace column when listing across all namespaces
        row = [src["metadata"]["namespace"]] if ns == "_all" else []
        row += [
            src["metadata"]["name"],
            src["spec"]["displayName"],
            src["spec"]["sourceType"],
            src["spec"]["publisher"],
            age(src["metadata"]["creationTimestamp"], item["gen_ts"]),
        ]
        if show_labels:
            row.append(extract_labels(src))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def crd_out(t, ns, res, output, show_type, show_labels):
    """Print CustomResourceDefinitions as a plain-text table.

    Rows are sorted by the first column (name); the header is
    prepended only after sorting.
    """
    output_res = []
    # header (appended after sorting the data rows)
    header = ["NAME", "CREATED AT"]
    if show_labels:
        header.append("LABELS")
    # resources
    for r in res:
        p = r["res"]
        row = []
        # name, optionally prefixed with the resource type
        if show_type:
            row.append(t + "/" + p["metadata"]["name"])
        else:
            row.append(p["metadata"]["name"])
        # creation timestamp; tolerate resources missing the field
        # (narrowed from a bare except so real errors still surface)
        try:
            row.append(p["metadata"]["creationTimestamp"])
        except (KeyError, TypeError):
            row.append("Unknown")
        # show-labels
        if show_labels:
            row.append(extract_labels(p))
        output_res.append(row)
    # sort by 1st column
    sorted_output = sorted(output_res)
    sorted_output.insert(0, header)
    print(tabulate(sorted_output, tablefmt="plain"))
def sc_out(t, ns, res, output, show_type, show_labels):
    """Print StorageClasses as a plain-text table, sorted by name."""
    output_res = []
    # header (prepended after sorting)
    header = [
        "NAME",
        "PROVISIONER",
        "RECLAIMPOLICY",
        "VOLUMEBINDINGMODE",
        "ALLOWVOLUMEEXPANSION",
        "AGE",
    ]
    if show_labels:
        header.append("LABELS")
    # resources
    for r in res:
        sc = r["res"]
        row = []
        # name, optionally prefixed with the resource type
        if show_type:
            row.append(t + "/" + sc["metadata"]["name"])
        else:
            row.append(sc["metadata"]["name"])
        # provisioner
        row.append(sc["provisioner"])
        # reclaimpolicy
        row.append(sc["reclaimPolicy"])
        # volumebindingmode
        row.append(sc["volumeBindingMode"])
        # allowVolumeExpansion is optional; Kubernetes defaults it to false
        row.append(sc.get("allowVolumeExpansion", "false"))
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(sc["metadata"]["creationTimestamp"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # show-labels
        if show_labels:
            row.append(extract_labels(sc))
        output_res.append(row)
    # sort by 1st column
    sorted_output = sorted(output_res)
    sorted_output.insert(0, header)
    print(tabulate(sorted_output, tablefmt="plain"))
def build_out(t, ns, res, output, show_type, show_labels):
    """Print Builds as a plain-text table."""
    output_res = [[]]
    # header
    if ns == '_all':
        output_res[0].append('NAMESPACE')
    output_res[0].extend(
        ['NAME', 'TYPE', 'FROM', 'STATUS', 'STARTED', 'DURATION'])
    if show_labels:
        output_res[0].append('LABELS')
    # resources
    for r in res:
        build = r['res']
        row = []
        # namespace (for --all-namespaces)
        if ns == '_all':
            row.append(build['metadata']['namespace'])
        # name
        if show_type:
            row.append(t + '/' + build['metadata']['name'])
        else:
            row.append(build['metadata']['name'])
        # strategy type (narrowed from bare excepts throughout)
        try:
            row.append(build['spec']['strategy']['type'])
        except (KeyError, TypeError):
            row.append('??')
        # source type
        try:
            row.append(build['spec']['source']['type'])
        except (KeyError, TypeError):
            row.append('??')
        # status phase
        try:
            row.append(build['status']['phase'])
        except (KeyError, TypeError):
            row.append('??')
        # started: age since startTimestamp
        try:
            ct = str(build['status']['startTimestamp'])
            row.append(age(ct, r['gen_ts']))
        except (KeyError, TypeError, ValueError):
            row.append('Unknown')
        # duration is reported in nanoseconds; render as whole seconds
        try:
            row.append(
                str(int(build['status']['duration'] / 1000000000)) + 's')
        except (KeyError, TypeError, ValueError):
            row.append('Unknown')
        # show-labels
        if show_labels:
            row.append(extract_labels(build))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def build_out(t, ns, res, output, show_type, show_labels):
    """Print Builds as a plain-text table."""
    output_res = [[]]
    # header
    if ns == "_all":
        output_res[0].append("NAMESPACE")
    output_res[0].extend(
        ["NAME", "TYPE", "FROM", "STATUS", "STARTED", "DURATION"])
    if show_labels:
        output_res[0].append("LABELS")
    # resources
    for r in res:
        build = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(build["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + build["metadata"]["name"])
        else:
            row.append(build["metadata"]["name"])
        # strategy type (narrowed from bare excepts throughout)
        try:
            row.append(build["spec"]["strategy"]["type"])
        except (KeyError, TypeError):
            row.append("??")
        # source type
        try:
            row.append(build["spec"]["source"]["type"])
        except (KeyError, TypeError):
            row.append("??")
        # status phase
        try:
            row.append(build["status"]["phase"])
        except (KeyError, TypeError):
            row.append("??")
        # started: age since startTimestamp
        try:
            ct = str(build["status"]["startTimestamp"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # duration is reported in nanoseconds; render as whole seconds
        try:
            row.append(
                str(int(build["status"]["duration"] / 1000000000)) + "s")
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # show-labels
        if show_labels:
            row.append(extract_labels(build))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def machine_out(t, ns, res, output, show_type, show_labels):
    """Print Machines as a plain-text table, sorted by name."""
    output_res = []
    # header (prepended after sorting the data rows)
    header = []
    if ns == "_all":
        header.append("NAMESPACE")
    header.extend(["NAME", "PHASE", "TYPE", "REGION", "ZONE", "AGE"])
    if show_labels:
        header.append("LABELS")
    # resources
    for r in res:
        m = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(m["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + m["metadata"]["name"])
        else:
            row.append(m["metadata"]["name"])
        # phase (may be unset while the machine is provisioning)
        if "status" in m and "phase" in m["status"]:
            row.append(m["status"]["phase"])
        else:
            row.append("")
        # type, region, zone come from well-known machine labels
        labels = m["metadata"].get("labels", {})
        for key in (
            "machine.openshift.io/instance-type",
            "machine.openshift.io/region",
            "machine.openshift.io/zone",
        ):
            row.append(labels.get(key, ""))
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(m["metadata"]["creationTimestamp"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # show-labels
        if show_labels:
            row.append(extract_labels(m))
        output_res.append(row)
    # sort by NAME column whose index will be
    # 1 if we are showing namespaces otherwise 0
    ni = 1 if ns == "_all" else 0
    sorted_output = sorted(output_res, key=lambda x: x[ni])
    sorted_output.insert(0, header)
    print(tabulate(sorted_output, tablefmt="plain"))
def cj_out(t, ns, res, output, show_type, show_labels):
    """Print CronJobs as a plain-text table."""
    output_res = [[]]
    # header
    if ns == '_all':
        output_res[0].append('NAMESPACE')
    output_res[0].extend(['NAME', 'SCHEDULE', 'SUSPEND', 'ACTIVE',
                          'LAST SCHEDULE', 'AGE'])
    if show_labels:
        output_res[0].append('LABELS')
    # resources
    for r in res:
        cj = r['res']
        row = []
        # namespace (for --all-namespaces)
        if ns == '_all':
            row.append(cj['metadata']['namespace'])
        # name
        if show_type:
            row.append(t + '/' + cj['metadata']['name'])
        else:
            row.append(cj['metadata']['name'])
        # schedule (narrowed from bare excepts throughout)
        try:
            row.append(cj['spec']['schedule'])
        except (KeyError, TypeError):
            row.append('??')
        # suspend
        try:
            row.append(cj['spec']['suspend'])
        except (KeyError, TypeError):
            row.append('??')
        # active job count; a missing status.active means none running
        try:
            row.append(len(cj['status']['active']))
        except (KeyError, TypeError):
            row.append('0')
        # last schedule
        try:
            ct = str(cj['status']['lastScheduleTime'])
            row.append(age(ct, r['gen_ts']))
        except (KeyError, TypeError, ValueError):
            row.append('Unknown')
        # age
        try:
            ct = str(cj['metadata']['creationTimestamp'])
            row.append(age(ct, r['gen_ts']))
        except (KeyError, TypeError, ValueError):
            row.append('Unknown')
        # show-labels
        if show_labels:
            row.append(extract_labels(cj))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def dc_out(t, ns, res, output, show_type, show_labels):
    """Print DeploymentConfigs as a plain-text table."""
    output_res = [[]]
    # header
    if ns == '_all':
        output_res[0].append('NAMESPACE')
    output_res[0].extend(
        ['NAME', 'REVISION', 'DESIRED', 'CURRENT', 'TRIGGERED BY'])
    if show_labels:
        output_res[0].append('LABELS')
    # resources
    for r in res:
        dc = r['res']
        row = []
        # namespace (for --all-namespaces)
        if ns == '_all':
            row.append(dc['metadata']['namespace'])
        # name
        if show_type:
            row.append(t + '/' + dc['metadata']['name'])
        else:
            row.append(dc['metadata']['name'])
        # revision (narrowed from bare excepts throughout)
        try:
            row.append(dc['status']['latestVersion'])
        except (KeyError, TypeError):
            row.append('??')
        # desired replicas
        try:
            row.append(dc['spec']['replicas'])
        except (KeyError, TypeError):
            row.append('??')
        # currently ready replicas
        try:
            row.append(dc['status']['readyReplicas'])
        except (KeyError, TypeError):
            row.append('??')
        # first trigger; ConfigChange is abbreviated to 'config'
        try:
            triggered_type = dc['spec']['triggers'][0].get('type')
            if triggered_type == 'ConfigChange':
                row.append('config')
            else:
                row.append(triggered_type)
        except (KeyError, TypeError, IndexError):
            row.append('??')
        # show-labels
        if show_labels:
            row.append(extract_labels(dc))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def job_out(t, ns, res, output, show_type, show_labels):
    """Print Jobs as a plain-text table."""
    output_res = [[]]
    # header
    if ns == '_all':
        output_res[0].append('NAMESPACE')
    output_res[0].extend(['NAME', 'COMPLETIONS', 'DURATION', 'AGE'])
    if show_labels:
        output_res[0].append('LABELS')
    # resources
    for r in res:
        job = r['res']
        row = []
        # namespace (for --all-namespaces)
        if ns == '_all':
            row.append(job['metadata']['namespace'])
        # name
        if show_type:
            row.append(t + '/' + job['metadata']['name'])
        else:
            row.append(job['metadata']['name'])
        # completions as succeeded/desired; a missing succeeded count is 0
        # (narrowed from bare excepts throughout)
        try:
            try:
                comp_num = str(job['status']['succeeded'])
            except (KeyError, TypeError):
                comp_num = '0'
            comp_den = str(job['spec']['completions'])
            row.append(comp_num + '/' + comp_den)
        except (KeyError, TypeError):
            row.append('??')
        # duration: wall time between start and completion
        try:
            st = str(job['status']['startTime'])
            ct = str(job['status']['completionTime'])
            row.append(age(st, ct, ts2_type='iso'))
        except (KeyError, TypeError, ValueError):
            row.append('Unknown')
        # age since the job started
        try:
            ct = str(job['status']['startTime'])
            row.append(age(ct, r['gen_ts']))
        except (KeyError, TypeError, ValueError):
            row.append('Unknown')
        # show-labels
        if show_labels:
            row.append(extract_labels(job))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def dc_out(t, ns, res, output, show_type, show_labels):
    """Print DeploymentConfigs as a plain-text table."""
    output_res = [[]]
    # header
    if ns == "_all":
        output_res[0].append("NAMESPACE")
    output_res[0].extend(
        ["NAME", "REVISION", "DESIRED", "CURRENT", "TRIGGERED BY"])
    if show_labels:
        output_res[0].append("LABELS")
    # resources
    for r in res:
        dc = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(dc["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + dc["metadata"]["name"])
        else:
            row.append(dc["metadata"]["name"])
        # revision (narrowed from bare excepts throughout)
        try:
            row.append(dc["status"]["latestVersion"])
        except (KeyError, TypeError):
            row.append("??")
        # desired replicas
        try:
            row.append(dc["spec"]["replicas"])
        except (KeyError, TypeError):
            row.append("??")
        # currently ready replicas
        try:
            row.append(dc["status"]["readyReplicas"])
        except (KeyError, TypeError):
            row.append("??")
        # first trigger; ConfigChange is abbreviated to "config"
        try:
            triggered_type = dc["spec"]["triggers"][0].get("type")
            if triggered_type == "ConfigChange":
                row.append("config")
            else:
                row.append(triggered_type)
        except (KeyError, TypeError, IndexError):
            row.append("??")
        # show-labels
        if show_labels:
            row.append(extract_labels(dc))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def job_out(t, ns, res, output, show_type, show_labels):
    """Print Jobs as a plain-text table."""
    output_res = [[]]
    # header
    if ns == "_all":
        output_res[0].append("NAMESPACE")
    output_res[0].extend(["NAME", "COMPLETIONS", "DURATION", "AGE"])
    if show_labels:
        output_res[0].append("LABELS")
    # resources
    for r in res:
        job = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(job["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + job["metadata"]["name"])
        else:
            row.append(job["metadata"]["name"])
        # completions as succeeded/desired; a missing succeeded count is 0
        # (narrowed from bare excepts throughout)
        try:
            try:
                comp_num = str(job["status"]["succeeded"])
            except (KeyError, TypeError):
                comp_num = "0"
            comp_den = str(job["spec"]["completions"])
            row.append(comp_num + "/" + comp_den)
        except (KeyError, TypeError):
            row.append("??")
        # duration: wall time between start and completion
        try:
            st = str(job["status"]["startTime"])
            ct = str(job["status"]["completionTime"])
            row.append(age(st, ct, ts2_type="iso"))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # age since the job started
        try:
            ct = str(job["status"]["startTime"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # show-labels
        if show_labels:
            row.append(extract_labels(job))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def is_out(t, ns, res, output, show_type, show_labels):
    """Print ImageStreams as a plain-text table."""
    output_res = [[]]
    # header
    if ns == '_all':
        output_res[0].append('NAMESPACE')
    output_res[0].extend(['NAME', 'IMAGE REPOSITORY', 'TAGS', 'UPDATED'])
    if show_labels:
        output_res[0].append('LABELS')
    # resources
    for r in res:
        is_ = r['res']
        row = []
        # namespace (for --all-namespaces)
        if ns == '_all':
            row.append(is_['metadata']['namespace'])
        # name
        if show_type:
            row.append(t + '/' + is_['metadata']['name'])
        else:
            row.append(is_['metadata']['name'])
        # public image repository (optional; narrowed from bare excepts)
        try:
            row.append(is_['status']['publicDockerImageRepository'])
        except (KeyError, TypeError):
            row.append('')
        # comma-separated tag names
        try:
            row.append(','.join(tag['tag'] for tag in is_['status']['tags']))
        except (KeyError, TypeError):
            row.append('')
        # updated
        ## TODO: should update this to parse list of tags and determine latest updated image, rather than use metadata.creationTimestamp
        try:
            ## TODO: replace this check when reimplementing latest updated image
            if len(is_['status']['tags']) > 0:
                ct = str(is_['metadata']['creationTimestamp'])
                row.append(age(ct, r['gen_ts']))
            else:
                row.append('')
        except (KeyError, TypeError, ValueError):
            row.append('')
        # show-labels
        if show_labels:
            row.append(extract_labels(is_))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def ep_out(t, ns, res, output, show_type, show_labels):
    """Print Endpoints as a plain-text table."""
    output_res = [[]]
    # header
    if ns == '_all':
        output_res[0].append('NAMESPACE')
    output_res[0].extend(['NAME', 'ENDPOINTS', 'AGE'])
    if show_labels:
        output_res[0].append('LABELS')
    # resources
    for r in res:
        ep = r['res']
        row = []
        # namespace (for --all-namespaces)
        if ns == '_all':
            row.append(ep['metadata']['namespace'])
        # name
        if show_type:
            row.append(t + '/' + ep['metadata']['name'])
        else:
            row.append(ep['metadata']['name'])
        # flatten subsets into ip:port strings
        endpoints = []
        for s in ep.get('subsets', []):
            if 'addresses' in s and 'ports' in s:
                for a in s['addresses']:
                    for p in s['ports']:
                        endpoints.append(
                            str(a['ip']) + ':' + str(p['port']))
        # truncate long endpoint lists the way kubectl does
        if len(endpoints) == 0:
            row.append('<none>')
        elif len(endpoints) < 4:
            row.append(','.join(endpoints))
        else:
            row.append(','.join(endpoints[:3]) + ' + ' +
                       str(len(endpoints) - 3) + ' more...')
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(ep['metadata']['creationTimestamp'])
            row.append(age(ct, r['gen_ts']))
        except (KeyError, TypeError, ValueError):
            row.append('Unknown')
        # show-labels
        if show_labels:
            row.append(extract_labels(ep))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def cephclusters_out(t, ns, res, output, show_type, show_labels):
    """Print CephClusters as a plain-text table."""
    output_res = [[]]
    # header
    if ns == "_all":
        output_res[0].append("NAMESPACE")
    output_res[0].extend([
        "NAME", "DATADIRHOSTPATH", "MONCOUNT", "AGE", "PHASE", "MESSAGE",
        "HEALTH"
    ])
    if show_labels:
        output_res[0].append("LABELS")
    # resources
    for r in res:
        cs = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(cs["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + cs["metadata"]["name"])
        else:
            row.append(cs["metadata"]["name"])
        # dataDirHostPath
        row.append(cs["spec"]["dataDirHostPath"])
        # mon.count
        row.append(cs["spec"]["mon"]["count"])
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(cs["metadata"]["creationTimestamp"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # phase
        row.append(cs["status"]["phase"])
        # message
        row.append(cs["status"]["message"])
        # health
        row.append(cs["status"]["ceph"]["health"])
        if show_labels:
            row.append(extract_labels(cs))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def ep_out(t, ns, res, output, show_type, show_labels):
    """Print Endpoints as a plain-text table."""
    output_res = [[]]
    # header
    if ns == "_all":
        output_res[0].append("NAMESPACE")
    output_res[0].extend(["NAME", "ENDPOINTS", "AGE"])
    if show_labels:
        output_res[0].append("LABELS")
    # resources
    for r in res:
        ep = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(ep["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + ep["metadata"]["name"])
        else:
            row.append(ep["metadata"]["name"])
        # flatten subsets into ip:port strings
        endpoints = []
        for s in ep.get("subsets", []):
            if "addresses" in s and "ports" in s:
                for a in s["addresses"]:
                    for p in s["ports"]:
                        endpoints.append(
                            str(a["ip"]) + ":" + str(p["port"]))
        # truncate long endpoint lists the way kubectl does
        if len(endpoints) == 0:
            row.append("<none>")
        elif len(endpoints) < 4:
            row.append(",".join(endpoints))
        else:
            row.append(
                ",".join(endpoints[:3]) + " + " +
                str(len(endpoints) - 3) + " more..."
            )
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(ep["metadata"]["creationTimestamp"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # show-labels
        if show_labels:
            row.append(extract_labels(ep))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def mc_out(t, ns, res, output, show_type, show_labels):
    """Print MachineConfigs as a plain-text table, sorted by name."""
    output_res = []
    # header (prepended after sorting)
    header = ["NAME", "GENERATEDBYCONTROLLER", "IGNITIONVERSION", "AGE"]
    if show_labels:
        header.append("LABELS")
    # resources
    for r in res:
        mc = r["res"]
        row = []
        # name
        if show_type:
            row.append(t + "/" + mc["metadata"]["name"])
        else:
            row.append(mc["metadata"]["name"])
        # generated-by controller annotation (optional)
        try:
            gen_by = mc["metadata"]["annotations"][
                "machineconfiguration.openshift.io/generated-by-controller-version"]
        except (KeyError, TypeError):
            gen_by = ""
        row.append(gen_by)
        # ignition version
        row.append(mc["spec"]["config"]["ignition"]["version"])
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(mc["metadata"]["creationTimestamp"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # show-labels
        if show_labels:
            row.append(extract_labels(mc))
        output_res.append(row)
    # sort by 1st column (name)
    sorted_output = sorted(output_res, key=lambda x: x[0])
    sorted_output.insert(0, header)
    print(tabulate(sorted_output, tablefmt="plain"))
def mc_out(t, ns, res, output, show_type, show_labels):
    """Print MachineConfigs as a plain-text table, sorted by name."""
    output_res = []
    # header (prepended after sorting)
    header = ['NAME', 'GENERATEDBYCONTROLLER', 'IGNITIONVERSION', 'AGE']
    if show_labels:
        header.append('LABELS')
    # resources
    for r in res:
        mc = r['res']
        row = []
        # name
        if show_type:
            row.append(t + '/' + mc['metadata']['name'])
        else:
            row.append(mc['metadata']['name'])
        # generated-by controller annotation (optional)
        try:
            gen_by = mc['metadata']['annotations'][
                'machineconfiguration.openshift.io/generated-by-controller-version']
        except (KeyError, TypeError):
            gen_by = ''
        row.append(gen_by)
        # ignition version
        row.append(mc['spec']['config']['ignition']['version'])
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(mc['metadata']['creationTimestamp'])
            row.append(age(ct, r['gen_ts']))
        except (KeyError, TypeError, ValueError):
            row.append('Unknown')
        # show-labels
        if show_labels:
            row.append(extract_labels(mc))
        output_res.append(row)
    # sort by 1st column (name)
    sorted_output = sorted(output_res, key=lambda x: x[0])
    sorted_output.insert(0, header)
    print(tabulate(sorted_output, tablefmt="plain"))
def bc_out(t, ns, res, output, show_type, show_labels):
    """Print BuildConfigs as a plain-text table."""
    output_res = [[]]
    # header
    if ns == '_all':
        output_res[0].append('NAMESPACE')
    output_res[0].extend(['NAME', 'TYPE', 'FROM', 'LATEST'])
    if show_labels:
        output_res[0].append('LABELS')
    # resources
    for r in res:
        bc = r['res']
        row = []
        # namespace (for --all-namespaces)
        if ns == '_all':
            row.append(bc['metadata']['namespace'])
        # name
        if show_type:
            row.append(t + '/' + bc['metadata']['name'])
        else:
            row.append(bc['metadata']['name'])
        # strategy type (narrowed from bare excepts throughout)
        try:
            row.append(bc['spec']['strategy']['type'])
        except (KeyError, TypeError):
            row.append('??')
        # source type
        try:
            row.append(bc['spec']['source']['type'])
        except (KeyError, TypeError):
            row.append('??')
        # latest build number
        try:
            row.append(bc['status']['lastVersion'])
        except (KeyError, TypeError):
            row.append('??')
        # show-labels
        if show_labels:
            row.append(extract_labels(bc))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def secret_out(t, ns, res, output, show_type, show_labels):
    """Print Secrets as a plain-text table."""
    output_res = [[]]
    # header
    if ns == "_all":
        output_res[0].append("NAMESPACE")
    output_res[0].extend(["NAME", "TYPE", "DATA", "AGE"])
    if show_labels:
        output_res[0].append("LABELS")
    # resources
    for r in res:
        sec = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(sec["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + sec["metadata"]["name"])
        else:
            row.append(sec["metadata"]["name"])
        # type
        row.append(sec["type"])
        # number of data keys (a secret may carry no data at all)
        row.append(len(sec["data"]) if "data" in sec else 0)
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(sec["metadata"]["creationTimestamp"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        # show-labels
        if show_labels:
            row.append(extract_labels(sec))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def cephobjectstoreusers_out(t, ns, res, output, show_type, show_labels):
    """Print CephObjectStoreUsers as a plain-text table, sorted by name.

    Bug fix: the original appended the column titles to an embedded
    first row of ``output_res`` (which was then sorted in among the
    data rows) while inserting a separate, empty ``header`` list at
    the top — so the table printed a blank header line and the header
    text mixed into the data. The header is now built once and
    prepended after sorting.
    """
    # header (prepended after sorting the data rows)
    header = []
    if ns == "_all":
        header.append("NAMESPACE")
    header.extend(["NAME", "AGE"])
    if show_labels:
        header.append("LABELS")
    output_res = []
    # resources
    for r in res:
        co = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(co["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + co["metadata"]["name"])
        else:
            row.append(co["metadata"]["name"])
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(co["metadata"]["creationTimestamp"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        if show_labels:
            row.append(extract_labels(co))
        output_res.append(row)
    # sort by NAME column whose index will be
    # 1 if we are showing namespaces otherwise 0
    ni = 1 if ns == "_all" else 0
    sorted_output = sorted(output_res, key=lambda x: x[ni])
    sorted_output.insert(0, header)
    print(tabulate(sorted_output, tablefmt="plain"))
def bc_out(t, ns, res, output, show_type, show_labels):
    """Print BuildConfigs as a plain-text table."""
    output_res = [[]]
    # header
    if ns == "_all":
        output_res[0].append("NAMESPACE")
    output_res[0].extend(["NAME", "TYPE", "FROM", "LATEST"])
    if show_labels:
        output_res[0].append("LABELS")
    # resources
    for r in res:
        bc = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(bc["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + bc["metadata"]["name"])
        else:
            row.append(bc["metadata"]["name"])
        # strategy type (narrowed from bare excepts throughout)
        try:
            row.append(bc["spec"]["strategy"]["type"])
        except (KeyError, TypeError):
            row.append("??")
        # source type
        try:
            row.append(bc["spec"]["source"]["type"])
        except (KeyError, TypeError):
            row.append("??")
        # latest build number
        try:
            row.append(bc["status"]["lastVersion"])
        except (KeyError, TypeError):
            row.append("??")
        # show-labels
        if show_labels:
            row.append(extract_labels(bc))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def cm_out(t, ns, res, output, show_type, show_labels):
    """Print ConfigMaps as a plain-text table."""
    output_res = [[]]
    # header
    if ns == '_all':
        output_res[0].append('NAMESPACE')
    output_res[0].extend(['NAME', 'DATA', 'AGE'])
    if show_labels:
        output_res[0].append('LABELS')
    # resources
    for r in res:
        cm = r['res']
        row = []
        # namespace (for --all-namespaces)
        if ns == '_all':
            row.append(cm['metadata']['namespace'])
        # name
        if show_type:
            row.append(t + '/' + cm['metadata']['name'])
        else:
            row.append(cm['metadata']['name'])
        # number of data keys (a configmap may carry no data at all)
        if 'data' in cm:
            row.append(len(cm['data']))
        else:
            row.append('0')
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(cm['metadata']['creationTimestamp'])
            row.append(age(ct, r['gen_ts']))
        except (KeyError, TypeError, ValueError):
            row.append('Unknown')
        # show-labels
        if show_labels:
            row.append(extract_labels(cm))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))
def cephfilesystems_out(t, ns, res, output, show_type, show_labels):
    """Print CephFilesystems as a plain-text table."""
    output_res = [[]]
    # header
    if ns == "_all":
        output_res[0].append("NAMESPACE")
    output_res[0].extend(["NAME", "ACTIVEMDS", "AGE"])
    if show_labels:
        output_res[0].append("LABELS")
    # resources
    for r in res:
        cf = r["res"]
        row = []
        # namespace (for --all-namespaces)
        if ns == "_all":
            row.append(cf["metadata"]["namespace"])
        # name
        if show_type:
            row.append(t + "/" + cf["metadata"]["name"])
        else:
            row.append(cf["metadata"]["name"])
        # active metadata-server count
        row.append(cf["spec"]["metadataServer"]["activeCount"])
        # age (narrowed from a bare except so real errors still surface)
        try:
            ct = str(cf["metadata"]["creationTimestamp"])
            row.append(age(ct, r["gen_ts"]))
        except (KeyError, TypeError, ValueError):
            row.append("Unknown")
        if show_labels:
            row.append(extract_labels(cf))
        output_res.append(row)
    print(tabulate(output_res, tablefmt="plain"))