def _write_ifconfig_record(fmt, iface):
    fields = []
    for f in fmt.fields:
        if f in iface:
            fields.append(base.encode_field(iface[f]))
        else:
            fields.append(base.encode_field(""))
    base.writeline(fmt.render(fields))
def _linux(args, uxy_args): parser = argparse.ArgumentParser("__main__.py du", add_help=False) parser.add_argument("-0", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--null", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-c", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--total", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-h", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--human-readable", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--si", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-s", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--summarize", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--time", nargs="?", default=argparse.SUPPRESS) parser.add_argument("--time-style", nargs=1, default=argparse.SUPPRESS) parser.add_argument("--help", action="store_true", default=argparse.SUPPRESS) base.check_args(args, parser) if uxy_args.long: fmtargs = ['--time', '--time-style=full-iso'] regexp = re.compile( r'\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+(.*)') fmt = base.Format("USAGE TIME FILE") else: fmtargs = [] regexp = re.compile(r'\s*([^\s]*)\s+(.*)') fmt = base.Format("USAGE FILE") proc = base.launch(uxy_args, ['du'] + fmtargs + args[1:]) base.writeline(fmt.render()) for ln in proc: m = regexp.match(ln) if not m: continue fields = [] if uxy_args.long: time = "%sT%s%s:%s" % (m.group(2), m.group(3), m.group(4)[:-2], m.group(4)[-2:]) fields.append(base.encode_field(m.group(1))) fields.append(base.encode_field(time)) fields.append(base.encode_field(m.group(5))) else: for i in range(1, regexp.groups + 1): fields.append(base.encode_field(m.group(i))) base.writeline(fmt.render(fields)) return proc.wait()
def _bsd(args, uxy_args): parser = argparse.ArgumentParser("__main__.py top") parser.parse_args(args[1:]) proc = base.launch(uxy_args, ['top', '-l 1'] + args[1:]) # Skip the summary. for i in range(0, 12): proc.readline() regexp = re.compile( r'\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+.*' ) fmt = base.Format( "PID CMD CPU TIME TH WQ PORTS MEM PURG CMPRS PGRP PPID STATE BOOSTS CPU_ME CPU_OTHRS UID FAULTS COW MSGSENT MSGRECV SYSBSD SYSMACH CSW PAGEINS IDLEW POWER INSTRS CYCLES USER" ) base.writeline(fmt.render()) for ln in proc: m = regexp.match(ln) if not m: continue fields = [] for i in range(1, regexp.groups + 1): fields.append(base.encode_field(m.group(i))) base.writeline(fmt.render(fields)) return proc.wait()
def _linux(args, uxy_args): parser = argparse.ArgumentParser("__main__.py top") parser.parse_args(args[1:]) proc = base.launch(uxy_args, ['top', '-bn1'] + args[1:]) # Skip the summary. for i in range(0, 7): proc.readline() if uxy_args.long: regexp = re.compile( r'\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+(.*)' ) fmt = base.Format( "PID USER PR NI VIRT RES SHR S CPU MEM TIME CMD" ) else: regexp = re.compile( r'\s*([^\s]*)\s+([^\s]*)\s+[^\s]*\s+[^\s]*\s+[^\s]*\s+[^\s]*\s+[^\s]*\s+[^\s]*\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+(.*)' ) fmt = base.Format("PID USER CPU MEM TIME CMD") base.writeline(fmt.render()) for ln in proc: m = regexp.match(ln) if not m: continue fields = [] for i in range(1, regexp.groups + 1): fields.append(base.encode_field(m.group(i))) base.writeline(fmt.render(fields)) return proc.wait()
def lsof(args, uxy_args):
    proc = base.launch(uxy_args, ['lsof', '+c', '0'] + args[1:])
    hdr = proc.readline()
    parts = re.split(r"(\s+)", hdr)
    pos = [len(p) for p in list(itertools.accumulate(parts))]
    r1 = re.compile(r'([^\s]*)\s+([^\s]*)')
    fmt = base.Format("COMMAND PID TID USER FD TYPE DEVICE SIZEOFF NODE NAME")
    base.writeline(fmt.render())
    for ln in proc:
        fields = []
        m = r1.match(ln[:pos[2]])
        if not m:
            continue
        fields.append(m.group(1))
        fields.append(m.group(2))
        fields.append(ln[pos[2]:pos[4]].strip())
        fields.append(ln[pos[4]:pos[6]].strip())
        fields.append(ln[pos[6]:pos[8] + 1].strip())
        fields.append(ln[pos[8] + 1:pos[10]].strip())
        fields.append(ln[pos[10]:pos[12]].strip())
        fields.append(ln[pos[12]:pos[14]].strip())
        fields.append(ln[pos[14]:pos[16]].strip())
        fields.append(ln[pos[16]:].strip())
        fields = [base.encode_field(f) for f in fields]
        base.writeline(fmt.render(fields))
    return proc.wait()
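# A minimal, standalone sketch (not taken from the wrapper above) of how the header
# line is turned into column offsets: re.split with a capturing group keeps the
# whitespace runs, and itertools.accumulate over the growing prefixes yields the
# character position at which each chunk ends. The header below is illustrative.
import itertools
import re

_hdr = "COMMAND     PID   USER"
_parts = re.split(r"(\s+)", _hdr)                       # ['COMMAND', '     ', 'PID', '   ', 'USER']
_pos = [len(p) for p in itertools.accumulate(_parts)]   # [7, 12, 15, 18, 22]
print(_parts)
print(_pos)
# Even-indexed entries mark where a column name ends, odd-indexed ones where the
# next column starts, so slices such as _hdr[_pos[0]:_pos[2]] cut out one
# whitespace-padded column.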
def _bsd(args, uxy_args):
    fmtargs = ['-l']
    # -rw-r--r--  1 501  20  1025 May 31 07:11:49 2019 LICENSE
    regexp = re.compile(
        r'\s*(.)([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*\s+[^\s]*\s+[^\s]*\s+[^\s]*)\s+(.*)')
    fmt = base.Format("TYPE PERMISSIONS LINKS OWNER GROUP SIZE TIME NAME")
    proc = base.launch(uxy_args, ['ls'] + fmtargs + args[1:])
    base.writeline(fmt.render())
    path = ""
    for ln in proc:
        if ln.startswith('total'):
            continue
        if ln == "":
            # When running with -R this is the name of the directory.
            ln = proc.readline()
            if ln.endswith(":"):
                path = ln[:-1] + "/"
            continue
        m = regexp.match(ln)
        if not m:
            continue
        fields = []
        for i in range(1, regexp.groups + 1):
            fields.append(base.encode_field(m.group(i)))
        base.writeline(fmt.render(fields))
    return proc.wait()
def _linux(args, uxy_args): parser = argparse.ArgumentParser("__main__.py ps", add_help=False) parser.add_argument("-c", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--context", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-f", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-F", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-j", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-l", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-M", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-o", nargs=1, default=argparse.SUPPRESS) parser.add_argument("-O", nargs=1, default=argparse.SUPPRESS) parser.add_argument("-y", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--cols", nargs=1, default=argparse.SUPPRESS) parser.add_argument("--columns", nargs=1, default=argparse.SUPPRESS) parser.add_argument("--forest", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-H", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--headers", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--lines", nargs=1, default=argparse.SUPPRESS) parser.add_argument("--no-headers", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--rows", nargs=1, default=argparse.SUPPRESS) parser.add_argument("--width", nargs=1, default=argparse.SUPPRESS) parser.add_argument("--help", nargs=1, default=argparse.SUPPRESS) base.check_args(args, parser) # TODO: This is better parsed as fixed-width fields. if uxy_args.long: fmtargs = ['-FMlww', '--no-headers'] regexp = re.compile( r'\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+(.*)' ) fmt = base.Format( "CONTEXT F S UID PID PPID C PRI NI ADDR SZ WCHAN RSS PSR STIME TTY TIME CMD" ) else: fmtargs = ['-ww', '--no-headers'] regexp = re.compile(r'\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+(.*)') fmt = base.Format("PID TTY TIME CMD") proc = base.launch(uxy_args, ['ps'] + fmtargs + args[1:]) base.writeline(fmt.render()) for ln in proc: m = regexp.match(ln) if not m: continue fields = [] for i in range(1, regexp.groups + 1): fields.append(base.encode_field(m.group(i))) base.writeline(fmt.render(fields)) return proc.wait()
def from_csv(args, uxy_args):
    parser = argparse.ArgumentParser()
    subp = parser.add_subparsers().add_parser('from-csv', help="convert CSV to UXY")
    args = parser.parse_args(args)
    # Read the headers.
    ln = base.stdin.readline()
    r = csv.reader(io.StringIO(ln))
    for fields in r:
        fields = " ".join([base.encode_field(f) for f in fields])
    fmt = base.Format(fields)
    base.writeline(fields)
    # Process the data.
    for ln in base.stdin:
        r = csv.reader(io.StringIO(ln))
        for fields in r:
            fields = [base.encode_field(f) for f in fields]
            base.writeline(fmt.render(fields))
    return 0
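# A minimal, standalone sketch of the per-line CSV parsing used above: each input
# line is wrapped in io.StringIO and handed to csv.reader on its own, so quoted
# fields containing commas are still split correctly. The line is illustrative.
import csv
import io

_line = 'LICENSE,"May 31, 2019",1025\n'
for _fields in csv.reader(io.StringIO(_line)):
    print(_fields)  # ['LICENSE', 'May 31, 2019', '1025']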
def from_yaml(args, uxy_args):
    parser = argparse.ArgumentParser()
    subp = parser.add_subparsers().add_parser('from-yaml', help="convert YAML to UXY")
    args = parser.parse_args(args)
    # Read the entire input.
    s = ""
    for ln in base.stdin:
        s += ln + "\n"
    root = yaml.safe_load(s)
    # Normalize the dict. Collect the field names along the way.
    fields = {}
    if not isinstance(root, list):
        root = [root]
    for i in range(0, len(root)):
        if not isinstance(root[i], dict):
            root[i] = {"COL1": root[i]}
        for k, _ in root[i].items():
            fields[k] = None
    # Fields will go to the output in alphabetical order.
    fields = sorted(fields)
    # Collect the data. At the same time adjust the format so that the data fit in.
    fmt = base.Format(" ".join([base.encode_field(f) for f in fields]))
    records = []
    for i in range(0, len(root)):
        record = []
        for f in fields:
            if f in root[i]:
                record.append(base.encode_field(str(root[i][f])))
            else:
                record.append('""')
        fmt.adjust(record)
        records.append(record)
    # Write the result to the output.
    base.writeline(fmt.render())
    for r in records:
        base.writeline(fmt.render(r))
    return 0
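# A minimal, standalone sketch (illustrative input only) of the normalization step
# above: the YAML document is coerced into a list of dicts, scalars are wrapped as
# {"COL1": value}, and the union of keys, sorted alphabetically, becomes the header.
import yaml

_doc = """
- {name: alice, uid: 1000}
- {name: bob, shell: /bin/sh}
- 42
"""
_root = yaml.safe_load(_doc)
if not isinstance(_root, list):
    _root = [_root]
_root = [r if isinstance(r, dict) else {"COL1": r} for r in _root]
_fields = sorted({k for r in _root for k in r})
print(_fields)                                              # ['COL1', 'name', 'shell', 'uid']
print([[str(r.get(f, "")) for f in _fields] for r in _root])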
def _bsd(args, uxy_args):
    fmtargs = []
    regexp = re.compile(r'\s*([^\s]*)\s+(.*)')
    fmt = base.Format("USAGE FILE")
    proc = base.launch(uxy_args, ['du'] + fmtargs + args[1:])
    base.writeline(fmt.render())
    for ln in proc:
        m = regexp.match(ln)
        if not m:
            continue
        fields = []
        for i in range(1, regexp.groups + 1):
            fields.append(base.encode_field(m.group(i)))
        base.writeline(fmt.render(fields))
    return proc.wait()
def _linux(args, uxy_args):
    proc = base.launch(uxy_args, ['netstat', '--inet'] + args[1:])
    # Skip the header line.
    proc.readline()
    hdr = proc.readline()
    parts = re.split(r"(\s+)", hdr)
    pos = [len(p) for p in list(itertools.accumulate(parts))]
    fmt = base.Format("PROTO RECVQ SENDQ LOCAL REMOTE STATE")
    base.writeline(fmt.render())
    for ln in proc:
        fields = []
        fields.append(ln[0:pos[0]].strip())
        fields.append(ln[pos[0]:pos[2]].strip())
        fields.append(ln[pos[2]:pos[4]].strip())
        fields.append(ln[pos[4]:pos[8]].strip())
        fields.append(ln[pos[8]:pos[13]].strip())
        fields.append(ln[pos[13]:].strip())
        fields = [base.encode_field(f) for f in fields]
        base.writeline(fmt.render(fields))
    return proc.wait()
def from_re(args, uxy_args):
    parser = argparse.ArgumentParser()
    subp = parser.add_subparsers().add_parser(
        'from-re', help="convert arbitrary input to UXY")
    subp.add_argument('header', help="UXY header")
    subp.add_argument('regexp', help="regexp to parse the input lines")
    args = parser.parse_args(args)
    # Use the supplied format.
    fmt = base.Format(args.header)
    base.writeline(fmt.render())
    # Parse the data.
    regexp = re.compile(args.regexp)
    for ln in base.stdin:
        m = regexp.match(ln)
        # Non-matching lines are ignored.
        if not m:
            continue
        fields = []
        for i in range(1, regexp.groups + 1):
            fields.append(base.encode_field(m.group(i)))
        base.writeline(fmt.render(fields))
    return 0
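# A minimal, standalone sketch of the header/regexp pairing the conversion above
# relies on: each capture group in the regexp supplies the value for the header
# field at the same position. The header, pattern and input line are illustrative.
import re

_header = "NAME SIZE"
_pattern = re.compile(r'(\S+)\s+(\S+)')
_line = "LICENSE 1025"
_m = _pattern.match(_line)
if _m:
    # Group i fills the i-th field of the header.
    print(dict(zip(_header.split(), _m.groups())))  # {'NAME': 'LICENSE', 'SIZE': '1025'}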
def _linux(args, uxy_args): parser = argparse.ArgumentParser("__main__.py ls", add_help=False) parser.add_argument("--author", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-b", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--escape", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-C", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--color", nargs="?", default=argparse.SUPPRESS) parser.add_argument("-D", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-f", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--format", nargs="?", default=argparse.SUPPRESS) parser.add_argument("--full-time", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-g", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-h", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--human-readable", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--si", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-G", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--no-group", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-i", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--inode", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-k", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--kibibytes", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-l", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-m", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-N", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--literal", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-o", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-q", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--hide-control-chars", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-Q", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--quote-name", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--quoting-style", nargs=1, default=argparse.SUPPRESS) parser.add_argument("-s", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--time", nargs=1, default=argparse.SUPPRESS) parser.add_argument("--time-style", nargs=1, default=argparse.SUPPRESS) parser.add_argument("-T", nargs=1, default=argparse.SUPPRESS) parser.add_argument("--tabsize", nargs=1, default=argparse.SUPPRESS) parser.add_argument("-w", nargs=1, default=argparse.SUPPRESS) parser.add_argument("--width", nargs=1, default=argparse.SUPPRESS) parser.add_argument("-x", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-Z", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--context", action="store_true", default=argparse.SUPPRESS) parser.add_argument("-1", action="store_true", default=argparse.SUPPRESS) parser.add_argument("--help", action="store_true", default=argparse.SUPPRESS) base.check_args(args, parser) if uxy_args.long: fmtargs = ['-lnNisZ', '--time-style=full-iso'] regexp = re.compile( r'\s*([^\s]*)\s+([^\s]*)\s+(.)([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+(.*)' ) fmt = base.Format( "INODE BLOCKS TYPE PERMISSIONS LINKS OWNER GROUP CONTEXT SIZE TIME NAME" ) owner_col = 6 group_col = 7 
    else:
        fmtargs = ['-lnN', '--time-style=full-iso']
        regexp = re.compile(
            r'\s*(.)([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+(.*)')
        fmt = base.Format("TYPE PERMISSIONS LINKS OWNER GROUP SIZE TIME NAME")
        owner_col = 4
        group_col = 5
    resolve_ids = True
    if "-n" in args[1:] or "--numeric-uid-gid" in args[1:]:
        resolve_ids = False
    proc = base.launch(uxy_args, ['ls'] + fmtargs + args[1:])
    base.writeline(fmt.render())
    path = ""
    for ln in proc:
        if ln.startswith('total'):
            continue
        if ln == "":
            # When running with -R this is the name of the directory.
            ln = proc.readline()
            if ln.endswith(":"):
                path = ln[:-1] + "/"
            continue
        m = regexp.match(ln)
        if not m:
            continue
        fields = []
        for i in range(1, regexp.groups - 3):
            field = m.group(i)
            # In general, __main__.py is not supposed to supplant the functionality
            # provided by the wrapped tool. However, there's little option here:
            # user names can contain spaces (e.g. when provided by LDAP), but the
            # ls tool doesn't escape spaces in the names even when run with the
            # -b parameter.
            if resolve_ids:
                try:
                    if i == owner_col:
                        field = pwd.getpwuid(int(field)).pw_name
                    elif i == group_col:
                        field = grp.getgrgid(int(field)).gr_name
                except (KeyError, ValueError):
                    pass
            fields.append(base.encode_field(field))
        # Convert to actual ISO 8601 format.
        time = "%sT%s%s:%s" % (
            m.group(regexp.groups - 3),
            m.group(regexp.groups - 2),
            m.group(regexp.groups - 1)[:-2],
            m.group(regexp.groups - 1)[-2:])
        fields.append(base.encode_field(time))
        fields.append(base.encode_field(path + m.group(regexp.groups)))
        base.writeline(fmt.render(fields))
    return proc.wait()
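# A minimal, standalone sketch (not part of the function above) of the uid/gid
# resolution it performs: numeric ids from `ls -n` output are mapped to names via
# the pwd and grp modules, falling back to the raw value when no entry exists.
import grp
import pwd

def _example_resolve(uid_field, gid_field):
    # Look up the owner and group names; keep the numeric fields on failure.
    try:
        uid_field = pwd.getpwuid(int(uid_field)).pw_name
        gid_field = grp.getgrgid(int(gid_field)).gr_name
    except (KeyError, ValueError):
        pass
    return uid_field, gid_field

print(_example_resolve("0", "0"))  # typically ('root', 'root') on Linux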