def split_output_lines(out):
    """
    Split the output into fields.

    Takes into account that some lines may end in a colon and some may not. If the header has no trailing
    colon, the field count might be off and we might not be able to parse the output as expected.
    """
    lines = out.strip().split('\n')
    header_ends_in_colon = lines[0][-1] == ":"

    def clean(line):
        # strip a trailing colon from lines that have one when the header does not, keeping field counts consistent
        if not header_ends_in_colon and line[-1] == ":":
            return line[:-1]
        else:
            return line

    return [[percentdecode(y) for y in clean(x).strip().split(':')] for x in lines]
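
A minimal usage sketch (assuming percentdecode is the percent-decoding helper defined elsewhere in this
module, behaving like urllib.parse.unquote), showing what split_output_lines yields for a trimmed mmlsfs -Y fragment:

    sample = (
        "mmlsfs::HEADER:version:reserved:reserved:deviceName:fieldName:data:remarks:\n"
        "mmlsfs::0:1:::scratch:minFragmentSize:8192:"
    )
    parsed = split_output_lines(sample)
    # parsed[0] == ['mmlsfs', '', 'HEADER', 'version', 'reserved', 'reserved',
    #               'deviceName', 'fieldName', 'data', 'remarks', '']
    # parsed[1] == ['mmlsfs', '', '0', '1', '', '', 'scratch', 'minFragmentSize', '8192', '']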
Example #2
    def _executeY(self, name, opts=None, prefix=False):
        """Run with -Y and parse output in dict of name:list of values
           type prefix: boolean, if true prefix the -Y to the options (otherwise append the option).
        """
        if opts is None:
            opts = []
        elif isinstance(opts, (tuple, list,)):
            opts = list(opts)
        else:
            self.log.error("_executeY: have to use a list or tuple for options: name %s opts %s" % (name, opts))
            return

        if prefix:
            opts.insert(0, '-Y')
        else:
            opts.append('-Y')

        ec, out = self._execute(name, opts)

        """Output looks like
        [root@node612 ~]# mmlsfs all -Y
        mmlsfs::HEADER:version:reserved:reserved:deviceName:fieldName:data:remarks:
        mmlsfs::0:1:::scratch:minFragmentSize:8192:
        mmlsfs::0:1:::scratch:inodeSize:512:

        # the output is percent encoded: first split on ':', then decode each field
        b = [[percentdecode(y) for y in x.split(':')] for x in a]
        """
        what = [[percentdecode(y) for y in x.strip().split(':')] for x in out.strip().split('\n')]
        expectedheader = [name, '', 'HEADER', 'version', 'reserved', 'reserved']

        # verify result and remove all items that do not match the expected output data
        # e.g. mmrepquota starts with a single line of unnecessary output (which may be repeated for USR, GRP and FILESET)
        retained = dropwhile(lambda line: expectedheader != line[:6], what)

        # sanity check: all output lines should have the same number of fields. if this is not the case, padding is
        # added
        fields = [(len(x), x) for x in retained]
        if len(fields) == 0:
            self.log.raiseException("No valid lines for output: %s" % (out), GpfsOperationError)

        # do we have multiple field counts?
        field_counts = [i for (i, _) in fields]
        if len(nub(field_counts)) > 1:
            maximum_field_count = max(field_counts)
            description_field_count = field_counts[0]
            for (field_count, line) in fields[1:]:
                if field_count == description_field_count:
                    continue
                elif field_count < description_field_count:
                    self.log.debug("Description length %s greater then %s. Adding whitespace. (names %s, row %s)" %
                                   (maximum_field_count, field_count, fields[0][6:], line[6:]))
                    line.extend([''] * (maximum_field_count - field_count))
                else:
                    # try to fix the line
                    self.log.info("Line has too many fields (%d > %d), trying to fix %s" %
                                  (field_count, description_field_count, line))
                    fixed_lines = self.fixup_executeY_line(line, description_field_count)
                    i = fields.index((field_count, line))
                    fields[i:i + 1] = map(lambda fs: (len(fs), fs), fixed_lines)

        # assemble result
        listm = Monoid([], lambda xs, ys: xs + ys)  # not exactly the fastest mappend for lists ...
        res = MonoidDict(listm)
        try:
            # fields[0][1][6:] holds the column names from the header line; group each column's values per name
            for index, colname in enumerate(fields[0][1][6:]):
                if colname != '':
                    for (_, line) in fields[1:]:
                        res[colname] = [line[6 + index]]
        except Exception:
            self.log.raiseException("Failed to regroup data %s (from output %s)" % (fields, out))

        return res
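
For context, a self-contained sketch of the same parsing approach, independent of this class: it does not
use the module's helpers (percentdecode, Monoid/MonoidDict, nub and fixup_executeY_line are assumed to live
elsewhere), urllib.parse.unquote stands in for the percent decoding, and a plain dict of lists replaces the
MonoidDict. The sample is the mmlsfs -Y fragment shown in the docstring above.

    from itertools import dropwhile
    from urllib.parse import unquote  # stand-in for this module's percentdecode helper

    def parse_y_output(out, name):
        """Parse ':'-separated, percent-encoded -Y output into a dict of fieldName: list of values."""
        rows = [[unquote(field) for field in line.strip().split(':')] for line in out.strip().split('\n')]
        # drop any leading noise until the HEADER line for this command
        header_prefix = [name, '', 'HEADER', 'version', 'reserved', 'reserved']
        rows = list(dropwhile(lambda row: row[:6] != header_prefix, rows))
        names = rows[0][6:]
        result = {}
        for row in rows[1:]:
            for index, colname in enumerate(names):
                if colname:
                    # pad short rows with empty strings, mirroring the padding done above
                    value = row[6 + index] if 6 + index < len(row) else ''
                    result.setdefault(colname, []).append(value)
        return result

    sample = (
        "mmlsfs::HEADER:version:reserved:reserved:deviceName:fieldName:data:remarks:\n"
        "mmlsfs::0:1:::scratch:minFragmentSize:8192:\n"
        "mmlsfs::0:1:::scratch:inodeSize:512:"
    )
    print(parse_y_output(sample, "mmlsfs"))
    # {'deviceName': ['scratch', 'scratch'], 'fieldName': ['minFragmentSize', 'inodeSize'],
    #  'data': ['8192', '512'], 'remarks': ['', '']}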