Example #1
    def unserialise_compat(cls, version, obj, absolute=False):
        if version == 1:
            obj['raw_values'] = {}
            if 'SERIES_META' in obj['metadata']:
                obj['raw_values'] = dict([(k, v['RAW_VALUES']) for k, v in
                                          obj['metadata']['SERIES_META'].items()
                                          if 'RAW_VALUES' in v])
            if not obj['raw_values']:
                # No raw values were stored in the old data set. Fake them by
                # using the interpolated values as 'raw'. This ensures there's
                # always some data available as raw values, to facilitate
                # relying on their presence in future code.

                t0 = parse_date(obj['metadata'].get('T0', obj['metadata'].get('TIME')))
                x0 = timegm(t0.timetuple()) + t0.microsecond / 1000000.0
                for name in obj['results'].keys():
                    obj['raw_values'][name] = [{'t': x0 + x, 'val': r} for x, r in
                                               zip(obj['x_values'], obj['results'][name])]
                obj['metadata']['FAKE_RAW_VALUES'] = True

            if 'NETPERF_WRAPPER_VERSION' in obj['metadata']:
                obj['metadata']['FLENT_VERSION'] = obj['metadata']['NETPERF_WRAPPER_VERSION']
                del obj['metadata']['NETPERF_WRAPPER_VERSION']

        return obj
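The version-1 fallback above synthesises raw values from the interpolated data. As a hedged, self-contained sketch of the same construction (the simplified parse_date stub and the sample data are assumptions for illustration, not part of Flent):

    from calendar import timegm
    from datetime import datetime

    def parse_date(s):
        # Stand-in for flent's own date parser (assumption for this sketch)
        return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%f")

    obj = {
        'metadata': {'TIME': '2016-01-01T12:00:00.000000'},
        'x_values': [0.0, 0.2, 0.4],
        'results': {'Ping ICMP': [10.0, 11.0, 9.5]},
    }

    # Same construction as the fallback branch above: anchor the relative
    # x_values at the absolute start time and pair them with the results.
    t0 = parse_date(obj['metadata']['TIME'])
    x0 = timegm(t0.timetuple()) + t0.microsecond / 1000000.0
    raw_values = {name: [{'t': x0 + x, 'val': r}
                         for x, r in zip(obj['x_values'], obj['results'][name])]
                  for name in obj['results']}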
Example #2
    def unserialise(cls, obj, absolute=False, SUFFIX=SUFFIX):
        try:
            version = int(obj['version'])
        except (KeyError, ValueError):
            version = 1

        if version > FILEFORMAT_VERSION:
            raise RuntimeError("File format version %d is too new. Please upgrade your version of Flent" % version)
        if version < FILEFORMAT_VERSION:
            obj = cls.unserialise_compat(version, obj, absolute)
        metadata = dict(obj['metadata'])

        if 'TOTAL_LENGTH' not in metadata or metadata['TOTAL_LENGTH'] is None:
            metadata['TOTAL_LENGTH'] = max(obj['x_values'])

        for t in TIME_SETTINGS:
            if t in metadata and metadata[t] is not None:
                metadata[t] = parse_date(metadata[t])
        rset = cls(SUFFIX=SUFFIX, **metadata)
        if absolute:
            t0 = metadata.get('T0', metadata.get('TIME'))
            x0 = timegm(t0.timetuple()) + t0.microsecond / 1000000.0
            rset.x_values = [x + x0 for x in obj['x_values']]
            rset._absolute = True
        else:
            rset.x_values = obj['x_values']
        for k, v in list(obj['results'].items()):
            rset.add_result(k, v)
        rset.raw_values = obj['raw_values']
        return rset
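These methods are classmethods on Flent's ResultSet (flent.resultset), and the obj they consume is the decoded contents of a data file. Assuming a current-format .flent.gz file (gzip-compressed JSON), a minimal loader sketch might look like this; the load_flent_file name is made up for the example:

    import gzip
    import json

    from flent.resultset import ResultSet

    def load_flent_file(path, absolute=False):
        # Flent data files are gzip-compressed JSON documents; decode one
        # and hand the resulting dict to ResultSet.unserialise().
        with gzip.open(path, 'rt') as fp:
            obj = json.load(fp)
        return ResultSet.unserialise(obj, absolute=absolute)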
Example #3
    def unserialise_compat(cls, version, obj, absolute=False):
        if version == 1:
            obj['raw_values'] = {}
            if 'SERIES_META' in obj['metadata']:
                obj['raw_values'] = dict([
                    (k, v['RAW_VALUES'])
                    for k, v in obj['metadata']['SERIES_META'].items()
                    if 'RAW_VALUES' in v
                ])
            if not obj['raw_values']:
                # No raw values were stored in the old data set. Fake them by
                # using the interpolated values as 'raw'. This ensures there's
                # always some data available as raw values, to facilitate
                # relying on their presence in future code.

                t0 = parse_date(obj['metadata'].get(
                    'T0', obj['metadata'].get('TIME')))
                x0 = timegm(t0.timetuple()) + t0.microsecond / 1000000.0
                for name in obj['results'].keys():
                    obj['raw_values'][name] = [{
                        't': x0 + x,
                        'val': r
                    } for x, r in zip(obj['x_values'], obj['results'][name])]
                obj['metadata']['FAKE_RAW_VALUES'] = True

            if 'NETPERF_WRAPPER_VERSION' in obj['metadata']:
                obj['metadata']['FLENT_VERSION'] = obj['metadata'][
                    'NETPERF_WRAPPER_VERSION']
                del obj['metadata']['NETPERF_WRAPPER_VERSION']

        return obj
Example #4
    def unserialise(cls, obj, absolute=False, SUFFIX=SUFFIX):
        try:
            version = int(obj['version'])
        except (KeyError, ValueError):
            version = 1

        if version > FILEFORMAT_VERSION:
            raise RuntimeError("File format version %d is too new. "
                               "Please upgrade your version of Flent" %
                               version)
        if version < FILEFORMAT_VERSION:
            obj = cls.unserialise_compat(version, obj, absolute)
        metadata = dict(obj['metadata'])

        if 'TOTAL_LENGTH' not in metadata or metadata['TOTAL_LENGTH'] is None:
            metadata['TOTAL_LENGTH'] = max(obj['x_values'])

        for t in TIME_SETTINGS:
            if t in metadata and metadata[t] is not None:
                metadata[t] = parse_date(metadata[t])
        rset = cls(SUFFIX=SUFFIX, **metadata)
        if absolute:
            t0 = metadata.get('T0', metadata.get('TIME'))
            x0 = timegm(t0.timetuple()) + t0.microsecond / 1000000.0
            rset.x_values = [x + x0 for x in obj['x_values']]
            rset._absolute = True
        else:
            rset.x_values = obj['x_values']
        for k, v in list(obj['results'].items()):
            rset.add_result(k, v)
        rset.raw_values = obj['raw_values']
        return rset
Example #5
    def unserialise(cls, obj, absolute=False, SUFFIX=SUFFIX):
        try:
            version = int(obj['version'])
        except (KeyError, ValueError):
            version = 1

        if version > FILEFORMAT_VERSION:
            raise RuntimeError("File format version %d is too new. "
                               "Please upgrade your version of Flent" %
                               version)
        if version < FILEFORMAT_VERSION:
            logger.debug(
                "Found old file format version %d. "
                "Converting to current version %d.", version,
                FILEFORMAT_VERSION)
            obj = cls.unserialise_compat(version, obj, absolute)

        metadata = dict(obj['metadata'])

        if 'TOTAL_LENGTH' not in metadata or metadata['TOTAL_LENGTH'] is None:
            metadata['TOTAL_LENGTH'] = max(obj['x_values'])

        # We need the minimum timestamp to guess a timezone offset, which we
        # store for subsequent values because it shouldn't be used for
        # BATCH_TIME
        min_t = 10**10
        offset = None
        for v in obj['raw_values'].values():
            min_t = min([min_t] + [i['t'] for i in v if 't' in i])
        for t in TIME_SETTINGS:
            if t in metadata and metadata[t] is not None:
                metadata[t], offset = parse_date(metadata[t],
                                                 min_t=min_t,
                                                 offset=offset)
        rset = cls(SUFFIX=SUFFIX, **metadata)
        if absolute:
            t0 = metadata.get('T0', metadata.get('TIME'))
            x0 = timegm(t0.timetuple()) + t0.microsecond / 1000000.0
            rset.x_values = [x + x0 for x in obj['x_values']]
            rset._absolute = True
        else:
            rset.x_values = obj['x_values']
        for k, v in list(obj['results'].items()):
            rset.add_result(k, v)
        rset.raw_values = obj['raw_values']
        return rset
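The min_t scan in this version finds the earliest raw timestamp so that parse_date can anchor its timezone-offset guess, which is then reused for the remaining TIME_SETTINGS fields. A small worked example of the scan itself (the sample data is made up):

    raw_values = {'Ping ICMP': [{'t': 1451649600.25, 'val': 10.0},
                                {'t': 1451649600.45, 'val': 11.0}],
                  'TCP upload': [{'t': 1451649600.10, 'val': 35.2}]}

    # Same scan as in unserialise(): the sentinel 10**10 seconds is far
    # enough in the future (around year 2286) that any real timestamp
    # replaces it.
    min_t = 10**10
    for v in raw_values.values():
        min_t = min([min_t] + [i['t'] for i in v if 't' in i])

    assert min_t == 1451649600.10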
Example #6
    def unserialise_compat(cls, version, obj, absolute=False):
        fake_raw = False
        if version == 1:
            obj['raw_values'] = {}
            if 'SERIES_META' in obj['metadata']:
                logger.debug("Moving raw values from SERIES_META")
                obj['raw_values'] = dict([(k, v['RAW_VALUES']) for k, v in
                                          obj['metadata']['SERIES_META'].items()
                                          if 'RAW_VALUES' in v])
            if not obj['raw_values']:
                # No raw values were stored in the old data set. Fake them by
                # using the interpolated values as 'raw'. This ensures there's
                # always some data available as raw values, to facilitate
                # relying on their presence in future code.
                logger.debug("No raw values found; synthesising from parsed data")

                t0, offset = parse_date(obj['metadata'].get(
                    'T0', obj['metadata'].get('TIME')))
                x0 = timegm(t0.timetuple()) + t0.microsecond / 1000000.0
                for name in obj['results'].keys():
                    obj['raw_values'][name] = [{'t': x0 + x, 'val': r} for x, r in
                                               zip(obj['x_values'],
                                                   obj['results'][name])]
                obj['metadata']['FAKE_RAW_VALUES'] = fake_raw = True

            if 'NETPERF_WRAPPER_VERSION' in obj['metadata']:
                logger.debug("Converting old NETPERF_WRAPPER_VERSION (%s) "
                             "to FLENT_VERSION",
                             obj['metadata']['NETPERF_WRAPPER_VERSION'])
                obj['metadata']['FLENT_VERSION'] = obj[
                    'metadata']['NETPERF_WRAPPER_VERSION']
                del obj['metadata']['NETPERF_WRAPPER_VERSION']

        if version < 4 and not fake_raw:
            # Version 4 moved the data transform logic to also be applied to
            # raw_values data, so fix up the values in the raw_values
            # structure to apply data transforms where they are missing.

            logger.debug("Applying unit conversion to raw values")
            converted = 0
            for n, values in obj['raw_values'].items():
                # Netperf UDP_RR values
                if 'UDP' in n:
                    logger.debug("Converting data series '%s' from RR to ms", n)
                    for v in values:
                        # Unfortunately this is the best heuristic we have that
                        # this was a netperf UDP_RR runner, since old versions
                        # may not have recorded this fact in the metadata
                        if 'dur' in v:
                            v['val'] = transformers.rr_to_ms(v['val'])
                            converted += 1

                # Convert HTTP latency values from seconds to milliseconds
                elif n == 'HTTP latency':
                    logger.debug("Converting data series '%s' from s to ms", n)
                    for v in values:
                        if 'val' in v:
                            v['val'] = transformers.s_to_ms(v['val'])
                            converted += 1

                # Turn airtime values from cumulative values into counts per
                # interval
                elif values and 'stations' in values[0]:
                    logger.debug("Converting airtime values for series '%s' from "
                                 "cumulative to per-interval", n)
                    last_vals = {}
                    for v in values:
                        if 'stations' not in v:
                            continue
                        for s, d in v['stations'].items():
                            if s not in last_vals:
                                last_vals[s] = {}
                            last = last_vals[s]
                            for k in ('airtime_tx', 'airtime_rx'):
                                if k in d:
                                    converted += 1
                                    if k not in last:
                                        last[k], d[k] = d[k], 0.0
                                    else:
                                        last[k], d[k] = d[k], d[k] - last[k]

                # Ditto for qdisc drops and marks
                elif values and ('dropped' in values[0] or
                                 'ecn_mark' in values[0]):
                    logger.debug("Converting qdisc drops and marks for series "
                                 "'%s' ""from cumulative to per-interval values",
                                 n)
                    last = {}
                    for v in values:
                        for k in ('dropped', 'ecn_mark'):
                            if k in v:
                                converted += 1
                                if k not in last:
                                    last[k], v[k] = v[k], 0.0
                                else:
                                    last[k], v[k] = v[k], v[k] - last[k]

            # Iperf UDP bandwidth was reported in bits/s; it now uses Mbits/s
            # to be consistent with other bandwidth measurements
            if 'SERIES_META' in obj['metadata']:
                for k, v in obj['metadata']['SERIES_META'].items():
                    if 'MEAN_VALUE' in v and v.get('UNITS') == "bits/s":
                        logger.debug("Converting MEAN_VALUE units for series "
                                     "'%s' from bit/s to Mbits/s", k)
                        converted += 1
                        v['MEAN_VALUE'] = transformers.bits_to_mbits(
                            v['MEAN_VALUE'])
                        v['UNITS'] = "Mbits/s"

            logger.debug("Converted a total of %d data points.",
                         converted)

        return obj
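The airtime and qdisc branches both use the same swap-and-subtract idiom to turn cumulative counters into per-interval values: the first sample of each key becomes 0.0 and every later sample becomes the delta from its predecessor. As a standalone sketch (the deltify name and sample data are made up):

    def deltify(values, keys=('dropped', 'ecn_mark')):
        # In-place conversion of cumulative counters to per-interval
        # deltas, mirroring the qdisc branch above.
        last = {}
        for v in values:
            for k in keys:
                if k in v:
                    if k not in last:
                        last[k], v[k] = v[k], 0.0
                    else:
                        last[k], v[k] = v[k], v[k] - last[k]

    samples = [{'dropped': 5}, {'dropped': 7}, {'dropped': 12}]
    deltify(samples)
    # samples is now [{'dropped': 0.0}, {'dropped': 2}, {'dropped': 5}]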
Example #7
    def unserialise_compat(cls, version, obj, absolute=False):
        fake_raw = False
        if version == 1:
            obj['raw_values'] = {}
            if 'SERIES_META' in obj['metadata']:
                logger.debug("Moving raw values from SERIES_META")
                obj['raw_values'] = dict([(k, v['RAW_VALUES']) for k, v in
                                          obj['metadata']['SERIES_META'].items()
                                          if 'RAW_VALUES' in v])
            if not obj['raw_values']:
                # No raw values were stored in the old data set. Fake them by
                # using the interpolated values as 'raw'. This ensures there's
                # always some data available as raw values, to facilitate
                # relying on their presence in future code.
                logger.debug("No raw values found; synthesising from parsed data")

                t0 = parse_date(obj['metadata'].get(
                    'T0', obj['metadata'].get('TIME')))
                x0 = timegm(t0.timetuple()) + t0.microsecond / 1000000.0
                for name in obj['results'].keys():
                    obj['raw_values'][name] = [{'t': x0 + x, 'val': r} for x, r in
                                               zip(obj['x_values'],
                                                   obj['results'][name])]
                obj['metadata']['FAKE_RAW_VALUES'] = fake_raw = True

            if 'NETPERF_WRAPPER_VERSION' in obj['metadata']:
                logger.debug("Converting old NETPERF_WRAPPER_VERSION (%s) "
                             "to FLENT_VERSION",
                             obj['metadata']['NETPERF_WRAPPER_VERSION'])
                obj['metadata']['FLENT_VERSION'] = obj[
                    'metadata']['NETPERF_WRAPPER_VERSION']
                del obj['metadata']['NETPERF_WRAPPER_VERSION']

        if version < 4 and not fake_raw:
            # Version 4 moved the data transform logic to also be applied to
            # raw_values data, so fix up the values in the raw_values
            # structure to apply data transforms where they are missing.

            logger.debug("Applying unit conversion to raw values")
            converted = 0
            for n, values in obj['raw_values'].items():
                # Netperf UDP_RR values
                if 'UDP' in n:
                    logger.debug("Converting data series '%s' from RR to ms", n)
                    for v in values:
                        # Unfortunately this is the best heuristic we have that
                        # this was a netperf UDP_RR runner, since old versions
                        # may not have recorded this fact in the metadata
                        if 'dur' in v:
                            v['val'] = transformers.rr_to_ms(v['val'])
                            converted += 1

                # Convert HTTP latency values from seconds to milliseconds
                elif n == 'HTTP latency':
                    logger.debug("Converting data series '%s' from s to ms", n)
                    for v in values:
                        if 'val' in v:
                            v['val'] = transformers.s_to_ms(v['val'])
                            converted += 1

                # Turn airtime values from cumulative values into counts per
                # interval
                elif values and 'stations' in values[0]:
                    logger.debug("Converting airtime values for series '%s' from "
                                 "cumulative to per-interval", n)
                    last_vals = {}
                    for v in values:
                        if 'stations' not in v:
                            continue
                        for s, d in v['stations'].items():
                            if s not in last_vals:
                                last_vals[s] = {}
                            last = last_vals[s]
                            for k in ('airtime_tx', 'airtime_rx'):
                                if k in d:
                                    converted += 1
                                    if k not in last:
                                        last[k], d[k] = d[k], 0.0
                                    else:
                                        last[k], d[k] = d[k], d[k] - last[k]

                # Ditto for qdisc drops and marks
                elif values and ('dropped' in values[0] or
                                 'ecn_mark' in values[0]):
                    logger.debug("Converting qdisc drops and marks for series "
                                 "'%s' ""from cumulative to per-interval values",
                                 n)
                    last = {}
                    for v in values:
                        for k in ('dropped', 'ecn_mark'):
                            if k in v:
                                converted += 1
                                if k not in last:
                                    last[k], v[k] = v[k], 0.0
                                else:
                                    last[k], v[k] = v[k], v[k] - last[k]

            # Iperf UDP bandwidth was reported in bits/s; it now uses Mbits/s
            # to be consistent with other bandwidth measurements
            if 'SERIES_META' in obj['metadata']:
                for k, v in obj['metadata']['SERIES_META'].items():
                    if 'MEAN_VALUE' in v and v.get('UNITS') == "bits/s":
                        logger.debug("Converting MEAN_VALUE units for series "
                                     "'%s' from bit/s to Mbits/s", k)
                        converted += 1
                        v['MEAN_VALUE'] = transformers.bits_to_mbits(
                            v['MEAN_VALUE'])
                        v['UNITS'] = "Mbits/s"

            logger.debug("Converted a total of %d data points.",
                         converted)

        return obj
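The transformers helpers used in the version < 4 branch are plain unit conversions. Their real implementations live in flent.transformers; the definitions below are assumptions that merely match how the examples use them (netperf RR runners report transactions per second, so mean latency in ms is 1000 divided by the rate):

    def rr_to_ms(value):
        # Transactions/second -> mean round-trip time in milliseconds
        # (assumed implementation)
        return 1000.0 / value

    def s_to_ms(value):
        # Seconds -> milliseconds (assumed implementation)
        return value * 1000.0

    def bits_to_mbits(value):
        # bits/s -> Mbits/s (assumed implementation)
        return value / 1000000.0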