Example #1
File: plot.py  Project: sahanasj/rally
def _prepare_data(data, reduce_rows=1000):
    durations = []
    idle_durations = []
    atomic_durations = {}
    num_errors = 0

    for i in data["result"]:
        # TODO(maretskiy): store error value and scenario output

        if i["error"]:
            num_errors += 1

        durations.append(i["duration"])
        idle_durations.append(i["idle_duration"])

        for met, duration in i["atomic_actions"].items():
            try:
                atomic_durations[met].append(duration)
            except KeyError:
                atomic_durations[met] = [duration]

    for k, v in atomic_durations.items():
        atomic_durations[k] = utils.compress(v, limit=reduce_rows)

    return {
        "total_durations": {
            "duration": utils.compress(durations, limit=reduce_rows),
            "idle_duration": utils.compress(idle_durations,
                                            limit=reduce_rows)},
        "atomic_durations": atomic_durations,
        "num_errors": num_errors,
    }
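
For context, _prepare_data() above reads only a few keys from each iteration record: "error", "duration", "idle_duration" and "atomic_actions", all nested under data["result"]. A minimal input in that shape is sketched below; the key names come from the function body, while the atomic action names and numeric values are made up for illustration.

# Illustrative input for the _prepare_data() variant above.
# Key names are taken from the function body; all values are invented.
sample_data = {
    "result": [
        {"error": [],                    # empty -> the iteration succeeded
         "duration": 1.42,
         "idle_duration": 0.05,
         "atomic_actions": {"nova.boot_server": 1.3,
                            "nova.delete_server": 0.1}},
        {"error": ["SomeError", "boom", "Traceback..."],  # counted in num_errors
         "duration": 2.10,
         "idle_duration": 0.07,
         "atomic_actions": {"nova.boot_server": 2.0,
                            "nova.delete_server": 0.1}},
    ]
}

# _prepare_data(sample_data) would report num_errors == 1 and, judging by the
# compress() tests below, total_durations["duration"] == [(1, 1.42), (2, 2.1)].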
Example #2
    def test_compress(self):
        data64 = range(64)
        data4 = [4, 2, 1, 3]
        mixed = [2, "5", None, 0.5]
        alt_normalize = str
        alt_merge = lambda a, b: str(a) + str(b)
        compress = lambda lst: [(k + 1, float(v)) for k, v in enumerate(lst)]

        # Long list
        self.assertEqual(utils.compress(data64), compress(data64))
        self._compare_items_lists(utils.compress(data64, limit=4),
                                  [(17, 15.0), (33, 31.01), (49, 47.0),
                                   (64, 62.0)])
        self.assertEqual(
            utils.compress(data64,
                           limit=4,
                           normalize=alt_normalize,
                           merge=alt_merge),
            [(17, '012345678910111213141516'),
             (33, '17181920212223242526272829303132'),
             (49, '33343536373839404142434445464748'),
             (64, '495051525354555657585960616263')])

        # Short list
        self.assertEqual(utils.compress(data4, limit=2), [(3, 2.0), (4, 3.0)])
        self.assertEqual(utils.compress(data4, normalize=alt_normalize),
                         [(1, '4'), (2, '2'), (3, '1'), (4, '3')])

        # List with mixed data types
        self.assertEqual(utils.compress(mixed), [(1, 2.0), (2, 5.0), (3, 0.0),
                                                 (4, 0.5)])
        self.assertEqual(utils.compress(mixed, normalize=str), [(1, '2'),
                                                                (2, '5'),
                                                                (3, 'None'),
                                                                (4, '0.5')])
        self.assertRaises(TypeError, utils.compress, mixed, normalize=int)
        self.assertEqual(
            utils.compress(mixed, normalize=alt_normalize, merge=alt_merge),
            [(1, '2'), (2, '5'), (3, 'None'), (4, '0.5')])
Example #3
    def test_compress(self):
        data64 = range(64)
        data4 = [4, 2, 1, 3]
        mixed = [2, "5", None, 0.5]
        alt_normalize = str
        alt_merge = lambda a, b: str(a) + str(b)
        compress = lambda lst: [(k + 1, float(v)) for k, v in enumerate(lst)]

        # Long list
        self.assertEqual(utils.compress(data64), compress(data64))
        self._compare_items_lists(
            utils.compress(data64, limit=4),
            [(17, 15.0), (33, 31.01), (49, 47.0), (64, 62.0)])
        self.assertEqual(
            utils.compress(data64, limit=4,
                           normalize=alt_normalize, merge=alt_merge),
            [(17, "012345678910111213141516"),
             (33, "17181920212223242526272829303132"),
             (49, "33343536373839404142434445464748"),
             (64, "495051525354555657585960616263")])

        # Short list
        self.assertEqual(utils.compress(data4, limit=2),
                         [(3, 2.0), (4, 3.0)])
        self.assertEqual(utils.compress(data4, normalize=alt_normalize),
                         [(1, "4"), (2, "2"), (3, "1"), (4, "3")])

        # List with mixed data types
        self.assertEqual(utils.compress(mixed),
                         [(1, 2.0), (2, 5.0), (3, 0.0), (4, 0.5)])
        self.assertEqual(utils.compress(mixed, normalize=str),
                         [(1, "2"), (2, "5"), (3, "None"), (4, "0.5")])
        self.assertRaises(TypeError, utils.compress, mixed, normalize=int)
        self.assertEqual(
            utils.compress(mixed, normalize=alt_normalize, merge=alt_merge),
            [(1, "2"), (2, "5"), (3, "None"), (4, "0.5")])
Example #4
File: plot.py  Project: NeCTAR-RC/rally
def _prepare_data(data):
    durations = []
    idle_durations = []
    atomic_durations = {}
    output = {}
    output_errors = []
    output_stacked = []
    errors = []

    # NOTE(maretskiy): We need this extra iteration to collect the full
    # set of atomic action and scenario output names before processing,
    # since individual iterations may be missing some of them.
    atomic_names = set()
    output_names = set()
    for r in data["result"]:
        atomic_names.update(r["atomic_actions"].keys())
        output_names.update(r["scenario_output"]["data"].keys())

    for idx, r in enumerate(data["result"]):
        # NOTE(maretskiy): Sometimes we miss iteration data.
        # So we care about data integrity by setting zero values
        if len(r["atomic_actions"]) < len(atomic_names):
            for atomic_name in atomic_names:
                r["atomic_actions"].setdefault(atomic_name, 0)

        if len(r["scenario_output"]["data"]) < len(output_names):
            for output_name in output_names:
                r["scenario_output"]["data"].setdefault(output_name, 0)

        if r["scenario_output"]["errors"]:
            output_errors.append((idx, r["scenario_output"]["errors"]))

        for param, value in r["scenario_output"]["data"].items():
            try:
                output[param].append(value)
            except KeyError:
                output[param] = [value]

        if r["error"]:
            type_, message, traceback = r["error"]
            errors.append({"iteration": idx,
                           "type": type_,
                           "message": message,
                           "traceback": traceback})

            # NOTE(maretskiy): Reset failed durations (no sense to display)
            r["duration"] = 0
            r["idle_duration"] = 0

        durations.append(r["duration"])
        idle_durations.append(r["idle_duration"])

        for met, duration in r["atomic_actions"].items():
            try:
                atomic_durations[met].append(duration)
            except KeyError:
                atomic_durations[met] = [duration]

    for k, v in six.iteritems(output):
        output_stacked.append({"key": k, "values": utils.compress(v)})

    for k, v in six.iteritems(atomic_durations):
        atomic_durations[k] = utils.compress(v)

    return {
        "total_durations": {
            "duration": utils.compress(durations),
            "idle_duration": utils.compress(idle_durations)},
        "atomic_durations": atomic_durations,
        "output": output_stacked,
        "output_errors": output_errors,
        "errors": errors,
        "sla": data["sla"],
        "load_duration": data["load_duration"],
        "full_duration": data["full_duration"],
    }
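
Compared with Example #1, this newer variant expects a richer record: each iteration also carries a "scenario_output" dict with "data" and "errors", a failed iteration stores its "error" as a (type, message, traceback) triple, and the top-level dict must additionally provide "sla", "load_duration" and "full_duration". A minimal input in that shape is sketched below; the key names come from the function body, the values are purely illustrative.

# Illustrative input for the newer _prepare_data() variant above
# (and the near-identical one in Example #5).
# Key names are taken from the function body; every value is invented.
sample_data = {
    "result": [
        {"error": [],                    # or ["ExcType", "message", "traceback"]
         "duration": 1.42,
         "idle_duration": 0.05,
         "atomic_actions": {"nova.boot_server": 1.3},
         "scenario_output": {
             "data": {"requests_per_sec": 42.0},
             "errors": ""}},             # anything truthy is collected into output_errors
    ],
    "sla": [],
    "load_duration": 1.5,
    "full_duration": 2.0,
}

Note that the extra pre-pass over data["result"] exists so that iterations missing an atomic action or scenario output key can be back-filled with zeros, keeping every series the same length before it is compressed.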
Example #5
File: plot.py  Project: varunarya10/rally
def _prepare_data(data):
    durations = []
    idle_durations = []
    atomic_durations = {}
    output = {}
    output_errors = []
    output_stacked = []
    errors = []

    # NOTE(maretskiy): We need this extra iteration to collect the full
    # set of atomic action and scenario output names before processing,
    # since individual iterations may be missing some of them.
    atomic_names = set()
    output_names = set()
    for r in data["result"]:
        atomic_names.update(r["atomic_actions"].keys())
        output_names.update(r["scenario_output"]["data"].keys())

    for idx, r in enumerate(data["result"]):
        # NOTE(maretskiy): Sometimes we miss iteration data.
        # So we care about data integrity by setting zero values
        if len(r["atomic_actions"]) < len(atomic_names):
            for atomic_name in atomic_names:
                r["atomic_actions"].setdefault(atomic_name, 0)

        if len(r["scenario_output"]["data"]) < len(output_names):
            for output_name in output_names:
                r["scenario_output"]["data"].setdefault(output_name, 0)

        if r["scenario_output"]["errors"]:
            output_errors.append((idx, r["scenario_output"]["errors"]))

        for param, value in r["scenario_output"]["data"].items():
            try:
                output[param].append(value)
            except KeyError:
                output[param] = [value]

        if r["error"]:
            type_, message, traceback = r["error"]
            errors.append({
                "iteration": idx,
                "type": type_,
                "message": message,
                "traceback": traceback
            })

            # NOTE(maretskiy): Reset failed durations (no sense to display)
            r["duration"] = 0
            r["idle_duration"] = 0

        durations.append(r["duration"])
        idle_durations.append(r["idle_duration"])

        for met, duration in r["atomic_actions"].items():
            try:
                atomic_durations[met].append(duration)
            except KeyError:
                atomic_durations[met] = [duration]

    for k, v in six.iteritems(output):
        output_stacked.append({"key": k, "values": utils.compress(v)})

    for k, v in six.iteritems(atomic_durations):
        atomic_durations[k] = utils.compress(v)

    return {
        "total_durations": {
            "duration": utils.compress(durations),
            "idle_duration": utils.compress(idle_durations)
        },
        "atomic_durations": atomic_durations,
        "output": output_stacked,
        "output_errors": output_errors,
        "errors": errors,
        "sla": data["sla"],
        "load_duration": data["load_duration"],
        "full_duration": data["full_duration"],
    }