Example 1
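    # Constructor snippet: parses the ATF configuration, creates the
    # testblocks, initialises every testblock state to INVALID and subscribes
    # to each testblock's /atf/<testblock_name>/trigger topic.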
    def __init__(self):
        self.ns = "/atf/"
        self.error = False

        # parse configuration
        atf_configuration_parser = ATFConfigurationParser()
        self.config = atf_configuration_parser.get_config()
        self.testblocks = atf_configuration_parser.create_testblocks(self.config, None, True)

        # monitor states for all testblocks
        self.testblock_states = {}
        for testblock in self.testblocks.keys():
            self.testblock_states[testblock] = TestblockState.INVALID

        # create trigger subscriber for all testblocks
        for testblock_name in self.testblocks.keys():
            topic = self.ns + testblock_name + "/trigger"
            #print "create subscriber to '%s' from testblock '%s'" % (topic, testblock_name)
            rospy.Subscriber(topic, TestblockTrigger, self.trigger_callback)

        rospy.loginfo("ATF analyser: started!")
Example 2
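    # Same constructor as in Example 1, only wrapped to a narrower line width.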
    def __init__(self):
        self.ns = "/atf/"
        self.error = False

        # parse configuration
        atf_configuration_parser = ATFConfigurationParser()
        self.config = atf_configuration_parser.get_config()
        self.testblocks = atf_configuration_parser.create_testblocks(
            self.config, None, True)

        # monitor states for all testblocks
        self.testblock_states = {}
        for testblock in self.testblocks.keys():
            self.testblock_states[testblock] = TestblockState.INVALID

        # create trigger subscriber for all testblocks
        for testblock_name in self.testblocks.keys():
            topic = self.ns + testblock_name + "/trigger"
            #print "create subscriber to '%s' from testblock '%s'" % (topic, testblock_name)
            rospy.Subscriber(topic, TestblockTrigger, self.trigger_callback)

        rospy.loginfo("ATF analyser: started!")
Example 3
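# Cleaner: clean() removes the bag_output, json_output and yaml_output
# directories referenced in the ATF configuration.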
class Cleaner():
    def __init__(self):
        self.result = False

        self.atf_configuration_parser = ATFConfigurationParser()
        self.config = self.atf_configuration_parser.get_config()

    def clean(self):
        if os.path.exists(self.config["bag_output"]):
            shutil.rmtree(self.config["bag_output"])
        if os.path.exists(self.config["json_output"]):
            shutil.rmtree(self.config["json_output"])
        if os.path.exists(self.config["yaml_output"]):
            shutil.rmtree(self.config["yaml_output"])
        self.result = True
Example 4
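    # Analyser constructor snippet: iterates over all configured tests, replays
    # each test's bag file, routes every message to the metric handles of the
    # matching testblocks and finally exports the test list.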
    def __init__(self, package_name):
        print "ATF analyser: started!"
        start_time = time.time()
        self.ns = "/atf/"
        self.error = False

        # parse configuration
        self.configuration_parser = ATFConfigurationParser(package_name)
        self.tests = self.configuration_parser.get_tests()

        # generate results
        i = 1
        for test in self.tests:
            inputfile = os.path.join(test.generation_config["bagfile_output"] +
                                     test.name + ".bag")
            print "Processing test %i/%i: %s" % (i, len(self.tests), test.name)
            try:
                bag = rosbag.Bag(inputfile)
            except rosbag.bag.ROSBagException as e:
                print "ERROR empty bag file", e
                i += 1
                continue
            except IOError as e:
                print "Error bag file not found", e
                i += 1
                continue
            if bag.get_message_count() == 0:
                print "ERROR empty bag file"
                i += 1
                continue
            bar = progressbar.ProgressBar(maxval=bag.get_message_count(), \
                    widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
            bar.start()
            j = 0
            count_error = 0

            try:
                for topic, raw_msg, t in bag.read_messages(raw=True):
                    try:
                        msg_type, serialized_bytes, md5sum, pos, pytype = raw_msg
                        msg = pytype()
                        msg.deserialize(serialized_bytes)
                        j += 1
                        for testblock in test.testblocks:
                            #print "testblock", testblock.name
                            #print "testblock.metric_handles", testblock.metric_handles
                            for metric_handle in testblock.metric_handles:
                                if topic == "/atf/status" and msg.name == testblock.name:
                                    testblock.status = msg.status
                                    if testblock.status == TestblockStatus.ACTIVE:
                                        #print "calling start on metric", metric_handle
                                        metric_handle.start(msg)
                                    elif testblock.status == TestblockStatus.SUCCEEDED:
                                        #print "calling stop on metric", metric_handle
                                        metric_handle.stop(msg)
                                else:
                                    metric_handle.update(topic, msg, t)
                    except StopIteration as e:
                        print "stop iterator", type(e), e
                        break
                    except Exception as e:
                        print "general Exception in ATF analyser", type(e), e
                        count_error += 1
                        continue
                    bar.update(j)
            except Exception as e:
                print "FATAL exception in bag file", type(e), e
                continue
            bar.finish()

            print "%d errors detected during test processing" % count_error
            i += 1

        #export test list
        test_list = self.configuration_parser.get_test_list()
        self.configuration_parser.export_to_file(
            test_list,
            os.path.join(
                self.configuration_parser.generation_config["txt_output"],
                "test_list.txt"))
        #self.configuration_parser.export_to_file(test_list, os.path.join(self.configuration_parser.generation_config["json_output"], "test_list.json"))
        #self.configuration_parser.export_to_file(test_list, os.path.join(self.configuration_parser.generation_config["yaml_output"], "test_list.yaml"))

        try:
            print "Processing tests took %s sec" % str(
                round((time.time() - start_time), 4))
        except:
            pass

        print "ATF analyser: done!"
Example 5
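# Full Analyser class: the same bag-file processing as in Example 4, plus
# get_result(), which collects and exports the per-test results into an overall
# AtfResult, and the print_result()/print_result_details() console helpers.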
class Analyser:
    def __init__(self, package_name):
        print "ATF analyser: started!"
        start_time = time.time()
        self.ns = "/atf/"
        self.error = False

        # parse configuration
        self.configuration_parser = ATFConfigurationParser(package_name)
        self.tests = self.configuration_parser.get_tests()

        # generate results
        i = 1
        for test in self.tests:
            inputfile = os.path.join(test.generation_config["bagfile_output"] +
                                     test.name + ".bag")
            print "Processing test %i/%i: %s" % (i, len(self.tests), test.name)
            try:
                bag = rosbag.Bag(inputfile)
            except rosbag.bag.ROSBagException as e:
                print "ERROR empty bag file", e
                i += 1
                continue
            except IOError as e:
                print "Error bag file not found", e
                i += 1
                continue
            if bag.get_message_count() == 0:
                print "ERROR empty bag file"
                i += 1
                continue
            bar = progressbar.ProgressBar(maxval=bag.get_message_count(), \
                    widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
            bar.start()
            j = 0
            count_error = 0

            try:
                for topic, raw_msg, t in bag.read_messages(raw=True):
                    try:
                        msg_type, serialized_bytes, md5sum, pos, pytype = raw_msg
                        msg = pytype()
                        msg.deserialize(serialized_bytes)
                        j += 1
                        for testblock in test.testblocks:
                            #print "testblock", testblock.name
                            #print "testblock.metric_handles", testblock.metric_handles
                            for metric_handle in testblock.metric_handles:
                                if topic == "/atf/status" and msg.name == testblock.name:
                                    testblock.status = msg.status
                                    if testblock.status == TestblockStatus.ACTIVE:
                                        #print "calling start on metric", metric_handle
                                        metric_handle.start(msg)
                                    elif testblock.status == TestblockStatus.SUCCEEDED:
                                        #print "calling stop on metric", metric_handle
                                        metric_handle.stop(msg)
                                else:
                                    metric_handle.update(topic, msg, t)
                    except StopIteration as e:
                        print "stop iterator", type(e), e
                        break
                    except Exception as e:
                        print "general Exception in ATF analyser", type(e), e
                        count_error += 1
                        continue
                    bar.update(j)
            except Exception as e:
                print "FATAL exception in bag file", type(e), e
                continue
            bar.finish()

            print "%d errors detected during test processing" % count_error
            i += 1

        #export test list
        test_list = self.configuration_parser.get_test_list()
        self.configuration_parser.export_to_file(
            test_list,
            os.path.join(
                self.configuration_parser.generation_config["txt_output"],
                "test_list.txt"))
        #self.configuration_parser.export_to_file(test_list, os.path.join(self.configuration_parser.generation_config["json_output"], "test_list.json"))
        #self.configuration_parser.export_to_file(test_list, os.path.join(self.configuration_parser.generation_config["yaml_output"], "test_list.yaml"))

        try:
            print "Processing tests took %s sec" % str(
                round((time.time() - start_time), 4))
        except:
            pass

        print "ATF analyser: done!"

    def get_file_paths(self, directory, prefix):
        result = []
        for subdir, dirs, files in os.walk(directory):
            for filename in files:
                full_path = os.path.join(subdir, filename)
                if filename.startswith(prefix):
                    result.append((filename, full_path))
        result.sort()
        return result

    def get_result(self):
        atf_result = AtfResult()
        atf_result.header.stamp = rospy.Time(time.time())
        atf_result.groundtruth_result = None
        atf_result.groundtruth_error_message = "Failed ATF tests:"
        for test in self.tests:
            # get result
            test_result = test.get_result()

            # export test result to file
            self.configuration_parser.export_to_file(
                test_result,
                os.path.join(test.generation_config["txt_output"],
                             test.name + ".txt"))
            #self.configuration_parser.export_to_file(test_result, os.path.join(test.generation_config["json_output"], test.name + ".json")) # ROS message object is not JSON serialisable
            #self.configuration_parser.export_to_file(test_result, os.path.join(test.generation_config["yaml_output"], test.name + ".yaml")) # ROS message object is not correctly serialized to yaml

            # append testresult to overall atf result
            atf_result.results.append(test_result)

            # aggregate result
            if test_result.groundtruth_result != None and not test_result.groundtruth_result:
                atf_result.groundtruth_result = False
                atf_result.groundtruth_error_message += "\n - test '%s' (%s, %s, %s, %s): %s" % (
                    test_result.name, test_result.robot, test_result.env,
                    test_result.test_config, test_result.testblockset,
                    test_result.groundtruth_error_message)
            if atf_result.groundtruth_result == None and test_result.groundtruth_result:
                atf_result.groundtruth_result = True

        if len(atf_result.results) == 0:
            raise ATFAnalyserError(
                "Analysing failed, no atf result available.")

        # export overall atf result to file
        #print "\natf_result:\n", atf_result
        self.configuration_parser.export_to_file(
            atf_result,
            os.path.join(test.generation_config["txt_output"],
                         "atf_result.txt"))
        return atf_result

    def print_result(self, atf_result):
        if atf_result.groundtruth_result != None and not atf_result.groundtruth_result:
            print "\n"
            print "*************************"
            print "*** SOME TESTS FAILED ***"
            print "*************************"
            print atf_result.groundtruth_error_message
        else:
            print "\n"
            print "********************"
            print "*** ALL TESTS OK ***"
            print "********************"
            print "\n"

    def print_result_details(self, atf_result):
        print "\n"
        print "**********************"
        print "*** result details ***"
        print "**********************"
        print atf_result
Example 6
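    # Minimal constructor: creates the configuration parser and reads the
    # ATF configuration.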
    def __init__(self):
        self.result = False

        self.atf_configuration_parser = ATFConfigurationParser()
        self.config = self.atf_configuration_parser.get_config()
Example 7
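# Merger: merge() loads test_list.json, merges the per-subtest JSON results of
# every test into one structure, computes min/max/average for each metric and
# writes the result to merged_<test_name>.json and merged_<test_name>.yaml.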
class Merger():
    def __init__(self):
        self.ns = "/atf/"
        self.result = False

        self.atf_configuration_parser = ATFConfigurationParser()
        self.config = self.atf_configuration_parser.get_config()

    def merge(self):
        test_list = self.atf_configuration_parser.load_data(os.path.join(self.config["json_output"], "test_list.json"))
        #print "test_list=", test_list
        for test in test_list:
            #print "test=", test
            for test_name, test_data in test.items():
                #print "test_name=", test_name
                #print "test_data=", test_data
                subtests = test_data['subtests']
                #print "subtests=", subtests
                test_data_merged = {}
                for subtest in subtests:
                    #print "subtest=", subtest
                    subtest_data = self.atf_configuration_parser.load_data(os.path.join(self.config["json_output"], subtest + ".json"))
                    #print "subtest_data=", subtest_data
                    if subtest_data != None:
                        for testblock_name, testblock_data in subtest_data.items():
                            #print "testblock_name=", testblock_name
                            #print "testblock_data=", testblock_data
                            if testblock_name == "error":
                                rospy.logwarn("subtest '%s' has an error (error_message: '%s'), skipping...", subtest, testblock_data)
                                #TODO: mark subtest as error, so that presenter can show status information
                                continue
                            for metric_name, metric_data_list in testblock_data.items():
                                #print "metric_name=", metric_name
                                #print "metric_data_list=", metric_data_list
                                for metric_data in metric_data_list:
                                    #print "metric_data=", metric_data
                                    #print "metric_data['data']=", metric_data['data']

                                    # check if entry exists
                                    if testblock_name not in test_data_merged:
                                        # create new testblock entry
                                        #print "create new entry for testblock '" + testblock_name + "'"
                                        test_data_merged[testblock_name] = {}
                                    if metric_name not in test_data_merged[testblock_name]:
                                        # create new metric entry
                                        #print "create new entry for metric '" + metric_name + "' in testblock '" + testblock_name + "'"
                                        test_data_merged[testblock_name][metric_name] = []
                                        new_metric_data = copy.deepcopy(metric_data)
                                        new_metric_data['data'] = {}
                                        new_metric_data['data']['values'] = [metric_data['data']]
                                        test_data_merged[testblock_name][metric_name].append(new_metric_data)
                                    #print "test_data_merged0=", test_data_merged
                                    else:
                                        # entry already exists
                                        #print "entry for metric '" + metric_name + "' in testblock '" + testblock_name + "' already exists"

                                        # check if merging is possible, if not: append
                                        is_in, element_number = self.is_in_metric_data_list(copy.deepcopy(metric_data), copy.deepcopy(test_data_merged[testblock_name][metric_name]))
                                        if is_in:
                                            print "--> merge", metric_data['data'], "into element_number:", element_number
                                            # merge values
                                            test_data_merged[testblock_name][metric_name][element_number]['data']['values'].append(metric_data['data'])
                                            # merge groundtruth_result (take the worst result)
                                            test_data_merged[testblock_name][metric_name][element_number]['groundtruth_result'] = test_data_merged[testblock_name][metric_name][element_number]['groundtruth_result'] and metric_data['groundtruth_result']
                                        else:
                                            #print "--> append"
                                            new_metric_data = copy.deepcopy(metric_data)
                                            new_metric_data['data'] = {}
                                            new_metric_data['data']['values'] = [metric_data['data']]
                                            #print "new_metric_data=", new_metric_data
                                            #print "append to:", test_data_merged[testblock_name]
                                            test_data_merged[testblock_name][metric_name].append(new_metric_data)
                                    #print "test_data_merged=", test_data_merged

                #print "test_data_merged before average=", test_data_merged

                # calculate min/max/average
                for testblock_name, testblock_data in test_data_merged.items():
                    #print "testblock_data=", testblock_data
                    for metric_name, metric_data_list in testblock_data.items():
                        #print "metric_data_list=", metric_data_list
                        for i in range(len(metric_data_list)):
                            #print "i=", i
                            #print "test_data_merged[testblock_name][metric_name][i]['data']['values']=", test_data_merged[testblock_name][metric_name][i]['data']['values']
                            test_data_merged[testblock_name][metric_name][i]['data']['min'] = min(test_data_merged[testblock_name][metric_name][i]['data']['values'])
                            test_data_merged[testblock_name][metric_name][i]['data']['max'] = max(test_data_merged[testblock_name][metric_name][i]['data']['values'])
                            test_data_merged[testblock_name][metric_name][i]['data']['average'] = round(sum(test_data_merged[testblock_name][metric_name][i]['data']['values'])/len(test_data_merged[testblock_name][metric_name][i]['data']['values']), 3)

                #print "test_data_merged after average=", test_data_merged

                # write to file
                filename = os.path.join(self.config["json_output"], "merged_" + test_name + ".json")
                stream = file(filename, 'w')
                json.dump(copy.copy(test_data_merged), stream)

                filename = os.path.join(self.config["yaml_output"], "merged_" + test_name + ".yaml")
                if not filename == "":
                    stream = file(filename, 'w')
                    yaml.dump(copy.copy(test_data_merged), stream, default_flow_style=False)
        self.result = True

    def is_in_metric_data_list(self, data, data_list):
        counter = 0
        for dat in data_list:
            is_same = self.is_same_metric_data(copy.deepcopy(data), copy.deepcopy(dat))
            if is_same:
                return True, counter
            counter += 1
        return False, None

    def is_same_metric_data(self, data1, data2):
        data1.pop('data')
        data1.pop('groundtruth_result')
        data2.pop('data')
        data2.pop('groundtruth_result')
        if data1 == data2:
            return True
        else:
            return False
Example 8
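# Extended Analyser: like Example 5, but the results are additionally written
# as .bag files and aggregate_results() combines the subtests of each test into
# an aggregated AtfResult (atf_result_aggregated).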
class Analyser:
    def __init__(
            self,
            package_name,
            test_generation_config_file="atf/test_generation_config.yaml"):
        print "ATF analyser: started!"
        start_time = time.time()
        self.ns = "/atf/"
        self.error = False
        self.package_name = package_name

        # parse configuration
        self.configuration_parser = ATFConfigurationParser(
            package_name, test_generation_config_file)
        self.tests = self.configuration_parser.get_tests()

        # generate results
        i = 1
        for test in self.tests:
            inputfile = os.path.join(test.generation_config["bagfile_output"] +
                                     test.name + ".bag")
            print "Processing test %i/%i: %s" % (i, len(self.tests), test.name)
            try:
                bag = rosbag.Bag(inputfile)
            except rosbag.bag.ROSBagException as e:
                print "ERROR empty bag file", e
                i += 1
                continue
            except IOError as e:
                print "Error bag file not found", e
                i += 1
                continue
            if bag.get_message_count() == 0:
                print "ERROR empty bag file"
                i += 1
                continue
            bar = progressbar.ProgressBar(maxval=bag.get_message_count(), \
                    widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
            bar.start()
            j = 0
            count_error = 0

            try:
                for topic, raw_msg, t in bag.read_messages(raw=True):
                    try:
                        msg_type, serialized_bytes, md5sum, pos, pytype = raw_msg
                        msg = pytype()
                        msg.deserialize(serialized_bytes)
                        j += 1
                        for testblock in test.testblocks:
                            #print "testblock", testblock.name
                            #print "testblock.metric_handles", testblock.metric_handles
                            for metric_handle in testblock.metric_handles:
                                if topic == "/atf/status" and msg.name == testblock.name:
                                    testblock.status = msg.status
                                    if testblock.status == TestblockStatus.ACTIVE:
                                        #print "calling start on metric", metric_handle
                                        metric_handle.start(msg)
                                    elif testblock.status == TestblockStatus.SUCCEEDED:
                                        #print "calling stop on metric", metric_handle
                                        metric_handle.stop(msg)
                                else:
                                    metric_handle.update(topic, msg, t)
                    except StopIteration as e:
                        print "stop iterator", type(e), e
                        break
                    except Exception as e:
                        print "general Exception in ATF analyser", type(e), e
                        count_error += 1
                        continue
                    bar.update(j)
            except Exception as e:
                print "FATAL exception in bag file", type(e), e
                continue
            bar.finish()

            print "%d errors detected during test processing" % count_error
            i += 1

        #export test list
        test_list = self.configuration_parser.get_test_list()
        self.configuration_parser.export_to_file(
            test_list,
            os.path.join(
                self.configuration_parser.generation_config["txt_output"],
                "test_list.txt"))
        #self.configuration_parser.export_to_file(test_list, os.path.join(self.configuration_parser.generation_config["json_output"], "test_list.json"))
        #self.configuration_parser.export_to_file(test_list, os.path.join(self.configuration_parser.generation_config["yaml_output"], "test_list.yaml"))

        try:
            print "Processing tests took %s sec" % str(
                round((time.time() - start_time), 4))
        except:
            pass

        print "ATF analyser: done!"

    def get_file_paths(self, directory, prefix):
        result = []
        for subdir, dirs, files in os.walk(directory):
            for filename in files:
                full_path = os.path.join(subdir, filename)
                if filename.startswith(prefix):
                    result.append((filename, full_path))
        result.sort()
        return result

    def get_result(self):
        atf_result = AtfResult()
        atf_result.header.stamp = rospy.Time(time.time())
        atf_result.name = self.package_name
        atf_result.result = None
        atf_result.error_message = "All tests OK"
        for test in self.tests:
            # get result
            test_result = test.get_result()

            # export test result to file
            self.configuration_parser.export_to_file(
                test_result,
                os.path.join(test.generation_config["txt_output"],
                             test.name + ".txt"))
            #self.configuration_parser.export_to_file(test_result, os.path.join(test.generation_config["json_output"], test.name + ".json")) # ROS message object is not JSON serialisable
            #self.configuration_parser.export_to_file(test_result, os.path.join(test.generation_config["yaml_output"], test.name + ".yaml")) # ROS message object is not correctly serialized to yaml

            # append testresult to overall atf result
            atf_result.results.append(test_result)

            # aggregate result
            if test_result.result != None and not test_result.result:
                # check if there are already failed tests in atf_result
                if atf_result.result == None:
                    atf_result.error_message = "Failed ATF tests:"
                atf_result.result = False
                atf_result.error_message += "\n - test '%s' (%s, %s, %s, %s): %s" % (
                    test_result.name, test_result.test_config,
                    test_result.robot, test_result.env,
                    test_result.testblockset, test_result.error_message)
            if atf_result.result == None and test_result.result:
                atf_result.result = True

        if len(atf_result.results) == 0:
            raise ATFAnalyserError(
                "Analysing failed, no atf result available.")

        # export overall atf result to file
        #print "\natf_result:\n", atf_result
        self.configuration_parser.export_to_file(
            atf_result,
            os.path.join(test.generation_config["txt_output"],
                         "atf_result.txt"))
        self.configuration_parser.export_to_file(
            atf_result,
            os.path.join(test.generation_config["txt_output"],
                         "atf_result.bag"))

        # aggregate results
        atf_result_aggregated = self.aggregate_results(atf_result)
        #print "\natf_result_aggregated:\n", atf_result_aggregated
        self.configuration_parser.export_to_file(
            atf_result_aggregated,
            os.path.join(test.generation_config["txt_output"],
                         "atf_result_aggregated.txt"))
        self.configuration_parser.export_to_file(
            atf_result_aggregated,
            os.path.join(test.generation_config["txt_output"],
                         "atf_result_aggregated.bag"))

        return atf_result

    def aggregate_results(self, atf_result):
        test_list = self.configuration_parser.get_test_list()

        ret = self.configuration_parser.get_sorted_plot_dicts(
            atf_result, "", "", "")

        mbt = ret['mbt']
        mbt_aggregated = {}
        for metric in mbt.keys():
            #print "m=", metric
            if metric not in mbt_aggregated.keys():
                mbt_aggregated[metric] = {}
            for testblock in mbt[metric].keys():
                #print "  b=", testblock
                if testblock not in mbt_aggregated[metric].keys():
                    mbt_aggregated[metric][testblock] = {}
                for tl_tests in test_list:
                    #print "tl_tests=", tl_tests
                    for tl_test in tl_tests.keys():
                        #print "    tl_test=", tl_test
                        metric_result = MetricResult()
                        started = True
                        finished = True
                        groundtruth_result = True
                        groundtruth_error_message = ""
                        details = []
                        for test in mbt[metric][testblock].keys():
                            if test.startswith(tl_test):
                                # aggregate started/stopped from every metric_result
                                if not mbt[metric][testblock][test].started:
                                    started = False
                                if not mbt[metric][testblock][test].finished:
                                    finished = False

                                # aggregate data from every metric_result
                                data = mbt[metric][testblock][test].data
                                stamp = data.stamp
                                # check if data is set (not all default values anymore)
                                if data.stamp == rospy.Time(
                                        0) and data.data == 0:
                                    stamp = rospy.Time(
                                        0
                                    )  # mark metric result as invalid by setting timestamp to zero
                                metric_result.series.append(data)

                                # aggregate groundtruth from every metric_result
                                groundtruth = mbt[metric][testblock][
                                    test].groundtruth
                                if groundtruth.result == False:
                                    groundtruth_result = False
                                    if groundtruth_error_message != "":
                                        groundtruth_error_message += "\n"
                                    groundtruth_error_message += "groundtruth missmatch in subtest %s" % (
                                        test)

                                # aggregate details from every metric_result
                                details = details + mbt[metric][testblock][
                                    test].details

                        if len(metric_result.series
                               ) == 0:  # no matching subtest found
                            continue

                        metric_result.groundtruth = groundtruth
                        metric_result.groundtruth.result = groundtruth_result
                        metric_result.groundtruth.error_message = groundtruth_error_message

                        metric_result.name = mbt[metric][testblock][test].name
                        metric_result.mode = MetricResult.SPAN_MEAN  # aggregated metrics are always SPAN_MEAN
                        metric_result.started = started
                        metric_result.finished = finished
                        # metric_result.series is set above
                        metric_result.data.stamp = stamp
                        metric_result.data.data = metrics_helper.get_mean(
                            metric_result.series)
                        metric_result.min = metrics_helper.get_min(
                            metric_result.series)
                        metric_result.max = metrics_helper.get_max(
                            metric_result.series)
                        metric_result.mean = metric_result.data.data
                        metric_result.std = metrics_helper.get_std(
                            metric_result.series)
                        # metric_result.groundtruth is set above
                        metric_result.details = details
                        mbt_aggregated[metric][testblock][
                            tl_test] = metric_result

        # convert mbt to tbm
        tbm = {}
        for metric in mbt_aggregated.keys():
            #print "m=", metric
            for testblock in mbt_aggregated[metric].keys():
                #print "  b=", testblock
                for test in mbt_aggregated[metric][testblock].keys():
                    #print "    t=", test
                    if test not in tbm.keys():
                        tbm[test] = {}
                    if testblock not in tbm[test].keys():
                        tbm[test][testblock] = {}
                    tbm[test][testblock][metric] = mbt_aggregated[metric][
                        testblock][test]

        # convert tbm to atf_result_aggregated
        atf_result_aggregated = AtfResult()
        atf_result_aggregated.header = atf_result.header
        atf_result_aggregated.name = atf_result.name
        atf_result_aggregated.result = True
        for test in sorted(tbm.keys()):
            test_result = TestResult()
            test_result.name = test
            test_result.result = True

            # find test metadata in atf_result
            for t in atf_result.results:
                if t.name.startswith(test):
                    test_result.test_config = t.test_config
                    test_result.robot = t.robot
                    test_result.env = t.env
                    test_result.testblockset = t.testblockset
                    break

            for testblock in sorted(tbm[test].keys()):
                testblock_result = TestblockResult()
                testblock_result.name = testblock
                testblock_result.result = True
                for metric in sorted(tbm[test][testblock].keys()):
                    metric_result = tbm[test][testblock][metric]
                    testblock_result.results.append(metric_result)
                    # aggregate metric result
                    if metric_result.groundtruth.result == False:
                        testblock_result.result = False
                        testblock_result.error_message += "\n     - metric '%s': %s" % (
                            metric_result.name,
                            metric_result.groundtruth.error_message)

                test_result.results.append(testblock_result)
                # aggregate testblock result
                if testblock_result.result == False:
                    test_result.result = False
                    test_result.error_message += "\n   - testblock '%s': %s" % (
                        testblock_result.name, testblock_result.error_message)

            atf_result_aggregated.results.append(test_result)
            # aggregate test result
            if test_result.result == False:
                atf_result_aggregated.result = False
                atf_result_aggregated.error_message += "\n - test '%s' (%s, %s, %s, %s): %s" % (
                    test_result.name, test_result.test_config,
                    test_result.robot, test_result.env,
                    test_result.testblockset, test_result.error_message)

        return atf_result_aggregated

    def print_result(self, atf_result):
        if atf_result.result != None and not atf_result.result:
            print "\n"
            print "*************************"
            print "*** SOME TESTS FAILED ***"
            print "*************************"
            print atf_result.error_message
            self.print_result_summary(atf_result)
        else:
            print "\n"
            print "********************"
            print "*** ALL TESTS OK ***"
            print "********************"
            self.print_result_summary(atf_result)

    def print_result_details(self, atf_result):
        print "\n"
        print "**********************"
        print "*** result details ***"
        print "**********************"
        print atf_result

    def print_result_summary(self, atf_result):
        print "\n"
        print "**********************"
        print "*** result summary ***"
        print "**********************"
        for result in atf_result.results:
            if result.result:
                print "test '%s' (%s, %s, %s, %s): succeeded" % (
                    result.name, result.test_config, result.robot, result.env,
                    result.testblockset)
            else:
                print "test '%s' (%s, %s, %s, %s): failed" % (
                    result.name, result.test_config, result.robot, result.env,
                    result.testblockset)
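
A minimal driver sketch for the Analyser above, assuming it is used from a stand-alone script; the import path and the default package name are assumptions and do not come from the examples:

#!/usr/bin/env python
import sys

# Assumed import path -- the examples do not show the module layout.
from atf_core.analyser import Analyser

if __name__ == "__main__":
    # Hypothetical package name, normally supplied by the test pipeline.
    package_name = sys.argv[1] if len(sys.argv) > 1 else "atf_test"
    analyser = Analyser(package_name)    # replays the recorded bag files
    atf_result = analyser.get_result()   # exports and returns the overall AtfResult
    analyser.print_result(atf_result)    # prints the OK / FAILED summary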
Example 9
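    # Minimal constructor: only creates the ATFConfigurationParser for the
    # given package.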
    def __init__(self, package_name):
        self.result = False

        self.atf_configuration_parser = ATFConfigurationParser(package_name)
Example 10
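# The same Merger as in Example 7, auto-formatted to a narrower line width.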
class Merger():
    def __init__(self):
        self.ns = "/atf/"
        self.result = False

        self.atf_configuration_parser = ATFConfigurationParser()
        self.config = self.atf_configuration_parser.get_config()

    def merge(self):
        test_list = self.atf_configuration_parser.load_data(
            os.path.join(self.config["json_output"], "test_list.json"))
        #print "test_list=", test_list
        for test in test_list:
            #print "test=", test
            for test_name, test_data in test.items():
                #print "test_name=", test_name
                #print "test_data=", test_data
                subtests = test_data['subtests']
                #print "subtests=", subtests
                test_data_merged = {}
                for subtest in subtests:
                    #print "subtest=", subtest
                    subtest_data = self.atf_configuration_parser.load_data(
                        os.path.join(self.config["json_output"],
                                     subtest + ".json"))
                    #print "subtest_data=", subtest_data
                    if subtest_data != None:
                        for testblock_name, testblock_data in subtest_data.items(
                        ):
                            #print "testblock_name=", testblock_name
                            #print "testblock_data=", testblock_data
                            if testblock_name == "error":
                                rospy.logwarn(
                                    "subtest '%s' has an error (error_message: '%s'), skipping...",
                                    subtest, testblock_data)
                                #TODO: mark subtest as error, so that presenter can show status information
                                continue
                            for metric_name, metric_data_list in testblock_data.items(
                            ):
                                #print "metric_name=", metric_name
                                #print "metric_data_list=", metric_data_list
                                for metric_data in metric_data_list:
                                    #print "metric_data=", metric_data
                                    #print "metric_data['data']=", metric_data['data']

                                    # check if entry exists
                                    if testblock_name not in test_data_merged:
                                        # create new testblock entry
                                        #print "create new entry for testblock '" + testblock_name + "'"
                                        test_data_merged[testblock_name] = {}
                                    if metric_name not in test_data_merged[
                                            testblock_name]:
                                        # create new metric entry
                                        #print "create new entry for metric '" + metric_name + "' in testblock '" + testblock_name + "'"
                                        test_data_merged[testblock_name][
                                            metric_name] = []
                                        new_metric_data = copy.deepcopy(
                                            metric_data)
                                        new_metric_data['data'] = {}
                                        new_metric_data['data']['values'] = [
                                            metric_data['data']
                                        ]
                                        test_data_merged[testblock_name][
                                            metric_name].append(
                                                new_metric_data)
                                    #print "test_data_merged0=", test_data_merged
                                    else:
                                        # entry already exists
                                        #print "entry for metric '" + metric_name + "' in testblock '" + testblock_name + "' already exists"

                                        # check if merging is possible, if not: append
                                        is_in, element_number = self.is_in_metric_data_list(
                                            copy.deepcopy(metric_data),
                                            copy.deepcopy(test_data_merged[
                                                testblock_name][metric_name]))
                                        if is_in:
                                            print "--> merge", metric_data[
                                                'data'], "into element_number:", element_number
                                            # merge values
                                            test_data_merged[testblock_name][
                                                metric_name][element_number][
                                                    'data']['values'].append(
                                                        metric_data['data'])
                                            # merge groundtruth_result (take the worst result)
                                            test_data_merged[testblock_name][
                                                metric_name][element_number][
                                                    'groundtruth_result'] = test_data_merged[
                                                        testblock_name][metric_name][
                                                            element_number][
                                                                'groundtruth_result'] and metric_data[
                                                                    'groundtruth_result']
                                        else:
                                            #print "--> append"
                                            new_metric_data = copy.deepcopy(
                                                metric_data)
                                            new_metric_data['data'] = {}
                                            new_metric_data['data'][
                                                'values'] = [
                                                    metric_data['data']
                                                ]
                                            #print "new_metric_data=", new_metric_data
                                            #print "append to:", test_data_merged[testblock_name]
                                            test_data_merged[testblock_name][
                                                metric_name].append(
                                                    new_metric_data)
                                    #print "test_data_merged=", test_data_merged

                #print "test_data_merged before average=", test_data_merged

                # calculate min/max/average
                for testblock_name, testblock_data in test_data_merged.items():
                    #print "testblock_data=", testblock_data
                    for metric_name, metric_data_list in testblock_data.items(
                    ):
                        #print "metric_data_list=", metric_data_list
                        for i in range(len(metric_data_list)):
                            #print "i=", i
                            #print "test_data_merged[testblock_name][metric_name][i]['data']['values']=", test_data_merged[testblock_name][metric_name][i]['data']['values']
                            test_data_merged[testblock_name][metric_name][i][
                                'data']['min'] = min(
                                    test_data_merged[testblock_name]
                                    [metric_name][i]['data']['values'])
                            test_data_merged[testblock_name][metric_name][i][
                                'data']['max'] = max(
                                    test_data_merged[testblock_name]
                                    [metric_name][i]['data']['values'])
                            test_data_merged[testblock_name][metric_name][i][
                                'data']['average'] = round(
                                    sum(test_data_merged[testblock_name]
                                        [metric_name][i]['data']['values']) /
                                    len(test_data_merged[testblock_name]
                                        [metric_name][i]['data']['values']), 3)

                #print "test_data_merged after average=", test_data_merged

                # write to file
                filename = os.path.join(self.config["json_output"],
                                        "merged_" + test_name + ".json")
                stream = file(filename, 'w')
                json.dump(copy.copy(test_data_merged), stream)

                filename = os.path.join(self.config["yaml_output"],
                                        "merged_" + test_name + ".yaml")
                if not filename == "":
                    stream = file(filename, 'w')
                    yaml.dump(copy.copy(test_data_merged),
                              stream,
                              default_flow_style=False)
        self.result = True

    def is_in_metric_data_list(self, data, data_list):
        counter = 0
        for dat in data_list:
            is_same = self.is_same_metric_data(copy.deepcopy(data),
                                               copy.deepcopy(dat))
            if is_same:
                return True, counter
            counter += 1
        return False, None

    def is_same_metric_data(self, data1, data2):
        data1.pop('data')
        data1.pop('groundtruth_result')
        data2.pop('data')
        data2.pop('groundtruth_result')
        if data1 == data2:
            return True
        else:
            return False
Example 11
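    # plot_benchmark: arranges the metric results returned by
    # get_sorted_plot_dicts() in a grid of matplotlib subplots (one errorbar
    # marker per metric, min/max markers for SPAN modes), then saves the figure
    # to /tmp/test.png and shows it.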
    def plot_benchmark(self, style, sharey, hide_groundtruth, hide_min_max,
                       filter_tests, filter_testblocks, filter_metrics):

        sorted_atf_results = ATFConfigurationParser().get_sorted_plot_dicts(
            self.atf_result, filter_tests, filter_testblocks, filter_metrics)

        if style not in sorted_atf_results.keys():
            print "ERROR: style '%s' not implemented" % style
            return
        plot_dict = sorted_atf_results[style]

        rows = []
        cols = []
        plots = []
        nr_unique_plots = 0
        for row in plot_dict.keys():
            if row not in rows:
                rows.append(row)
            for col in plot_dict[row].keys():
                if col not in cols:
                    cols.append(col)
                for plot in plot_dict[row][col].keys():
                    if plot not in plots:
                        plots.append(plot)

        # sort alphabetically
        rows.sort()
        cols.sort()
        plots.sort()

        print "\nplotting in style '%s' (rows: %d, cols: %d, plots: %d)" % (
            style, len(rows), len(cols), len(plots))

        fig, axs = plt.subplots(
            len(rows),
            len(cols),
            squeeze=False,
            sharex=True,
            sharey=sharey,
            figsize=(10, 12))  # FIXME calculate width with nr_testblocks

        # always make this a numpy 2D matrix to access rows and cols correctly if len(rows)=1 or len(cols)=1
        #axs = np.atleast_2d(axs)
        #axs = axs.reshape(len(rows), len(cols))
        # --> not needed anymore due to squeeze=False

        # define colormap (one color per plot)
        clut = cm._generate_cmap('Dark2', len(plots))
        colors = [clut(plots.index(plot)) for plot in plots]
        #colors = [(0.10588235294117647, 0.61960784313725492, 0.46666666666666667, 1.0), (0.85098039215686272, 0.37254901960784315, 0.0078431372549019607, 1.0)]*len(plots)

        for row in rows:
            #print "\nrow=", row

            for col in cols:
                #print "  col=", col

                # select subplot
                ax = axs[rows.index(row)][cols.index(col)]

                # format x axis
                x = np.arange(len(plots))
                ax.set_xticks(x)
                ax.set_xticklabels(plots)
                ax.set_xlim(-1, len(plots))

                # format y axis
                ax.autoscale(enable=True, axis='y', tight=False)
                ax.margins(
                    y=0.2)  # make it a little bigger than the min/max values

                # only set title for upper row and ylabel for left col
                if rows.index(row) == 0:
                    ax.set_title(col)
                if cols.index(col) == 0:
                    ax.set_ylabel(row, rotation=45, ha="right")

                for plot in plots:
                    #print "    plot=", plot
                    try:
                        metric_result = plot_dict[row][col][plot]
                    except KeyError:
                        #print "skip", row, col, plot
                        continue

                    ax.grid(True)
                    nr_unique_plots += 1

                    # set data to plot
                    data = metric_result.data.data
                    lower = metric_result.groundtruth.data - metric_result.groundtruth.epsilon
                    upper = metric_result.groundtruth.data + metric_result.groundtruth.epsilon

                    # set groundtruth marker
                    if metric_result.groundtruth.available and not hide_groundtruth:
                        yerr = [[data - lower], [upper - data]]
                    else:
                        yerr = [[0], [0]]

                    # set marker transparency (filled or transparent)
                    if metric_result.started\
                        and metric_result.finished\
                        and (metric_result.groundtruth.result or not metric_result.groundtruth.available)\
                        and metric_result.data.stamp != rospy.Time(0):
                        markerfacecolor = None  # plot filled marker
                    else:
                        markerfacecolor = 'None'  # plot transparent marker

                    # set color
                    color = colors[plots.index(plot)]

                    # plot data and groundtruth
                    ax.errorbar(plots.index(plot),
                                data,
                                yerr=yerr,
                                fmt='D',
                                markersize=12,
                                markerfacecolor=markerfacecolor,
                                color=color)

                    # plot min and max
                    if not hide_min_max and not metric_result.mode == MetricResult.SNAP:  # only plot min max for SPAN modes
                        ax.plot(plots.index(plot),
                                metric_result.min.data,
                                '^',
                                markersize=8,
                                color=color)
                        ax.plot(plots.index(plot),
                                metric_result.max.data,
                                'v',
                                markersize=8,
                                color=color)

                    # plot a non-visible zero for y-axis scaling
                    ax.plot(plots.index(plot), 0, '')

        fig.autofmt_xdate(rotation=45)
        plt.tight_layout()

        title = "ATF result for %s" % (self.atf_result.name)
        st = fig.suptitle(title, fontsize="large")
        # shift subplots down:
        fig.subplots_adjust(top=0.90)  # move top for title

        fig.set_facecolor("white")

        fig.savefig("/tmp/test.png")
        plt.show()
        return