Example #1
0
File: atf.py  Project: mojin-robotics/atf
    def __init__(self):
        """Connect the ATF application node to the test state machine.

        Looks up the currently selected test from the ROS parameter server,
        prepares the testblock trigger publisher, and blocks until the SMACH
        introspection topic of the test state machine is available so the
        application cannot start before the state machine is up.

        Raises:
            ValueError: if the configured test name is not part of the
                parsed test configuration.
        """
        # get test config
        package_name = rospy.get_param("/atf/package_name")
        print("package_name:", package_name)
        test_generation_config_file = rospy.get_param("/atf/test_generation_config_file")
        print("test_generation_config_file:", test_generation_config_file)
        test_name = rospy.get_param("/atf/test_name")
        print("test_name:", test_name)

        atf_configuration_parser = ATFConfigurationParser(package_name, test_generation_config_file)
        tests = atf_configuration_parser.get_tests()
        # FIX: the original for/break loop silently kept the LAST test when
        # no name matched (and raised NameError for an empty test list).
        # Fail fast with an explicit error instead.
        self.test = next((t for t in tests if t.name == test_name), None)
        if self.test is None:
            raise ValueError("test '%s' not found in test configuration" % test_name)

        self.publisher_trigger = rospy.Publisher("atf/trigger", TestblockTrigger, queue_size=10)
        rospy.Subscriber("/state_machine/machine/smach/container_status", SmachContainerStatus, self._sm_status_cb)
        self.sm_container_status = None

        # make sure to wait with the application for the statemachine in sm_test.py to be initialized
        rospy.loginfo("waiting for smach container in test_sm to be ready...")
        rospy.wait_for_message("/state_machine/machine/smach/container_status", rospy.AnyMsg)
        rospy.sleep(1) # wait for sm_test to initialize all subscribers (rospy bug?)
        rospy.loginfo("...smach container in sm_test is ready.")
Example #2
0
    def __init__(self, arguments):
        """Set up test generation from the command-line *arguments*.

        Expects exactly five entries: script name, package name, test
        generation config file, package source path and package binary path.
        Prints an error and exits with status 1 on a count mismatch.
        """
        self.ns = "/atf/"

        required_argc = 5
        if len(arguments) != required_argc:
            msg = "ATF configuration Error: invalid number of arguments for generating tests. Number of arguments is %d but should be %d" % (
                len(arguments), required_argc)
            print(msg)
            sys.exit(1)

        # positional CLI arguments (argv[0] is the script name itself)
        (self.package_name,
         self.test_generation_config_file,
         self.package_src_path,
         self.package_bin_path) = arguments[1:5]

        self.atf_configuration_parser = ATFConfigurationParser(
            self.package_src_path,
            self.test_generation_config_file,
            skip_metrics=True)
        self.tests = self.atf_configuration_parser.get_tests()

        self.test_list = {}

        # Derive a filesystem-safe directory name from the config file path:
        # both directory separators and dots become underscores.
        safe_config_name = self.test_generation_config_file.replace(
            "/", "_").replace(".", "_")
        self.test_generated_path = os.path.join(
            self.package_bin_path, "test_generated", safe_config_name)
        self.create_folders()
Example #3
0
    def __init__(self):
        """Build the top-level ATF SMACH state machine for the current test.

        Reads the test selection from the ROS parameter server, looks up the
        matching test configuration, and assembles a Concurrence container
        with one SmAtfTestblock state per configured testblock. An
        introspection server is started so other nodes can observe the
        machine's status.

        Raises:
            ValueError: if the configured test name is not part of the
                parsed test configuration.
        """
        #rospy.init_node('state_machine')

        # Create a SMACH state machine
        self.sm_top = smach.StateMachine(outcomes=['succeeded', 'error'])

        # get test config
        package_name = rospy.get_param("/atf/package_name")
        print("package_name:", package_name)
        test_generation_config_file = rospy.get_param("/atf/test_generation_config_file", "atf/test_generation_config.yaml")
        print("test_generation_config_file:", test_generation_config_file)
        test_name = rospy.get_param("/atf/test_name")
        print("test_name:", test_name)

        atf_configuration_parser = ATFConfigurationParser(package_name, test_generation_config_file)
        tests = atf_configuration_parser.get_tests()
        # FIX: the original for/break loop silently kept the LAST test when
        # no name matched (and raised NameError for an empty test list).
        # Fail fast with an explicit error instead.
        test = next((t for t in tests if t.name == test_name), None)
        if test is None:
            raise ValueError("test '%s' not found in test configuration" % test_name)

        # The concurrence succeeds only if EVERY testblock succeeds; a single
        # 'error' outcome from any testblock maps the whole container to 'error'.
        outcome_map_succeeded = {}
        outcome_map_error = {}
        for testblock in list(test.testblockset_config.keys()):
            outcome_map_succeeded[testblock] = 'succeeded'
            outcome_map_error[testblock] = 'error'
        outcome_map = {'succeeded':outcome_map_succeeded,
                        'error':outcome_map_error}

        # one shared recorder for all testblocks of this test
        recorder_handle = ATFRecorder(test)

        # Open the container
        with self.sm_top:

            # Create the sub SMACH state machine
            sm_con = smach.Concurrence(outcomes=['succeeded','error'],
                                       default_outcome='error',
                                       outcome_map=outcome_map)

            sm_con.userdata = self.sm_top.userdata

            # Open the container
            with sm_con:
                # Add states to the container (one per testblock)
                for testblock in list(test.testblockset_config.keys()):
                    smach.Concurrence.add(testblock, SmAtfTestblock(testblock, recorder_handle))

            # TODO preempt all other concurrent States as soon as one state returns 'error'

            smach.StateMachine.add('CON', sm_con,
                                    transitions={'succeeded':'succeeded',
                                                'error':'error'})

        self.sis = smach_ros.IntrospectionServer('/state_machine/machine', self.sm_top, 'SM_ATF')
        self.sis.start()
Example #4
0
    def __init__(self, package_name,
                 test_generation_config_file="atf/test_generation_config.yaml"):
        """Create an analyser for *package_name*.

        Parses the test generation configuration and caches the resulting
        test definitions in ``self.tests``.
        """
        print("ATF analyser: started!")
        self.ns = "/atf/"
        self.package_name = package_name

        # parse configuration once; keep both the parser and the parsed tests
        parser = ATFConfigurationParser(package_name, test_generation_config_file)
        self.configuration_parser = parser
        self.tests = parser.get_tests()
Example #5
0
class Analyser:
    """Process recorded ATF test runs into results.

    Replays each test's bag file, feeds every message to the metric handles
    of the test's testblocks, collects per-test results into an overall
    AtfResult, aggregates repeated subtests, and exports everything to files.
    """

    def __init__(
            self,
            package_name,
            test_generation_config_file="atf/test_generation_config.yaml"):
        """Parse the ATF configuration of *package_name* and load its tests."""
        print("ATF analyser: started!")
        self.ns = "/atf/"
        self.package_name = package_name

        # parse configuration
        self.configuration_parser = ATFConfigurationParser(
            package_name, test_generation_config_file)
        self.tests = self.configuration_parser.get_tests()

    def analyse(self, dry_run):
        """Replay every test's bag file and drive all metric handles.

        Args:
            dry_run: when True, skip the (expensive) bag processing entirely.

        Corrupt, missing or empty bag files are reported and skipped;
        processing continues with the next test.
        """
        if dry_run:
            return

        # generate results
        start_time = time.time()
        i = 1
        for test in self.tests:
            # NOTE(review): os.path.join receives one pre-concatenated string,
            # so "bagfile_output" presumably already ends with a path
            # separator — confirm against the generation config.
            inputfile = os.path.join(test.generation_config["bagfile_output"] +
                                     test.name + ".bag")
            print("Processing test %i/%i: %s" %
                  (i, len(self.tests), test.name))
            try:
                bag = rosbag.Bag(inputfile)
            except rosbag.bag.ROSBagException as e:
                print("ERROR empty bag file", e)
                i += 1
                continue
            except IOError as e:
                print("Error bag file not found", e)
                i += 1
                continue
            if bag.get_message_count() == 0:
                print("ERROR empty bag file")
                i += 1
                continue
            bar = progressbar.ProgressBar(maxval=bag.get_message_count(), \
                    widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
            bar.start()
            j = 0
            count_error = 0

            try:
                # raw=True defers deserialisation; we deserialise manually so
                # per-message failures can be counted and skipped.
                for topic, raw_msg, t in bag.read_messages(raw=True):
                    try:
                        msg_type, serialized_bytes, md5sum, pos, pytype = raw_msg
                        msg = pytype()
                        msg.deserialize(serialized_bytes)
                        j += 1
                        for testblock in test.testblocks:
                            for metric_handle in testblock.metric_handles:
                                # Status messages of THIS testblock start/stop
                                # the metric; everything else — including
                                # status messages of other testblocks
                                # (NOTE(review): confirm intended) — goes to
                                # update().
                                if topic == "/atf/status" and msg.name == testblock.name:
                                    testblock.status = msg.status
                                    if testblock.status == TestblockStatus.ACTIVE:
                                        metric_handle.start(msg)
                                    elif testblock.status == TestblockStatus.SUCCEEDED:
                                        metric_handle.stop(msg)
                                else:
                                    metric_handle.update(topic, msg, t)
                    except StopIteration as e:
                        print("stop iterator", type(e), e)
                        break
                    except Exception as e:
                        # keep going on per-message errors; only count them
                        print("general Exception in ATF analyser", type(e), e)
                        print(traceback.format_exc())
                        count_error += 1
                        continue
                    bar.update(j)
            except Exception as e:
                print("FATAL exception in bag file", type(e), e)
                print(traceback.format_exc())
                continue
            bar.finish()

            print("%d errors detected during test processing" % count_error)
            i += 1

        #export test list
        test_list = self.configuration_parser.get_test_list()
        self.configuration_parser.export_to_file(
            test_list,
            os.path.join(
                self.configuration_parser.generation_config["txt_output"],
                "test_list.txt"))
        #self.configuration_parser.export_to_file(test_list, os.path.join(self.configuration_parser.generation_config["json_output"], "test_list.json"))
        #self.configuration_parser.export_to_file(test_list, os.path.join(self.configuration_parser.generation_config["yaml_output"], "test_list.yaml"))

        try:
            print("Processing tests took %s sec" %
                  str(round((time.time() - start_time), 4)))
        except:
            # deliberately best-effort: timing output must never fail the run
            pass

        print("ATF analyser: done!")

    def get_file_paths(self, directory, prefix):
        """Return sorted (filename, full_path) tuples for all files below
        *directory* whose basename starts with *prefix*."""
        result = []
        for subdir, dirs, files in os.walk(directory):
            for filename in files:
                full_path = os.path.join(subdir, filename)
                if filename.startswith(prefix):
                    result.append((filename, full_path))
        result.sort()
        return result

    def get_result(self, dry_run):
        """Collect per-test results into one AtfResult and export files.

        Args:
            dry_run: when True, fabricate a passing TestResult per test
                instead of asking the test for its real result.

        Returns:
            The combined AtfResult (not the aggregated one, which is only
            exported to files).

        Raises:
            ATFAnalyserError: if no test produced a result at all.
        """
        atf_result = AtfResult()
        atf_result.header.stamp = rospy.Time(time.time())
        atf_result.name = self.package_name
        atf_result.result = None
        atf_result.error_message = "All tests OK"

        for test in self.tests:
            # get result
            if dry_run:
                test_result = TestResult()
                test_result.name = test.name
                test_result.test_config = test.test_config_name
                test_result.robot = test.robot_name
                test_result.env = test.env_name
                test_result.testblockset = test.testblockset_name
                test_result.result = True
                test_result.error_message = "dry run"
            else:
                test_result = test.get_result()

            # export test result to file
            self.configuration_parser.export_to_file(
                test_result,
                os.path.join(test.generation_config["txt_output"],
                             test.name + ".txt"))
            #self.configuration_parser.export_to_file(test_result, os.path.join(test.generation_config["json_output"], test.name + ".json")) # ROS message object is not JSON serialisable
            #self.configuration_parser.export_to_file(test_result, os.path.join(test.generation_config["yaml_output"], test.name + ".yaml")) # ROS message object is not correctly serialized to yaml

            # append testresult to overall atf result
            atf_result.results.append(test_result)

            # aggregate result
            if test_result.result != None and not test_result.result:
                # check if there are already failed tests in atf_result
                if atf_result.result == None:
                    atf_result.error_message = "Failed ATF tests:"
                atf_result.result = False
                atf_result.error_message += "\n - test '%s' (%s, %s, %s, %s): %s" % (
                    test_result.name, test_result.test_config,
                    test_result.robot, test_result.env,
                    test_result.testblockset, test_result.error_message)
            if atf_result.result == None and test_result.result:
                atf_result.result = True

        if len(atf_result.results) == 0:
            raise ATFAnalyserError(
                "Analysing failed, no atf result available.")

        # export overall atf result to file
        # NOTE(review): `test` here is the last test of the loop above; its
        # generation_config supplies the output directory — confirm all tests
        # share the same "txt_output".
        self.configuration_parser.export_to_file(
            atf_result,
            os.path.join(test.generation_config["txt_output"],
                         "atf_result.txt"))
        self.configuration_parser.export_to_file(
            atf_result,
            os.path.join(test.generation_config["txt_output"],
                         "atf_result.bag"))

        # aggregate results
        atf_result_aggregated = self.aggregate_results(atf_result)
        self.configuration_parser.export_to_file(
            atf_result_aggregated,
            os.path.join(test.generation_config["txt_output"],
                         "atf_result_aggregated.txt"))
        self.configuration_parser.export_to_file(
            atf_result_aggregated,
            os.path.join(test.generation_config["txt_output"],
                         "atf_result_aggregated.bag"))

        return atf_result

    def aggregate_results(self, atf_result):
        """Aggregate repeated subtests of *atf_result* into one AtfResult.

        Subtests are matched by name prefix against the entries of the test
        list. For each (metric, testblock, test-prefix) combination the
        per-subtest metric results are folded into a single SPAN_MEAN
        MetricResult (mean/min/max/std over the subtest series); statuses and
        groundtruths are aggregated pessimistically (any failure fails the
        aggregate).
        """
        test_list = self.configuration_parser.get_test_list()

        ret = self.configuration_parser.get_sorted_plot_dicts(
            atf_result, "", "", "")

        # mbt = metric -> testblock -> test nesting of the metric results
        mbt = ret['mbt']
        mbt_aggregated = {}
        for metric in list(mbt.keys()):
            if metric not in list(mbt_aggregated.keys()):
                mbt_aggregated[metric] = {}
            for testblock in list(mbt[metric].keys()):
                if testblock not in list(mbt_aggregated[metric].keys()):
                    mbt_aggregated[metric][testblock] = {}
                for tl_tests in test_list:
                    for tl_test in list(tl_tests.keys()):
                        metric_result = MetricResult()
                        status = TestblockStatus.SUCCEEDED
                        groundtruth_result = Groundtruth.SUCCEEDED
                        groundtruth_error_message = ""
                        details = []
                        for test in list(mbt[metric][testblock].keys()):
                            if test.startswith(tl_test):
                                # aggregate status SUCCEEDED from every metric_result
                                if mbt[metric][testblock][
                                        test].status != TestblockStatus.SUCCEEDED:
                                    status = TestblockStatus.ERROR

                                # aggregate data from every metric_result
                                data = mbt[metric][testblock][test].data
                                stamp = data.stamp
                                # check if data is set (not all default values anymore)
                                if data.stamp == rospy.Time(
                                        0) and data.data == 0:
                                    stamp = rospy.Time(
                                        0
                                    )  # mark metric result as invalid by setting timestamp to zero
                                metric_result.series.append(data)

                                # aggregate groundtruth from every metric_result
                                groundtruth = mbt[metric][testblock][
                                    test].groundtruth
                                if groundtruth.result != Groundtruth.SUCCEEDED:
                                    groundtruth_result = Groundtruth.FAILED
                                    if groundtruth_error_message != "":
                                        groundtruth_error_message += "\n"
                                    groundtruth_error_message += "groundtruth missmatch in subtest %s" % (
                                        test)

                                # aggregate details from every metric_result
                                details = details + mbt[metric][testblock][
                                    test].details

                        if len(metric_result.series
                               ) == 0:  # no matching substest found
                            continue

                        metric_result.groundtruth = groundtruth
                        metric_result.groundtruth.result = groundtruth_result
                        metric_result.groundtruth.error_message = groundtruth_error_message

                        metric_result.name = mbt[metric][testblock][test].name
                        metric_result.unit = mbt[metric][testblock][test].unit
                        metric_result.mode = MetricResult.SPAN_MEAN  # aggregated metrics are always SPAN_MEAN
                        metric_result.status = status
                        # metric_result.series is set above
                        metric_result.data.stamp = stamp
                        metric_result.data.data = metrics_helper.get_mean(
                            metric_result.series)
                        metric_result.min = metrics_helper.get_min(
                            metric_result.series)
                        metric_result.max = metrics_helper.get_max(
                            metric_result.series)
                        metric_result.mean = metric_result.data.data
                        metric_result.std = metrics_helper.get_std(
                            metric_result.series)
                        # metric_result.groundtruth is set above
                        metric_result.details = details
                        mbt_aggregated[metric][testblock][
                            tl_test] = metric_result

        # convert mbt to tbm (test -> testblock -> metric)
        tbm = {}
        for metric in list(mbt_aggregated.keys()):
            for testblock in list(mbt_aggregated[metric].keys()):
                for test in list(mbt_aggregated[metric][testblock].keys()):
                    if test not in list(tbm.keys()):
                        tbm[test] = {}
                    if testblock not in list(tbm[test].keys()):
                        tbm[test][testblock] = {}
                    tbm[test][testblock][metric] = mbt_aggregated[metric][
                        testblock][test]

        # convert tbm to atf_result_aggregated
        atf_result_aggregated = AtfResult()
        atf_result_aggregated.header = atf_result.header
        atf_result_aggregated.name = atf_result.name
        atf_result_aggregated.result = True
        for test in sorted(tbm.keys()):
            test_result = TestResult()
            test_result.name = test
            test_result.result = True

            # find test metadata in atf_result
            for t in atf_result.results:
                if t.name.startswith(test):
                    test_result.test_config = t.test_config
                    test_result.robot = t.robot
                    test_result.env = t.env
                    test_result.testblockset = t.testblockset
                    break

            for testblock in sorted(tbm[test].keys()):
                testblock_result = TestblockResult()
                testblock_result.name = testblock
                testblock_result.result = True
                for metric in sorted(tbm[test][testblock].keys()):
                    metric_result = tbm[test][testblock][metric]
                    testblock_result.results.append(metric_result)
                    # aggregate metric result
                    if metric_result.groundtruth.result != Groundtruth.SUCCEEDED:
                        testblock_result.result = False
                        testblock_result.error_message += "\n     - metric '%s': %s" % (
                            metric_result.name,
                            metric_result.groundtruth.error_message)

                test_result.results.append(testblock_result)
                # aggregate testblock result
                if testblock_result.result == False:
                    test_result.result = False
                    test_result.error_message += "\n   - testblock '%s': %s" % (
                        testblock_result.name, testblock_result.error_message)

            atf_result_aggregated.results.append(test_result)
            # aggregate test result
            if test_result.result == False:
                atf_result_aggregated.result = False
                atf_result_aggregated.error_message += "\n - test '%s' (%s, %s, %s, %s): %s" % (
                    test_result.name, test_result.test_config,
                    test_result.robot, test_result.env,
                    test_result.testblockset, test_result.error_message)

        return atf_result_aggregated

    def print_result(self, atf_result):
        """Print an overall pass/fail banner followed by the summary."""
        if atf_result.result != None and not atf_result.result:
            print("\n")
            print("*************************")
            print("*** SOME TESTS FAILED ***")
            print("*************************")
            print(atf_result.error_message)
            self.print_result_summary(atf_result)
        else:
            print("\n")
            print("********************")
            print("*** ALL TESTS OK ***")
            print("********************")
            self.print_result_summary(atf_result)

    def print_result_details(self, atf_result):
        """Dump the full AtfResult message to stdout."""
        print("\n")
        print("**********************")
        print("*** result details ***")
        print("**********************")
        print(atf_result)

    def print_result_summary(self, atf_result):
        """Print one succeeded/failed line per contained test result."""
        print("\n")
        print("**********************")
        print("*** result summary ***")
        print("**********************")
        for result in atf_result.results:
            if result.result:
                print("test '%s' (%s, %s, %s, %s): succeeded" %
                      (result.name, result.test_config, result.robot,
                       result.env, result.testblockset))
            else:
                print("test '%s' (%s, %s, %s, %s): failed" %
                      (result.name, result.test_config, result.robot,
                       result.env, result.testblockset))
Example #6
0
 def __init__(self, package_name, test_generation_config_file):
     """Parse the ATF configuration of *package_name* from the given test
     generation config file and keep the parser handle for later use."""
     # parse configuration
     self.atf_configuration_parser = ATFConfigurationParser(package_name, test_generation_config_file)
Example #7
0
    def plot_benchmark(self, style, sharey, hide_groundtruth, hide_min_max,
                       filter_tests, filter_testblocks, filter_metrics):
        """Plot the ATF result as a rows x cols grid of marker subplots.

        Args:
            style: which sorting of the result to plot; must be a key of the
                dicts returned by get_sorted_plot_dicts (e.g. 'mbt').
            sharey: forwarded to plt.subplots (share the y axis).
            hide_groundtruth: if True, do not draw groundtruth error bars.
            hide_min_max: if True, do not draw min/max markers for SPAN metrics.
            filter_tests, filter_testblocks, filter_metrics: filter strings
                forwarded to get_sorted_plot_dicts.

        Saves the figure to /tmp/test.png and shows it interactively.
        """

        sorted_atf_results = ATFConfigurationParser().get_sorted_plot_dicts(
            self.atf_result, filter_tests, filter_testblocks, filter_metrics)

        if style not in list(sorted_atf_results.keys()):
            print("ERROR: style '%s' not implemented" % style)
            return
        plot_dict = sorted_atf_results[style]

        # collect all unique row, col and plot labels
        rows = []
        cols = []
        plots = []
        nr_unique_plots = 0
        for row in list(plot_dict.keys()):
            if row not in rows:
                rows.append(row)
            for col in list(plot_dict[row].keys()):
                if col not in cols:
                    cols.append(col)
                for plot in list(plot_dict[row][col].keys()):
                    if plot not in plots:
                        plots.append(plot)

        # sort alphabetically
        rows.sort()
        cols.sort()
        plots.sort()

        print("\nplotting in style '%s' (rows: %d, cols: %d, plots: %d)" %
              (style, len(rows), len(cols), len(plots)))

        fig, axs = plt.subplots(
            len(rows),
            len(cols),
            squeeze=False,
            sharex=True,
            sharey=sharey,
            figsize=(10, 12))  # FIXME calculate width with nr_testblocks

        # always make this a numpy 2D matrix to access rows and cols correclty if len(rows)=1 or len(cols)=1
        #axs = np.atleast_2d(axs)
        #axs = axs.reshape(len(rows), len(cols))
        # --> not needed anymore due to squeeze=False

        # define colormap (one color per plot)
        # FIX: cm._generate_cmap was a private matplotlib helper that has been
        # removed in newer releases; the public get_cmap(name, lut) call
        # yields the same discretised colormap.
        clut = cm.get_cmap('Dark2', len(plots))
        colors = [clut(plots.index(plot)) for plot in plots]

        for row in rows:
            for col in cols:
                # select subplot
                ax = axs[rows.index(row)][cols.index(col)]

                # format x axis
                x = np.arange(len(plots))
                ax.set_xticks(x)
                ax.set_xticklabels(plots)
                ax.set_xlim(-1, len(plots))

                # format y axis
                ax.autoscale(enable=True, axis='y', tight=False)
                ax.margins(
                    y=0.2)  # make it a little bigger than the min/max values

                # only set title for upper row and ylabel for left col
                if rows.index(row) == 0:
                    ax.set_title(col)
                if cols.index(col) == 0:
                    ax.set_ylabel(row, rotation=45, ha="right")

                for plot in plots:
                    try:
                        metric_result = plot_dict[row][col][plot]
                    except KeyError:
                        # this row/col has no data for this plot label
                        continue

                    ax.grid(True)
                    nr_unique_plots += 1

                    # set data to plot
                    data = metric_result.data.data
                    lower = metric_result.groundtruth.data - metric_result.groundtruth.epsilon
                    upper = metric_result.groundtruth.data + metric_result.groundtruth.epsilon

                    # set groundtruth marker (asymmetric error bar around data)
                    if metric_result.groundtruth.available and not hide_groundtruth:
                        yerr = [[data - lower], [upper - data]]
                    else:
                        yerr = [[0], [0]]

                    # set marker transparency (filled or transparent)
                    if metric_result.status == TestblockStatus.SUCCEEDED\
                        and (metric_result.groundtruth.result == Groundtruth.SUCCEEDED or not metric_result.groundtruth.available):
                        markerfacecolor = None  # plot filled marker
                    else:
                        markerfacecolor = 'None'  # plot transparent marker

                    # set color
                    color = colors[plots.index(plot)]

                    # plot data and groundtruth
                    ax.errorbar(plots.index(plot),
                                data,
                                yerr=yerr,
                                fmt='D',
                                markersize=12,
                                markerfacecolor=markerfacecolor,
                                color=color)

                    # plot min and max
                    if not hide_min_max and not metric_result.mode == MetricResult.SNAP:  # only plot min max for SPAN modes
                        ax.plot(plots.index(plot),
                                metric_result.min.data,
                                '^',
                                markersize=8,
                                color=color)
                        ax.plot(plots.index(plot),
                                metric_result.max.data,
                                'v',
                                markersize=8,
                                color=color)

                    # plot a non-visible zero for y-axis scaling
                    ax.plot(plots.index(plot), 0, '')

        fig.autofmt_xdate(rotation=45)
        plt.tight_layout()

        title = "ATF result for %s" % (self.atf_result.name)
        st = fig.suptitle(title, fontsize="large")
        # shift subplots down:
        fig.subplots_adjust(top=0.90)  # move top for title

        fig.set_facecolor("white")

        fig.savefig("/tmp/test.png")
        plt.show()
        return
Example #8
0
class GenerateTests:
    """Generates roslaunch *.test files (cleaning, recording, analysing,
    uploading) for an ATF-enabled package."""

    def __init__(self, arguments):
        """Parse command-line arguments and prepare the output directory.

        arguments: [script_name, package_name, test_generation_config_file,
                    package_src_path, package_bin_path]
        Exits with status 1 when the argument count is wrong.
        """
        self.ns = "/atf/"

        expected_arg_length = 5
        if len(arguments) != expected_arg_length:
            error_message = "ATF configuration Error: invalid number of arguments for generating tests. Number of arguments is %d but should be %d" % (
                len(arguments), expected_arg_length)
            print(error_message)
            sys.exit(1)

        # unpack positional arguments (index 0 is the script name)
        (self.package_name,
         self.test_generation_config_file,
         self.package_src_path,
         self.package_bin_path) = arguments[1:5]

        self.atf_configuration_parser = ATFConfigurationParser(
            self.package_src_path,
            self.test_generation_config_file,
            skip_metrics=True)
        self.tests = self.atf_configuration_parser.get_tests()

        # not populated in this class' visible code paths
        self.test_list = {}

        # derive a filesystem-safe directory name from the config file path
        # ("sub/dir/cfg.yaml" -> "sub_dir_cfg_yaml")
        safe_config_name = self.test_generation_config_file.replace(
            "/", "_").replace(".", "_")
        self.test_generated_path = os.path.join(
            self.package_bin_path, "test_generated", safe_config_name)
        self.create_folders()

    def create_folders(self):
        # delete old test_generated directory and create new one
        if os.path.exists(self.test_generated_path):
            shutil.rmtree(self.test_generated_path)
        shutil.copyfile(self.package_src_path + "/package.xml",
                        self.package_bin_path + "/package.xml")
        os.makedirs(self.test_generated_path)

    def _write_launch_file(self, root, filename):
        # Helper: serialize an XML element tree, pretty-print it, and write it
        # into the generated-tests directory under the given file name.
        xmlstr = minidom.parseString(
            ElementTree.tostring(root)).toprettyxml(indent="    ")
        filepath = os.path.join(self.test_generated_path, filename)
        with open(filepath, "w") as f:
            f.write(xmlstr)

    def generate_tests(self):
        """Generate all roslaunch *.test files for this package.

        Writes into self.test_generated_path:
          - cleaning.test          (removes old test artifacts)
          - recording_<name>.test  (one per configured test)
          - analysing.test         (runs the ATF analyser)
          - uploading.test         (optional dropbox upload of data/results)
        """
        em = lxml.builder.ElementMaker()
        xml_launch = em.launch
        xml_arg = em.arg
        xml_include = em.include
        xml_test = em.test
        xml_node = em.node
        xml_param = em.param
        xml_rosparam = em.rosparam

        # package-wide generation settings (time limits, upload flags, ...)
        generation_config = self.atf_configuration_parser.generation_config

        # Cleaning
        test_clean = xml_launch(
            xml_test({
                'test-name':
                "cleaning",
                'pkg':
                "atf_core",
                'type':
                "cleaner.py",
                'time-limit':
                "60",
                'args':
                self.package_name + " -g " + self.test_generation_config_file +
                " -e"
            }))
        self._write_launch_file(test_clean, "cleaning.test")

        # Recording: one launch file per configured test
        for test in self.tests:
            test_record = xml_launch(
                xml_arg(name="execute_as_test", default="true"),
                xml_param(name=self.ns + "package_name",
                          value=self.package_name),
                xml_param(name=self.ns + "test_generation_config_file",
                          value=self.test_generation_config_file),
                xml_param(name=self.ns + "test_name", value=test.name),
            )

            # robot params
            self.append_parameters(test_record, xml_rosparam,
                                   test.robot_config, "additional_parameters")
            # env params
            self.append_parameters(test_record, xml_rosparam, test.env_config,
                                   "additional_parameters")
            # test params
            self.append_parameters(test_record, xml_rosparam, test.test_config,
                                   "additional_parameters")

            if "app_launch_file" in test.generation_config:
                incl = xml_include(file="$(find " + self.package_name + ")/" +
                                   test.generation_config["app_launch_file"])
                # robot args
                self.append_arguments(incl, xml_arg, test.robot_config,
                                      "additional_arguments")
                # env args
                self.append_arguments(incl, xml_arg, test.env_config,
                                      "additional_arguments")
                # test args
                self.append_arguments(incl, xml_arg, test.test_config,
                                      "additional_arguments")

                test_record.append(incl)

            # the application under test
            test_record.append(
                xml_node(pkg=self.package_name,
                         type=test.generation_config['app_executable'],
                         name="$(anon atf_application)",
                         required="true",
                         output="screen"))
            # recorder state machine: executed as a rostest ...
            test_record.append(
                xml_test({
                    'if':
                    '$(arg execute_as_test)',
                    'pkg':
                    'atf_core',
                    'type':
                    'sm_test.py',
                    'test-name':
                    "atf_recording_" + test.name,
                    'time-limit':
                    str(test.generation_config["time_limit_recording"]),
                    'required':
                    "true",
                    'args':
                    'execute_as_test'
                }))
            # ... or as a plain node for interactive runs
            test_record.append(
                xml_node({
                    'unless': '$(arg execute_as_test)',
                    'pkg': 'atf_core',
                    'type': 'sm_test.py',
                    'name': "atf_recording_" + test.name,
                    'required': "true",
                    'output': 'screen'
                }))

            self._write_launch_file(test_record,
                                    "recording_" + test.name + ".test")

        # Analysing
        test_analyse = xml_launch(
            xml_test({
                'test-name':
                "analysing",
                'pkg':
                "atf_core",
                'type':
                "analyser.py",
                'time-limit':
                str(generation_config["time_limit_analysing"]),
                'required':
                "true",
                'args':
                self.package_name + " -g " + self.test_generation_config_file +
                " -e"
            }))
        self._write_launch_file(test_analyse, "analysing.test")

        # Uploading
        # FIX: read the upload flags from the package-wide generation config
        # (as the Analysing section already does) instead of the leaked
        # recording-loop variable `test`, which raised NameError when
        # self.tests was empty and otherwise depended on whichever test
        # happened to come last.
        test_upload = xml_launch()
        if generation_config["upload_data"]:
            test_upload.append(
                xml_test({
                    'test-name':
                    "uploading_data",
                    'pkg':
                    "atf_core",
                    'type':
                    "test_dropbox_uploader.py",
                    'time-limit':
                    str(generation_config["time_limit_uploading"]),
                    'args':
                    "-f " + os.path.join(self.package_src_path,
                                         "atf/.dropbox_uploader_config") +
                    " upload " + generation_config["bagfile_output"] + " " +
                    os.path.join(self.package_name, "data")
                }))

        if generation_config["upload_result"]:
            test_upload.append(
                xml_test({
                    'test-name':
                    "uploading_results",
                    'pkg':
                    "atf_core",
                    'type':
                    "test_dropbox_uploader.py",
                    'time-limit':
                    str(generation_config["time_limit_uploading"]),
                    'args':
                    "-f " + os.path.join(self.package_src_path,
                                         "atf/.dropbox_uploader_config") +
                    " upload " + generation_config["txt_output"] + " " +
                    os.path.join(self.package_name, "results")
                }))

        self._write_launch_file(test_upload, "uploading.test")

    def append_parameters(self, xml_parent, xml_type, dictionary, key):
        if (dictionary != None) and (key in dictionary):
            elements = dictionary[key]
            if len(elements) > 0:
                for element in elements:
                    for name, value in list(element.items()):
                        xml_parent.append(
                            xml_type(str(value),
                                     param=str(name),
                                     subst_value="True"))

    def append_arguments(self, xml_parent, xml_type, dictionary, key):
        if (dictionary != None) and (key in dictionary):
            elements = dictionary[key]
            if len(elements) > 0:
                for element in elements:
                    for name, value in list(element.items()):
                        xml_parent.append(
                            xml_type(name=str(name), value=str(value)))

    @staticmethod
    def remove_pkgname(text, pkgname):
        return text[len(pkgname):]

    def get_path(self, path):
        """Resolve "pkg_name/relative/path" to an absolute filesystem path.

        Treats the first path segment as a ROS package name and replaces it
        with the package's install location. If the segment is not a known
        ROS package, returns `path` unchanged.
        """
        pkg_name = path.split("/")[0]
        try:
            # single lookup; the original code evaluated the full expression
            # in the try block, discarded the result, and recomputed it in
            # the else branch (two RosPack() lookups for one answer)
            pkg_root = rospkg.RosPack().get_path(pkg_name)
        except rospkg.common.ResourceNotFound:
            # not a ROS package: assume path is already usable as-is
            return path
        return pkg_root + self.remove_pkgname(path, pkg_name)