Example #1
def test_get_runmode_from_xmlfile_invalid_runmode_value():
    '''test_get_runmode_from_xmlfile_invalid_runmode_value'''
    tree = ET.parse(
        os.path.join(os.path.split(__file__)[0], "common_exe_utils.xml"))
    # get root element
    root = tree.getroot()
    # getting steps
    steps = root.find("Steps")
    sstep = steps[5]
    status = common_execution_utils.get_runmode_from_xmlfile(element=sstep)
    assert status[0] is None
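These tests read step definitions from common_exe_utils.xml, which is not shown here. As a hedged illustration only, the steps presumably carry a runmode child whose attributes get_runmode_from_xmlfile parses into a (runmode, value, runmode_timer) tuple, returning None for an unrecognized runmode; the attribute names below are assumptions, not taken from the actual fixture:

# Hypothetical sketch of the fixture XML; attribute names are assumed.
import xml.etree.ElementTree as ET

sample_steps = ET.fromstring(
    '<Steps>'
    '<step><runmode type="RMT" value="4" runmode_timer="5"/></step>'
    '<step><runmode type="bogus" value="4"/></step>'
    '</Steps>')
print(sample_steps[0].find("runmode").get("type"))  # 'RMT' -> a valid runmode
print(sample_steps[1].find("runmode").get("type"))  # 'bogus' -> parser expected to yield None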
Example #2
def test_get_runmode_from_xmlfile_invalid_runmode_timer():
    '''test_get_runmode_from_xmlfile_invalid_runmode_timer'''
    tree = ET.parse(
        os.path.join(os.path.split(__file__)[0], "common_exe_utils.xml"))
    # get root element
    root = tree.getroot()
    # getting steps
    steps = root.find("Steps")
    sstep = steps[2]
    result = common_execution_utils.get_runmode_from_xmlfile(element=sstep)
    assert result[0] == 'RMT'
    assert result[1] == 4
    assert result[2] is None
Example #3
    def execute_step(self, current_step_number, go_to_step_number):
        """
        Execute the step at index current_step_number from the step_list.
        This function is called either from the while loop (normal execution)
        in execute_steps() or from a for loop (invoked execution).
        """
        self.current_step = self.step_list[current_step_number]
        #store loop iter number in data repository
        loop_iter_number = self.current_step.get("loop_iter_number", None)
        Utils.data_Utils.update_datarepository(
            {"loop_iter_number": loop_iter_number})

        #store loop id in data repository
        loop_id = self.current_step.get("loopid", None)
        Utils.data_Utils.update_datarepository({"loopid": loop_id})
        # Incrementing current_step_number for printing purposes.
        self.current_step_number = current_step_number + 1

        self.go_to_step_number = go_to_step_number
        # execute steps
        # Decide whether or not to execute keyword
        # First decide if this step should be executed in this iteration
        if not self.go_to_step_number or self.go_to_step_number == str(
                self.current_step_number):
            # get Exectype information
            self.run_current_step, self.current_triggered_action = \
                exec_type_driver.main(self.current_step, skip_invoked=self.skip_invoked)
            if not self.run_current_step:
                return self._report_step_as_not_run()
            self.step_status = self._execute_current_step()
        else:
            # Skip because of goto
            return self._skip_because_of_goto()
        runmode, value, runmode_timer = \
            common_execution_utils.get_runmode_from_xmlfile(self.current_step)
        retry_type, retry_cond, retry_cond_value, retry_value, retry_interval = \
            common_execution_utils.get_retry_from_xmlfile(self.current_step)
        if runmode is not None:
            return self._execute_runmode_step(runmode_timer, runmode,
                                              self.step_status, value)

        elif retry_type is not None:
            return self._execute_retry_type_step(retry_type, retry_cond,
                                                 retry_cond_value,
                                                 retry_interval, retry_value)
        else:
            return self._execute_step_otherwise(self.step_status)
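The method above dispatches each step through a fixed precedence: the goto filter, then the Exectype check, then runmode handling, then retry handling, then the default path. The sketch below is an illustrative restatement of that ordering only; every name in it is a stand-in, not part of the Warrior API:

# Illustrative-only restatement of execute_step's dispatch precedence.
def dispatch_step(goto_step, step_number, run_step, runmode, retry_type):
    if goto_step and goto_step != str(step_number):
        return "SKIPPED_BY_GOTO"   # goto filtering is checked first
    if not run_step:
        return "NOT_RUN"           # Exectype decided the step should not run
    if runmode is not None:
        return "RUNMODE_PATH"      # RUF/RUP/RMT repetition handling
    if retry_type is not None:
        return "RETRY_PATH"        # conditional retry handling
    return "DEFAULT_PATH"

assert dispatch_step("3", 2, True, None, None) == "SKIPPED_BY_GOTO"
assert dispatch_step(False, 2, True, "ruf", None) == "RUNMODE_PATH"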
Example #4
def test_get_runmode_from_xmlfile():
    '''test_get_runmode_from_xmlfile'''
    tree = ET.parse(
        os.path.join(os.path.split(__file__)[0], "common_exe_utils.xml"))
    # get root element
    root = tree.getroot()
    # getting steps
    steps = root.find("Steps")
    sstep = steps[0]
    result = common_execution_utils.get_runmode_from_xmlfile(element=sstep)
    assert result[0] == 'RMT'
    assert result[1] == 4
    assert result[2] == 5.0
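Note that result[1] is the integer 4 while result[2] compares equal to the float 5.0; the assertions suggest (an inference from this test, not documented behavior) that the parser converts the runmode value and timer from their XML attribute strings, with the timer coerced to a float:

# Minimal sketch of the presumed attribute conversion; names are assumed.
raw_value, raw_timer = "4", "5"   # XML attribute values are always strings
value = int(raw_value)
runmode_timer = float(raw_timer) if raw_timer is not None else None
assert (value, runmode_timer) == (4, 5.0)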
Example #5
    def _compute_runmode_goto_operations(self, step, step_status, goto_stepnum,
                                         step_num):
        """
        compute_runmode_goto_operations
        """
        runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
            step)

        if runmode is not None:
            # if runmode is 'ruf' & step_status is False, skip the repeated
            # execution of same TC step and move to next actual step
            if runmode == "ruf" and step_status is False:
                goto_stepnum = str(value)
            # if runmode is 'rup' & step_status is True, skip the repeated
            # execution of same TC step and move to next actual step
            elif runmode == "rup" and step_status is True:
                goto_stepnum = str(value)
            else:
                if any([step_status is False,
                        str(step_status).upper() == "ERROR",
                        str(step_status).upper() == "EXCEPTION"]):
                    goto_stepnum = onerror_driver.main(
                        step, self.default_error_action,
                        self.default_error_value)
                    # if (goto_stepnum == 'ABORT'): break
        else:
            if any([step_status is False,
                    str(step_status).upper() == "ERROR",
                    str(step_status).upper() == "EXCEPTION"]):
                goto_stepnum = onerror_driver.main(
                    step, self.default_error_action,
                    self.default_error_value)
                if str(goto_stepnum).upper() == 'ABORT':
                    pass
                # when 'onError:goto' value is less than the current step num,
                # change the next iteration point to goto value
                elif goto_stepnum and int(goto_stepnum) < step_num:
                    step_num = int(goto_stepnum) - 1
                    goto_stepnum = False
        return goto_stepnum, step_num
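The RUF ('run until fail') and RUP ('run until pass') branches above answer the same question: should the remaining repeated copies of this step be skipped by jumping past the repetition block? A toy restatement of just that rule, where value stands for the step number following the repeated block:

# Toy restatement of the RUF/RUP early-exit rule; not the framework's code.
def runmode_goto(runmode, step_status, value):
    if runmode == "ruf" and step_status is False:
        return str(value)   # a failure ends a run-until-fail loop early
    if runmode == "rup" and step_status is True:
        return str(value)   # a pass ends a run-until-pass loop early
    return None             # otherwise keep executing the repeated copies

assert runmode_goto("ruf", False, 7) == "7"
assert runmode_goto("rup", True, 7) == "7"
assert runmode_goto("ruf", True, 7) is None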
Example #6
def execute_sequential_testsuites(testsuite_list, project_repository,
                                  data_repository, auto_defects):
    """ Executes suites in a project sequentially """

    suite_cntr = 0
    goto_testsuite = False
    ts_status_list = []
    ts_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}

    project_error_action = project_repository['def_on_error_action']
    project_filepath = project_repository['project_filepath']
    project_dir = os.path.dirname(project_filepath)
    wp_results_execdir = project_repository['wp_results_execdir']
    wp_logs_execdir = project_repository['wp_logs_execdir']
    project_error_value = project_repository['def_on_error_value']
    data_repository['wt_ts_timestamp'] = None

    jiraproj = data_repository['jiraproj']
    pj_junit_object = data_repository['wt_junit_object']

    while suite_cntr < len(testsuite_list):
        testsuite = testsuite_list[suite_cntr]
        suite_cntr += 1

        testsuite_rel_path = testsuite_utils.get_path_from_xmlfile(testsuite)
        if testsuite_rel_path is not None:
            testsuite_path = Utils.file_Utils.getAbsPath(testsuite_rel_path,
                                                         project_dir)
        else:
            testsuite_path = str(testsuite_rel_path)
        print_debug("\n")
        print_debug("<<<< Starting execution of Test suite: {0}>>>>".format(testsuite_path))
        action, testsuite_status = exec_type_driver.main(testsuite)
        testsuite_impact = Utils.testcase_Utils.get_impact_from_xmlfile(testsuite)
        testsuite_name = Utils.file_Utils.getFileName(testsuite_path)
        testsuite_nameonly = Utils.file_Utils.getNameOnly(testsuite_name)
        ts_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(testsuite,
                                                                                    'onError',
                                                                                    'action')
        ts_onError_action = ts_onError_action if ts_onError_action else project_error_action
        if testsuite.find("runmode") is not None and \
           testsuite.find("runmode").get("attempt") is not None:
            # condition to print the start of runmode execution
            if testsuite.find("runmode").get("attempt") == 1:
                print_info("\n----------------- Start of Testsuite Runmode Execution"
                           " -----------------\n")
            print_info("RUNMODE ATTEMPT: {0}"
                       .format(testsuite.find("runmode").get("attempt")))
        if Utils.file_Utils.fileExists(testsuite_path) or action is False:
            if not goto_testsuite and action is True:

                testsuite_result = testsuite_driver.main(testsuite_path,
                                                         data_repository=data_repository,
                                                         from_project=True,
                                                         auto_defects=auto_defects,
                                                         jiraproj=jiraproj,
                                                         res_startdir=wp_results_execdir,
                                                         logs_startdir=wp_logs_execdir,
                                                         ts_onError_action=ts_onError_action)
                testsuite_status = testsuite_result[0]

            elif goto_testsuite and goto_testsuite == str(suite_cntr)\
                    and action is True:
                testsuite_result = testsuite_driver.main(testsuite_path,
                                                         data_repository=data_repository,
                                                         from_project=True,
                                                         auto_defects=auto_defects,
                                                         jiraproj=jiraproj,
                                                         res_startdir=wp_results_execdir,
                                                         logs_startdir=wp_logs_execdir,
                                                         ts_onError_action=ts_onError_action)
                goto_testsuite = False
                testsuite_status = testsuite_result[0]

            else:
                msg = print_info('skipped testsuite: {0} '.format(testsuite_path))
                tmp_timestamp = str(Utils.datetime_utils.get_current_timestamp())
                time.sleep(2)
                pj_junit_object.create_testsuite(
                    location=os.path.dirname(testsuite_path),
                    name=testsuite_nameonly, timestamp=tmp_timestamp,
                    **pj_junit_object.init_arg())
                pj_junit_object.update_attr("status", "SKIPPED", "ts", tmp_timestamp)
                pj_junit_object.update_attr("skipped", "1", "pj", tmp_timestamp)
                pj_junit_object.update_count("suites", "1", "pj", tmp_timestamp)
                data_repository['testsuite_{}_result'.format(suite_cntr)] = "SKIP"
                pj_junit_object.update_attr("impact", impact_dict.get(testsuite_impact.upper()),
                                            "ts", tmp_timestamp)
                pj_junit_object.update_attr("onerror", "N/A", "ts", tmp_timestamp)
                pj_junit_object.output_junit(wp_results_execdir, print_summary=False)
                continue

        else:

            msg = print_error("Test suite does not exist in "
                              "provided path: {0}".format(testsuite_path))
            testsuite_status = 'ERROR'
            if goto_testsuite and goto_testsuite == str(suite_cntr):
                goto_testsuite = False
            elif goto_testsuite and goto_testsuite != str(suite_cntr):
                data_repository['testsuite_{}_result'.format(suite_cntr)] = "ERROR"
                continue

        goto_testsuite_num = onerror_driver.main(testsuite,
                                                 project_error_action,
                                                 project_error_value)
        if goto_testsuite_num is False:
            onerror = "Next"
        elif goto_testsuite_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_testsuite_num)
        pj_junit_object.update_attr("impact", impact_dict.
                                    get(testsuite_impact.upper()), "ts",
                                    data_repository['wt_ts_timestamp'])
        pj_junit_object.update_attr("onerror", onerror, "ts",
                                    data_repository['wt_ts_timestamp'])

        string_status = {"TRUE": "PASS", "FALSE": "FAIL", "ERROR": "ERROR",
                         "SKIP": "SKIP", "RAN": "RAN"}

        if str(testsuite_status).upper() in list(string_status.keys()):
            data_repository['testsuite_{}_result'.format(suite_cntr)] = \
             string_status[str(testsuite_status).upper()]
        else:
            print_error("unexpected testsuite status, default to exception")
            data_repository['testsuite_%d_result' % suite_cntr] = "ERROR"

        ts_status_list, ts_impact_list = \
            common_execution_utils.compute_status(testsuite, ts_status_list,
                                                  ts_impact_list,
                                                  testsuite_status, testsuite_impact)
        if testsuite_impact.upper() == 'IMPACT':
            msg = "Status of the executed test suite impacts Project result"
        elif testsuite_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test suite does not impact project result"
        print_debug(msg)

        runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(testsuite)
        retry_type, retry_cond, retry_cond_value, retry_value,\
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testsuite)
        if runmode is not None:
            # if runmode is 'ruf' & step_status is False, skip the repeated
            # execution of same TC step and move to next actual step
            if not project_error_value and runmode.upper() == "RUF" and\
                    testsuite_status is False:
                goto_testsuite = str(value)
            # if runmode is 'rup' & step_status is True, skip the repeated
            # execution of same TC step and move to next actual step
            elif runmode.upper() == "RUP" and testsuite_status is True:
                goto_testsuite = str(value)
        elif retry_type is not None:
            if testsuite.find("retry") is not None and\
              testsuite.find("retry").get("attempt") is not None:
                print_info("RETRY ATTEMPT: {0}".format(testsuite.find("retry").get("attempt")))
            if retry_type.upper() == 'IF':
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(retry_interval))
                        pNote("The given condition '{0}' matches the expected"
                              "value '{1}'".format(data_repository[retry_cond], retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning("The condition value '{0}' does not match with the expected "
                                      "value '{1}'".format(data_repository[retry_cond],
                                                           retry_cond_value))
                except KeyError:
                    print_warning("The given condition '{0}' do not exists in "
                                  "the data repository".format(retry_cond_value))

                    condition_met = False
                if condition_met is False:
                    goto_testsuite = str(retry_value)
            elif retry_type.upper() == 'IF NOT':
                try:
                    if data_repository[retry_cond] != retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before "
                              "retrying".format(retry_interval))
                        pNote("The condition value '{0}' does not match "
                              "the expected value '{1}'".format(data_repository[retry_cond],
                                                                retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                except KeyError:
                    condition_met = False
                    print_warning("The given condition '{0}' does not exist "
                                  "in the data repository".format(retry_cond))
                if condition_met is False:
                    pNote("The given condition '{0}' matched the "
                          "value '{1}'".format(retry_cond, retry_cond_value))
                    goto_testsuite = str(retry_value)
        else:
            if testsuite_status is False or testsuite_status == "ERROR" or\
                    testsuite_status == "EXCEPTION":
                goto_testsuite = onerror_driver.main(testsuite, project_error_action,
                                                     project_error_value)
            if goto_testsuite in ['ABORT', 'ABORT_AS_ERROR']:
                break
            # when 'onError:goto' value is less than the current ts num,
            # change the next iteration point to goto value
            elif goto_testsuite and int(goto_testsuite) < suite_cntr:
                suite_cntr = int(goto_testsuite)-1
                goto_testsuite = False
    # print the end of runmode execution as the steps skip when the condition
    # is met for RUF/RUP or when all the attempts finish
    if testsuite.find("runmode") is not None and \
       testsuite.find("runmode").get("attempt") is not None:
        if testsuite.find("runmode").get("attempt") == \
           testsuite.find("runmode").get("runmode_val"):
            print_info("\n----------------- End of Testsuite Runmode Execution"
                       " -----------------\n")
    project_status = Utils.testcase_Utils.compute_status_using_impact(ts_status_list,
                                                                      ts_impact_list)

    return project_status
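One non-obvious detail in the loop above is the backwards-goto arithmetic: suite_cntr has already been incremented for the current suite, so jumping back to suite N sets the counter to N - 1 and lets the loop's own increment land on N. A self-contained illustration of that counter math (the suite list and failure condition are stand-ins):

# Standalone illustration of the 'goto a previous suite' counter arithmetic.
suites = ["s1", "s2", "s3", "s4"]
suite_cntr, executed, goto_done = 0, [], False
while suite_cntr < len(suites):
    suite = suites[suite_cntr]
    suite_cntr += 1
    executed.append(suite)
    if suite == "s3" and not goto_done:   # pretend s3 fails once with onError goto:2
        goto_done = True
        suite_cntr = 2 - 1                # goto value 2, minus one for the increment
print(executed)  # ['s1', 's2', 's3', 's2', 's3', 's4']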
def execute_testsuite(testsuite_filepath, data_repository, from_project,
                      auto_defects, jiraproj, res_startdir, logs_startdir,
                      ts_onError_action, queue, ts_parallel):
    """Executes the testsuite (provided as a xml file)
            - Takes a testsuite xml file as input and
            sends each testcase to Basedriver for execution.
            - Computes the testsuite status based on the
            testcase_status and the impact value of the testcase
            - Handles testcase failures as per the default/specific onError action/value
            - Calls the function to report the testsuite status

    Arguments:
    1. testsuite_filepath = (string) the full path of the testsuite xml file
    2. data_repository    = (dict) the Warrior data repository
    3. from_project       = (boolean) True when the suite is executed as part of a project
    4. auto_defects       = (boolean) True to auto-create defects for failures
    5. jiraproj           = (string) name of the jira project used for defect reporting
    6. res_startdir       = (string) directory under which the suite results directory is created
    7. logs_startdir      = (string) directory under which the suite logs directory is created
    8. ts_onError_action  = (string) default onError action for the testsuite
    9. queue              = multiprocessing queue used for parallel suite execution
    10. ts_parallel       = (boolean) True when the suite itself runs in parallel
    """
    testsuite_status_list = []
    suite_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution starts".format(suite_start_time))
    initialize_suite_fields(data_repository)
    suite_repository = get_suite_details(testsuite_filepath, data_repository,
                                         from_project, res_startdir,
                                         logs_startdir)

    if data_repository.get("random_tc_execution",
                           False) or suite_repository['suite_random_exec']:
        print_debug("Executing test cases in suite in random order")
        randomize = True
    else:
        randomize = False

    testcase_list = common_execution_utils.get_step_list(testsuite_filepath,
                                                         "Testcases",
                                                         "Testcase",
                                                         randomize=randomize)
    for testcase in testcase_list:
        jiraids = testsuite_utils.get_jiraids_from_xmlfile(testcase)
        if not jiraids:
            continue
        jira = Jira(jiraproj)
        skip = False
        for jiraid in jiraids:
            status = jira.get_jira_issue_status(jiraid)
            if status is False:
                print_warning("Cannot get status of jira issue {},"
                              " This issue is not considered for testcase"
                              " skip validation".format(jiraid))
                continue
            if (status not in ["Resolved", "Closed"]):
                skip = True
                break
        if skip:
            tc_path = testsuite_utils.get_path_from_xmlfile(testcase)
            print_info("Associated jira ids : {} are not Resolved or Closed,"
                       " Skipping testcase >>>> {}".format(jiraids, tc_path))
            exec_node = testcase.find("Execute")
            if exec_node is None:
                exec_node = ElementTree.SubElement(testcase, "Execute")
            exec_node.set("ExecType", 'no')

    execution_type = suite_repository['suite_exectype'].upper()
    no_of_tests = str(len(testcase_list))

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    suite_execution_dir = suite_repository['suite_execution_dir']

    data_repository['wt_suite_execution_dir'] = suite_execution_dir
    data_repository['wt_results_execdir'] = suite_repository[
        'ws_results_execdir']
    data_repository['wt_logs_execdir'] = suite_repository['ws_logs_execdir']
    data_repository['wt_suite_name'] = suite_name

    suite_timestamp = testsuite_utils.get_suite_timestamp()
    data_repository['wt_ts_timestamp'] = suite_timestamp
    suite_repository['wt_ts_timestamp'] = suite_timestamp

    data_repository["suite_exectype"] = "iterative" if execution_type == "ITERATIVE_SEQUENTIAL" or \
    execution_type == "ITERATIVE_PARALLEL" else execution_type

    ts_junit_display = "True"
    pj_junit_display = "False"
    if "wt_junit_object" in data_repository:
        ts_junit_object = data_repository["wt_junit_object"]

    else:
        ts_junit_object = junit_class.Junit(
            filename=suite_name,
            timestamp=suite_timestamp,
            name="customProject_independant_testcase_execution",
            display=pj_junit_display)

        if "jobid" in data_repository:
            ts_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        data_repository["wt_junit_object"] = ts_junit_object
    suite_repository["wt_junit_object"] = ts_junit_object
    ts_junit_object.create_testsuite(
        location=os.path.dirname(testsuite_filepath),
        name=suite_name,
        timestamp=suite_timestamp,
        suite_location=suite_repository['testsuite_filepath'],
        title=suite_repository['suite_title'],
        display=ts_junit_display,
        **ts_junit_object.init_arg())

    # Adding resultsdir as attributes to testsuite_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    ts_junit_object.update_attr("resultsdir",
                                suite_repository['suite_execution_dir'], "ts",
                                suite_timestamp)
    ts_junit_object.add_property("resultsdir",
                                 suite_repository['suite_execution_dir'], "ts",
                                 suite_timestamp)

    if "data_file" in suite_repository:
        data_repository['suite_data_file'] = suite_repository['data_file']

    # jiraproj name
    data_repository['jiraproj'] = jiraproj

    # if not from_project:
    testsuite_utils.pSuite_root(junit_resultfile)

    testsuite_utils.pSuite_testsuite(junit_resultfile,
                                     suite_name,
                                     errors='0',
                                     skipped='0',
                                     tests=no_of_tests,
                                     failures='0',
                                     time='0',
                                     timestamp=suite_timestamp)
    testsuite_utils.pSuite_property(junit_resultfile, 'title',
                                    suite_repository['suite_title'])
    ts_junit_object.add_property('location', testsuite_filepath, "ts",
                                 suite_timestamp)
    if "jobid" in data_repository:
        testsuite_utils.pSuite_property(junit_resultfile, 'resultlocation',
                                        data_repository["jobid"])
        # del data_repository["jobid"]

    print_suite_details_to_console(suite_repository, testsuite_filepath,
                                   junit_resultfile)
    # Prints the path of result summary file at the beginning of execution
    if data_repository['war_file_type'] == "Suite":
        filename = os.path.basename(testsuite_filepath)
        html_filepath = os.path.join(
            suite_repository['suite_execution_dir'],
            Utils.file_Utils.getNameOnly(filename)) + '.html'
        print_info("HTML result file: {0}".format(html_filepath))
    if not from_project:
        data_repository["war_parallel"] = False

    root = Utils.xml_Utils.getRoot(testsuite_filepath)
    suite_global_xml = root.find('Details')
    runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
        suite_global_xml)

    #get testwrapperfile details
    testwrapperfile, j_data_type, j_runtype, setup_on_error_action = \
        get_testwrapper_file_details(testsuite_filepath, data_repository)
    setup_tc_status, cleanup_tc_status = True, True
    #execute setup steps defined in testwrapper file if testwrapperfile is present
    if testwrapperfile:
        print_info(
            "*****************TESTWRAPPER SETUP EXECUTION START*********************"
        )
        data_repository['suite_testwrapper_file'] = testwrapperfile
        data_repository['wt_data_type'] = j_data_type
        Utils.config_Utils.set_datarepository(data_repository)
        setup_tc_status, data_repository = testcase_driver.execute_testcase(testwrapperfile,\
                                            data_repository, tc_context='POSITIVE',\
                                            runtype=j_runtype,\
                                            tc_parallel=None, queue=None,\
                                            auto_defects=auto_defects, suite=None,\
                                            jiraproj=None, tc_onError_action='ABORT_AS_ERROR',\
                                            iter_ts_sys=None, steps_tag='Setup')
        print_info(
            "*****************TESTWRAPPER SETUP EXECUTION END**********************"
        )
    if setup_on_error_action == 'next' or \
            (setup_on_error_action == 'abort' and setup_tc_status is True):
        if execution_type.upper() == 'PARALLEL_TESTCASES':
            ts_junit_object.remove_html_obj()
            data_repository["war_parallel"] = True
            Utils.config_Utils.data_repository = data_repository
            print_debug("Executing testcases in parallel")
            test_suite_status = parallel_testcase_driver.main(
                testcase_list,
                suite_repository,
                data_repository,
                from_project,
                tc_parallel=True,
                auto_defects=auto_defects)

        elif execution_type.upper() == 'SEQUENTIAL_TESTCASES':
            if runmode is None:
                print_debug("Executing testcases sequentially")
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)

            elif runmode.upper() == "RUF":
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list,
                        suite_repository,
                        data_repository,
                        from_project,
                        auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "FALSE" or\
                       str(test_suite_status).upper() == "ERROR":
                        break

            elif runmode.upper() == "RUP":
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list,
                        suite_repository,
                        data_repository,
                        from_project,
                        auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "TRUE":
                        break

            elif runmode.upper() == "RMT":
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    # each attempt's status is collected in testsuite_status_list
                    # and aggregated below via compute_runmode_status
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list,
                        suite_repository,
                        data_repository,
                        from_project,
                        auto_defects=auto_defects)
                    testsuite_status_list.append(test_suite_status)
        # The legacy execution_type-based branches below are retained
        # to preserve backward compatibility with older suites
        elif execution_type.upper() == 'RUN_UNTIL_FAIL' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "FALSE" or\
                   str(test_suite_status).upper() == "ERROR":
                    break

        elif execution_type.upper() == 'RUN_UNTIL_PASS' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "TRUE":
                    break

        elif execution_type.upper() == 'RUN_MULTIPLE' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Number_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                # only the status of the final attempt is retained here
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)
        elif execution_type.upper() == "ITERATIVE_SEQUENTIAL":
            # if execution type is iterative sequential call WarriorCore.Classes.iterative_testsuite
            # class and execute the testcases in iterative sequential fashion on the systems
            print_debug("Iterative sequential suite")

            iter_seq_ts_obj = IterativeTestsuite(testcase_list,
                                                 suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)
            test_suite_status = iter_seq_ts_obj.execute_iterative_sequential()

        elif execution_type.upper() == "ITERATIVE_PARALLEL":
            # if execution type is iterative parallel call WarriorCore.Classes.iterative_testsuite
            # class and execute the testcases in iterative parallel fashion on the systems
            ts_junit_object.remove_html_obj()
            print_debug("Iterative parallel suite")
            data_repository["war_parallel"] = True
            Utils.config_Utils.data_repository = data_repository

            iter_seq_ts_obj = IterativeTestsuite(testcase_list,
                                                 suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)

            test_suite_status = iter_seq_ts_obj.execute_iterative_parallel()

        else:
            print_error("unexpected suite_type received...aborting execution")
            test_suite_status = False

        if runmode is not None:
            test_suite_status = common_execution_utils.compute_runmode_status(
                testsuite_status_list, runmode, suite_global_xml)

    else:
        print_error("Test cases in suite are not executed as setup failed to execute,"\
                    "setup status : {0}".format(setup_tc_status))
        print_error("Steps in cleanup will be executed on besteffort")
        test_suite_status = "ERROR"

    #Execute Debug section from suite tw file upon test suite failure
    if test_suite_status is not True:
        if testwrapperfile and Utils.xml_Utils.nodeExists(
                testwrapperfile, "Debug"):
            print_info(
                "*****************SUITE TESTWRAPPER DEBUG EXECUTION START"
                "*********************")
            data_repository['wt_data_type'] = j_data_type
            Utils.config_Utils.set_datarepository(data_repository)
            debug_tc_status, data_repository = testcase_driver.execute_testcase(testwrapperfile,\
                                                         data_repository, tc_context='POSITIVE',\
                                                         runtype=j_runtype,\
                                                         tc_parallel=None, queue=None,\
                                                         auto_defects=auto_defects, suite=None,\
                                                         jiraproj=None, tc_onError_action=None,\
                                                         iter_ts_sys=None, steps_tag='Debug')
            print_info("*****************SUITE TESTWRAPPER DEBUG EXECUTION END"
                       "*********************")

    #execute cleanup steps defined in testwrapper file if testwrapperfile is present
    if testwrapperfile:
        print_info(
            "*****************TESTWRAPPER CLEANUP EXECUTION START*********************"
        )
        data_repository['wt_data_type'] = j_data_type
        Utils.config_Utils.set_datarepository(data_repository)
        cleanup_tc_status, data_repository = testcase_driver.execute_testcase(testwrapperfile,\
                                                          data_repository, tc_context='POSITIVE',\
                                                          runtype=j_runtype,\
                                                          tc_parallel=None, queue=None,\
                                                          auto_defects=auto_defects, suite=None,\
                                                          jiraproj=None, tc_onError_action=None,\
                                                          iter_ts_sys=None, steps_tag='Cleanup')
        print_debug(
            "*****************TESTWRAPPER CLEANUP EXECUTION END*********************"
        )
    print_debug("\n")
    suite_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution completed".format(suite_end_time))

    # set status to WARN if only cleanup fails
    if test_suite_status is True and cleanup_tc_status is not True:
        print_warning("setting test suite status to WARN as cleanup failed")
        test_suite_status = 'WARN'

    suite_duration = Utils.datetime_utils.get_time_delta(suite_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(suite_duration)
    print_info("Testsuite duration= {0}".format(hms))
    testsuite_utils.update_suite_duration(str(suite_duration))
    if test_suite_status is False and ts_onError_action and\
            ts_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info(
            "Testsuite status will be marked as ERROR as onError action is set "
            "to 'abort_as_error'")
        test_suite_status = "ERROR"
    testsuite_utils.report_testsuite_result(suite_repository,
                                            test_suite_status)

    ts_junit_object = data_repository['wt_junit_object']
    ts_junit_object.update_count(test_suite_status, "1", "pj")
    ts_junit_object.update_count("suites", "1", "pj", "not appicable")
    ts_junit_object.update_attr("status", str(test_suite_status), "ts",
                                suite_timestamp)
    ts_junit_object.update_attr("time", str(suite_duration), "ts",
                                suite_timestamp)

    if not from_project:
        ts_junit_object.update_attr("status", str(test_suite_status), "pj",
                                    "not applicable")
        ts_junit_object.update_attr("time", str(suite_duration), "pj",
                                    "not appicable")
        ts_junit_object.output_junit(data_repository['wt_results_execdir'])

        # Save JUnit/HTML results of the Suite in MongoDB server
        if data_repository.get("db_obj") is not False:
            ts_junit_xml = (data_repository['wt_results_execdir'] + os.sep +
                            ts_junit_object.filename + "_junit.xml")
            data_repository.get("db_obj").add_html_result_to_mongodb(
                ts_junit_xml)
    else:
        # Do not output JUnit result file for parallel suite execution
        if not ts_parallel and not data_repository['war_parallel']:
            # Create and replace existing Project junit file for each suite
            ts_junit_object.output_junit(data_repository['wp_results_execdir'],
                                         print_summary=False)

    if ts_parallel:
        ts_impact = data_repository['wt_ts_impact']
        if ts_impact.upper() == 'IMPACT':
            msg = "Status of the executed suite impacts project result"
        elif ts_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed suite case does not impact project result"
        print_debug(msg)
        # put result into multiprocessing queue and later retrieve in corresponding driver
        queue.put(
            (test_suite_status, ts_impact, suite_timestamp, ts_junit_object))

    return test_suite_status, suite_repository
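The end-of-suite logic above folds the cleanup result into the suite result: a passing suite whose cleanup failed is downgraded to 'WARN', while any non-True suite status is left untouched. A compact hedged restatement of that rule:

# Hedged restatement of the suite-status/cleanup-status combination rule.
def final_suite_status(test_suite_status, cleanup_tc_status):
    if test_suite_status is True and cleanup_tc_status is not True:
        return 'WARN'   # only the cleanup failed
    return test_suite_status

assert final_suite_status(True, True) is True
assert final_suite_status(True, False) == 'WARN'
assert final_suite_status("ERROR", True) == "ERROR"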
def execute_sequential_testcases(testcase_list, suite_repository,
                                 data_repository, from_project, auto_defects,
                                 iter_ts_sys, tc_parallel, queue, ts_iter):
    """Executes the list of cases(of a suite) in sequential order
        - Takes a testcase_list as input and sends
        each case to Basedriver for execution.
        - Computes the suite status based on the case_status
        and the impact value of the case
        - Handles case failures as per the default/specific
        onError action/value
        - Calls the function to report the suite status

    :Arguments:
        1. testcase_list(list) = List of cases to be executed
        2. suite_repository(dict) = suite repository
        3. data_repository(dict) = Warrior data repository
        4. from_project(boolean) = True for Project execution else False
        5. auto_defects(boolean) = True for Jira auto defect creation else False
        6. iter_ts_sys(string) = System for iterative execution
        7. tc_parallel(boolean) = True for Parallel execution else False
        8. queue = Python multiprocessing queue for parallel execution
        9. ts_iter(boolean) = True for 'iterative_parallel' execution else False
    :Returns:
        1. suite_status - overall suite status

    """
    goto_tc = False

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    testsuite_filepath = suite_repository['testsuite_filepath']
    suite_error_action = suite_repository['def_on_error_action']
    suite_error_value = suite_repository['def_on_error_value']
    testsuite_dir = os.path.dirname(testsuite_filepath)
    data_repository['wt_tc_timestamp'] = None

    errors = 0
    skipped = 0
    failures = 0
    tests = 0
    tc_duration = 0
    tc_status_list = []
    tc_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    tc_duration_list = []
    tc_junit_list = []

    while tests < len(testcase_list):
        testcase = testcase_list[tests]
        tests += 1
        tc_rel_path = testsuite_utils.get_path_from_xmlfile(testcase)
        if tc_rel_path is not None:
            tc_path = Utils.file_Utils.getAbsPath(tc_rel_path, testsuite_dir)
        else:
            # tc_rel_path is None; keep the stringified value so the
            # missing-file error path below reports it
            tc_path = str(tc_rel_path)
        print_debug('\n')
        print_debug("<<<< Starting execution of Test case: {0}>>>>".
                    format(tc_path))
        action, tc_status = exec_type_driver.main(testcase)
        tc_runtype = testsuite_utils.get_runtype_from_xmlfile(testcase)
        tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(testcase)
        tc_context = Utils.testcase_Utils.get_context_from_xmlfile(testcase)
        suite_step_data_file = testsuite_utils.get_data_file_at_suite_step(\
            testcase, suite_repository)
        tc_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(\
            testcase, 'onError', 'action')
        tc_onError_action = tc_onError_action if tc_onError_action else suite_error_action
        if suite_step_data_file is not None:
            data_file = Utils.file_Utils.getAbsPath(suite_step_data_file,
                                                    testsuite_dir)
            data_repository[tc_path] = data_file
        data_repository['wt_tc_impact'] = tc_impact
        if testcase.find("runmode") is not None and \
           testcase.find("runmode").get("attempt") is not None:
            # condition to print the start of runmode execution
            if testcase.find("runmode").get("attempt") == 1:
                print_info("\n----------------- Start of Testcase Runmode Execution"
                           " -----------------\n")
            print_info("TESTCASE ATTEMPT: {0}".format(testcase.find("runmode")
                                                      .get("attempt")))
        if testcase.find("retry") is not None and \
           testcase.find("retry").get("attempt") is not None:
            print_info("TESTCASE ATTEMPT: {0}".format(testcase.find("retry")
                                                      .get("attempt")))

        if Utils.file_Utils.fileExists(tc_path) or action is False:
            tc_name = Utils.file_Utils.getFileName(tc_path)
            testsuite_utils.pSuite_testcase(junit_resultfile, suite_name,
                                            tc_name, time='0')

            if not goto_tc and action is True:
                try:
                    tc_result = testcase_driver.main(tc_path,
                                                     data_repository,
                                                     tc_context,
                                                     runtype=tc_runtype,
                                                     auto_defects=auto_defects,
                                                     suite=suite_name,
                                                     tc_onError_action=tc_onError_action,
                                                     iter_ts_sys=iter_ts_sys)

                    tc_status = tc_result[0]
                    tc_duration = tc_result[1]
                except Exception:
                    print_error('unexpected error {0}'.format(traceback.format_exc()))
                    tc_status, tc_duration = False, False
                    tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(testcase)

            elif goto_tc and goto_tc == str(tests) and action is True:

                try:
                    tc_result = testcase_driver.main(tc_path,
                                                     data_repository,
                                                     tc_context,
                                                     runtype=tc_runtype,
                                                     auto_defects=auto_defects,
                                                     suite=suite_name,
                                                     tc_onError_action=tc_onError_action,
                                                     iter_ts_sys=iter_ts_sys)
                    tc_status = tc_result[0]
                    tc_duration = tc_result[1]
                    goto_tc = False

                except Exception:
                    print_error('unexpected error {0}'.format(traceback.format_exc()))
                    tc_status, tc_duration = False, False
                    tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(testcase)

            else:
                print_info('skipped testcase %s ' % tc_name)
                skipped += 1
                testsuite_utils.pSuite_testcase_skip(junit_resultfile)
                testsuite_utils.pSuite_update_suite_attributes(
                    junit_resultfile, str(errors), str(skipped),
                    str(tests), str(failures), time='0')
                data_repository['wt_junit_object'].update_count("skipped", "1", "ts",\
                    data_repository['wt_ts_timestamp'])
                data_repository['wt_junit_object'].update_count("tests", "1", "ts",\
                    data_repository['wt_ts_timestamp'])
                data_repository['wt_junit_object'].update_count(\
                    "tests", "1", "pj", "not applicable")
                tmp_timestamp = str(Utils.datetime_utils.get_current_timestamp())
                time.sleep(2)
                data_repository['wt_junit_object'].create_testcase(
                    location="from testsuite",
                    timestamp=tmp_timestamp,
                    ts_timestamp=data_repository['wt_ts_timestamp'],
                    classname=data_repository['wt_suite_name'],
                    name=os.path.splitext(tc_name)[0])
                data_repository['wt_junit_object'].add_testcase_message(tmp_timestamp, "skipped")
                data_repository['wt_junit_object'].update_attr(\
                    "status", "SKIPPED", "tc", tmp_timestamp)
                data_repository['testcase_%d_result' % tests] = "SKIP"
                title = None
                if Utils.file_Utils.fileExists(tc_path):
                    title = Utils.xml_Utils.getChildTextbyParentTag(tc_path, 'Details', 'Title')
                title = title.strip() if title else "None"
                data_repository['wt_junit_object'].update_attr("title", title, "tc", tmp_timestamp)
                data_repository['wt_junit_object'].update_attr(\
                    "impact", impact_dict.get(tc_impact.upper()), "tc", tmp_timestamp)
                data_repository['wt_junit_object'].update_attr(\
                    "onerror", "N/A", "tc", tmp_timestamp)
                data_repository['wt_junit_object'].output_junit(\
                    data_repository['wt_results_execdir'], print_summary=False)
                continue

        else:
            errors += 1
            msg = print_error("Test case does not exist in the provided path: "
                              "{0}".format(tc_path))
            testsuite_utils.pSuite_testcase(junit_resultfile, suite_name,
                                            tc_path, time='0')
            testsuite_utils.pSuite_testcase_error(junit_resultfile, msg, '0')
            tc_status = "ERROR"
            if goto_tc and goto_tc == str(tests):
                goto_tc = False
            elif goto_tc and goto_tc != str(tests):
                data_repository['testcase_%d_result' % tests] = "ERROR"
                continue

        goto_tc_num = onerror_driver.main(testcase, suite_error_action,
                                          suite_error_value)
        if goto_tc_num is False:
            onerror = "Next"
        elif goto_tc_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_tc_num)
        data_repository['wt_junit_object'].update_attr(
                        "impact", impact_dict.get(tc_impact.upper()), "tc",
                        data_repository['wt_tc_timestamp'])
        data_repository['wt_junit_object'].update_attr(
                        "onerror", onerror, "tc",
                        data_repository['wt_tc_timestamp'])

        tc_status_list, tc_impact_list = \
            common_execution_utils.compute_status(testcase, tc_status_list,
                                                  tc_impact_list,
                                                  tc_status, tc_impact)
        tc_duration_list.append(tc_duration)

        string_status = {"TRUE": "PASS", "FALSE": "FAIL", "ERROR": "ERROR",
                         "SKIP": "SKIP", "RAN": "RAN"}

        if str(tc_status).upper() in list(string_status.keys()):
            data_repository['testcase_%d_result' % tests] = string_status[str(tc_status).upper()]
        else:
            print_error("unexpected testcase status, default to exception")
            data_repository['testcase_%d_result' % tests] = "ERROR"

        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = ("Status of the executed test case does not impact "
                   "Testsuite result")
        print_debug(msg)

        runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(testcase)
        retry_type, retry_cond, retry_cond_value, retry_value, \
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testcase)
        # Adding condition to check tc_status is error or not
        if runmode is not None or tc_status == "ERROR":
            if tc_status is True:
                testsuite_utils.update_tc_duration(str(tc_duration))
                # if runmode is 'rup' & tc_status is True, skip the repeated
                # execution of same testcase and move to next actual testcase
                if runmode.upper() == "RUP":
                    goto_tc = str(value)
            elif tc_status == 'ERROR' or tc_status == 'EXCEPTION':
                errors += 1
                testsuite_utils.pSuite_testcase_error(junit_resultfile,\
                    'Encountered error/exception during TC execution', str(tc_duration))
                goto_tc = onerror_driver.main(testcase, suite_error_action, suite_error_value)
                if goto_tc in ['ABORT', 'ABORT_AS_ERROR']:
                    update_suite_attribs(junit_resultfile, str(errors), str(skipped), str(tests),\
                        str(failures), time='0')
                    break
                # when 'onError:goto' value is less than the current tc num,
                # change the next iteration point to goto value
                elif goto_tc and int(goto_tc) < tests:
                    tests = int(goto_tc)-1
                    goto_tc = False
                # Handles the goto value is greater than total no of TC's
                if int(goto_tc) > len(testcase_list):
                    print_warning("The goto value {} is more than no of TC's {} \
                        so skipping all the TC's".format(goto_tc, len(testcase_list)))
            elif tc_status is False:
                failures += 1
                testsuite_utils.pSuite_testcase_failure(junit_resultfile,
                                                        time=str(tc_duration))
                goto_tc = onerror_driver.main(testcase, suite_error_action,
                                              suite_error_value)
                if goto_tc in ['ABORT', 'ABORT_AS_ERROR']:
                    update_suite_attribs(junit_resultfile, str(errors),
                                         str(skipped), str(tests),
                                         str(failures), time='0')
                    break
                # when 'onError:goto' value is less than the current tc num,
                # change the next iteration point to goto value
                elif goto_tc and int(goto_tc) < tests:
                    tests = int(goto_tc)-1
                    goto_tc = False
                # if runmode is 'ruf' & tc_status is False, skip the repeated
                # execution of same testcase and move to next actual testcase
                if not goto_tc and runmode.upper() == "RUF":
                    goto_tc = str(value)
        elif retry_type is not None:
            if retry_type.upper() == 'IF':
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                                                        retry_interval))
                        pNote("The given condition '{0}' matches the expected "
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning("The condition value '{0}' does not "\
                            "match with the expected value ""'{1}'".format(\
                                data_repository[retry_cond], retry_cond_value))
                except KeyError:
                    print_warning("The given condition '{0}' is not there in "\
                        "the data repository".format(retry_cond_value))
                    condition_met = False
                if condition_met is False:
                    goto_tc = str(retry_value)
            elif retry_type.upper() == 'IF NOT':
                try:
                    if data_repository[retry_cond] != retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(retry_interval))
                        pNote("The condition value '{0}' does not match "
                              "the expected value '{1}'".format(
                                  data_repository[retry_cond], retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning("The given condition '{0}' matches "
                                      "the expected value '{1}'".format(
                                          data_repository[retry_cond], retry_cond_value))
                except KeyError:
                    condition_met = False
                    print_warning("The given condition '{0}' does not exist "
                                  "in the data repository".format(retry_cond))
                if condition_met is False:
                    pNote("The given condition '{0}' matched the "
                          "value '{1}'".format(retry_cond, retry_cond_value))
                    goto_tc = str(retry_value)
        else:
            if tc_status is False or tc_status == "ERROR" or\
                    tc_status == "EXCEPTION":
                goto_tc = onerror_driver.main(testcase, suite_error_action,
                                              suite_error_value)
            if goto_tc in ['ABORT', 'ABORT_AS_ERROR']:
                break
            # when 'onError:goto' value is less than the current tc num,
            # change the next iteration point to goto value
            elif goto_tc and int(goto_tc) < tests:
                tests = int(goto_tc) - 1
                goto_tc = False
        # suite_status = testsuite_utils.compute_testsuite_status(suite_status,
        #                                                         tc_status, tc_impact)
        update_suite_attribs(junit_resultfile, str(errors),
                             str(skipped), str(tests), str(failures),
                             time='0')
        # junit_object/python_process is different for all the cases
        # executed in parallel
        if ts_iter is False:
            tc_junit_list.append(data_repository['wt_junit_object'])

    # junit_object/python_process is same for all the cases executed in the
    # same system for 'iterative_parallel' suite execution
    if ts_iter is True:
        tc_junit_list = data_repository['wt_junit_object']
    # print the end of runmode execution as the steps skip when the condition
    # is met for RUF/RUP or when all the attempts finish
    if testcase.find("runmode") is not None and \
       testcase.find("runmode").get("attempt") is not None:
        if testcase.find("runmode").get("attempt") == \
           testcase.find("runmode").get("runmode_val"):
            print_info("\n----------------- End of Testcase Runmode Execution"
                       " -----------------\n")
    suite_status = Utils.testcase_Utils.compute_status_using_impact(tc_status_list, tc_impact_list)

    if tc_parallel:
        tc_impact = data_repository['wt_tc_impact']
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test case does not impact Teststuie result"
        print_debug(msg)
        tc_name = Utils.file_Utils.getFileName(tc_path)
        # put result into multiprocessing queue and later retrieve in
        # corresponding driver
        queue.put((tc_status_list, tc_name, tc_impact_list, tc_duration_list,
                   tc_junit_list))
    return suite_status
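The suite status returned above comes from compute_status_using_impact; judging by the surrounding log messages (an inference, not documented behavior), only cases marked 'impact' presumably contribute to the rolled-up result. A toy model of that idea:

# Toy model of impact-weighted status rollup; this is an inference from the
# log messages above, not Warrior's actual compute_status_using_impact.
def rollup(status_list, impact_list):
    contributing = [status for status, impact in zip(status_list, impact_list)
                    if impact.upper() == "IMPACT"]
    if not contributing:
        return True   # nothing impacting -> treat the suite as passing
    return all(status is True for status in contributing)

assert rollup([True, False], ["impact", "noimpact"]) is True
assert rollup([True, False], ["impact", "impact"]) is False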