Example #1
def get_testcase_list(testsuite_filepath):
    """Takes the location of any Testsuite xml file as input
    Returns a list of all the Testcase elements present in the Testsuite

    Arguments:
    1. testsuite_filepath    = full path of the Testsuite xml file
    """

    testcase_list = []
    root = Utils.xml_Utils.getRoot(testsuite_filepath)
    testcases = root.find('Testcases')
    if testcases is None:
        print_info('Testsuite is empty: tag <Testcases> not found in the input Testsuite xml file')
    else:
        new_testcase_list = testcases.findall('Testcase')
        # execute tc multiple times
        for tc in new_testcase_list:
            runmode, value = common_execution_utils.get_runmode_from_xmlfile(tc)
            retry_type, _, _, retry_value, _ = common_execution_utils.get_retry_from_xmlfile(tc)
            if runmode is not None and value > 0:
                # more than one testcase in the list: point the goto
                # target past the duplicated attempts
                if len(new_testcase_list) > 1:
                    go_next = len(testcase_list) + value + 1
                    for i in range(0, value):
                        copy_tc = copy.deepcopy(tc)
                        # ElementTree attribute values must be strings
                        copy_tc.find("runmode").set("value", str(go_next))
                        copy_tc.find("runmode").set("attempt", str(i + 1))
                        testcase_list.append(copy_tc)
                # only one testcase in the list: no goto target is needed
                else:
                    for i in range(0, value):
                        copy_tc = copy.deepcopy(tc)
                        copy_tc.find("runmode").set("attempt", str(i + 1))
                        testcase_list.append(copy_tc)
            if retry_type is not None and retry_value > 0:
                if len(new_testcase_list) > 1:
                    go_next = len(testcase_list) + retry_value + 1
                    if runmode is not None:
                        get_runmode = tc.find('runmode')
                        tc.remove(get_runmode)
                    for i in range(0, retry_value):
                        copy_tc = copy.deepcopy(tc)
                        copy_tc.find("retry").set("count", str(go_next))
                        copy_tc.find("retry").set("attempt", str(i + 1))
                        testcase_list.append(copy_tc)
                else:
                    if runmode is not None:
                        get_runmode = tc.find('runmode')
                        tc.remove(get_runmode)
                    for i in range(0, retry_value):
                        copy_tc = copy.deepcopy(tc)
                        copy_tc.find("retry").set("attempt", str(i + 1))
                        testcase_list.append(copy_tc)
            if retry_type is None and runmode is None:
                testcase_list.append(tc)
    return testcase_list
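The runmode expansion above is easier to see on a toy input. Below is a minimal, self-contained sketch of the same deepcopy-and-stamp pattern; the toy XML and the toy_get_runmode helper are illustrative assumptions, not the framework's real data or API.

import copy
import xml.etree.ElementTree as ET

TOY_SUITE = """
<Testsuite>
  <Testcases>
    <Testcase><path>tc_a.xml</path><runmode type="ruf" value="3"/></Testcase>
    <Testcase><path>tc_b.xml</path></Testcase>
  </Testcases>
</Testsuite>
"""

def toy_get_runmode(tc):
    """Stand-in for get_runmode_from_xmlfile: (runmode, attempts) or (None, None)."""
    runmode = tc.find("runmode")
    if runmode is None:
        return None, None
    return runmode.get("type"), int(runmode.get("value", "1"))

root = ET.fromstring(TOY_SUITE)
expanded = []
for tc in root.find("Testcases").findall("Testcase"):
    runmode, value = toy_get_runmode(tc)
    if runmode is not None and value > 0:
        # goto target: the element just past the duplicated attempts
        go_next = len(expanded) + value + 1
        for i in range(value):
            copy_tc = copy.deepcopy(tc)
            copy_tc.find("runmode").set("value", str(go_next))
            copy_tc.find("runmode").set("attempt", str(i + 1))
            expanded.append(copy_tc)
    else:
        expanded.append(tc)

print(len(expanded))  # 4: tc_a stamped 3 times, tc_b once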
    def _compute_runmode_goto_operations(self, step, step_status,
                                         exec_type_onerror, goto_stepnum,
                                         step_num):
        """
        """
        runmode, value = common_execution_utils.get_runmode_from_xmlfile(step)

        if runmode is not None:
            # if runmode is 'ruf' & step_status is False, skip the repeated
            # execution of same TC step and move to next actual step
            if all([not exec_type_onerror, runmode == "ruf",
                    step_status is False]):
                goto_stepnum = str(value)
            # if runmode is 'rup' & step_status is True, skip the repeated
            # execution of same TC step and move to next actual step
            elif runmode == "rup" and step_status is True:
                goto_stepnum = str(value)
            else:
                if any([
                        step_status is False,
                        str(step_status).upper() == "ERROR",
                        str(step_status).upper() == "EXCEPTION",
                        exec_type_onerror is True
                ]):
                    goto_stepnum = onerror_driver.main(
                        step, self.default_error_action,
                        self.default_error_value, exec_type_onerror)
                    # if (goto_stepnum == 'ABORT'): break
        else:
            if any([
                    step_status is False,
                    str(step_status).upper() == "ERROR",
                    str(step_status).upper() == "EXCEPTION",
                    exec_type_onerror is True
            ]):
                goto_stepnum = onerror_driver.main(step,
                                                   self.default_error_action,
                                                   self.default_error_value,
                                                   exec_type_onerror)
                if str(goto_stepnum).upper() == 'ABORT':
                    pass
                # when 'onError:goto' value is less than the current step num,
                # change the next iteration point to goto value
                elif goto_stepnum and int(goto_stepnum) < step_num:
                    step_num = int(goto_stepnum) - 1
                    goto_stepnum = False
        return goto_stepnum, step_num
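The 'ruf'/'rup' branches above implement a small decision table: repeat-until-fail stops repeating on a failure, repeat-until-pass stops on a success. A standalone illustration with a simplified signature (an assumption for clarity, not the driver's actual method):

def next_goto(runmode, step_status, goto_target, exec_type_onerror=False):
    """Return the goto step number (str) or None to continue normally."""
    if runmode == "ruf" and not exec_type_onerror and step_status is False:
        return str(goto_target)  # failure ends a repeat-until-fail block
    if runmode == "rup" and step_status is True:
        return str(goto_target)  # success ends a repeat-until-pass block
    return None

assert next_goto("ruf", False, 5) == "5"
assert next_goto("rup", True, 5) == "5"
assert next_goto("ruf", True, 5) is None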
    def execute_step(self, current_step_number, go_to_step_number):
        """
        This function executes the determined step - step_num (integer index) from the step_list.
        This function is called either from the while loop (normal execution) in function
        execute_steps() or from a for loop (invoked execution)
        """
        self.current_step = self.step_list[current_step_number]
        # store loop iter number in data repository
        loop_iter_number = self.current_step.get("loop_iter_number", None)
        Utils.data_Utils.update_datarepository(
            {"loop_iter_number": loop_iter_number})
        # Incrementing current_step_number for printing purposes.
        self.current_step_number = current_step_number + 1

        self.go_to_step_number = go_to_step_number
        # Decide whether or not to execute the keyword:
        # first check if this step should run in this iteration
        if not self.go_to_step_number or self.go_to_step_number == str(
                self.current_step_number):
            # get Exectype information
            self.run_current_step, self.current_triggered_action = \
                exec_type_driver.main(self.current_step, skip_invoked=self.skip_invoked)
            if not self.run_current_step:
                return self._report_step_as_not_run()

        if not self.go_to_step_number or self.go_to_step_number == str(
                self.current_step_number):
            self.step_status = self._execute_current_step()
        else:
            # Skip because of goto
            return self._skip_because_of_goto()
        runmode, value, runmode_timer = \
            common_execution_utils.get_runmode_from_xmlfile(self.current_step)
        retry_type, retry_cond, retry_cond_value, retry_value, retry_interval = \
            common_execution_utils.get_retry_from_xmlfile(self.current_step)
        if runmode is not None:
            return self._execute_runmode_step(runmode_timer, runmode,
                                              self.step_status, value)

        elif retry_type is not None:
            return self._execute_retry_type_step(retry_type, retry_cond,
                                                 retry_cond_value,
                                                 retry_interval, retry_value)
        else:
            return self._execute_step_otherwise(self.step_status)
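A minimal sketch of the goto-gated driver loop the docstring above refers to; the step callables are hypothetical stand-ins for real steps, and the real loop lives in execute_steps():

def run_steps(steps):
    """Walk steps by index, honoring a goto step number returned by a step."""
    step_num = 0
    goto_stepnum = False
    while step_num < len(steps):
        current = step_num
        step_num += 1  # 1-based step number, as in the driver
        # skip steps until the goto target is reached
        if goto_stepnum and goto_stepnum != str(step_num):
            continue
        # a step returns False (continue normally) or a goto target string
        goto_stepnum = steps[current]()

# example: step 1 jumps to step 3, so step 2 never runs
trace = []
steps = [lambda: (trace.append(1), "3")[1],
         lambda: (trace.append(2), False)[1],
         lambda: (trace.append(3), False)[1]]
run_steps(steps)
print(trace)  # [1, 3]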
def get_steps_list(testcase_filepath):
    """Takes the location of any Testcase xml file as input
    Returns a list of all the step elements present in the Testcase

    :Arguments:
        1. testcase_filepath    = full path of the Testcase xml file
    """
    step_list = []
    root = Utils.xml_Utils.getRoot(testcase_filepath)
    Steps = root.find('Steps')
    if Steps is None:
        print_warning("Case: '{}' has no Steps/Keywords "
                      "to be executed".format(testcase_filepath))
    else:
        new_step_list = Steps.findall('step')
        # execute step multiple times
        for step in new_step_list:
            runmode, value = common_execution_utils.get_runmode_from_xmlfile(
                step)
            retry_type, _, _, retry_value, _ = common_execution_utils.get_retry_from_xmlfile(
                step)
            if runmode is not None and value > 0:
                go_next = len(step_list) + value + 1
                for i in range(0, value):
                    copy_step = copy.deepcopy(step)
                    # ElementTree attribute values must be strings
                    copy_step.find("runmode").set("value", str(go_next))
                    copy_step.find("runmode").set("attempt", str(i + 1))
                    step_list.append(copy_step)
            if retry_type is not None and retry_value > 0:
                go_next = len(step_list) + retry_value + 1
                if runmode is not None:
                    get_runmode = step.find('runmode')
                    step.remove(get_runmode)
                for i in range(0, retry_value):
                    copy_step = copy.deepcopy(step)
                    copy_step.find("retry").set("count", str(go_next))
                    copy_step.find("retry").set("attempt", str(i + 1))
                    step_list.append(copy_step)
            if retry_type is None and runmode is None:
                step_list.append(step)
    return step_list
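One detail worth calling out in both expansion helpers: ElementTree accepts non-string attribute values at .set() time but fails when the tree is serialized, which is why the str() casts above matter. A quick standalone demonstration:

import xml.etree.ElementTree as ET

elem = ET.Element("step")
elem.set("attempt", 1)             # accepted at set() time...
try:
    ET.tostring(elem)              # ...but fails when serialized
except TypeError as err:
    print("serialization failed:", err)

elem.set("attempt", str(1))        # always cast attribute values to str
print(ET.tostring(elem).decode())  # <step attempt="1" />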
def execute_project(project_filepath, auto_defects, jiraproj, res_startdir,
                    logs_startdir, data_repository):
    """
    - Takes a list of testsuite locations input.
    - Iterates over the list and sends each testsuite
    location to testsuite_driver for execution.
    - Gets the status of the testsuite from the
    Warrior and computes the project_status based on the impact value
    of the testsuite.
    - If the testsuite fails, handles the failure using
    the default or specific onError action/value.
    - Finally reports the project status to the result file.

    Arguments:
    1. project_filepath    = (string) full path of the project xml file
    2. auto_defects        = (boolean) True for Jira auto defect creation else False
    3. jiraproj            = (string) name of the jira project
    4. res_startdir        = (string) directory under which results will be stored
    5. logs_startdir       = (string) directory under which logs will be stored
    6. data_repository     = (dict) Warrior data repository
    """
    project_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution starts".format(project_start_time))
    suite_cntr = 0
    # project_status = True
    goto_testsuite = False
    ts_status_list = []
    ts_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    project_dir = os.path.dirname(project_filepath)
    project_title = Utils.xml_Utils.getChildTextbyParentTag(
        project_filepath, 'Details', 'Title')
    project_repository = get_project_details(project_filepath, res_startdir,
                                             logs_startdir, data_repository)
    project_repository['project_title'] = project_title
    testsuite_list = get_testsuite_list(project_filepath)

    # project_resultfile = project_repository['project_resultfile']

    project_name = project_repository['project_name']
    wp_results_execdir = project_repository['wp_results_execdir']
    data_repository['wp_results_execdir'] = wp_results_execdir
    wp_logs_execdir = project_repository['wp_logs_execdir']

    project_error_action = project_repository['def_on_error_action']
    project_error_value = project_repository['def_on_error_value']

    pj_junit_object = junit_class.Junit(filename=project_name,
                                        timestamp=project_start_time,
                                        name=project_name,
                                        display="True")

    pj_junit_object.update_attr("resultsdir",
                                project_repository['project_execution_dir'],
                                "pj", project_start_time)
    pj_junit_object.update_attr("title", project_repository['project_title'],
                                "pj", project_start_time)
    pj_junit_object.add_property("resultsdir",
                                 project_repository['project_execution_dir'],
                                 "pj", project_start_time)

    # adding the resultsdir as attribute, need to be removed after making it
    # a property
    pj_junit_object.add_project_location(project_filepath)
    if "jobid" in data_repository:
        pj_junit_object.add_jobid(data_repository["jobid"])
        del data_repository["jobid"]
    data_repository['wt_junit_object'] = pj_junit_object

    while suite_cntr < len(testsuite_list):
        testsuite = testsuite_list[suite_cntr]
        # suite_junit_type = 'file'
        suite_cntr += 1

        testsuite_rel_path = testsuite_utils.get_path_from_xmlfile(testsuite)
        if testsuite_rel_path is not None:
            testsuite_path = Utils.file_Utils.getAbsPath(
                testsuite_rel_path, project_dir)
        else:
            testsuite_path = str(testsuite_rel_path)
        print_info("\n")
        print_debug("<<<< Starting execution of Test suite: {0}>>>>".format(
            testsuite_path))
        action, testsuite_status = exec_type_driver.main(testsuite)
        testsuite_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
            testsuite)
        testsuite_name = Utils.file_Utils.getFileName(testsuite_path)
        testsuite_nameonly = Utils.file_Utils.getNameOnly(testsuite_name)
        ts_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testsuite, 'onError', 'action')
        ts_onError_action = ts_onError_action if ts_onError_action else project_error_action
        if Utils.file_Utils.fileExists(testsuite_path):
            if not goto_testsuite and action is True:

                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]

            elif goto_testsuite and goto_testsuite == str(suite_cntr)\
                    and action is True:
                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                goto_testsuite = False
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]

            else:
                msg = print_info(
                    'skipped testsuite: {0} '.format(testsuite_path))
                testsuite_resultfile = \
                    '<testsuite errors="0" failures="0" name="{0}" '\
                    'skipped="0" tests="0" time="0" timestamp="{1}" > '\
                    '<skipped message="{2}"/> </testsuite>'.format(
                        testsuite_name, project_start_time, msg)
                tmp_timestamp = str(
                    Utils.datetime_utils.get_current_timestamp())
                time.sleep(2)
                pj_junit_object.create_testsuite(
                    location=os.path.dirname(testsuite_path),
                    name=testsuite_nameonly,
                    timestamp=tmp_timestamp,
                    **pj_junit_object.init_arg())
                pj_junit_object.update_attr("status", "SKIPPED", "ts",
                                            tmp_timestamp)
                pj_junit_object.update_attr("skipped", "1", "pj",
                                            tmp_timestamp)
                pj_junit_object.update_count("suites", "1", "pj",
                                             tmp_timestamp)
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "SKIP"
                # pj_junit_object.add_testcase_message(tmp_timestamp, "skipped")
                pj_junit_object.update_attr(
                    "impact", impact_dict.get(testsuite_impact.upper()), "ts",
                    tmp_timestamp)
                pj_junit_object.update_attr("onerror", "N/A", "ts",
                                            tmp_timestamp)
                pj_junit_object.output_junit(wp_results_execdir,
                                             print_summary=False)
                continue

        else:

            msg = print_error("Test suite does not exist in "
                              "provided path: {0}".format(testsuite_path))
            testsuite_status = 'ERROR'
            testsuite_resultfile = \
                '<testsuite errors="0" failures="0" name="{0}" '\
                'skipped="0" tests="0" time="0" timestamp="{1}" > '\
                '<error message="{2}"/> </testsuite>'.format(
                    testsuite_name, project_start_time, msg)
            # suite_junit_type = 'string'
            if goto_testsuite and goto_testsuite == str(suite_cntr):
                goto_testsuite = False
            elif goto_testsuite and goto_testsuite != str(suite_cntr):
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "ERROR"
                continue

        goto_testsuite_num = onerror_driver.main(testsuite,
                                                 project_error_action,
                                                 project_error_value)
        if goto_testsuite_num is False:
            onerror = "Next"
        elif goto_testsuite_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_testsuite_num)
        pj_junit_object.update_attr("impact",
                                    impact_dict.get(testsuite_impact.upper()),
                                    "ts", data_repository['wt_ts_timestamp'])
        pj_junit_object.update_attr("onerror", onerror, "ts",
                                    data_repository['wt_ts_timestamp'])

        string_status = {
            "TRUE": "PASS",
            "FALSE": "FAIL",
            "ERROR": "ERROR",
            "SKIP": "SKIP"
        }

        if str(testsuite_status).upper() in string_status:
            data_repository['testsuite_{}_result'.format(suite_cntr)] = \
                string_status[str(testsuite_status).upper()]
        else:
            print_error("unexpected testsuite status, default to exception")
            data_repository['testsuite_%d_result' % suite_cntr] = "ERROR"

        ts_status_list.append(testsuite_status)
        ts_impact_list.append(testsuite_impact)
        if testsuite_impact.upper() == 'IMPACT':
            msg = "Status of the executed test suite impacts Project result"
        elif testsuite_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test suite does not impact project result"
        print_debug(msg)
        # project_status = compute_project_status(project_status, testsuite_status,
        #                                                 testsuite_impact)
        runmode, value = common_execution_utils.get_runmode_from_xmlfile(
            testsuite)
        retry_type, retry_cond, retry_cond_value, retry_value,\
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testsuite)
        if runmode is not None:
            if testsuite.find("runmode") is not None and\
              testsuite.find("runmode").get("attempt") is not None:
                print_info("runmode attempt: {0}".format(
                    testsuite.find("runmode").get("attempt")))
            # if runmode is 'ruf' & testsuite_status is False, skip the repeated execution of same
            # test suite and move to next actual test suite
            if not project_error_value and runmode == "RUF" and\
                    testsuite_status is False:
                goto_testsuite = str(value)
            # if runmode is 'rup' & testsuite_status is True, skip the repeated
            # execution of same testsuite and move to next actual testsuite
            elif runmode == "RUP" and testsuite_status is True:
                goto_testsuite = str(value)
        elif retry_type is not None:
            if testsuite.find("retry") is not None and\
              testsuite.find("retry").get("attempt") is not None:
                print_info("retry attempt: {0}".format(
                    testsuite.find("retry").get("attempt")))
            if retry_type.upper() == 'IF':
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                            retry_interval))
                        pNote("The given condition '{0}' matches the expected"
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning(
                            "The condition value '{0}' does not match with the expected "
                            "value '{1}'".format(data_repository[retry_cond],
                                                 retry_cond_value))
                except KeyError:
                    print_warning(
                        "The given condition '{0}' does not exist in "
                        "the data repository".format(retry_cond))

                    condition_met = False
                if condition_met is False:
                    goto_testsuite = str(retry_value)
            elif retry_type.upper() == 'IF NOT':
                try:
                    if data_repository[retry_cond] != retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before "
                              "retrying".format(retry_interval))
                        pNote("The condition value '{0}' does not match "
                              "with the expected value '{1}'".format(
                                  data_repository[retry_cond],
                                  retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                except KeyError:
                    condition_met = False
                    print_warning(
                        "The given condition '{0}' does not exist in "
                        "the data repository".format(retry_cond))
                if condition_met is False:
                    pNote("The given condition '{0}' matched the expected "
                          "value '{1}'".format(data_repository.get(retry_cond),
                                               retry_cond_value))
                    goto_testsuite = str(retry_value)
        else:
            if testsuite_status is False or testsuite_status == "ERROR" or\
                    testsuite_status == "EXCEPTION":
                goto_testsuite = onerror_driver.main(testsuite,
                                                     project_error_action,
                                                     project_error_value)
            if goto_testsuite in ['ABORT', 'ABORT_AS_ERROR']:
                break
            # when 'onError:goto' value is less than the current ts num,
            # change the next iteration point to goto value
            elif goto_testsuite and int(goto_testsuite) < suite_cntr:
                suite_cntr = int(goto_testsuite) - 1
                goto_testsuite = False

    project_status = Utils.testcase_Utils.compute_status_using_impact(
        ts_status_list, ts_impact_list)
    print_info("\n")
    project_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution completed".format(project_end_time))
    project_duration = Utils.datetime_utils.get_time_delta(project_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(project_duration)
    print_info("Project duration= {0}".format(hms))

    project_status = report_project_result(project_status, project_repository)
    pj_junit_object.update_attr("status", str(project_status), "pj",
                                project_start_time)
    pj_junit_object.update_attr("time", str(project_duration), "pj",
                                project_start_time)

    pj_junit_object.output_junit(wp_results_execdir)

    # Save JUnit/HTML results of the Project in MongoDB server
    if data_repository.get("db_obj") is not False:
        pj_junit_xml = project_repository['wp_results_execdir'] +\
            os.sep + pj_junit_object.filename + "_junit.xml"
        data_repository.get("db_obj").add_html_result_to_mongodb(pj_junit_xml)

    return project_status, project_repository
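execute_project delegates the final verdict to Utils.testcase_Utils.compute_status_using_impact. A hedged sketch of what such impact-based aggregation might look like; the real precedence rules live in that utility, so this is illustrative only:

def compute_status_using_impact_sketch(status_list, impact_list):
    """Only entries marked IMPACT contribute to the overall status."""
    impacting = [status for status, impact in zip(status_list, impact_list)
                 if str(impact).upper() == "IMPACT"]
    if any(str(s).upper() in ("ERROR", "EXCEPTION") for s in impacting):
        return "ERROR"
    return all(s is True for s in impacting)

print(compute_status_using_impact_sketch([True, False],
                                         ["impact", "noimpact"]))  # True
print(compute_status_using_impact_sketch([True, False],
                                         ["impact", "impact"]))    # False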
Example #6
                                                            data_repository['wt_tc_timestamp'])
            data_repository['wt_junit_object'].update_count("keywords", "1", "tc",
                                                            data_repository['wt_tc_timestamp'])
            kw_start_time = Utils.datetime_utils.get_current_timestamp()
            step_impact = Utils.testcase_Utils.get_impact_from_xmlfile(step)
            impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
            data_repository['wt_junit_object'].\
                add_keyword_result(data_repository['wt_tc_timestamp'], step_num, keyword, "SKIPPED",
                                   kw_start_time, "0", "skipped",
                                   impact_dict.get(step_impact.upper()), "N/A", step_description)
            continue

        step_status_list.append(step_status)
        kw_resultfile_list.append(kw_resultfile)
        step_impact_list.append(step_impact)
        runmode, value = common_execution_utils.get_runmode_from_xmlfile(step)
        retry_type, retry_cond, retry_cond_value, retry_value, retry_interval = \
            common_execution_utils.get_retry_from_xmlfile(step)
        if runmode is not None:
            # if runmode is 'ruf' & step_status is False, skip the repeated
            # execution of same TC step and move to next actual step
            if not exec_type_onerror and runmode == "RUF" and step_status is False:
                goto_stepnum = str(value)
            # if runmode is 'rup' & step_status is True, skip the repeated
            # execution of same TC step and move to next actual step
            elif runmode =="RUP" and step_status is True:
                goto_stepnum = str(value)
            else:
                if step_status is False or str(step_status).upper() == "ERROR" \
                        or str(step_status).upper() == "EXCEPTION" or exec_type_onerror is True:
                    goto_stepnum = onerror_driver.main(step, default_error_action,
                                                       default_error_value, exec_type_onerror)
Example #7
def execute_testsuite(testsuite_filepath, data_repository, from_project,
                      auto_defects, jiraproj, res_startdir, logs_startdir,
                      ts_onError_action, queue, ts_parallel):
    """Executes the testsuite (provided as a xml file)
            - Takes a testsuite xml file as input and
            sends each testcase to Basedriver for execution.
            - Computes the testsuite status based on the
            testcase_status and the impact value of the testcase
            - Handles testcase failures as per the default/specific onError action/value
            - Calls the function to report the testsuite status

    Arguments:
    1. testsuite_filepath   = (string) the full path of the testsuite xml file
    2. data_repository      = (dict) Warrior data repository
    3. from_project         = (boolean) True when called as part of a Project execution
    4. auto_defects         = (boolean) True for Jira auto defect creation else False
    5. jiraproj             = (string) name of the jira project
    6. res_startdir         = (string) directory under which results will be stored
    7. logs_startdir        = (string) directory under which logs will be stored
    8. ts_onError_action    = (string) onError action inherited from the project
    9. queue                = multiprocessing queue for parallel suite execution
    10. ts_parallel         = (boolean) True for parallel suite execution else False
    """
    testsuite_status_list = []
    suite_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution starts".format(suite_start_time))
    initialize_suite_fields(data_repository)
    suite_repository = get_suite_details(testsuite_filepath, data_repository,
                                         from_project, res_startdir,
                                         logs_startdir)
    testcase_list = common_execution_utils.get_step_list(
        testsuite_filepath, "Testcases", "Testcase")
    execution_type = suite_repository['suite_exectype'].upper()
    no_of_tests = str(len(testcase_list))

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    suite_execution_dir = suite_repository['suite_execution_dir']

    data_repository['wt_suite_execution_dir'] = suite_execution_dir
    data_repository['wt_results_execdir'] = suite_repository[
        'ws_results_execdir']
    data_repository['wt_logs_execdir'] = suite_repository['ws_logs_execdir']
    data_repository['wt_suite_name'] = suite_name

    suite_timestamp = testsuite_utils.get_suite_timestamp()
    data_repository['wt_ts_timestamp'] = suite_timestamp
    suite_repository['wt_ts_timestamp'] = suite_timestamp

    data_repository["suite_exectype"] = "iterative" if execution_type == "ITERATIVE_SEQUENTIAL" or \
    execution_type == "ITERATIVE_PARALLEL" else execution_type

    ts_junit_display = "True"
    pj_junit_display = "False"
    if "wt_junit_object" in data_repository:
        ts_junit_object = data_repository["wt_junit_object"]

    else:
        ts_junit_object = junit_class.Junit(
            filename=suite_name,
            timestamp=suite_timestamp,
            name="customProject_independant_testcase_execution",
            display=pj_junit_display)

        if "jobid" in data_repository:
            ts_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        data_repository["wt_junit_object"] = ts_junit_object
    suite_repository["wt_junit_object"] = ts_junit_object
    ts_junit_object.create_testsuite(
        location=os.path.dirname(testsuite_filepath),
        name=suite_name,
        timestamp=suite_timestamp,
        suite_location=suite_repository['testsuite_filepath'],
        title=suite_repository['suite_title'],
        display=ts_junit_display,
        **ts_junit_object.init_arg())

    # Adding resultsdir as attributes to testsuite_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    ts_junit_object.update_attr("resultsdir",
                                suite_repository['suite_execution_dir'], "ts",
                                suite_timestamp)
    ts_junit_object.add_property("resultsdir",
                                 suite_repository['suite_execution_dir'], "ts",
                                 suite_timestamp)

    if "data_file" in suite_repository:
        data_repository['suite_data_file'] = suite_repository['data_file']

    # jiraproj name
    data_repository['jiraproj'] = jiraproj

    # if not from_project:
    testsuite_utils.pSuite_root(junit_resultfile)

    testsuite_utils.pSuite_testsuite(junit_resultfile,
                                     suite_name,
                                     errors='0',
                                     skipped='0',
                                     tests=no_of_tests,
                                     failures='0',
                                     time='0',
                                     timestamp=suite_timestamp)
    testsuite_utils.pSuite_property(junit_resultfile, 'title',
                                    suite_repository['suite_title'])
    testsuite_utils.pSuite_property(junit_resultfile, 'location',
                                    testsuite_filepath)
    if "jobid" in data_repository:
        testsuite_utils.pSuite_property(junit_resultfile, 'resultlocation',
                                        data_repository["jobid"])
        # del data_repository["jobid"]

    print_suite_details_to_console(suite_repository, testsuite_filepath,
                                   junit_resultfile)

    # Prints the path of result summary file at the beginning of execution
    if data_repository['war_file_type'] == "Suite":
        filename = os.path.basename(testsuite_filepath)
        html_filepath = os.path.join(
            suite_repository['suite_execution_dir'],
            Utils.file_Utils.getNameOnly(filename)) + '.html'
        print_info("HTML result file: {0}".format(html_filepath))
    if not from_project:
        data_repository["war_parallel"] = False

    root = Utils.xml_Utils.getRoot(testsuite_filepath)
    suite_global_xml = root.find('Details')
    runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
        suite_global_xml)

    # get testwrapperfile details
    testwrapperfile, j_data_type, j_runtype, setup_on_error_action = \
        get_testwrapper_file_details(testsuite_filepath, data_repository)
    setup_tc_status, cleanup_tc_status = True, True
    # execute setup steps defined in testwrapper file if testwrapperfile is present
    if testwrapperfile:
        print_info(
            "*****************TESTWRAPPER SETUP EXECUTION START*********************"
        )
        data_repository['suite_testwrapper_file'] = testwrapperfile
        data_repository['wt_data_type'] = j_data_type
        setup_tc_status, data_repository = testcase_driver.execute_testcase(testwrapperfile,\
                                            data_repository, tc_context='POSITIVE',\
                                            runtype=j_runtype,\
                                            tc_parallel=None, queue=None,\
                                            auto_defects=auto_defects, suite=None,\
                                            jiraproj=None, tc_onError_action='ABORT_AS_ERROR',\
                                            iter_ts_sys=None, steps_tag='Setup')
        print_info(
            "*****************TESTWRAPPER SETUP EXECUTION END**********************"
        )
    if setup_on_error_action == 'next' or \
        (setup_on_error_action == 'abort' and setup_tc_status == True):
        if execution_type.upper() == 'PARALLEL_TESTCASES':
            ts_junit_object.remove_html_obj()
            data_repository["war_parallel"] = True
            print_info("Executing testcases in parallel")
            test_suite_status = parallel_testcase_driver.main(
                testcase_list,
                suite_repository,
                data_repository,
                from_project,
                tc_parallel=True,
                auto_defects=auto_defects)

        elif execution_type.upper() == 'SEQUENTIAL_TESTCASES':
            if runmode is None:
                print_info("Executing testcases sequentially")
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)

            elif runmode.upper() == "RUF":
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list,
                        suite_repository,
                        data_repository,
                        from_project,
                        auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "FALSE" or\
                       str(test_suite_status).upper() == "ERROR":
                        break

            elif runmode.upper() == "RUP":
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list,
                        suite_repository,
                        data_repository,
                        from_project,
                        auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "TRUE":
                        break

            elif runmode.upper() == "RMT":
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    # We aren't actually summing each test result here...
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list,
                        suite_repository,
                        data_repository,
                        from_project,
                        auto_defects=auto_defects)
                    testsuite_status_list.append(test_suite_status)
        # The runmode handling below is kept as-is to preserve backward compatibility
        elif execution_type.upper() == 'RUN_UNTIL_FAIL' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "FALSE" or\
                   str(test_suite_status).upper() == "ERROR":
                    break

        elif execution_type.upper() == 'RUN_UNTIL_PASS' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "TRUE":
                    break

        elif execution_type.upper() == 'RUN_MULTIPLE' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Number_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))

            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                # We aren't actually summing each test result here...
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list,
                    suite_repository,
                    data_repository,
                    from_project,
                    auto_defects=auto_defects)

        elif execution_type.upper() == "ITERATIVE_SEQUENTIAL":
            # if execution type is iterative sequential call WarriorCore.Classes.iterative_testsuite
            # class and execute the testcases in iterative sequential fashion on the systems
            print_info("Iterative sequential suite")

            iter_seq_ts_obj = IterativeTestsuite(testcase_list,
                                                 suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)
            test_suite_status = iter_seq_ts_obj.execute_iterative_sequential()

        elif execution_type.upper() == "ITERATIVE_PARALLEL":
            # if execution type is iterative parallel call WarriorCore.Classes.iterative_testsuite
            # class and execute the testcases in iterative parallel fashion on the systems
            ts_junit_object.remove_html_obj()
            print_info("Iterative parallel suite")
            data_repository["war_parallel"] = True
            iter_seq_ts_obj = IterativeTestsuite(testcase_list,
                                                 suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)

            test_suite_status = iter_seq_ts_obj.execute_iterative_parallel()

        else:
            print_error("unexpected suite_type received...aborting execution")
            test_suite_status = False

        if runmode is not None:
            test_suite_status = common_execution_utils.compute_runmode_status(
                testsuite_status_list, runmode, suite_global_xml)
    else:
        print_error("Test cases in suite are not executed as setup failed to execute,"\
                    "setup status : {0}".format(setup_tc_status))
        print_error("Steps in cleanup will be executed on besteffort")
        test_suite_status = "ERROR"
    # execute cleanup steps defined in testwrapper file if testwrapperfile is present
    if testwrapperfile:
        print_info(
            "*****************TESTWRAPPER CLEANUP EXECUTION START*********************"
        )
        data_repository['wt_data_type'] = j_data_type
        cleanup_tc_status, data_repository = testcase_driver.execute_testcase(testwrapperfile,\
                                                          data_repository, tc_context='POSITIVE',\
                                                          runtype=j_runtype,\
                                                          tc_parallel=None, queue=None,\
                                                          auto_defects=auto_defects, suite=None,\
                                                          jiraproj=None, tc_onError_action=None,\
                                                          iter_ts_sys=None, steps_tag='Cleanup')
        print_info(
            "*****************TESTWRAPPER CLEANUP EXECUTION END*********************"
        )
    print_info("\n")
    suite_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution completed".format(suite_end_time))

    if test_suite_status == True and cleanup_tc_status == True:
        test_suite_status = True
    # set status to WARN if only cleanup fails
    elif test_suite_status == True and cleanup_tc_status != True:
        print_warning("setting test suite status to WARN as cleanup failed")
        test_suite_status = 'WARN'

    suite_duration = Utils.datetime_utils.get_time_delta(suite_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(suite_duration)
    print_info("Testsuite duration= {0}".format(hms))
    testsuite_utils.update_suite_duration(str(suite_duration))
    if test_suite_status == False and ts_onError_action and\
        ts_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info(
            "Testsuite status will be marked as ERROR as onError action is "
            "set to 'abort_as_error'")
        test_suite_status = "ERROR"
    testsuite_utils.report_testsuite_result(suite_repository,
                                            test_suite_status)

    ts_junit_object = data_repository['wt_junit_object']
    ts_junit_object.update_count(test_suite_status, "1", "pj")
    ts_junit_object.update_count("suites", "1", "pj", "not appicable")
    ts_junit_object.update_attr("status", str(test_suite_status), "ts",
                                suite_timestamp)
    ts_junit_object.update_attr("time", str(suite_duration), "ts",
                                suite_timestamp)

    if not from_project:
        ts_junit_object.update_attr("status", str(test_suite_status), "pj",
                                    "not applicable")
        ts_junit_object.update_attr("time", str(suite_duration), "pj",
                                    "not appicable")
        ts_junit_object.output_junit(data_repository['wt_results_execdir'])

        # Save JUnit/HTML results of the Suite in MongoDB server
        if data_repository.get("db_obj") is not False:
            ts_junit_xml = (data_repository['wt_results_execdir'] + os.sep +
                            ts_junit_object.filename + "_junit.xml")
            data_repository.get("db_obj").add_html_result_to_mongodb(
                ts_junit_xml)
    else:
        # Do not output JUnit result file for parallel suite execution
        if not ts_parallel and not data_repository['war_parallel']:
            # Create and replace existing Project junit file for each suite
            ts_junit_object.output_junit(data_repository['wp_results_execdir'],
                                         print_summary=False)

    if ts_parallel:
        ts_impact = data_repository['wt_ts_impact']
        if ts_impact.upper() == 'IMPACT':
            msg = "Status of the executed suite impacts project result"
        elif ts_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed suite does not impact project result"
        print_debug(msg)
        # put result into multiprocessing queue and later retrieve in corresponding driver
        queue.put(
            (test_suite_status, ts_impact, suite_timestamp, ts_junit_object))

    return test_suite_status, suite_repository
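The RUF/RUP/RMT attempt loops above all share one shape. A compact, standalone sketch of that shape, with run_once as a hypothetical stand-in for sequential_testcase_driver.main:

def run_attempts(run_once, max_attempts, runmode):
    """Repeat run_once() up to max_attempts times; 'RUF' stops early on
    failure/error, 'RUP' stops early on success, 'RMT' always runs them all."""
    statuses = []
    for attempt in range(1, int(max_attempts) + 1):
        status = run_once()
        statuses.append(status)
        if runmode == "RUF" and str(status).upper() in ("FALSE", "ERROR"):
            break
        if runmode == "RUP" and str(status).upper() == "TRUE":
            break
    return statuses

results = iter([False, True, True])
print(run_attempts(lambda: next(results), 3, "RUP"))  # [False, True]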
def execute_sequential_testcases(testcase_list, suite_repository,
                                 data_repository, from_project, auto_defects,
                                 iter_ts_sys, tc_parallel, queue, ts_iter):
    """Executes the list of cases(of a suite) in sequential order
        - Takes a testcase_list as input and sends
        each case to Basedriver for execution.
        - Computes the suite status based on the case_status
        and the impact value of the case
        - Handles case failures as per the default/specific
        onError action/value
        - Calls the function to report the suite status

    :Arguments:
        1. testcase_list(list) = List of cases to be executed
        2. suite_repository(dict) = suite repository
        3. data_repository(dict) = Warrior data repository
        4. from_project(boolean) = True for Project execution else False
        5. auto_defects(boolean) = True for Jira auto defect creation else False
        6. iter_ts_sys(string) = System for iterative execution
        7. tc_parallel(boolean) = True for Parallel execution else False
        8. queue = Python multiprocessing queue for parallel execution
        9. ts_iter(boolean) = True for 'iterative_parallel' execution else False
    :Returns:
        1. suite_status - overall suite status

    """
    goto_tc = False

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    testsuite_filepath = suite_repository['testsuite_filepath']
    suite_error_action = suite_repository['def_on_error_action']
    suite_error_value = suite_repository['def_on_error_value']
    testsuite_dir = os.path.dirname(testsuite_filepath)

    errors = 0
    skipped = 0
    failures = 0
    tests = 0
    tc_duration = 0
    tc_status_list = []
    tc_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    tc_duration_list = []
    tc_junit_list = []

    while tests < len(testcase_list):
        testcase = testcase_list[tests]
        tests += 1

        tc_rel_path = testsuite_utils.get_path_from_xmlfile(testcase)
        if tc_rel_path is not None:
            tc_path = Utils.file_Utils.getAbsPath(tc_rel_path, testsuite_dir)
        else:
            # tc_rel_path is None: keep the literal string 'None' so the
            # fileExists check below falls into the error branch
            tc_path = str(tc_rel_path)
        print_info('\n')
        print_debug("<<<< Starting execution of Test case: {0}>>>>".
                    format(tc_path))
        action, tc_status = exec_type_driver.main(testcase)
        tc_runtype = testsuite_utils.get_runtype_from_xmlfile(testcase)
        tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(testcase)
        tc_context = Utils.testcase_Utils.get_context_from_xmlfile(testcase)
        suite_step_data_file = testsuite_utils.get_data_file_at_suite_step(
                                                testcase, suite_repository)
        tc_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
                                            testcase, 'onError', 'action')
        tc_onError_action = tc_onError_action if tc_onError_action else suite_error_action
        if suite_step_data_file is not None:
            data_file = Utils.file_Utils.getAbsPath(suite_step_data_file,
                                                    testsuite_dir)
            data_repository[tc_path] = data_file
        data_repository['wt_tc_impact'] = tc_impact
        if testcase.find("runmode") is not None and \
           testcase.find("runmode").get("attempt") is not None:
            print_info("testcase attempt: {0}".format(
                                testcase.find("runmode").get("attempt")))
        if testcase.find("retry") is not None and \
           testcase.find("retry").get("attempt") is not None:
            print_info("testcase attempt: {0}".format(
                                testcase.find("retry").get("attempt")))

        if Utils.file_Utils.fileExists(tc_path):
            tc_name = Utils.file_Utils.getFileName(tc_path)
            testsuite_utils.pSuite_testcase(junit_resultfile, suite_name,
                                            tc_name, time='0')

            if not goto_tc and action is True:
                try:
                    tc_result = testcase_driver.main(tc_path,
                                                     data_repository,
                                                     tc_context,
                                                     runtype=tc_runtype,
                                                     auto_defects=auto_defects,
                                                     suite=suite_name,
                                                     tc_onError_action=tc_onError_action,
                                                     iter_ts_sys=iter_ts_sys)

                    tc_status = tc_result[0]
                    tc_duration = tc_result[1]
                except Exception:
                    print_error('unexpected error {0}'.format(
                                                    traceback.format_exc()))
                    tc_status, tc_duration = False, False
                    tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
                                                                    testcase)

            elif goto_tc and goto_tc == str(tests) and action is True:

                try:
                    tc_result = testcase_driver.main(tc_path,
                                                     data_repository,
                                                     tc_context,
                                                     runtype=tc_runtype,
                                                     auto_defects=auto_defects,
                                                     suite=suite_name,
                                                     tc_onError_action=tc_onError_action,
                                                     iter_ts_sys=iter_ts_sys)
                    tc_status = tc_result[0]
                    tc_duration = tc_result[1]
                    goto_tc = False

                except Exception:
                    print_error('unexpected error {0}'.format(
                                                    traceback.format_exc()))
                    tc_status, tc_duration = False, False
                    tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
                                                                    testcase)

            else:
                print_info('skipped testcase %s ' % tc_name)
                skipped += 1
                testsuite_utils.pSuite_testcase_skip(junit_resultfile)
                testsuite_utils.pSuite_update_suite_attributes(
                                junit_resultfile, str(errors), str(skipped),
                                str(tests), str(failures), time='0')
                data_repository['wt_junit_object'].update_count(
                                "skipped", "1", "ts",
                                data_repository['wt_ts_timestamp'])
                data_repository['wt_junit_object'].update_count(
                                "tests", "1", "ts",
                                data_repository['wt_ts_timestamp'])
                data_repository['wt_junit_object'].update_count(
                                "tests", "1", "pj", "not applicable")
                tmp_timestamp = str(Utils.datetime_utils.get_current_timestamp())
                time.sleep(2)
                data_repository['wt_junit_object'].create_testcase(
                                location="from testsuite",
                                timestamp=tmp_timestamp,
                                ts_timestamp=data_repository['wt_ts_timestamp'],
                                classname=data_repository['wt_suite_name'],
                                name=os.path.splitext(tc_name)[0])
                data_repository['wt_junit_object'].add_testcase_message(
                                                    tmp_timestamp, "skipped")
                data_repository['wt_junit_object'].update_attr(
                                "status", "SKIPPED", "tc", tmp_timestamp)
                data_repository['testcase_%d_result' % tests] = "SKIP"
                title = Utils.xml_Utils.getChildTextbyParentTag(
                                        tc_path, 'Details', 'Title')
                title = title.strip() if title else "None"
                data_repository['wt_junit_object'].update_attr(
                                "title", title, "tc", tmp_timestamp)
                data_repository['wt_junit_object'].update_attr(
                                "impact", impact_dict.get(tc_impact.upper()),
                                "tc", tmp_timestamp)
                data_repository['wt_junit_object'].update_attr(
                                "onerror", "N/A", "tc", tmp_timestamp)
                data_repository['wt_junit_object'].output_junit(
                                data_repository['wt_results_execdir'],
                                print_summary=False)
                continue

        else:
            errors += 1
            msg = print_error("Test case does not exist in the provided path: "
                              "{0}".format(tc_path))
            testsuite_utils.pSuite_testcase(junit_resultfile, suite_name,
                                            tc_path, time='0')
            testsuite_utils.pSuite_testcase_error(junit_resultfile, msg, '0')
            tc_status = "ERROR"
            if goto_tc and goto_tc == str(tests):
                goto_tc = False
            elif goto_tc and goto_tc != str(tests):
                data_repository['testcase_%d_result' % tests] = "ERROR"
                continue

        goto_tc_num = onerror_driver.main(testcase, suite_error_action,
                                          suite_error_value)
        if goto_tc_num is False:
            onerror = "Next"
        elif goto_tc_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_tc_num)
        data_repository['wt_junit_object'].update_attr(
                        "impact", impact_dict.get(tc_impact.upper()), "tc",
                        data_repository['wt_tc_timestamp'])
        data_repository['wt_junit_object'].update_attr(
                        "onerror", onerror, "tc",
                        data_repository['wt_tc_timestamp'])

        tc_status_list.append(tc_status)
        tc_duration_list.append(tc_duration)

        string_status = {"TRUE": "PASS", "FALSE": "FAIL", "ERROR": "ERROR",
                         "SKIP": "SKIP"}

        if str(tc_status).upper() in string_status:
            data_repository['testcase_%d_result' % tests] = string_status[
                                                    str(tc_status).upper()]
        else:
            print_error("unexpected testcase status, default to exception")
            data_repository['testcase_%d_result' % tests] = "ERROR"

        tc_impact_list.append(tc_impact)
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = ("Status of the executed test case does not impact "
                   "Testsuite result")
        print_debug(msg)

        runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
                                                                testcase)
        retry_type, retry_cond, retry_cond_value, retry_value, \
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testcase)
        if runmode is not None:
            if tc_status is True:
                testsuite_utils.update_tc_duration(str(tc_duration))
                # if runmode is 'rup' & tc_status is True, skip the repeated
                # execution of same testcase and move to next actual testcase
                if runmode == "rup":
                    goto_tc = str(value)
            elif tc_status == 'ERROR' or tc_status == 'EXCEPTION':
                errors += 1
                testsuite_utils.pSuite_testcase_error(
                            junit_resultfile,
                            'Encountered error/exception during TC execution',
                            str(tc_duration))
                goto_tc = onerror_driver.main(testcase, suite_error_action,
                                              suite_error_value)
                if goto_tc in ['ABORT', 'ABORT_AS_ERROR']:
                    update_suite_attribs(junit_resultfile, str(errors),
                                         str(skipped), str(tests),
                                         str(failures), time='0')
                    break
                # when 'onError:goto' value is less than the current tc num,
                # change the next iteration point to goto value
                elif goto_tc and int(goto_tc) < tests:
                    tests = int(goto_tc)-1
                    goto_tc = False
            elif tc_status is False:
                failures += 1
                testsuite_utils.pSuite_testcase_failure(junit_resultfile,
                                                        time=str(tc_duration))
                goto_tc = onerror_driver.main(testcase, suite_error_action,
                                              suite_error_value)
                if goto_tc in ['ABORT', 'ABORT_AS_ERROR']:
                    update_suite_attribs(junit_resultfile, str(errors),
                                         str(skipped), str(tests),
                                         str(failures), time='0')
                    break
                # when 'onError:goto' value is less than the current tc num,
                # change the next iteration point to goto value
                elif goto_tc and int(goto_tc) < tests:
                    tests = int(goto_tc)-1
                    goto_tc = False
                # if runmode is 'ruf' & tc_status is False, skip the repeated
                # execution of same testcase and move to next actual testcase
                if not goto_tc and runmode == "ruf":
                    goto_tc = str(value)
        elif retry_type is not None:
            if retry_type.upper() == 'IF':
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                                                        retry_interval))
                        pNote("The given condition '{0}' matches the expected "
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning("The condition value '{0}' does not "
                                      "match with the expected value "
                                      "'{1}'".format(
                                        data_repository[retry_cond],
                                        retry_cond_value))
                except KeyError:
                    print_warning("The given condition '{0}' is not there in "
                                  "the data repository".format(
                                                    retry_cond_value))
                    condition_met = False
                if condition_met is False:
                    goto_tc = str(retry_value)
            elif retry_type.upper() == 'IF NOT':
                try:
                    if data_repository[retry_cond] != retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                                                        retry_interval))
                        pNote("The condition value '{0}' does not match "
                              "with the expected value "
                              "'{1}'".format(data_repository[retry_cond],
                                             retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning("The given condition '{0}' matches "
                                      "the expected value "
                                      "'{1}'".format(
                                            data_repository[retry_cond],
                                            retry_cond_value))
                except KeyError:
                    condition_met = False
                    print_warning("The given condition '{0}' is not there "
                                  "in the data repository".format(retry_cond))
                if condition_met is False:
                    pNote("The given condition '{0}' matched with the "
                          "value '{1}'".format(
                                        data_repository.get(retry_cond),
                                        retry_cond_value))
                    goto_tc = str(retry_value)
        # suite_status = testsuite_utils.compute_testsuite_status(
        #     suite_status, tc_status, tc_impact)
        update_suite_attribs(junit_resultfile, str(errors),
                             str(skipped), str(tests), str(failures),
                             time='0')
        # junit_object/python_process is different for all the cases
        # executed in parallel
        if ts_iter is False:
            tc_junit_list.append(data_repository['wt_junit_object'])

    # junit_object/python_process is same for all the cases executed in the
    # same system for 'iterative_parallel' suite execution
    if ts_iter is True:
        tc_junit_list = data_repository['wt_junit_object']

    suite_status = Utils.testcase_Utils.compute_status_using_impact(
                                        tc_status_list, tc_impact_list)

    if tc_parallel:
        tc_impact = data_repository['wt_tc_impact']
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test case does not impact Teststuie result"
        print_debug(msg)
        tc_name = Utils.file_Utils.getFileName(tc_path)
        # put result into multiprocessing queue and later retrieve in
        # corresponding driver
        queue.put((tc_status_list, tc_name, tc_impact_list, tc_duration_list,
                   tc_junit_list))
    return suite_status
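
For context, the suite verdict above comes from Utils.testcase_Utils.compute_status_using_impact applied to the per-testcase status and impact lists. Below is a minimal sketch of that aggregation contract as it can be inferred from this driver; the helper name and the error handling here are assumptions, not the framework's implementation:

# Minimal sketch (assumed semantics, not the Warrior implementation) of an
# impact-aware aggregation like compute_status_using_impact: only testcases
# marked 'impact' contribute to the suite verdict.
def compute_status_using_impact_sketch(status_list, impact_list):
    suite_status = True
    for status, impact in zip(status_list, impact_list):
        if str(impact).upper() == 'NOIMPACT':
            continue  # no-impact cases never change the suite verdict
        if str(status).upper() in ('ERROR', 'EXCEPTION'):
            return 'ERROR'  # assumption: an error dominates the aggregate
        suite_status = suite_status and status is True
    return suite_status

# A failing no-impact case leaves the suite passing:
# compute_status_using_impact_sketch([True, False], ['impact', 'noimpact'])
# -> True
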
Example #9
def get_testcase_list(testsuite_filepath):
    """Takes the location of any Testsuite xml file as input
    Returns a list of all the Testcase elements present in the Testsuite

    Arguments:
    1. testsuite_filepath    = full path of the Testsuite xml file
    """

    testcase_list = []
    root = Utils.xml_Utils.getRoot(testsuite_filepath)
    testcases = root.find('Testcases')
    if testcases is None:
        print_info(
            'Testsuite is empty: tag <Testcases> not found in the input Testsuite xml file '
        )
    else:
        new_testcase_list = []
        orig_testcase_list = testcases.findall('Testcase')
        for orig_tc in orig_testcase_list:
            orig_tc_path = orig_tc.find('path').text
            if '*' not in orig_tc_path:
                new_testcase_list.append(orig_tc)
            # When the file path has asterisk(*), get the Warrior XML testcase
            # files matching the given pattern
            else:
                orig_tc_abspath = Utils.file_Utils.getAbsPath(
                    orig_tc_path, os.path.dirname(testsuite_filepath))
                print_info("Provided testcase path: '{}' has asterisk(*) in "
                           "it. All the Warrior testcase XML files matching "
                           "the given pattern will be executed.".format(
                               orig_tc_abspath))
                # Get all the files matching the pattern and sort them by name
                all_files = sorted(glob.glob(orig_tc_abspath))
                # Get XML files
                xml_files = [fl for fl in all_files if fl.endswith('.xml')]
                tc_files = []
                # Get Warrior testcase XML files
                for xml_file in xml_files:
                    root = Utils.xml_Utils.getRoot(xml_file)
                    if root.tag.upper() == "TESTCASE":
                        tc_files.append(xml_file)
                # Copy the XML object and set the filepath as path value for
                # all the files matching the pattern
                if tc_files:
                    for tc_file in tc_files:
                        new_tc = copy.deepcopy(orig_tc)
                        new_tc.find('path').text = tc_file
                        new_testcase_list.append(new_tc)
                        print_info("Testcase: '{}' added to the execution "
                                   "list ".format(tc_file))
                else:
                    print_warning(
                        "Asterisk(*) pattern match failed for '{}' due "
                        "to at least one of the following reasons:\n"
                        "1. No files matched the given pattern\n"
                        "2. Invalid testcase path is given\n"
                        "3. No testcase XMLs are available\n"
                        "Given path will be used for the Warrior "
                        "execution.".format(orig_tc_abspath))
                    new_testcase_list.append(orig_tc)

        # execute tc multiple times
        for tc in new_testcase_list:
            runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
                tc)
            retry_type, _, _, retry_value, _ = common_execution_utils.get_retry_from_xmlfile(
                tc)
            if runmode is not None and value > 0:
                # more than one testcase in the list, insert new testcase
                if len(new_testcase_list) > 1:
                    go_next = len(testcase_list) + value + 1
                    for i in range(0, value):
                        copy_tc = copy.deepcopy(tc)
                        # ElementTree attribute values must be strings
                        copy_tc.find("runmode").set("value", str(go_next))
                        copy_tc.find("runmode").set("attempt", str(i + 1))
                        testcase_list.append(copy_tc)
                # only one testcase in the list, append the copy
                else:
                    for i in range(0, value):
                        copy_tc = copy.deepcopy(tc)
                        copy_tc.find("runmode").set("attempt", str(i + 1))
                        testcase_list.append(copy_tc)
            if retry_type is not None and retry_value > 0:
                if len(new_testcase_list) > 1:
                    go_next = len(testcase_list) + retry_value + 1
                    if runmode is not None:
                        get_runmode = tc.find('runmode')
                        tc.remove(get_runmode)
                    for i in range(0, retry_value):
                        copy_tc = copy.deepcopy(tc)
                        copy_tc.find("retry").set("count", go_next)
                        copy_tc.find("retry").set("attempt", i + 1)
                        testcase_list.append(copy_tc)
                else:
                    if runmode is not None:
                        get_runmode = tc.find('runmode')
                        tc.remove(get_runmode)
                    for i in range(0, retry_value):
                        copy_tc = copy.deepcopy(tc)
                        copy_tc.find("retry").set("attempt", i + 1)
                        testcase_list.append(copy_tc)
            if retry_type is None and runmode is None:
                testcase_list.append(tc)
        return testcase_list
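
To make the runmode expansion above concrete, here is a self-contained sketch using xml.etree.ElementTree. The element shape (a <Testcase> with <path> and <runmode> children) is inferred from the find() calls above; the file name is illustrative:

import copy
import xml.etree.ElementTree as ET

# A <Testcase> element shaped like the lookups above; 'demo.xml' is an
# illustrative path, not a real file.
tc = ET.fromstring(
    "<Testcase><path>demo.xml</path><runmode value='3'/></Testcase>")

expanded = []
value = int(tc.find('runmode').get('value'))
for i in range(value):
    copy_tc = copy.deepcopy(tc)
    # ElementTree attribute values must be strings, hence str(i + 1)
    copy_tc.find('runmode').set('attempt', str(i + 1))
    expanded.append(copy_tc)

print([e.find('runmode').get('attempt') for e in expanded])
# ['1', '2', '3'] -- one scheduled execution per attempt
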
Example #10
def execute_sequential_testsuites(testsuite_list, project_repository,
                                  data_repository, auto_defects):
    """ Executes suites in a project sequentially """

    suite_cntr = 0
    goto_testsuite = False
    ts_status_list = []
    ts_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}

    project_error_action = project_repository['def_on_error_action']
    project_filepath = project_repository['project_filepath']
    project_dir = os.path.dirname(project_filepath)
    wp_results_execdir = project_repository['wp_results_execdir']
    wp_logs_execdir = project_repository['wp_logs_execdir']
    project_error_value = project_repository['def_on_error_value']

    jiraproj = data_repository['jiraproj']
    pj_junit_object = data_repository['wt_junit_object']

    while suite_cntr < len(testsuite_list):
        testsuite = testsuite_list[suite_cntr]
        suite_cntr += 1

        testsuite_rel_path = testsuite_utils.get_path_from_xmlfile(testsuite)
        if testsuite_rel_path is not None:
            testsuite_path = Utils.file_Utils.getAbsPath(
                testsuite_rel_path, project_dir)
        else:
            testsuite_path = str(testsuite_rel_path)
        print_info("\n")
        print_debug("<<<< Starting execution of Test suite: {0}>>>>".format(
            testsuite_path))
        action, testsuite_status = exec_type_driver.main(testsuite)
        testsuite_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
            testsuite)
        testsuite_name = Utils.file_Utils.getFileName(testsuite_path)
        testsuite_nameonly = Utils.file_Utils.getNameOnly(testsuite_name)
        ts_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testsuite, 'onError', 'action')
        ts_onError_action = ts_onError_action if ts_onError_action else project_error_action
        if Utils.file_Utils.fileExists(testsuite_path):
            if not goto_testsuite and action is True:

                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                testsuite_status = testsuite_result[0]

            elif goto_testsuite and goto_testsuite == str(suite_cntr)\
                    and action is True:
                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                goto_testsuite = False
                testsuite_status = testsuite_result[0]

            else:
                msg = print_info(
                    'skipped testsuite: {0} '.format(testsuite_path))
                tmp_timestamp = str(
                    Utils.datetime_utils.get_current_timestamp())
                time.sleep(2)
                pj_junit_object.create_testsuite(
                    location=os.path.dirname(testsuite_path),
                    name=testsuite_nameonly,
                    timestamp=tmp_timestamp,
                    **pj_junit_object.init_arg())
                pj_junit_object.update_attr("status", "SKIPPED", "ts",
                                            tmp_timestamp)
                pj_junit_object.update_attr("skipped", "1", "pj",
                                            tmp_timestamp)
                pj_junit_object.update_count("suites", "1", "pj",
                                             tmp_timestamp)
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "SKIP"
                pj_junit_object.update_attr(
                    "impact", impact_dict.get(testsuite_impact.upper()), "ts",
                    tmp_timestamp)
                pj_junit_object.update_attr("onerror", "N/A", "ts",
                                            tmp_timestamp)
                pj_junit_object.output_junit(wp_results_execdir,
                                             print_summary=False)
                continue

        else:

            msg = print_error("Test suite does not exist in "
                              "provided path: {0}".format(testsuite_path))
            testsuite_status = 'ERROR'
            if goto_testsuite and goto_testsuite == str(suite_cntr):
                goto_testsuite = False
            elif goto_testsuite and goto_testsuite != str(suite_cntr):
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "ERROR"
                continue

        goto_testsuite_num = onerror_driver.main(testsuite,
                                                 project_error_action,
                                                 project_error_value)
        if goto_testsuite_num is False:
            onerror = "Next"
        elif goto_testsuite_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_testsuite_num)
        pj_junit_object.update_attr("impact",
                                    impact_dict.get(testsuite_impact.upper()),
                                    "ts", data_repository['wt_ts_timestamp'])
        pj_junit_object.update_attr("onerror", onerror, "ts",
                                    data_repository['wt_ts_timestamp'])

        string_status = {
            "TRUE": "PASS",
            "FALSE": "FAIL",
            "ERROR": "ERROR",
            "SKIP": "SKIP",
            "RAN": "RAN"
        }

        if str(testsuite_status).upper() in string_status:
            data_repository['testsuite_{}_result'.format(suite_cntr)] = \
                string_status[str(testsuite_status).upper()]
        else:
            print_error("unexpected testsuite status, default to exception")
            data_repository['testsuite_%d_result' % suite_cntr] = "ERROR"

        ts_status_list.append(testsuite_status)
        ts_impact_list.append(testsuite_impact)
        if testsuite_impact.upper() == 'IMPACT':
            msg = "Status of the executed test suite impacts Project result"
        elif testsuite_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test suite does not impact project result"
        print_debug(msg)

        runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
            testsuite)
        retry_type, retry_cond, retry_cond_value, retry_value,\
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testsuite)
        if runmode is not None:
            if testsuite.find("runmode") is not None and\
              testsuite.find("runmode").get("attempt") is not None:
                print_info("runmode attempt: {0}".format(
                    testsuite.find("runmode").get("attempt")))
            # if runmode is 'ruf' & step_status is False, skip the repeated
            # execution of same TC step and move to next actual step
            if not project_error_value and runmode == "RUF" and\
                    testsuite_status is False:
                goto_testsuite = str(value)
            # if runmode is 'rup' & step_status is True, skip the repeated
            # execution of same TC step and move to next actual step
            elif runmode == "RUP" and testsuite_status is True:
                goto_testsuite = str(value)
        elif retry_type is not None:
            if testsuite.find("retry") is not None and\
              testsuite.find("retry").get("attempt") is not None:
                print_info("retry attempt: {0}".format(
                    testsuite.find("retry").get("attempt")))
            if retry_type.upper() == 'IF':
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                            retry_interval))
                        pNote("The given condition '{0}' matches the expected"
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning(
                            "The condition value '{0}' does not match with the expected "
                            "value '{1}'".format(data_repository[retry_cond],
                                                 retry_cond_value))
                except KeyError:
                    print_warning(
                        "The given condition '{0}' does not exist in "
                        "the data repository".format(retry_cond))
                    condition_met = False
                if condition_met is False:
                    goto_testsuite = str(retry_value)
            elif retry_type.upper() == 'IF NOT':
                try:
                    if data_repository[retry_cond] != retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before "
                              "retrying".format(retry_interval))
                        pNote("The condition value '{0}' does not match "
                              "with the expected value '{1}'".format(
                                  data_repository[retry_cond],
                                  retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                except KeyError:
                    condition_met = False
                    print_warning(
                        "The given condition '{0}' is not there "
                        "in the data repository".format(retry_cond))
                if condition_met is False:
                    pNote("The given condition '{0}' matched with the "
                          "value '{1}'".format(
                              data_repository.get(retry_cond),
                              retry_cond_value))
                    goto_testsuite = str(retry_value)
        else:
            if testsuite_status is False or testsuite_status == "ERROR" or\
                    testsuite_status == "EXCEPTION":
                goto_testsuite = onerror_driver.main(testsuite,
                                                     project_error_action,
                                                     project_error_value)
            if goto_testsuite in ['ABORT', 'ABORT_AS_ERROR']:
                break
            # when 'onError:goto' value is less than the current ts num,
            # change the next iteration point to goto value
            elif goto_testsuite and int(goto_testsuite) < suite_cntr:
                suite_cntr = int(goto_testsuite) - 1
                goto_testsuite = False

    project_status = Utils.testcase_Utils.compute_status_using_impact(
        ts_status_list, ts_impact_list)

    return project_status
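
Both drivers in this document resolve the return value of onerror_driver.main into the JUnit "onerror" attribute the same way. A compact sketch of that contract as observed above (False means run the next item, "ABORT" stops, anything else is a goto target); onerror_driver holds the real logic, this only mirrors the mapping:

# Sketch of the onerror contract as used above; purely illustrative.
def describe_onerror(goto_value):
    if goto_value is False:
        return "Next"            # continue with the next suite/testcase
    if goto_value == "ABORT":
        return "Abort"           # stop the project/suite execution
    return "Goto:" + str(goto_value)  # jump to the given item number

assert describe_onerror(False) == "Next"
assert describe_onerror("ABORT") == "Abort"
assert describe_onerror(4) == "Goto:4"
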