Example #1
def execute_step(step, step_num, data_repository, system_name, parallel,
                 queue):
    """ Executes a step from the testcase xml file
        - Parses a step from the testcase xml file
        - Get the values of Driver, Keyword, impactsTcResult
        - If the step has arguments, get all the arguments and store them as key/value pairs in args_repository
        - Sends the Keyword, data_repository, args_repository to the respective Driver.
        - Reports the status of the keyword executed (obtained as return value from the respective Driver)

    Arguments:
    1. step            = (xml element) xml element with tag <step> containing the details of the step to be executed like (Driver, Keyword, Arguments, Impact etc..)
    2. step_num        = (int) step number being executed
    3. data_repository = (dict) data_repository of the testcase
    """

    tc_junit_object = data_repository['wt_junit_object']
    driver = step.get('Driver')
    keyword = step.get('Keyword')
    context = Utils.testcase_Utils.get_context_from_xmlfile(step)
    step_impact = Utils.testcase_Utils.get_impact_from_xmlfile(step)
    step_description = Utils.testcase_Utils.get_description_from_xmlfile(step)

    if parallel is True:
        step_console_log = get_step_console_log(
            data_repository['wt_filename'], data_repository['wt_logsdir'],
            'step-{0}_{1}_consoleLogs'.format(step_num, keyword))

    data_repository['step_num'] = step_num
    data_repository['wt_driver'] = driver
    data_repository['wt_keyword'] = keyword
    data_repository['wt_step_impact'] = step_impact
    data_repository['wt_step_context'] = context
    data_repository['wt_step_description'] = step_description

    kw_resultfile = get_keyword_resultfile(data_repository, system_name,
                                           step_num, keyword)
    Utils.config_Utils.set_resultfile(kw_resultfile)
    # print keyword to result file
    Utils.testcase_Utils.pKeyword(keyword, driver)
    print_info("step number: {0}".format(step_num))
    print_info("Teststep Description: {0}".format(step_description))

    if step.find("runmode") is not None and step.find("runmode").get(
            "attempt") is not None:
        print_info("keyword attempt: {0}".format(
            step.find("runmode").get("attempt")))
    if step.find("retry") is not None and step.find("retry").get(
            "attempt") is not None:
        print_info("keyword attempt: {0}".format(
            step.find("retry").get("attempt")))
    kw_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Keyword execution starts".format(kw_start_time))
    # get argument list provided by user
    args_repository = get_arguments(step)
    if system_name is not None:
        args_repository['system_name'] = system_name
    Utils.testcase_Utils.update_arguments(args_repository)
    Utils.testcase_Utils.update_kw_resultfile(kw_resultfile)

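    # exec_type_driver decides whether this step should run at all:
    # action is True to execute, 'SKIP' to skip it, or False to treat
    # the step as an onError condition (see exec_type_onerror below).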
    exec_type_onerror = False
    action, keyword_status = exec_type_driver.main(step)

    if action is True:
        send_keyword_to_productdriver(driver, keyword, data_repository,
                                      args_repository)
        keyword_status = data_repository['step-%s_status' % step_num]
        Utils.testcase_Utils.update_step_num(str(step_num))
        if context.upper() == 'NEGATIVE' and isinstance(keyword_status, bool):
            print_debug(
                "Keyword status = {0}, Flip status as context is Negative".
                format(keyword_status))
            keyword_status = not keyword_status
    elif action == 'SKIP':
        print_debug("Action is {0}".format(action))

    elif action is False:
        exec_type_onerror = True
        print_debug("Action is {0}".format(action))

    print("\n")
    print_info("*** Keyword status ***")
    step_goto_value = False
    step_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
        step, 'onError', 'action')
    if step_onError_action is not False:
        if step_onError_action.upper() == 'GOTO':
            step_goto_value = Utils.xml_Utils.get_attributevalue_from_directchildnode(
                step, 'onError', 'value')
    testcase_error_action = data_repository['wt_def_on_error_action']
    step_onError_action = step_onError_action if step_onError_action else testcase_error_action
    if step_onError_action.upper() == "GOTO" and step_goto_value == False:
        step_goto_value = data_repository['wt_def_on_error_value']
    onerror = step_onError_action.upper()
    if step_goto_value is not False and step_goto_value is not None:
        onerror = onerror + " step " + step_goto_value
    if keyword_status == False and step_onError_action and step_onError_action.upper(
    ) == 'ABORT_AS_ERROR':
        print_info(
            "Keyword status will be marked as ERROR as onError action is set to 'abort_as_error'"
        )
        keyword_status = "ERROR"
    Utils.testcase_Utils.reportKeywordStatus(keyword_status, keyword)
    print_info("step number: {0}".format(step_num))

    string_status = {
        "TRUE": "PASS",
        "FALSE": "FAIL",
        "ERROR": "ERROR",
        "EXCEPTION": "EXCEPTION",
        "SKIP": "SKIP"
    }

    if str(keyword_status).upper() in string_status:
        data_repository['step_%s_result' % step_num] = \
            string_status[str(keyword_status).upper()]
    else:
        print_error("unexpected step status, defaulting to EXCEPTION")
        data_repository['step_%s_result' % step_num] = "EXCEPTION"

    if step_impact.upper() == 'IMPACT':
        msg = "Status of the executed step impacts TC result"
        if str(keyword_status).upper() == 'SKIP':
            keyword_status = None
    elif step_impact.upper() == 'NOIMPACT':
        msg = "Status of the executed step does not impact TC result"
    Utils.testcase_Utils.pNote_level(msg, "debug", "kw")
    if 'step-%s_exception' % step_num in data_repository:
        msg = "Exception message: " + \
            data_repository['step-%s_exception' % step_num]
        Utils.testcase_Utils.pNote_level(msg, "debug", "kw", ptc=False)
    # time.sleep(1)
    print("\n")
    kw_end_time = Utils.datetime_utils.get_current_timestamp()
    tc_duration = Utils.datetime_utils.get_time_delta(kw_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(tc_duration)
    print_info("Keyword duration= {0}".format(hms))
    print_info("[{0}] Keyword execution completed".format(kw_end_time))

    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    tc_junit_object.add_keyword_result(data_repository['wt_tc_timestamp'],
                                       step_num, keyword, str(keyword_status),
                                       kw_start_time, tc_duration,
                                       kw_resultfile,
                                       impact_dict.get(step_impact.upper()),
                                       onerror)
    tc_junit_object.update_count(str(keyword_status), "1", "tc",
                                 data_repository['wt_tc_timestamp'])
    tc_junit_object.update_count("keywords", "1", "tc",
                                 data_repository['wt_tc_timestamp'])

    if parallel is True:
        # put result into multiprocessing queue and later retrieve in
        # corresponding driver
        queue.put((keyword_status, kw_resultfile, step_impact.upper(),
                   tc_junit_object))
    else:
        return keyword_status, kw_resultfile, step_impact, exec_type_onerror
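
For context, a direct sequential call could look like the minimal sketch below. It is illustrative only: the <step> element is built with xml.etree.ElementTree instead of being parsed from a testcase xml file, data_repository is assumed to have been populated by the Warrior testcase driver (keys such as wt_junit_object, wt_filename, wt_logsdir and the wt_def_on_error_* defaults), and demo_driver/demo_keyword are hypothetical names.

# Minimal caller sketch (hypothetical) -- assumes a data_repository already
# initialised by the Warrior testcase driver; not a standalone script.
import xml.etree.ElementTree as ET

step = ET.fromstring(
    '<step Driver="demo_driver" Keyword="demo_keyword">'
    '<arguments><argument name="ip" value="10.0.0.1"/></arguments>'
    '</step>')

# parallel=False: no multiprocessing queue is needed and the results
# are returned directly instead of being put on a queue.
keyword_status, kw_resultfile, step_impact, exec_type_onerror = execute_step(
    step, step_num=1, data_repository=data_repository,
    system_name=None, parallel=False, queue=None)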
Example #2
def execute_project(project_filepath, auto_defects, jiraproj, res_startdir,
                    logs_startdir, data_repository):
    """
    - Takes a list of testsuite locations input.
    - Iterates over the list and sends each testsuite
    location to testsuite_driver for execution.
    - Gets the status of the testsuite from the
    Warrior and computes the project_status based on the impact value
    of the testsuite.
    - If the testsuite fails, handles the failure using
    the default or specific  onError action,value.
    - Finally reports the project status to the result file.

    Arguments:
    1. testsuite_list        = (list) list of testsuite locations
    2. testsuite_driver      = (module loader) module loader of the testsuite_driver
    3. project_repository    = (dict) dictionary containing all data of the project under execution
    """
    project_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution starts".format(project_start_time))
    suite_cntr = 0
    # project_status = True
    goto_testsuite = False
    ts_status_list = []
    ts_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    project_dir = os.path.dirname(project_filepath)
    project_title = Utils.xml_Utils.getChildTextbyParentTag(
        project_filepath, 'Details', 'Title')
    project_repository = get_project_details(project_filepath, res_startdir,
                                             logs_startdir, data_repository)
    project_repository['project_title'] = project_title
    testsuite_list = get_testsuite_list(project_filepath)

    # project_resultfile = project_repository['project_resultfile']

    project_name = project_repository['project_name']
    wp_results_execdir = project_repository['wp_results_execdir']
    data_repository['wp_results_execdir'] = wp_results_execdir
    wp_logs_execdir = project_repository['wp_logs_execdir']

    project_error_action = project_repository['def_on_error_action']
    project_error_value = project_repository['def_on_error_value']

    pj_junit_object = junit_class.Junit(filename=project_name,
                                        timestamp=project_start_time,
                                        name=project_name,
                                        display="True")

    pj_junit_object.update_attr("resultsdir",
                                project_repository['project_execution_dir'],
                                "pj", project_start_time)
    pj_junit_object.update_attr("title", project_repository['project_title'],
                                "pj", project_start_time)
    pj_junit_object.add_property("resultsdir",
                                 project_repository['project_execution_dir'],
                                 "pj", project_start_time)

    # adding the resultsdir as an attribute; to be removed once it
    # becomes a property
    pj_junit_object.add_project_location(project_filepath)
    if "jobid" in data_repository:
        pj_junit_object.add_jobid(data_repository["jobid"])
        del data_repository["jobid"]
    data_repository['wt_junit_object'] = pj_junit_object

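    # Iterate by index instead of a plain for loop so that an onError
    # 'goto' can rewind suite_cntr and re-enter an earlier testsuite.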
    while suite_cntr < len(testsuite_list):
        testsuite = testsuite_list[suite_cntr]
        # suite_junit_type = 'file'
        suite_cntr += 1

        testsuite_rel_path = testsuite_utils.get_path_from_xmlfile(testsuite)
        if testsuite_rel_path is not None:
            testsuite_path = Utils.file_Utils.getAbsPath(
                testsuite_rel_path, project_dir)
        else:
            testsuite_path = str(testsuite_rel_path)
        print_info("\n")
        print_debug("<<<< Starting execution of Test suite: {0}>>>>".format(
            testsuite_path))
        action, testsuite_status = exec_type_driver.main(testsuite)
        testsuite_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
            testsuite)
        testsuite_name = Utils.file_Utils.getFileName(testsuite_path)
        testsuite_nameonly = Utils.file_Utils.getNameOnly(testsuite_name)
        ts_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testsuite, 'onError', 'action')
        ts_onError_action = ts_onError_action if ts_onError_action else project_error_action
        if Utils.file_Utils.fileExists(testsuite_path):
            if not goto_testsuite and action is True:

                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]

            elif goto_testsuite and goto_testsuite == str(suite_cntr)\
                    and action is True:
                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                goto_testsuite = False
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]

            else:
                msg = print_info(
                    'skipped testsuite: {0} '.format(testsuite_path))
                testsuite_resultfile = (
                    '<testsuite errors="0" failures="0" name="{0}" '
                    'skipped="0" tests="0" time="0" timestamp="{1}" > '
                    '<skipped message="{2}"/> </testsuite>'.format(
                        testsuite_name, project_start_time, msg))
                tmp_timestamp = str(
                    Utils.datetime_utils.get_current_timestamp())
                time.sleep(2)
                pj_junit_object.create_testsuite(
                    location=os.path.dirname(testsuite_path),
                    name=testsuite_nameonly,
                    timestamp=tmp_timestamp,
                    **pj_junit_object.init_arg())
                pj_junit_object.update_attr("status", "SKIPPED", "ts",
                                            tmp_timestamp)
                pj_junit_object.update_attr("skipped", "1", "pj",
                                            tmp_timestamp)
                pj_junit_object.update_count("suites", "1", "pj",
                                             tmp_timestamp)
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "SKIP"
                # pj_junit_object.add_testcase_message(tmp_timestamp, "skipped")
                pj_junit_object.update_attr(
                    "impact", impact_dict.get(testsuite_impact.upper()), "ts",
                    tmp_timestamp)
                pj_junit_object.update_attr("onerror", "N/A", "ts",
                                            tmp_timestamp)
                pj_junit_object.output_junit(wp_results_execdir,
                                             print_summary=False)
                continue

        else:

            msg = print_error("Test suite does not exist in "
                              "provided path: {0}".format(testsuite_path))
            testsuite_status = 'ERROR'
            testsuite_resultfile = (
                '<testsuite errors="0" failures="0" name="{0}" '
                'skipped="0" tests="0" time="0" timestamp="{1}" > '
                '<error message="{2}"/> </testsuite>'.format(
                    testsuite_name, project_start_time, msg))
            # suite_junit_type = 'string'
            if goto_testsuite and goto_testsuite == str(suite_cntr):
                goto_testsuite = False
            elif goto_testsuite and goto_testsuite != str(suite_cntr):
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "ERROR"
                continue

        goto_testsuite_num = onerror_driver.main(testsuite,
                                                 project_error_action,
                                                 project_error_value)
        if goto_testsuite_num is False:
            onerror = "Next"
        elif goto_testsuite_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_testsuite_num)
        pj_junit_object.update_attr("impact",
                                    impact_dict.get(testsuite_impact.upper()),
                                    "ts", data_repository['wt_ts_timestamp'])
        pj_junit_object.update_attr("onerror", onerror, "ts",
                                    data_repository['wt_ts_timestamp'])

        string_status = {
            "TRUE": "PASS",
            "FALSE": "FAIL",
            "ERROR": "ERROR",
            "SKIP": "SKIP"
        }

        if str(testsuite_status).upper() in string_status:
            data_repository['testsuite_{}_result'.format(suite_cntr)] = \
                string_status[str(testsuite_status).upper()]
        else:
            print_error("unexpected testsuite status, defaulting to ERROR")
            data_repository['testsuite_{}_result'.format(suite_cntr)] = "ERROR"

        ts_status_list.append(testsuite_status)
        ts_impact_list.append(testsuite_impact)
        if testsuite_impact.upper() == 'IMPACT':
            msg = "Status of the executed test suite impacts Project result"
        elif testsuite_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test suite does not impact project result"
        print_debug(msg)
        # project_status = compute_project_status(project_status, testsuite_status,
        #                                                 testsuite_impact)
        runmode, value = common_execution_utils.get_runmode_from_xmlfile(
            testsuite)
        retry_type, retry_cond, retry_cond_value, retry_value,\
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testsuite)
        if runmode is not None:
            if testsuite.find("runmode") is not None and\
              testsuite.find("runmode").get("attempt") is not None:
                print_info("runmode attempt: {0}".format(
                    testsuite.find("runmode").get("attempt")))
            # if runmode is 'ruf' & testsuite_status is False, skip the repeated execution of same
            # test suite and move to next actual test suite
            if not project_error_value and runmode == "RUF" and\
                    testsuite_status is False:
                goto_testsuite = str(value)
            # if runmode is 'rup' & testsuite_status is True, skip the repeated
            # execution of same testsuite and move to next actual testsuite
            elif runmode == "RUP" and testsuite_status is True:
                goto_testsuite = str(value)
        elif retry_type is not None:
            if testsuite.find("retry") is not None and\
              testsuite.find("retry").get("attempt") is not None:
                print_info("retry attempt: {0}".format(
                    testsuite.find("retry").get("attempt")))
            if retry_type.upper() == 'IF':
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                            retry_interval))
                        pNote("The given condition '{0}' matches the expected"
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning(
                            "The condition value '{0}' does not match with the expected "
                            "value '{1}'".format(data_repository[retry_cond],
                                                 retry_cond_value))
                except KeyError:
                    print_warning(
                        "The given condition '{0}' does not exist in "
                        "the data repository".format(retry_cond))
                    condition_met = False
                if condition_met is False:
                    goto_testsuite = str(retry_value)
            elif retry_type.upper() == 'IF NOT':
                try:
                    if data_repository[retry_cond] != retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before "
                              "retrying".format(retry_interval))
                        pNote("The condition value '{0}' does not match "
                              "with the expected value '{1}'".format(
                                  data_repository[retry_cond],
                                  retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                except KeyError:
                    condition_met = False
                    print_warning(
                        "The given condition '{0}' does not exist "
                        "in the data repository".format(retry_cond))
                if condition_met is False:
                    pNote("The given condition '{0}' matched the expected "
                          "value '{1}'".format(
                              data_repository.get(retry_cond),
                              retry_cond_value))
                    goto_testsuite = str(retry_value)
        else:
            if testsuite_status is False or testsuite_status == "ERROR" or\
                    testsuite_status == "EXCEPTION":
                goto_testsuite = onerror_driver.main(testsuite,
                                                     project_error_action,
                                                     project_error_value)
            if goto_testsuite in ['ABORT', 'ABORT_AS_ERROR']:
                break
            # when 'onError:goto' value is less than the current ts num,
            # change the next iteration point to goto value
            elif goto_testsuite and int(goto_testsuite) < suite_cntr:
                suite_cntr = int(goto_testsuite) - 1
                goto_testsuite = False

    project_status = Utils.testcase_Utils.compute_status_using_impact(
        ts_status_list, ts_impact_list)
    print_info("\n")
    project_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution completed".format(project_end_time))
    project_duration = Utils.datetime_utils.get_time_delta(project_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(project_duration)
    print_info("Project duration= {0}".format(hms))

    project_status = report_project_result(project_status, project_repository)
    pj_junit_object.update_attr("status", str(project_status), "pj",
                                project_start_time)
    pj_junit_object.update_attr("time", str(project_duration), "pj",
                                project_start_time)

    pj_junit_object.output_junit(wp_results_execdir)

    # Save JUnit/HTML results of the Project in MongoDB server
    if data_repository.get("db_obj") is not False:
        pj_junit_xml = project_repository['wp_results_execdir'] +\
            os.sep + pj_junit_object.filename + "_junit.xml"
        data_repository.get("db_obj").add_html_result_to_mongodb(pj_junit_xml)

    return project_status, project_repository
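
A top-level invocation could look like the following minimal sketch; the project path is a placeholder, and setting db_obj to False simply disables the MongoDB upload branch at the end of the function.

# Hypothetical invocation sketch -- the path is a placeholder and the
# Warrior drivers/utils imported by this module are assumed to be available.
data_repository = {"db_obj": False}  # skip the MongoDB result upload
project_status, project_repository = execute_project(
    project_filepath="/workspace/projects/sample_project.xml",
    auto_defects=False,   # no automatic Jira defect creation
    jiraproj=None,
    res_startdir=None,    # use the default results directory
    logs_startdir=None,   # use the default logs directory
    data_repository=data_repository)
print("project status: {0}".format(project_status))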
Example #3
def execute_sequential_testcases(testcase_list, suite_repository,
                                 data_repository, from_project, auto_defects,
                                 iter_ts_sys, tc_parallel, queue, ts_iter):
    """Executes the list of cases(of a suite) in sequential order
        - Takes a testcase_list as input and sends
        each case to Basedriver for execution.
        - Computes the suite status based on the case_status
        and the impact value of the case
        - Handles case failures as per the default/specific
        onError action/value
        - Calls the function to report the suite status

    :Arguments:
        1. testcase_list(list) = List of cases to be executed
        2. suite_repository(dict) = suite repository
        3. data_repository(dict) = Warrior data repository
        4. from_project(boolean) = True for Project execution else False
        5. auto_defects(boolean) = True for Jira auto defect creation else False
        6. iter_ts_sys(string) = System for iterative execution
        7. tc_parallel(boolean) = True for Parallel execution else False
        8. queue = Python multiprocessing queue for parallel execution
        9. ts_iter(boolean) = True for 'iterative_parallel' execution else False
    :Returns:
        1. suite_status - overall suite status

    """
    goto_tc = False

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    testsuite_filepath = suite_repository['testsuite_filepath']
    suite_error_action = suite_repository['def_on_error_action']
    suite_error_value = suite_repository['def_on_error_value']
    testsuite_dir = os.path.dirname(testsuite_filepath)

    errors = 0
    skipped = 0
    failures = 0
    tests = 0
    tc_duration = 0
    tc_status_list = []
    tc_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    tc_duration_list = []
    tc_junit_list = []

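    # 'tests' is both the loop index and the executed-test counter; an
    # onError 'goto' can rewind it so an earlier case runs again.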
    while tests < len(testcase_list):
        testcase = testcase_list[tests]
        tests += 1

        tc_rel_path = testsuite_utils.get_path_from_xmlfile(testcase)
        if tc_rel_path is not None:
            tc_path = Utils.file_Utils.getAbsPath(tc_rel_path, testsuite_dir)
        else:
            # tc_rel_path is None: tc_path becomes the string 'None', so the
            # file-exists check below fails and the case is handled as an error
            tc_path = str(tc_rel_path)
        print_info('\n')
        print_debug(
            "<<<< Starting execution of Test case: {0}>>>>".format(tc_path))
        action, tc_status = exec_type_driver.main(testcase)
        tc_runtype = testsuite_utils.get_runtype_from_xmlfile(testcase)
        tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(testcase)
        tc_context = Utils.testcase_Utils.get_context_from_xmlfile(testcase)
        suite_step_data_file = testsuite_utils.get_data_file_at_suite_step(
            testcase, suite_repository)
        tc_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testcase, 'onError', 'action')
        tc_onError_action = tc_onError_action if tc_onError_action else suite_error_action
        if suite_step_data_file is not None:
            data_file = Utils.file_Utils.getAbsPath(suite_step_data_file,
                                                    testsuite_dir)
            data_repository[tc_path] = data_file
        data_repository['wt_tc_impact'] = tc_impact
        if testcase.find("runmode") is not None and \
           testcase.find("runmode").get("attempt") is not None:
            print_info("testcase attempt: {0}".format(
                testcase.find("runmode").get("attempt")))
        if testcase.find("retry") is not None and \
           testcase.find("retry").get("attempt") is not None:
            print_info("testcase attempt: {0}".format(
                testcase.find("retry").get("attempt")))

        if Utils.file_Utils.fileExists(tc_path):
            tc_name = Utils.file_Utils.getFileName(tc_path)
            testsuite_utils.pSuite_testcase(junit_resultfile,
                                            suite_name,
                                            tc_name,
                                            time='0')

            if not goto_tc and action is True:
                try:
                    tc_result = testcase_driver.main(
                        tc_path,
                        data_repository,
                        tc_context,
                        runtype=tc_runtype,
                        auto_defects=auto_defects,
                        suite=suite_name,
                        tc_onError_action=tc_onError_action,
                        iter_ts_sys=iter_ts_sys)

                    tc_status = tc_result[0]
                    tc_duration = tc_result[1]
                except Exception:
                    print_error('unexpected error {0}'.format(
                        traceback.format_exc()))
                    tc_status, tc_duration = False, False
                    tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
                        testcase)

            elif goto_tc and goto_tc == str(tests) and action is True:

                try:
                    tc_result = testcase_driver.main(
                        tc_path,
                        data_repository,
                        tc_context,
                        runtype=tc_runtype,
                        auto_defects=auto_defects,
                        suite=suite_name,
                        tc_onError_action=tc_onError_action,
                        iter_ts_sys=iter_ts_sys)
                    tc_status = tc_result[0]
                    tc_duration = tc_result[1]
                    goto_tc = False

                except Exception:
                    print_error('unexpected error {0}'.format(
                        traceback.format_exc()))
                    tc_status, tc_duration = False, False
                    tc_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
                        testcase)

            else:
                print_info('skipped testcase %s ' % tc_name)
                skipped += 1
                testsuite_utils.pSuite_testcase_skip(junit_resultfile)
                testsuite_utils.pSuite_update_suite_attributes(
                    junit_resultfile,
                    str(errors),
                    str(skipped),
                    str(tests),
                    str(failures),
                    time='0')
                data_repository['wt_junit_object'].update_count(
                    "skipped", "1", "ts", data_repository['wt_ts_timestamp'])
                data_repository['wt_junit_object'].update_count(
                    "tests", "1", "ts", data_repository['wt_ts_timestamp'])
                data_repository['wt_junit_object'].update_count(
                    "tests", "1", "pj", "not applicable")
                tmp_timestamp = str(
                    Utils.datetime_utils.get_current_timestamp())
                time.sleep(2)
                data_repository['wt_junit_object'].create_testcase(
                    location="from testsuite",
                    timestamp=tmp_timestamp,
                    ts_timestamp=data_repository['wt_ts_timestamp'],
                    classname=data_repository['wt_suite_name'],
                    name=os.path.splitext(tc_name)[0])
                data_repository['wt_junit_object'].add_testcase_message(
                    tmp_timestamp, "skipped")
                data_repository['wt_junit_object'].update_attr(
                    "status", "SKIPPED", "tc", tmp_timestamp)
                data_repository['testcase_%d_result' % tests] = "SKIP"
                title = Utils.xml_Utils.getChildTextbyParentTag(
                    tc_path, 'Details', 'Title')
                title = title.strip() if title else "None"
                data_repository['wt_junit_object'].update_attr(
                    "title", title, "tc", tmp_timestamp)
                data_repository['wt_junit_object'].update_attr(
                    "impact", impact_dict.get(tc_impact.upper()), "tc",
                    tmp_timestamp)
                data_repository['wt_junit_object'].update_attr(
                    "onerror", "N/A", "tc", tmp_timestamp)
                data_repository['wt_junit_object'].output_junit(
                    data_repository['wt_results_execdir'], print_summary=False)
                continue

        else:
            errors += 1
            msg = print_error("Test case does not exist in the provided path: "
                              "{0}".format(tc_path))
            testsuite_utils.pSuite_testcase(junit_resultfile,
                                            suite_name,
                                            tc_path,
                                            time='0')
            testsuite_utils.pSuite_testcase_error(junit_resultfile, msg, '0')
            tc_status = "ERROR"
            if goto_tc and goto_tc == str(tests):
                goto_tc = False
            elif goto_tc and goto_tc != str(tests):
                data_repository['testcase_%d_result' % tests] = "ERROR"
                continue

        goto_tc_num = onerror_driver.main(testcase, suite_error_action,
                                          suite_error_value)
        if goto_tc_num is False:
            onerror = "Next"
        elif goto_tc_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_tc_num)
        data_repository['wt_junit_object'].update_attr(
            "impact", impact_dict.get(tc_impact.upper()), "tc",
            data_repository['wt_tc_timestamp'])
        data_repository['wt_junit_object'].update_attr(
            "onerror", onerror, "tc", data_repository['wt_tc_timestamp'])

        tc_status_list.append(tc_status)
        tc_duration_list.append(tc_duration)

        string_status = {
            "TRUE": "PASS",
            "FALSE": "FAIL",
            "ERROR": "ERROR",
            "SKIP": "SKIP",
            "RAN": "RAN"
        }

        if str(tc_status).upper() in string_status:
            data_repository['testcase_%d_result' % tests] = \
                string_status[str(tc_status).upper()]
        else:
            print_error("unexpected testcase status, defaulting to ERROR")
            data_repository['testcase_%d_result' % tests] = "ERROR"

        tc_impact_list.append(tc_impact)
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = ("Status of the executed test case does not impact "
                   "Testsuite result")
        print_debug(msg)

        runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
            testcase)
        retry_type, retry_cond, retry_cond_value, retry_value, \
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testcase)
        if runmode is not None:
            if tc_status is True:
                testsuite_utils.update_tc_duration(str(tc_duration))
                # if runmode is 'rup' & tc_status is True, skip the repeated
                # execution of same testcase and move to next actual testcase
                if runmode == "rup":
                    goto_tc = str(value)
            elif tc_status == 'ERROR' or tc_status == 'EXCEPTION':
                errors += 1
                testsuite_utils.pSuite_testcase_error(
                    junit_resultfile,
                    'Encountered error/exception during TC execution',
                    str(tc_duration))
                goto_tc = onerror_driver.main(testcase, suite_error_action,
                                              suite_error_value)
                if goto_tc in ['ABORT', 'ABORT_AS_ERROR']:
                    update_suite_attribs(junit_resultfile,
                                         str(errors),
                                         str(skipped),
                                         str(tests),
                                         str(failures),
                                         time='0')
                    break
                # when 'onError:goto' value is less than the current tc num,
                # change the next iteration point to goto value
                elif goto_tc and int(goto_tc) < tests:
                    tests = int(goto_tc) - 1
                    goto_tc = False
            elif tc_status is False:
                failures += 1
                testsuite_utils.pSuite_testcase_failure(junit_resultfile,
                                                        time=str(tc_duration))
                goto_tc = onerror_driver.main(testcase, suite_error_action,
                                              suite_error_value)
                if goto_tc in ['ABORT', 'ABORT_AS_ERROR']:
                    update_suite_attribs(junit_resultfile,
                                         str(errors),
                                         str(skipped),
                                         str(tests),
                                         str(failures),
                                         time='0')
                    break
                # when 'onError:goto' value is less than the current tc num,
                # change the next iteration point to goto value
                elif goto_tc and int(goto_tc) < tests:
                    tests = int(goto_tc) - 1
                    goto_tc = False
                # if runmode is 'ruf' & tc_status is False, skip the repeated
                # execution of same testcase and move to next actual testcase
                if not goto_tc and runmode == "ruf":
                    goto_tc = str(value)
        elif retry_type is not None:
            if retry_type.upper() == 'IF':
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                            retry_interval))
                        pNote("The given condition '{0}' matches the expected "
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning("The condition value '{0}' does not "
                                      "match with the expected value "
                                      "'{1}'".format(
                                          data_repository[retry_cond],
                                          retry_cond_value))
                except KeyError:
                    print_warning(
                        "The given condition '{0}' does not exist in "
                        "the data repository".format(retry_cond))
                    condition_met = False
                if condition_met is False:
                    goto_tc = str(retry_value)
            elif retry_type.upper() == 'IF NOT':
                try:
                    if data_repository[retry_cond] != retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                            retry_interval))
                        pNote("The condition value '{0}' does not match "
                              "with the expected value "
                              "'{1}'".format(data_repository[retry_cond],
                                             retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning("The given condition '{0}' matches "
                                      "the expected value "
                                      "'{1}'".format(
                                          data_repository[retry_cond],
                                          retry_cond_value))
                except KeyError:
                    condition_met = False
                    print_warning(
                        "The given condition '{0}' does not exist "
                        "in the data repository".format(retry_cond))
                if condition_met is False:
                    pNote("The given condition '{0}' matched the expected "
                          "value '{1}'".format(
                              data_repository.get(retry_cond),
                              retry_cond_value))
                    goto_tc = str(retry_value)


        # suite_status = testsuite_utils.compute_testsuite_status(
        #     suite_status, tc_status, tc_impact)
        update_suite_attribs(junit_resultfile,
                             str(errors),
                             str(skipped),
                             str(tests),
                             str(failures),
                             time='0')
        # junit_object/python_process is different for all the cases
        # executed in parallel
        if ts_iter is False:
            tc_junit_list.append(data_repository['wt_junit_object'])

    # junit_object/python_process is same for all the cases executed in the
    # same system for 'iterative_parallel' suite execution
    if ts_iter is True:
        tc_junit_list = data_repository['wt_junit_object']

    suite_status = Utils.testcase_Utils.compute_status_using_impact(
        tc_status_list, tc_impact_list)

    if tc_parallel:
        tc_impact = data_repository['wt_tc_impact']
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            msg = ("Status of the executed test case does not impact "
                   "Testsuite result")
        print_debug(msg)
        tc_name = Utils.file_Utils.getFileName(tc_path)
        # put result into multiprocessing queue and later retrieve in
        # corresponding driver
        queue.put((tc_status_list, tc_name, tc_impact_list, tc_duration_list,
                   tc_junit_list))
    return suite_status
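
A suite-level caller could wire this up as in the minimal sketch below. suite_repository and data_repository are assumed to have been prepared by the testsuite driver, and get_testcase_list is a hypothetical helper standing in for however the caller collects the <testcase> elements from the suite file.

# Hypothetical sequential-suite invocation sketch -- both repositories are
# assumed to be prepared by the Warrior testsuite driver beforehand.
testcase_list = get_testcase_list(suite_repository['testsuite_filepath'])
suite_status = execute_sequential_testcases(
    testcase_list, suite_repository, data_repository,
    from_project=False,   # running the suite standalone
    auto_defects=False,
    iter_ts_sys=None,     # no iterative-per-system execution
    tc_parallel=False,    # sequential: results returned, not queued
    queue=None,
    ts_iter=False)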