def _execute_runmode_step(self, runmode_timer, runmode, step_status, value):
    """Apply runmode (RMT/RUF/RUP) handling after a step attempt.

    Arguments:
    1. runmode_timer = (int/None) seconds to wait between runmode attempts
    2. runmode = (str) runmode type: "RMT", "RUF" or "RUP"
    3. step_status = (bool/str) status of the executed step attempt
    4. value = step number to jump to when the repeat loop is cut short
    Returns (current_step_number, go_to_step_number, loop_action) where
    loop_action is "continue" or "break" for the caller's step loop.
    """
    # NOTE(review): this branch compares runmode case-sensitively while the
    # branches below use runmode.upper() -- confirm runmode is always
    # upper-cased by the caller.
    runmode_evaluation = any([
        runmode == "RMT",
        runmode == "RUF" and step_status is True,
        runmode == "RUP" and step_status is False
    ])
    if runmode_timer is not None and runmode_evaluation:
        pNote("Wait for {0}sec before the next runmode attempt ".format(
            runmode_timer))
        wait_for_timeout(runmode_timer)
    # if runmode is 'ruf' & step_status is False, skip the repeated
    # execution of same TC step and move to next actual step
    elif runmode.upper() == "RUF" and isinstance(step_status, bool):
        # Element.get returns the attribute as a string; convert before
        # doing arithmetic (original code computed `runmode_value - 1` on
        # the raw string, raising TypeError on the RUF pass path).
        runmode_value = int(self.current_step.find("runmode").get("value"))
        if not step_status:
            self.go_to_step_number = str(value)
            return self.current_step_number, self.go_to_step_number, "continue"
        # Step passed on the final runmode attempt: hand off to onError
        # processing to decide where execution resumes.
        if step_status and self.current_step_number == runmode_value - 1:
            self.go_to_step_number = onerror_driver.main(
                self.current_step,
                self.default_error_action,
                self.default_error_value,
                skip_invoked=self.skip_invoked,
                current_step_number=self.current_step_number)
            return self.current_step_number, self.go_to_step_number, "break"
    # if runmode is 'rup' & step_status is True, skip the repeated
    # execution of same TC step and move to next actual step
    elif runmode.upper() == "RUP" and step_status is True:
        self.go_to_step_number = str(value)
    else:
        # Non-boolean failure statuses ("ERROR"/"EXCEPTION") and plain
        # False all route through onError handling.
        if step_status is False or str(step_status).upper() in [
                "ERROR", "EXCEPTION"]:
            self.go_to_step_number = onerror_driver.main(
                self.current_step,
                self.default_error_action,
                self.default_error_value,
                skip_invoked=self.skip_invoked,
                current_step_number=self.current_step_number)
            if self.go_to_step_number in ['ABORT', 'ABORT_AS_ERROR']:
                return self.current_step_number, self.go_to_step_number, "break"
            # A list goto value means execute-and-resume mode.
            elif type(self.go_to_step_number) is list:
                self.__run_execute_and_resume_mode()
    return self.current_step_number, self.go_to_step_number, "continue"
def _compute_runmode_goto_operations(self, step, step_status,
                                     exec_type_onerror, goto_stepnum,
                                     step_num):
    """Compute the goto step and next iteration point for a runmode step.

    Reads the step's runmode from its xml element and decides whether to
    short-circuit the repeat loop (RUF on failure / RUP on success) or to
    delegate to onError processing.

    Arguments:
    1. step = (xml element) step under execution
    2. step_status = (bool/str) status of the executed step
    3. exec_type_onerror = (bool) whether an exec-type onError was invoked
    4. goto_stepnum = current goto value (may be overwritten here)
    5. step_num = (int) current step number
    Returns the (possibly updated) (goto_stepnum, step_num) pair.
    """
    runmode, value = common_execution_utils.get_runmode_from_xmlfile(step)
    if runmode is not None:
        # if runmode is 'ruf' & step_status is False, skip the repeated
        # execution of same TC step and move to next actual step.
        # (Original code called all(a, b, c) with three positional
        # arguments -- all() takes a single iterable, so that raised
        # TypeError whenever a runmode was set.)
        if not exec_type_onerror and runmode == "ruf" and step_status is False:
            goto_stepnum = str(value)
        # if runmode is 'rup' & step_status is True, skip the repeated
        # execution of same TC step and move to next actual step
        elif runmode == "rup" and step_status is True:
            goto_stepnum = str(value)
        else:
            if any([
                    step_status is False,
                    str(step_status).upper() == "ERROR",
                    str(step_status).upper() == "EXCEPTION",
                    exec_type_onerror is True
            ]):
                goto_stepnum = onerror_driver.main(
                    step, self.default_error_action,
                    self.default_error_value, exec_type_onerror)
    else:
        # No runmode: plain onError handling for failed/errored steps.
        if any([
                step_status is False,
                str(step_status).upper() == "ERROR",
                str(step_status).upper() == "EXCEPTION",
                exec_type_onerror is True
        ]):
            goto_stepnum = onerror_driver.main(step,
                                               self.default_error_action,
                                               self.default_error_value,
                                               exec_type_onerror)
    if str(goto_stepnum).upper() == 'ABORT':
        # ABORT is handled by the caller; leave goto_stepnum untouched.
        pass
    # when 'onError:goto' value is less than the current step num,
    # change the next iteration point to goto value
    elif goto_stepnum and int(goto_stepnum) < step_num:
        step_num = int(goto_stepnum) - 1
        goto_stepnum = False
    return goto_stepnum, step_num
def _execute_step_otherwise(self, step_status):
    """Run the step's onError handling when it did not pass.

    Delegates failed/errored/excepted steps to onerror_driver and
    translates its answer into either an abort of the step loop, an
    execute-and-resume run, or a backwards goto.
    Returns (current_step_number, go_to_step_number, loop_action) where
    loop_action is "break" or "continue" for the caller's step loop.
    """
    step_failed = (step_status is False
                   or str(step_status).upper() in ("ERROR", "EXCEPTION"))
    if step_failed:
        self.go_to_step_number = onerror_driver.main(
            self.current_step,
            self.default_error_action,
            self.default_error_value,
            skip_invoked=self.skip_invoked)
        goto_value = self.go_to_step_number
        if goto_value == 'ABORT' or goto_value == 'ABORT_AS_ERROR':
            return self.current_step_number, goto_value, "break"
        # A list goto value means execute-and-resume mode.
        if type(goto_value) is list:
            self.__run_execute_and_resume_mode()
        # when 'onError:goto' value is less than the current step num,
        # change the next iteration point to goto value
        elif goto_value and int(goto_value) < self.current_step_number:
            self.current_step_number = int(goto_value) - 1
            self.go_to_step_number = False
    return self.current_step_number, self.go_to_step_number, "continue"
def execute_project(project_filepath, auto_defects, jiraproj, res_startdir,
                    logs_startdir, data_repository):
    """Execute every testsuite of a project and report the project status.

    - Builds the project repository (result/log dirs, title, name) from
      the project xml file.
    - Iterates over the project's testsuite list, dispatching each suite
      to testsuite_driver for execution.
    - Honours per-suite runmode (RUF/RUP), retry (IF / IF NOT) and
      onError (next/abort/goto) directives.
    - Computes the project status from each suite's status and impact
      value, writes the junit result file and, when a db object is
      present, saves the results to MongoDB.

    Arguments:
    1. project_filepath = (str) path of the project xml file
    2. auto_defects = (bool) forwarded to testsuite_driver
    3. jiraproj = jira project id forwarded to testsuite_driver
    4. res_startdir = user-supplied results dir (may be None)
    5. logs_startdir = user-supplied logs dir (may be None)
    6. data_repository = (dict) execution-wide data repository (mutated:
       per-suite results, wp_results_execdir, wt_junit_object are stored)

    Returns:
    1. project_status = overall project status
    2. project_repository = (dict) updated project repository
    """
    project_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution starts".format(project_start_time))
    suite_cntr = 0
    # project_status = True
    goto_testsuite = False
    ts_status_list = []
    ts_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    project_dir = os.path.dirname(project_filepath)
    project_title = Utils.xml_Utils.getChildTextbyParentTag(
        project_filepath, 'Details', 'Title')
    project_repository = get_project_details(project_filepath, res_startdir,
                                             logs_startdir, data_repository)
    project_repository['project_title'] = project_title
    testsuite_list = get_testsuite_list(project_filepath)
    # project_resultfile = project_repository['project_resultfile']
    project_name = project_repository['project_name']
    wp_results_execdir = project_repository['wp_results_execdir']
    data_repository['wp_results_execdir'] = wp_results_execdir
    wp_logs_execdir = project_repository['wp_logs_execdir']
    project_error_action = project_repository['def_on_error_action']
    project_error_value = project_repository['def_on_error_value']
    # Root junit object for the whole project; suites append to it below.
    pj_junit_object = junit_class.Junit(filename=project_name,
                                        timestamp=project_start_time,
                                        name=project_name,
                                        display="True")
    pj_junit_object.update_attr("resultsdir",
                                project_repository['project_execution_dir'],
                                "pj", project_start_time)
    pj_junit_object.update_attr("title",
                                project_repository['project_title'],
                                "pj", project_start_time)
    pj_junit_object.add_property("resultsdir",
                                 project_repository['project_execution_dir'],
                                 "pj", project_start_time)
    # adding the resultsdir as attribute, need to be removed after making it
    # a property
    pj_junit_object.add_project_location(project_filepath)
    if "jobid" in data_repository:
        pj_junit_object.add_jobid(data_repository["jobid"])
        del data_repository["jobid"]
    data_repository['wt_junit_object'] = pj_junit_object
    # Main suite loop; suite_cntr is 1-based inside the body because it is
    # incremented before the suite runs (goto values compare against it).
    while suite_cntr < len(testsuite_list):
        testsuite = testsuite_list[suite_cntr]
        # suite_junit_type = 'file'
        suite_cntr += 1
        testsuite_rel_path = testsuite_utils.get_path_from_xmlfile(testsuite)
        if testsuite_rel_path is not None:
            testsuite_path = Utils.file_Utils.getAbsPath(
                testsuite_rel_path, project_dir)
        else:
            # Keep the None as the string "None" so later path/file checks
            # simply fail instead of raising.
            testsuite_path = str(testsuite_rel_path)
        print_info("\n")
        print_debug("<<<< Starting execution of Test suite: {0}>>>>".format(
            testsuite_path))
        action, testsuite_status = exec_type_driver.main(testsuite)
        testsuite_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
            testsuite)
        testsuite_name = Utils.file_Utils.getFileName(testsuite_path)
        testsuite_nameonly = Utils.file_Utils.getNameOnly(testsuite_name)
        ts_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testsuite, 'onError', 'action')
        # Suite-level onError action falls back to the project default.
        ts_onError_action = ts_onError_action if ts_onError_action else project_error_action
        if Utils.file_Utils.fileExists(testsuite_path):
            if not goto_testsuite and action is True:
                # Normal execution path.
                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]
            elif goto_testsuite and goto_testsuite == str(suite_cntr)\
                    and action is True:
                # This suite is the goto target: run it and clear the goto.
                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                goto_testsuite = False
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]
            else:
                # Suite skipped (a goto is pending for a different suite, or
                # exec_type said not to run it); record a SKIPPED junit entry.
                # NOTE(review): print_info's return value is used as the skip
                # message -- presumably it returns the printed string; verify.
                msg = print_info(
                    'skipped testsuite: {0} '.format(testsuite_path))
                # NOTE(review): testsuite_resultfile built here is never read
                # again in this function.
                testsuite_resultfile = '<testsuite errors="0" failures="0" name="{0}" '\
                    'skipped="0" tests="0" time="0" timestamp="{1}" > '\
                    '<skipped message="{2}"/> </testsuite>'.format(testsuite_name,
                                                                   project_start_time,
                                                                   msg)
                tmp_timestamp = str(
                    Utils.datetime_utils.get_current_timestamp())
                # sleep so the skipped suite gets a unique timestamp key --
                # TODO confirm; timestamps appear to key junit entries.
                time.sleep(2)
                pj_junit_object.create_testsuite(
                    location=os.path.dirname(testsuite_path),
                    name=testsuite_nameonly,
                    timestamp=tmp_timestamp,
                    **pj_junit_object.init_arg())
                pj_junit_object.update_attr("status", "SKIPPED", "ts",
                                            tmp_timestamp)
                pj_junit_object.update_attr("skipped", "1", "pj",
                                            tmp_timestamp)
                pj_junit_object.update_count("suites", "1", "pj",
                                             tmp_timestamp)
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "SKIP"
                # pj_junit_object.add_testcase_message(tmp_timestamp, "skipped")
                pj_junit_object.update_attr(
                    "impact", impact_dict.get(testsuite_impact.upper()),
                    "ts", tmp_timestamp)
                pj_junit_object.update_attr("onerror", "N/A", "ts",
                                            tmp_timestamp)
                pj_junit_object.output_junit(wp_results_execdir,
                                             print_summary=False)
                continue
        else:
            # Suite file missing: mark ERROR and honour any pending goto.
            msg = print_error("Test suite does not exist in "
                              "provided path: {0}".format(testsuite_path))
            testsuite_status = 'ERROR'
            # NOTE(review): testsuite_resultfile is unused after this point.
            testsuite_resultfile = '<testsuite errors="0" failures="0" name="{0}" '\
                'skipped="0" tests="0" time="0" timestamp="{1}" > '\
                '<error message="{2}"/> </testsuite>'.format(testsuite_name,
                                                             project_start_time,
                                                             msg)
            # suite_junit_type = 'string'
            if goto_testsuite and goto_testsuite == str(suite_cntr):
                goto_testsuite = False
            elif goto_testsuite and goto_testsuite != str(suite_cntr):
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "ERROR"
                continue
        # Pre-compute the onError display string for the junit attributes.
        goto_testsuite_num = onerror_driver.main(testsuite,
                                                 project_error_action,
                                                 project_error_value)
        if goto_testsuite_num is False:
            onerror = "Next"
        elif goto_testsuite_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_testsuite_num)
        pj_junit_object.update_attr("impact",
                                    impact_dict.get(testsuite_impact.upper()),
                                    "ts", data_repository['wt_ts_timestamp'])
        pj_junit_object.update_attr("onerror", onerror, "ts",
                                    data_repository['wt_ts_timestamp'])
        # Map the raw suite status onto the reported result string.
        string_status = {
            "TRUE": "PASS",
            "FALSE": "FAIL",
            "ERROR": "ERROR",
            "SKIP": "SKIP"
        }
        if str(testsuite_status).upper() in string_status.keys():
            data_repository['testsuite_{}_result'.format(suite_cntr)] = string_status\
                [str(testsuite_status).upper()]
        else:
            print_error("unexpected testsuite status, default to exception")
            data_repository['testsuite_%d_result' % suite_cntr] = "ERROR"
        ts_status_list.append(testsuite_status)
        ts_impact_list.append(testsuite_impact)
        # NOTE(review): msg stays unbound here if impact is neither IMPACT
        # nor NOIMPACT -- print_debug would then raise; confirm impact values
        # are validated upstream.
        if testsuite_impact.upper() == 'IMPACT':
            msg = "Status of the executed test suite impacts Project result"
        elif testsuite_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test suite does not impact project result"
        print_debug(msg)
        # project_status = compute_project_status(project_status, testsuite_status,
        #                                         testsuite_impact)
        # NOTE(review): this unpacks a 2-tuple; execute_sequential_testsuites
        # unpacks a 3-tuple from the same helper -- confirm which signature
        # get_runmode_from_xmlfile currently has.
        runmode, value = common_execution_utils.get_runmode_from_xmlfile(
            testsuite)
        retry_type, retry_cond, retry_cond_value, retry_value,\
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testsuite)
        if runmode is not None:
            if testsuite.find("runmode") is not None and\
               testsuite.find("runmode").get("attempt") is not None:
                print_info("runmode attempt: {0}".format(
                    testsuite.find("runmode").get("attempt")))
            # if runmode is 'ruf' & testsuite_status is False, skip the repeated execution of same
            # test suite and move to next actual test suite
            if not project_error_value and runmode == "RUF" and\
                    testsuite_status is False:
                goto_testsuite = str(value)
            # if runmode is 'rup' & testsuite_status is True, skip the repeated
            # execution of same testsuite and move to next actual testsuite
            elif runmode == "RUP" and testsuite_status is True:
                goto_testsuite = str(value)
        elif retry_type is not None:
            if testsuite.find("retry") is not None and\
               testsuite.find("retry").get("attempt") is not None:
                print_info("retry attempt: {0}".format(
                    testsuite.find("retry").get("attempt")))
            if retry_type.upper() == 'IF':
                # Retry while the repository value matches the condition.
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                            retry_interval))
                        pNote("The given condition '{0}' matches the expected"
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning(
                            "The condition value '{0}' does not match with the expected "
                            "value '{1}'".format(data_repository[retry_cond],
                                                 retry_cond_value))
                except KeyError:
                    print_warning(
                        "The given condition '{0}' do not exists in "
                        "the data repository".format(retry_cond_value))
                    condition_met = False
                if condition_met is False:
                    goto_testsuite = str(retry_value)
            else:
                if retry_type.upper() == 'IF NOT':
                    # Retry while the repository value differs from the condition.
                    try:
                        if data_repository[retry_cond] != retry_cond_value:
                            condition_met = True
                            pNote("Wait for {0}sec before "
                                  "retrying".format(retry_interval))
                            pNote("The condition value '{0}' does not match "
                                  "with the expected value '{1}'".format(
                                      data_repository[retry_cond],
                                      retry_cond_value))
                            time.sleep(int(retry_interval))
                        else:
                            condition_met = False
                    except KeyError:
                        condition_met = False
                        print_warning(
                            "The given condition '{0}' is not there "
                            "in the data repository".format(retry_cond_value))
                    if condition_met is False:
                        # NOTE(review): if the KeyError path set condition_met
                        # False, this pNote re-reads data_repository[retry_cond]
                        # and will raise KeyError again -- verify.
                        pNote("The given condition '{0}' matched with the "
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        goto_testsuite = str(retry_value)
        else:
            # Neither runmode nor retry: plain onError handling on failure.
            if testsuite_status is False or testsuite_status == "ERROR" or\
                    testsuite_status == "EXCEPTION":
                goto_testsuite = onerror_driver.main(testsuite,
                                                     project_error_action,
                                                     project_error_value)
                if goto_testsuite in ['ABORT', 'ABORT_AS_ERROR']:
                    break
            # when 'onError:goto' value is less than the current ts num,
            # change the next iteration point to goto value
            elif goto_testsuite and int(goto_testsuite) < suite_cntr:
                suite_cntr = int(goto_testsuite) - 1
                goto_testsuite = False
    project_status = Utils.testcase_Utils.compute_status_using_impact(
        ts_status_list, ts_impact_list)
    print_info("\n")
    project_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution completed".format(project_end_time))
    project_duration = Utils.datetime_utils.get_time_delta(project_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(project_duration)
    print_info("Project duration= {0}".format(hms))
    project_status = report_project_result(project_status, project_repository)
    pj_junit_object.update_attr("status", str(project_status), "pj",
                                project_start_time)
    pj_junit_object.update_attr("time", str(project_duration), "pj",
                                project_start_time)
    pj_junit_object.output_junit(wp_results_execdir)
    # Save JUnit/HTML results of the Project in MongoDB server
    if data_repository.get("db_obj") is not False:
        pj_junit_xml = project_repository['wp_results_execdir'] +\
            os.sep + pj_junit_object.filename + "_junit.xml"
        data_repository.get("db_obj").add_html_result_to_mongodb(pj_junit_xml)
    return project_status, project_repository
step_impact_list.append(step_impact) runmode, value = common_execution_utils.get_runmode_from_xmlfile(step) retry_type, retry_cond, retry_cond_value, retry_value, retry_interval = common_execution_utils.get_retry_from_xmlfile(step) if runmode is not None: # if runmode is 'ruf' & step_status is False, skip the repeated # execution of same TC step and move to next actual step if not exec_type_onerror and runmode == "RUF" and step_status is False: goto_stepnum = str(value) # if runmode is 'rup' & step_status is True, skip the repeated # execution of same TC step and move to next actual step elif runmode =="RUP" and step_status is True: goto_stepnum = str(value) else: if step_status is False or str(step_status).upper() == "ERROR" \ or str(step_status).upper() == "EXCEPTION" or exec_type_onerror is True: goto_stepnum = onerror_driver.main(step, default_error_action, default_error_value, exec_type_onerror) if goto_stepnum in ['ABORT', 'ABORT_AS_ERROR']: break elif retry_type is not None: if retry_type.upper() == 'IF': try: if data_repository[retry_cond] == retry_cond_value: condition_met = True pNote("Wait for {0}sec before retrying".format(retry_interval)) pNote("The given condition '{0}' matches the expected " "value '{1}'".format(data_repository[retry_cond], retry_cond_value)) time.sleep(int(retry_interval)) else: condition_met = False print_warning("The condition value '{0}' does not match with the " "expected value '{1}'".format(data_repository[retry_cond],
def execute_sequential_testsuites(testsuite_list, project_repository,
                                  data_repository, auto_defects):
    """Execute the given testsuites of a project one after another.

    For each suite: resolve its path, run it through testsuite_driver
    (or record SKIPPED/ERROR junit entries), then apply runmode (RUF/RUP),
    retry (IF / IF NOT) and onError (next/abort/goto) directives to decide
    the next iteration point.

    Arguments:
    1. testsuite_list = (list) testsuite xml elements to execute
    2. project_repository = (dict) project data (paths, onError defaults)
    3. data_repository = (dict) execution-wide data repository (mutated:
       per-suite results are stored as 'testsuite_<n>_result')
    4. auto_defects = (bool) forwarded to testsuite_driver

    Returns:
    1. project_status = overall status computed from suite statuses
       and their impact values
    """
    suite_cntr = 0
    goto_testsuite = False
    ts_status_list = []
    ts_impact_list = []
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    project_error_action = project_repository['def_on_error_action']
    project_filepath = project_repository['project_filepath']
    project_dir = os.path.dirname(project_filepath)
    wp_results_execdir = project_repository['wp_results_execdir']
    wp_logs_execdir = project_repository['wp_logs_execdir']
    project_error_value = project_repository['def_on_error_value']
    jiraproj = data_repository['jiraproj']
    pj_junit_object = data_repository['wt_junit_object']
    # Main suite loop; suite_cntr is 1-based inside the body because it is
    # incremented before the suite runs (goto values compare against it).
    while suite_cntr < len(testsuite_list):
        testsuite = testsuite_list[suite_cntr]
        suite_cntr += 1
        testsuite_rel_path = testsuite_utils.get_path_from_xmlfile(testsuite)
        if testsuite_rel_path is not None:
            testsuite_path = Utils.file_Utils.getAbsPath(
                testsuite_rel_path, project_dir)
        else:
            # Keep the None as the string "None" so later path/file checks
            # simply fail instead of raising.
            testsuite_path = str(testsuite_rel_path)
        print_info("\n")
        print_debug("<<<< Starting execution of Test suite: {0}>>>>".format(
            testsuite_path))
        action, testsuite_status = exec_type_driver.main(testsuite)
        testsuite_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
            testsuite)
        testsuite_name = Utils.file_Utils.getFileName(testsuite_path)
        testsuite_nameonly = Utils.file_Utils.getNameOnly(testsuite_name)
        ts_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testsuite, 'onError', 'action')
        # Suite-level onError action falls back to the project default.
        ts_onError_action = ts_onError_action if ts_onError_action else project_error_action
        if Utils.file_Utils.fileExists(testsuite_path):
            if not goto_testsuite and action is True:
                # Normal execution path.
                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                testsuite_status = testsuite_result[0]
            elif goto_testsuite and goto_testsuite == str(suite_cntr)\
                    and action is True:
                # This suite is the goto target: run it and clear the goto.
                testsuite_result = testsuite_driver.main(
                    testsuite_path,
                    data_repository=data_repository,
                    from_project=True,
                    auto_defects=auto_defects,
                    jiraproj=jiraproj,
                    res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                goto_testsuite = False
                testsuite_status = testsuite_result[0]
            else:
                # Suite skipped (a goto is pending for a different suite, or
                # exec_type said not to run it); record a SKIPPED junit entry.
                msg = print_info(
                    'skipped testsuite: {0} '.format(testsuite_path))
                tmp_timestamp = str(
                    Utils.datetime_utils.get_current_timestamp())
                # sleep so the skipped suite gets a unique timestamp key --
                # TODO confirm; timestamps appear to key junit entries.
                time.sleep(2)
                pj_junit_object.create_testsuite(
                    location=os.path.dirname(testsuite_path),
                    name=testsuite_nameonly,
                    timestamp=tmp_timestamp,
                    **pj_junit_object.init_arg())
                pj_junit_object.update_attr("status", "SKIPPED", "ts",
                                            tmp_timestamp)
                pj_junit_object.update_attr("skipped", "1", "pj",
                                            tmp_timestamp)
                pj_junit_object.update_count("suites", "1", "pj",
                                             tmp_timestamp)
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "SKIP"
                pj_junit_object.update_attr(
                    "impact", impact_dict.get(testsuite_impact.upper()),
                    "ts", tmp_timestamp)
                pj_junit_object.update_attr("onerror", "N/A", "ts",
                                            tmp_timestamp)
                pj_junit_object.output_junit(wp_results_execdir,
                                             print_summary=False)
                continue
        else:
            # Suite file missing: mark ERROR and honour any pending goto.
            msg = print_error("Test suite does not exist in "
                              "provided path: {0}".format(testsuite_path))
            testsuite_status = 'ERROR'
            if goto_testsuite and goto_testsuite == str(suite_cntr):
                goto_testsuite = False
            elif goto_testsuite and goto_testsuite != str(suite_cntr):
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "ERROR"
                continue
        # Pre-compute the onError display string for the junit attributes.
        goto_testsuite_num = onerror_driver.main(testsuite,
                                                 project_error_action,
                                                 project_error_value)
        if goto_testsuite_num is False:
            onerror = "Next"
        elif goto_testsuite_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_testsuite_num)
        pj_junit_object.update_attr("impact",
                                    impact_dict.get(testsuite_impact.upper()),
                                    "ts", data_repository['wt_ts_timestamp'])
        pj_junit_object.update_attr("onerror", onerror, "ts",
                                    data_repository['wt_ts_timestamp'])
        # Map the raw suite status onto the reported result string.
        string_status = {
            "TRUE": "PASS",
            "FALSE": "FAIL",
            "ERROR": "ERROR",
            "SKIP": "SKIP",
            "RAN": "RAN"
        }
        if str(testsuite_status).upper() in string_status.keys():
            data_repository['testsuite_{}_result'.format(suite_cntr)] = \
                string_status[str(testsuite_status).upper()]
        else:
            print_error("unexpected testsuite status, default to exception")
            data_repository['testsuite_%d_result' % suite_cntr] = "ERROR"
        ts_status_list.append(testsuite_status)
        ts_impact_list.append(testsuite_impact)
        # NOTE(review): msg stays unbound here if impact is neither IMPACT
        # nor NOIMPACT -- print_debug would then raise; confirm impact values
        # are validated upstream.
        if testsuite_impact.upper() == 'IMPACT':
            msg = "Status of the executed test suite impacts Project result"
        elif testsuite_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test suite does not impact project result"
        print_debug(msg)
        # NOTE(review): 3-tuple unpack here; execute_project unpacks a
        # 2-tuple from the same helper -- confirm the current signature of
        # get_runmode_from_xmlfile.
        runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
            testsuite)
        retry_type, retry_cond, retry_cond_value, retry_value,\
            retry_interval = common_execution_utils.get_retry_from_xmlfile(testsuite)
        if runmode is not None:
            if testsuite.find("runmode") is not None and\
               testsuite.find("runmode").get("attempt") is not None:
                print_info("runmode attempt: {0}".format(
                    testsuite.find("runmode").get("attempt")))
            # if runmode is 'ruf' & step_status is False, skip the repeated
            # execution of same TC step and move to next actual step
            if not project_error_value and runmode == "RUF" and\
                    testsuite_status is False:
                goto_testsuite = str(value)
            # if runmode is 'rup' & step_status is True, skip the repeated
            # execution of same TC step and move to next actual step
            elif runmode == "RUP" and testsuite_status is True:
                goto_testsuite = str(value)
        elif retry_type is not None:
            if testsuite.find("retry") is not None and\
               testsuite.find("retry").get("attempt") is not None:
                print_info("retry attempt: {0}".format(
                    testsuite.find("retry").get("attempt")))
            if retry_type.upper() == 'IF':
                # Retry while the repository value matches the condition.
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                            retry_interval))
                        pNote("The given condition '{0}' matches the expected"
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning(
                            "The condition value '{0}' does not match with the expected "
                            "value '{1}'".format(data_repository[retry_cond],
                                                 retry_cond_value))
                except KeyError:
                    print_warning(
                        "The given condition '{0}' do not exists in "
                        "the data repository".format(retry_cond_value))
                    condition_met = False
                if condition_met is False:
                    goto_testsuite = str(retry_value)
            else:
                if retry_type.upper() == 'IF NOT':
                    # Retry while the repository value differs from the condition.
                    try:
                        if data_repository[retry_cond] != retry_cond_value:
                            condition_met = True
                            pNote("Wait for {0}sec before "
                                  "retrying".format(retry_interval))
                            pNote("The condition value '{0}' does not match "
                                  "with the expected value '{1}'".format(
                                      data_repository[retry_cond],
                                      retry_cond_value))
                            time.sleep(int(retry_interval))
                        else:
                            condition_met = False
                    except KeyError:
                        condition_met = False
                        print_warning(
                            "The given condition '{0}' is not there "
                            "in the data repository".format(retry_cond_value))
                    if condition_met is False:
                        # NOTE(review): if the KeyError path set condition_met
                        # False, this pNote re-reads data_repository[retry_cond]
                        # and will raise KeyError again -- verify.
                        pNote("The given condition '{0}' matched with the "
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        goto_testsuite = str(retry_value)
        else:
            # Neither runmode nor retry: plain onError handling on failure.
            if testsuite_status is False or testsuite_status == "ERROR" or\
                    testsuite_status == "EXCEPTION":
                goto_testsuite = onerror_driver.main(testsuite,
                                                     project_error_action,
                                                     project_error_value)
                if goto_testsuite in ['ABORT', 'ABORT_AS_ERROR']:
                    break
            # when 'onError:goto' value is less than the current ts num,
            # change the next iteration point to goto value
            elif goto_testsuite and int(goto_testsuite) < suite_cntr:
                suite_cntr = int(goto_testsuite) - 1
                goto_testsuite = False
    project_status = Utils.testcase_Utils.compute_status_using_impact(
        ts_status_list, ts_impact_list)
    return project_status
"Wait for {0}sec before the next runmode attempt ".format( runmode_timer)) wait_for_timeout(runmode_timer) # if runmode is 'ruf' & step_status is False, skip the repeated # execution of same TC step and move to next actual step elif runmode == "RUF" and step_status is False: goto_stepnum = str(value) # if runmode is 'rup' & step_status is True, skip the repeated # execution of same TC step and move to next actual step elif runmode == "RUP" and step_status is True: goto_stepnum = str(value) else: if step_status is False or str(step_status).upper() == "ERROR" \ or str(step_status).upper() == "EXCEPTION": goto_stepnum = onerror_driver.main(step, default_error_action, default_error_value) if goto_stepnum in ['ABORT', 'ABORT_AS_ERROR']: break elif retry_type is not None: if retry_type.upper() == 'IF': try: if data_repository[retry_cond] == retry_cond_value: condition_met = True pNote("Wait for {0}sec before retrying".format( retry_interval)) pNote("The given condition '{0}' matches the expected " "value '{1}'".format(data_repository[retry_cond], retry_cond_value)) wait_for_timeout(retry_interval)