def execute_testsuite(testsuite_filepath, data_repository, from_project,
                      auto_defects, jiraproj, res_startdir, logs_startdir,
                      ts_onError_action):
    """Executes the testsuite (provided as a xml file).

    - Takes a testsuite xml file as input and sends each testcase to
      Basedriver for execution.
    - Computes the testsuite status based on the testcase_status and the
      impact value of the testcase.
    - Handles testcase failures as per the default/specific onError
      action/value.
    - Calls the function to report the testsuite status.

    Arguments:
    1. testsuite_filepath = (string) the full path of the testsuite xml file.
    2. data_repository = (dict) shared execution-state dictionary, mutated
       in place with suite-level keys (wt_*).
    3. from_project = (bool) True when called as part of a project run.
    4. auto_defects = (bool) whether to auto-create defects on failure.
    5. jiraproj = jira project name used for defect reporting.
    6. res_startdir / logs_startdir = (string) root dirs for results/logs.
    7. ts_onError_action = onError action for the suite (e.g. abort_as_error).

    Returns:
        tuple: (test_suite_status, suite_repository)
    """
    suite_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution starts".format(suite_start_time))

    # Gather suite metadata and publish it into the shared data_repository.
    suite_repository = get_suite_details(testsuite_filepath, data_repository,
                                         from_project, res_startdir,
                                         logs_startdir)
    testcase_list = get_testcase_list(testsuite_filepath)
    execution_type = suite_repository['suite_exectype'].upper()
    no_of_tests = str(len(testcase_list))
    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    suite_execution_dir = suite_repository['suite_execution_dir']

    data_repository['wt_suite_execution_dir'] = suite_execution_dir
    data_repository['wt_results_execdir'] = suite_repository['ws_results_execdir']
    data_repository['wt_logs_execdir'] = suite_repository['ws_logs_execdir']
    data_repository['wt_suite_name'] = suite_name

    suite_timestamp = testsuite_utils.get_suite_timestamp()
    data_repository['wt_ts_timestamp'] = suite_timestamp
    suite_repository['wt_ts_timestamp'] = suite_timestamp
    # Both iterative flavours are reported simply as "iterative".
    data_repository["suite_exectype"] = "iterative" if execution_type in \
        ("ITERATIVE_SEQUENTIAL", "ITERATIVE_PARALLEL") else execution_type

    ts_junit_display = "True"
    pj_junit_display = "False"
    # Reuse the junit object from an enclosing project run if present,
    # otherwise create a standalone one for this suite.
    if "wt_junit_object" in data_repository:
        ts_junit_object = data_repository["wt_junit_object"]
    else:
        ts_junit_object = junit_class.Junit(
            filename=suite_name, timestamp=suite_timestamp,
            name="customProject_independant_testcase_execution",
            display=pj_junit_display)
        if "jobid" in data_repository:
            ts_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        data_repository["wt_junit_object"] = ts_junit_object
    suite_repository["wt_junit_object"] = ts_junit_object

    ts_junit_object.create_testsuite(
        location=os.path.dirname(testsuite_filepath),
        name=suite_name, timestamp=suite_timestamp,
        suite_location=suite_repository['testsuite_filepath'],
        title=suite_repository['suite_title'],
        display=ts_junit_display,
        **ts_junit_object.init_arg())

    # Adding resultsdir as attributes to testsuite_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of
    # properties tag in testcase
    ts_junit_object.update_attr("resultsdir",
                                suite_repository['suite_execution_dir'],
                                "ts", suite_timestamp)
    ts_junit_object.add_property("resultsdir",
                                 suite_repository['suite_execution_dir'],
                                 "ts", suite_timestamp)

    # FIX: dict.has_key() was removed in Python 3; use the 'in' operator
    # (consistent with the other membership tests in this file).
    if "data_file" in suite_repository:
        data_repository['suite_data_file'] = suite_repository['data_file']

    # jiraproj name
    data_repository['jiraproj'] = jiraproj

    testsuite_utils.pSuite_root(junit_resultfile)
    testsuite_utils.pSuite_testsuite(junit_resultfile, suite_name, errors='0',
                                     skipped='0', tests=no_of_tests,
                                     failures='0', time='0',
                                     timestamp=suite_timestamp)
    testsuite_utils.pSuite_property(junit_resultfile, 'title',
                                    suite_repository['suite_title'])
    testsuite_utils.pSuite_property(junit_resultfile, 'location',
                                    testsuite_filepath)
    if "jobid" in data_repository:
        testsuite_utils.pSuite_property(junit_resultfile, 'resultlocation',
                                        data_repository["jobid"])

    print_suite_details_to_console(suite_repository, testsuite_filepath,
                                   junit_resultfile)

    # Dispatch on the suite execution type.
    data_repository["war_parallel"] = False
    if execution_type == 'PARALLEL_TESTCASES':
        ts_junit_object.remove_html_obj()
        data_repository["war_parallel"] = True
        print_info("Executing testcases in parallel")
        test_suite_status = parallel_testcase_driver.main(
            testcase_list, suite_repository, data_repository, from_project,
            tc_parallel=True, auto_defects=auto_defects)
    elif execution_type == 'SEQUENTIAL_TESTCASES':
        # FIX: corrected typo "testccases" in the log message.
        print_info("Executing testcases sequentially")
        test_suite_status = sequential_testcase_driver.main(
            testcase_list, suite_repository, data_repository, from_project,
            auto_defects=auto_defects)
    elif execution_type == 'RUN_UNTIL_FAIL':
        execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
            testsuite_filepath, 'Details', 'type', 'Max_Attempts')
        print_info("Execution type: {0}, Attempts: {1}".format(
            execution_type, execution_value))
        i = 0
        while i < int(execution_value):
            i += 1
            print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
            test_suite_status = sequential_testcase_driver.main(
                testcase_list, suite_repository, data_repository,
                from_project, auto_defects=auto_defects)
            test_count = i * len(testcase_list)
            testsuite_utils.pSuite_update_suite_tests(str(test_count))
            # Stop repeating as soon as an attempt fails or errors.
            if str(test_suite_status).upper() in ("FALSE", "ERROR"):
                break
    elif execution_type == 'RUN_UNTIL_PASS':
        execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
            testsuite_filepath, 'Details', 'type', 'Max_Attempts')
        print_info("Execution type: {0}, Attempts: {1}".format(
            execution_type, execution_value))
        i = 0
        while i < int(execution_value):
            i += 1
            print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
            test_suite_status = sequential_testcase_driver.main(
                testcase_list, suite_repository, data_repository,
                from_project, auto_defects=auto_defects)
            test_count = i * len(testcase_list)
            testsuite_utils.pSuite_update_suite_tests(str(test_count))
            # Stop repeating as soon as an attempt passes.
            if str(test_suite_status).upper() == "TRUE":
                break
    elif execution_type == 'RUN_MULTIPLE':
        Max_Attempts = Utils.xml_Utils.getChildAttributebyParentTag(
            testsuite_filepath, 'Details', 'type', 'Max_Attempts')
        Number_Attempts = Utils.xml_Utils.getChildAttributebyParentTag(
            testsuite_filepath, 'Details', 'type', 'Number_Attempts')
        # Max_Attempts takes precedence; fall back to Number_Attempts.
        if Max_Attempts == "":
            execution_value = Number_Attempts
        else:
            execution_value = Max_Attempts
        print_info("Execution type: {0}, Max Attempts: {1}".format(
            execution_type, execution_value))
        i = 0
        while i < int(execution_value):
            i += 1
            print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
            # We aren't actually summing each test result here...
            # only the status of the last attempt is kept.
            test_suite_status = sequential_testcase_driver.main(
                testcase_list, suite_repository, data_repository,
                from_project, auto_defects=auto_defects)
    elif execution_type == "ITERATIVE_SEQUENTIAL":
        # if execution type is iterative sequential call
        # WarriorCore.Classes.iterative_testsuite class and
        # execute the testcases in iterative sequential fashion on the systems
        print_info("Iterative sequential suite")
        iter_seq_ts_obj = IterativeTestsuite(testcase_list, suite_repository,
                                             data_repository, from_project,
                                             auto_defects)
        test_suite_status = iter_seq_ts_obj.execute_iterative_sequential()
    elif execution_type == "ITERATIVE_PARALLEL":
        # if execution type is iterative parallel call
        # WarriorCore.Classes.iterative_testsuite class and
        # execute the testcases in iterative parallel fashion on the systems
        ts_junit_object.remove_html_obj()
        print_info("Iterative parallel suite")
        data_repository["war_parallel"] = True
        iter_seq_ts_obj = IterativeTestsuite(testcase_list, suite_repository,
                                             data_repository, from_project,
                                             auto_defects)
        test_suite_status = iter_seq_ts_obj.execute_iterative_parallel()
    else:
        print_error("unexpected suite_type received...aborting execution")
        test_suite_status = False

    print_info("\n")
    suite_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution completed".format(suite_end_time))
    suite_duration = Utils.datetime_utils.get_time_delta(suite_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(suite_duration)
    print_info("Testsuite duration= {0}".format(hms))
    testsuite_utils.update_suite_duration(str(suite_duration))

    # FIX: identity comparison with False instead of '==' (avoids matching 0).
    if test_suite_status is False and ts_onError_action and \
            ts_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info("Testsuite status will be marked as ERROR as onError "
                   "action is set to 'abort_as_error'")
        test_suite_status = "ERROR"

    testsuite_utils.report_testsuite_result(suite_repository,
                                            test_suite_status)

    ts_junit_object = data_repository['wt_junit_object']
    ts_junit_object.update_count(test_suite_status, "1", "pj")
    # NOTE(review): "not appicable" (sic) appears to be the literal key used
    # throughout for project-level timestamps — kept byte-identical so it
    # still matches junit_class internals; confirm before normalizing.
    ts_junit_object.update_count("suites", "1", "pj", "not appicable")
    ts_junit_object.update_attr("status", str(test_suite_status), "ts",
                                suite_timestamp)
    ts_junit_object.update_attr("time", str(suite_duration), "ts",
                                suite_timestamp)
    if not from_project:
        ts_junit_object.update_attr("status", str(test_suite_status), "pj",
                                    "not applicable")
        ts_junit_object.update_attr("time", str(suite_duration), "pj",
                                    "not appicable")
        ts_junit_object.output_junit(data_repository['wt_results_execdir'])
        # Save JUnit/HTML results of the Suite in MongoDB server
        if data_repository.get("db_obj") is not False:
            ts_junit_xml = (data_repository['wt_results_execdir'] + os.sep +
                            ts_junit_object.filename + "_junit.xml")
            data_repository.get("db_obj").add_html_result_to_mongodb(
                ts_junit_xml)
    else:
        # Create and replace existing Project junit file for each suite
        ts_junit_object.output_junit(data_repository['wp_results_execdir'],
                                     print_summary=False)

    return test_suite_status, suite_repository
def execute_project(project_filepath, auto_defects, jiraproj, res_startdir,
                    logs_startdir, data_repository):
    """Executes a project (xml file containing a list of testsuites).

    - Takes a list of testsuite locations input.
    - Iterates over the list and sends each testsuite location to
      testsuite_driver for execution.
    - Gets the status of the testsuite from the Warrior and computes the
      project_status based on the impact value of the testsuite.
    - If the testsuite fails, handles the failure using the default or
      specific onError action,value.
    - Finally reports the project status to the result file.

    Arguments:
    1. project_filepath = (string) full path of the project xml file.
    2. auto_defects = (bool) whether to auto-create defects on failure.
    3. jiraproj = jira project name used for defect reporting.
    4. res_startdir / logs_startdir = (string) root dirs for results/logs.
    5. data_repository = (dict) shared execution-state dict, mutated in place.

    Returns:
        tuple: (project_status, project_repository)
    """
    project_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution starts".format(project_start_time))
    suite_cntr = 0                 # 1-based index of the suite being run
    # project_status = True
    goto_testsuite = False         # jump target set by runmode/retry/onError
    ts_status_list = []            # per-suite statuses, for impact computation
    ts_impact_list = []            # per-suite impact values
    impact_dict = {"IMPACT": "Impact", "NOIMPACT": "No Impact"}
    project_dir = os.path.dirname(project_filepath)
    project_title = Utils.xml_Utils.getChildTextbyParentTag(
        project_filepath, 'Details', 'Title')
    project_repository = get_project_details(project_filepath, res_startdir,
                                             logs_startdir, data_repository)
    project_repository['project_title'] = project_title
    testsuite_list = get_testsuite_list(project_filepath)
    # project_resultfile = project_repository['project_resultfile']
    project_name = project_repository['project_name']
    wp_results_execdir = project_repository['wp_results_execdir']
    data_repository['wp_results_execdir'] = wp_results_execdir
    wp_logs_execdir = project_repository['wp_logs_execdir']
    # Project-level defaults; individual suites may override onError action.
    project_error_action = project_repository['def_on_error_action']
    project_error_value = project_repository['def_on_error_value']
    pj_junit_object = junit_class.Junit(filename=project_name,
                                        timestamp=project_start_time,
                                        name=project_name, display="True")
    pj_junit_object.update_attr("resultsdir",
                                project_repository['project_execution_dir'],
                                "pj", project_start_time)
    pj_junit_object.update_attr("title", project_repository['project_title'],
                                "pj", project_start_time)
    pj_junit_object.add_property("resultsdir",
                                 project_repository['project_execution_dir'],
                                 "pj", project_start_time)
    # adding the resultsdir as attribute, need to be removed after making it
    # a property
    pj_junit_object.add_project_location(project_filepath)
    if "jobid" in data_repository:
        pj_junit_object.add_jobid(data_repository["jobid"])
        del data_repository["jobid"]
    data_repository['wt_junit_object'] = pj_junit_object
    # Main suite loop; suite_cntr may be rewound by an onError "goto".
    while suite_cntr < len(testsuite_list):
        testsuite = testsuite_list[suite_cntr]
        # suite_junit_type = 'file'
        suite_cntr += 1
        testsuite_rel_path = testsuite_utils.get_path_from_xmlfile(testsuite)
        if testsuite_rel_path is not None:
            testsuite_path = Utils.file_Utils.getAbsPath(
                testsuite_rel_path, project_dir)
        else:
            # keeps str(None) = "None"; fileExists() below will then fail
            testsuite_path = str(testsuite_rel_path)
        print_info("\n")
        print_debug("<<<< Starting execution of Test suite: {0}>>>>".format(
            testsuite_path))
        action, testsuite_status = exec_type_driver.main(testsuite)
        testsuite_impact = Utils.testcase_Utils.get_impact_from_xmlfile(
            testsuite)
        testsuite_name = Utils.file_Utils.getFileName(testsuite_path)
        testsuite_nameonly = Utils.file_Utils.getNameOnly(testsuite_name)
        # Suite-specific onError action falls back to the project default.
        ts_onError_action = Utils.xml_Utils.get_attributevalue_from_directchildnode(
            testsuite, 'onError', 'action')
        ts_onError_action = ts_onError_action if ts_onError_action else project_error_action
        if Utils.file_Utils.fileExists(testsuite_path):
            # Execute the suite when no goto is pending, or when this suite
            # is the goto target; otherwise record it as skipped.
            if not goto_testsuite and action is True:
                testsuite_result = testsuite_driver.main(
                    testsuite_path, data_repository=data_repository,
                    from_project=True, auto_defects=auto_defects,
                    jiraproj=jiraproj, res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]
            elif goto_testsuite and goto_testsuite == str(suite_cntr)\
                    and action is True:
                testsuite_result = testsuite_driver.main(
                    testsuite_path, data_repository=data_repository,
                    from_project=True, auto_defects=auto_defects,
                    jiraproj=jiraproj, res_startdir=wp_results_execdir,
                    logs_startdir=wp_logs_execdir,
                    ts_onError_action=ts_onError_action)
                goto_testsuite = False
                testsuite_status = testsuite_result[0]
                # testsuite_resultfile = testsuite_result[1]
            else:
                # Suite is skipped: record a synthetic "skipped" testsuite
                # entry in the project junit results.
                msg = print_info(
                    'skipped testsuite: {0} '.format(testsuite_path))
                testsuite_resultfile = '<testsuite errors="0" failures="0" name="{0}" '\
                    'skipped="0" tests="0" time="0" timestamp="{1}" > '\
                    '<skipped message="{2}"/> </testsuite>'.format(
                        testsuite_name, project_start_time, msg)
                tmp_timestamp = str(
                    Utils.datetime_utils.get_current_timestamp())
                # NOTE(review): presumably the sleep guarantees a unique
                # timestamp for the junit entry — confirm.
                time.sleep(2)
                pj_junit_object.create_testsuite(
                    location=os.path.dirname(testsuite_path),
                    name=testsuite_nameonly, timestamp=tmp_timestamp,
                    **pj_junit_object.init_arg())
                pj_junit_object.update_attr("status", "SKIPPED", "ts",
                                            tmp_timestamp)
                pj_junit_object.update_attr("skipped", "1", "pj",
                                            tmp_timestamp)
                pj_junit_object.update_count("suites", "1", "pj",
                                             tmp_timestamp)
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "SKIP"
                # pj_junit_object.add_testcase_message(tmp_timestamp, "skipped")
                pj_junit_object.update_attr(
                    "impact", impact_dict.get(testsuite_impact.upper()), "ts",
                    tmp_timestamp)
                pj_junit_object.update_attr("onerror", "N/A", "ts",
                                            tmp_timestamp)
                pj_junit_object.output_junit(wp_results_execdir,
                                             print_summary=False)
                continue
        else:
            # Suite file missing: mark ERROR and build a synthetic entry.
            msg = print_error("Test suite does not exist in "
                              "provided path: {0}".format(testsuite_path))
            testsuite_status = 'ERROR'
            testsuite_resultfile = '<testsuite errors="0" failures="0" name="{0}" '\
                'skipped="0" tests="0" time="0" timestamp="{1}" > '\
                '<error message="{2}"/> </testsuite>'.format(
                    testsuite_name, project_start_time, msg)
            # suite_junit_type = 'string'
            if goto_testsuite and goto_testsuite == str(suite_cntr):
                goto_testsuite = False
            elif goto_testsuite and goto_testsuite != str(suite_cntr):
                data_repository['testsuite_{}_result'.format(
                    suite_cntr)] = "ERROR"
                continue
        # Translate the onError driver's decision into a display string.
        goto_testsuite_num = onerror_driver.main(testsuite,
                                                 project_error_action,
                                                 project_error_value)
        if goto_testsuite_num is False:
            onerror = "Next"
        elif goto_testsuite_num == "ABORT":
            onerror = "Abort"
        else:
            onerror = "Goto:" + str(goto_testsuite_num)
        pj_junit_object.update_attr("impact",
                                    impact_dict.get(testsuite_impact.upper()),
                                    "ts", data_repository['wt_ts_timestamp'])
        pj_junit_object.update_attr("onerror", onerror, "ts",
                                    data_repository['wt_ts_timestamp'])
        # Map the raw suite status to its canonical result string.
        string_status = {"TRUE": "PASS", "FALSE": "FAIL", "ERROR": "ERROR",
                         "SKIP": "SKIP"}
        if str(testsuite_status).upper() in string_status.keys():
            data_repository['testsuite_{}_result'.format(suite_cntr)] = \
                string_status[str(testsuite_status).upper()]
        else:
            print_error("unexpected testsuite status, default to exception")
            data_repository['testsuite_%d_result' % suite_cntr] = "ERROR"
        ts_status_list.append(testsuite_status)
        ts_impact_list.append(testsuite_impact)
        # NOTE(review): if impact is neither IMPACT nor NOIMPACT, msg keeps
        # its previous value from the branches above — confirm intended.
        if testsuite_impact.upper() == 'IMPACT':
            msg = "Status of the executed test suite impacts Project result"
        elif testsuite_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed test suite does not impact project result"
        print_debug(msg)
        # project_status = compute_project_status(project_status, testsuite_status,
        #                                         testsuite_impact)
        # Decide whether to repeat/jump based on runmode, retry, or onError.
        runmode, value = common_execution_utils.get_runmode_from_xmlfile(
            testsuite)
        retry_type, retry_cond, retry_cond_value, retry_value,\
            retry_interval = common_execution_utils.get_retry_from_xmlfile(
                testsuite)
        if runmode is not None:
            if testsuite.find("runmode") is not None and\
                    testsuite.find("runmode").get("attempt") is not None:
                print_info("runmode attempt: {0}".format(
                    testsuite.find("runmode").get("attempt")))
            # if runmode is 'ruf' & testsuite_status is False, skip the
            # repeated execution of same test suite and move to next actual
            # test suite
            if not project_error_value and runmode == "RUF" and\
                    testsuite_status is False:
                goto_testsuite = str(value)
            # if runmode is 'rup' & testsuite_status is True, skip the
            # repeated execution of same testsuite and move to next actual
            # testsuite
            elif runmode == "RUP" and testsuite_status is True:
                goto_testsuite = str(value)
        elif retry_type is not None:
            if testsuite.find("retry") is not None and\
                    testsuite.find("retry").get("attempt") is not None:
                print_info("retry attempt: {0}".format(
                    testsuite.find("retry").get("attempt")))
            if retry_type.upper() == 'IF':
                # Retry while the repo value matches the expected value.
                try:
                    if data_repository[retry_cond] == retry_cond_value:
                        condition_met = True
                        pNote("Wait for {0}sec before retrying".format(
                            retry_interval))
                        pNote("The given condition '{0}' matches the expected"
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        time.sleep(int(retry_interval))
                    else:
                        condition_met = False
                        print_warning(
                            "The condition value '{0}' does not match with the expected "
                            "value '{1}'".format(data_repository[retry_cond],
                                                 retry_cond_value))
                except KeyError:
                    print_warning(
                        "The given condition '{0}' do not exists in "
                        "the data repository".format(retry_cond_value))
                    condition_met = False
                if condition_met is False:
                    goto_testsuite = str(retry_value)
            else:
                if retry_type.upper() == 'IF NOT':
                    # Retry while the repo value differs from the expected one.
                    try:
                        if data_repository[retry_cond] != retry_cond_value:
                            condition_met = True
                            pNote("Wait for {0}sec before "
                                  "retrying".format(retry_interval))
                            pNote("The condition value '{0}' does not match "
                                  "with the expected value '{1}'".format(
                                      data_repository[retry_cond],
                                      retry_cond_value))
                            time.sleep(int(retry_interval))
                        else:
                            condition_met = False
                    except KeyError:
                        condition_met = False
                        print_warning(
                            "The given condition '{0}' is not there "
                            "in the data repository".format(retry_cond_value))
                    if condition_met is False:
                        # NOTE(review): if the KeyError branch above fired,
                        # this pNote re-raises KeyError on
                        # data_repository[retry_cond] — looks like a latent
                        # bug; confirm before fixing.
                        pNote("The given condition '{0}' matched with the "
                              "value '{1}'".format(data_repository[retry_cond],
                                                   retry_cond_value))
                        goto_testsuite = str(retry_value)
        else:
            # No runmode/retry: consult onError handling on failure.
            if testsuite_status is False or testsuite_status == "ERROR" or\
                    testsuite_status == "EXCEPTION":
                goto_testsuite = onerror_driver.main(testsuite,
                                                     project_error_action,
                                                     project_error_value)
                if goto_testsuite in ['ABORT', 'ABORT_AS_ERROR']:
                    break
                # when 'onError:goto' value is less than the current ts num,
                # change the next iteration point to goto value
                elif goto_testsuite and int(goto_testsuite) < suite_cntr:
                    suite_cntr = int(goto_testsuite) - 1
                    goto_testsuite = False
    # Aggregate suite statuses, weighted by their impact values.
    project_status = Utils.testcase_Utils.compute_status_using_impact(
        ts_status_list, ts_impact_list)
    print_info("\n")
    project_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution completed".format(project_end_time))
    project_duration = Utils.datetime_utils.get_time_delta(project_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(project_duration)
    print_info("Project duration= {0}".format(hms))
    project_status = report_project_result(project_status, project_repository)
    pj_junit_object.update_attr("status", str(project_status), "pj",
                                project_start_time)
    pj_junit_object.update_attr("time", str(project_duration), "pj",
                                project_start_time)
    pj_junit_object.output_junit(wp_results_execdir)
    # Save JUnit/HTML results of the Project in MongoDB server
    if data_repository.get("db_obj") is not False:
        pj_junit_xml = project_repository['wp_results_execdir'] +\
            os.sep + pj_junit_object.filename + "_junit.xml"
        data_repository.get("db_obj").add_html_result_to_mongodb(pj_junit_xml)
    return project_status, project_repository
def execute_testcase(testcase_filepath, data_repository, tc_context, runtype,
                     tc_parallel, queue, auto_defects, suite, jiraproj,
                     tc_onError_action, iter_ts_sys):
    """Executes the testcase (provided as a xml file).

    - Takes a testcase xml file as input and executes each command in the
      testcase.
    - Computes the testcase status based on the stepstatus and the impact
      value of the step.
    - Handles step failures as per the default/specific onError action/value.
    - Calls the function to report the testcase status.

    :Arguments:
        1. testcase_filepath (string) = the full path of the testcase xml file
        2. data_repository (dict) = shared execution-state dict, mutated
           in place (wt_* keys)
        3. tc_context = 'POSITIVE'/'NEGATIVE'; negative context flips a
           boolean status
        4. runtype = keyword run type (sequential/parallel keywords)
        5. tc_parallel (bool) = True when run from a parallel suite driver;
           result is pushed onto 'queue'
        6. queue = multiprocessing queue for parallel result collection
        7. auto_defects (bool) = whether to auto-create defects on failure
        8. suite / jiraproj = suite name and jira project for reporting
        9. tc_onError_action = onError action (e.g. abort_as_error)
        10. iter_ts_sys = specific system to iterate on (iterative suites)

    Returns:
        tuple: (tc_status, data_repository)
    """
    tc_status = True
    tc_start_time = Utils.datetime_utils.get_current_timestamp()
    tc_timestamp = str(tc_start_time)
    print_info("[{0}] Testcase execution starts".format(tc_start_time))
    get_testcase_details(testcase_filepath, data_repository, jiraproj)

    # These lines are for creating testcase junit file
    from_ts = False
    # FIX: idiomatic membership test ('not in' instead of 'not ... in').
    if 'wt_junit_object' not in data_repository:
        # not from testsuite: build a standalone junit object for this case
        tc_junit_object = junit_class.Junit(
            filename=data_repository['wt_name'], timestamp=tc_timestamp,
            name="customProject_independant_testcase_execution",
            display="False")
        if "jobid" in data_repository:
            tc_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        tc_junit_object.create_testcase(
            location=data_repository['wt_filedir'],
            timestamp=tc_timestamp,
            ts_timestamp=tc_timestamp,
            name=data_repository['wt_name'],
            testcasefile_path=data_repository['wt_testcase_filepath'],
            display="False")
        junit_requirements(testcase_filepath, tc_junit_object, tc_timestamp)
        data_repository['wt_ts_timestamp'] = tc_timestamp
    else:
        # running from a testsuite: attach the case to the suite's junit tree
        tc_junit_object = data_repository['wt_junit_object']
        tc_junit_object.create_testcase(
            location="from testsuite",
            timestamp=tc_timestamp,
            ts_timestamp=data_repository['wt_ts_timestamp'],
            classname=data_repository['wt_suite_name'],
            name=data_repository['wt_name'],
            testcasefile_path=data_repository['wt_testcase_filepath'])
        from_ts = True
        junit_requirements(testcase_filepath, tc_junit_object,
                           data_repository['wt_ts_timestamp'])
    data_repository['wt_tc_timestamp'] = tc_timestamp
    data_type = data_repository['wt_data_type']

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the
    # junit result file. Need to remove these after making resultsdir,
    # logsdir as part of properties tag in testcase
    tc_junit_object.add_property(
        "resultsdir", os.path.dirname(data_repository['wt_resultsdir']),
        "tc", tc_timestamp)
    tc_junit_object.add_property(
        "logsdir", os.path.dirname(data_repository['wt_logsdir']),
        "tc", tc_timestamp)
    tc_junit_object.update_attr("title", data_repository['wt_title'], "tc",
                                tc_timestamp)
    # FIX: the original assigned this key twice in a row; once is enough.
    data_repository['wt_junit_object'] = tc_junit_object

    print_testcase_details_to_console(testcase_filepath, data_repository)
    step_list = get_steps_list(testcase_filepath)

    tc_state = Utils.xml_Utils.getChildTextbyParentTag(testcase_filepath,
                                                       'Details', 'State')
    if tc_state is not False and tc_state is not None and \
            tc_state.upper() == "DRAFT":
        print_warning("Testcase is in 'Draft' state, it may have keywords "
                      "that have not been developed yet. Skipping the "
                      "testcase execution and it will be marked as 'ERROR'")
        tc_status = "ERROR"
    else:
        # Dispatch on data_type/runtype to the matching keyword driver.
        if data_type.upper() == 'CUSTOM' and \
                runtype.upper() == 'SEQUENTIAL_KEYWORDS':
            tc_status = execute_custom(data_type, runtype,
                                       custom_sequential_kw_driver,
                                       data_repository, step_list)
        elif data_type.upper() == 'CUSTOM' and \
                runtype.upper() == 'PARALLEL_KEYWORDS':
            tc_status = execute_custom(data_type, runtype,
                                       custom_parallel_kw_driver,
                                       data_repository, step_list)
        elif data_type.upper() == 'ITERATIVE' and \
                runtype.upper() == 'SEQUENTIAL_KEYWORDS':
            print_info("iterative sequential")
            system_list = get_system_list(data_repository['wt_datafile'],
                                          iter_req=True) \
                if iter_ts_sys is None else [iter_ts_sys]
            if len(system_list) == 0:
                print_warning("Datatype is iterative but no systems found in "
                              "input datafile, when Datatype is iterative the "
                              "InputDataFile should have system(s) to "
                              "iterate upon")
                tc_status = False
            elif len(system_list) > 0:
                tc_status = iterative_sequential_kw_driver.main(
                    step_list, data_repository, tc_status, system_list)
        elif data_type.upper() == 'ITERATIVE' and \
                runtype.upper() == 'PARALLEL_KEYWORDS':
            print_info("iterative parallel")
            system_list = get_system_list(data_repository['wt_datafile'],
                                          iter_req=True) \
                if iter_ts_sys is None else [iter_ts_sys]
            if len(system_list) == 0:
                print_warning("DataType is iterative but no systems found in "
                              "input datafile, when DataType id iterative the "
                              "InputDataFile should have system(s) to "
                              "iterate upon")
                tc_status = False
            elif len(system_list) > 0:
                tc_status = iterative_parallel_kw_driver.main(
                    step_list, data_repository, tc_status, system_list)
        elif data_type.upper() == "HYBRID":
            print_info("Hybrid")
            system_list, system_node_list = get_system_list(
                data_repository['wt_datafile'], node_req=True)
            # call the hybrid driver here
            hyb_drv_obj = hybrid_driver_class.HybridDriver(
                step_list, data_repository, tc_status, system_list,
                system_node_list)
            tc_status = hyb_drv_obj.execute_hybrid_mode()
        else:
            print_warning("unsupported value provided for testcase data_type "
                          "or testsuite runtype")
            tc_status = False

    # In negative context, a boolean status is inverted (errors/exceptions
    # are left as-is).
    if tc_context.upper() == 'NEGATIVE':
        if all([tc_status != 'EXCEPTION', tc_status != 'ERROR']):
            print_debug("Test case status is: '{0}', flip status as context "
                        "is negative".format(tc_status))
            tc_status = not tc_status

    # FIX: identity comparison with False instead of '==' (avoids matching 0).
    if tc_status is False and tc_onError_action and \
            tc_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info("Testcase status will be marked as ERROR as onError "
                   "action is set to 'abort_as_error'")
        tc_status = "ERROR"

    check_and_create_defects(tc_status, auto_defects, data_repository,
                             tc_junit_object)

    print("\n")
    tc_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testcase execution completed".format(tc_end_time))
    tc_duration = Utils.datetime_utils.get_time_delta(tc_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(tc_duration)
    print_info("Testcase duration= {0}".format(hms))

    # NOTE(review): "not appicable" (sic) is kept byte-identical — it looks
    # like the literal project-level timestamp key junit_class expects;
    # confirm before normalizing the spelling.
    tc_junit_object.update_count(tc_status, "1", "ts",
                                 data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "ts",
                                 data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "pj", "not appicable")
    tc_junit_object.update_attr("status", str(tc_status), "tc", tc_timestamp)
    tc_junit_object.update_attr("time", str(tc_duration), "tc", tc_timestamp)
    tc_junit_object.add_testcase_message(tc_timestamp, tc_status)

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the
    # junit result file. Need to remove these after making resultsdir,
    # logsdir as part of properties tag in testcase
    tc_junit_object.update_attr(
        "resultsdir", os.path.dirname(data_repository['wt_resultsdir']),
        "tc", tc_timestamp)
    tc_junit_object.update_attr(
        "logsdir", os.path.dirname(data_repository['wt_logsdir']),
        "tc", tc_timestamp)

    report_testcase_result(tc_status, data_repository)
    if not from_ts:
        # Standalone case: roll the case status up to suite/project levels
        # and write the junit file ourselves.
        tc_junit_object.update_count(tc_status, "1", "pj", "not appicable")
        tc_junit_object.update_count("suites", "1", "pj", "not appicable")
        tc_junit_object.update_attr("status", str(tc_status), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("status", str(tc_status), "pj",
                                    "not appicable")
        tc_junit_object.update_attr("time", str(tc_duration), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("time", str(tc_duration), "pj",
                                    "not appicable")
        tc_junit_object.output_junit(data_repository['wt_resultsdir'])
        # Save JUnit/HTML results of the Case in MongoDB server
        if data_repository.get("db_obj") is not False:
            tc_junit_xml = data_repository['wt_resultsdir'] + os.sep + \
                tc_junit_object.filename + "_junit.xml"
            data_repository.get("db_obj").add_html_result_to_mongodb(
                tc_junit_xml)
    else:
        # send an email on TC failure (no need to send an email here when
        # executing a single case).
        if str(tc_status).upper() in ["FALSE", "ERROR", "EXCEPTION"]:
            email_setting = None
            # for first TC failure
            if "any_failures" not in data_repository:
                email_params = email.get_email_params("first_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "first_failure"
                data_repository['any_failures'] = True
            # for further TC failures
            if email_setting is None:
                email_params = email.get_email_params("every_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "every_failure"
            if email_setting is not None:
                email.compose_send_email(
                    "Test Case: ", data_repository['wt_testcase_filepath'],
                    data_repository['wt_logsdir'],
                    data_repository['wt_resultsdir'], tc_status,
                    email_setting)
        if 'wp_results_execdir' in data_repository:
            # Create and replace existing Project junit file for each case
            tc_junit_object.output_junit(
                data_repository['wp_results_execdir'], print_summary=False)
        else:
            # Create and replace existing Suite junit file for each case
            tc_junit_object.output_junit(
                data_repository['wt_results_execdir'], print_summary=False)

    if tc_parallel:
        tc_impact = data_repository['wt_tc_impact']
        if tc_impact.upper() == 'IMPACT':
            msg = "Status of the executed test case impacts Testsuite result"
        elif tc_impact.upper() == 'NOIMPACT':
            # FIX: corrected typo "Teststuie" in the log message.
            msg = "Status of the executed test case does not impact Testsuite result"
        print_debug(msg)
        tc_name = Utils.file_Utils.getFileName(testcase_filepath)
        # put result into multiprocessing queue and later retrieve in
        # corresponding driver
        queue.put((tc_status, tc_name, tc_impact, tc_duration,
                   tc_junit_object))

    # Save XML results of the Case in MongoDB server
    if data_repository.get("db_obj") is not False:
        data_repository.get("db_obj").add_xml_result_to_mongodb(
            data_repository['wt_resultfile'])

    # main need tc_status and data_repository values to unpack
    return tc_status, data_repository
def execute_project(project_filepath, auto_defects, jiraproj, res_startdir,
                    logs_startdir, data_repository):
    """Executes the project (provided as a xml file).

    - Builds the project repository and a project-level junit object.
    - Dispatches the project's testsuites to the parallel or sequential
      testsuite driver based on the project's exectype.
    - Computes and reports the project status, writes the junit result file
      and (optionally) pushes the HTML result to MongoDB.

    Arguments:
    1. project_filepath = (string) full path of the project xml file.
    2. auto_defects = (bool) create defects automatically on failure.
    3. jiraproj = (string) jira project name used for defect creation.
    4. res_startdir = (string) results start directory.
    5. logs_startdir = (string) logs start directory.
    6. data_repository = (dict) shared execution data repository
       (mutated in place with project-level keys).

    Returns:
    1. project_status = (bool/str) overall status of the project.
    2. project_repository = (dict) dictionary containing all data of the
       project under execution.
    """
    project_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution starts".format(project_start_time))
    project_title = Utils.xml_Utils.getChildTextbyParentTag(project_filepath,
                                                            'Details', 'Title')
    project_repository = get_project_details(project_filepath, res_startdir,
                                             logs_startdir, data_repository)
    project_repository['project_title'] = project_title
    testsuite_list = get_testsuite_list(project_filepath)
    # project_resultfile = project_repository['project_resultfile']
    project_name = project_repository['project_name']
    wp_results_execdir = project_repository['wp_results_execdir']
    data_repository['wp_results_execdir'] = wp_results_execdir
    data_repository['jiraproj'] = jiraproj

    pj_junit_object = junit_class.Junit(filename=project_name,
                                        timestamp=project_start_time,
                                        name=project_name, display="True")
    pj_junit_object.update_attr("resultsdir",
                                project_repository['project_execution_dir'],
                                "pj", project_start_time)
    pj_junit_object.update_attr("title", project_repository['project_title'],
                                "pj", project_start_time)
    pj_junit_object.add_property("resultsdir",
                                 project_repository['project_execution_dir'],
                                 "pj", project_start_time)
    # adding the resultsdir as attribute, need to be removed after making it
    # a property
    pj_junit_object.add_project_location(project_filepath)
    # jobid is consumed once at the outermost junit object and then removed
    if "jobid" in data_repository:
        pj_junit_object.add_jobid(data_repository["jobid"])
        del data_repository["jobid"]
    data_repository['wt_junit_object'] = pj_junit_object
    data_repository["war_parallel"] = False

    execution_type = Utils.xml_Utils.getChildAttributebyParentTag(
        project_filepath, 'Details', 'type', 'exectype')
    # for backward compatibility(when exectype is not provided)
    if execution_type is False:
        execution_type = "sequential_suites"
    if execution_type.upper() == 'PARALLEL_SUITES':
        # html object cannot be shared across processes, drop it for parallel runs
        pj_junit_object.remove_html_obj()
        data_repository["war_parallel"] = True
        print_info("Executing suites in parallel")
        project_status = parallel_testsuite_driver.main(testsuite_list,
                                                        project_repository,
                                                        data_repository,
                                                        auto_defects,
                                                        ts_parallel=True)
    elif execution_type.upper() == 'SEQUENTIAL_SUITES':
        print_info("Executing suites sequentially")
        project_status = sequential_testsuite_driver.main(testsuite_list,
                                                          project_repository,
                                                          data_repository,
                                                          auto_defects)
    else:
        print_error("unexpected project_type received...aborting execution")
        project_status = False
    print_info("\n")
    project_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Project execution completed".format(project_end_time))
    project_duration = Utils.datetime_utils.get_time_delta(project_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(project_duration)
    print_info("Project duration= {0}".format(hms))
    # report_project_result may normalize/override the raw driver status
    project_status = report_project_result(project_status, project_repository)
    pj_junit_object.update_attr("status", str(project_status), "pj",
                                project_start_time)
    pj_junit_object.update_attr("time", str(project_duration), "pj",
                                project_start_time)
    pj_junit_object.output_junit(wp_results_execdir)
    # Save JUnit/HTML results of the Project in MongoDB server
    # NOTE(review): db_obj is compared against False (not None) — presumably
    # the repository stores False when the DB feature is disabled; confirm.
    if data_repository.get("db_obj") is not False:
        pj_junit_xml = project_repository['wp_results_execdir'] +\
            os.sep + pj_junit_object.filename + "_junit.xml"
        data_repository.get("db_obj").add_html_result_to_mongodb(pj_junit_xml)
    return project_status, project_repository
def execute_testsuite(testsuite_filepath, data_repository, from_project,
                      auto_defects, jiraproj, res_startdir, logs_startdir,
                      ts_onError_action, queue, ts_parallel):
    """Executes the testsuite (provided as a xml file).

    - Takes a testsuite xml file as input and sends each testcase to
      the testcase driver(s) for execution.
    - Runs the optional testwrapper Setup before the cases and the
      testwrapper Cleanup after them.
    - Computes the testsuite status based on the testcase statuses, the
      runmode (RUF/RUP/RMT) and the impact value of each testcase.
    - Handles testcase failures as per the default/specific onError
      action/value and reports the testsuite status.

    Arguments:
    1. testsuite_filepath = (string) full path of the testsuite xml file.
    2. data_repository = (dict) shared execution data repository
       (mutated in place with suite-level keys).
    3. from_project = (bool) True when executed as part of a project.
    4. auto_defects = (bool) create defects automatically on failure.
    5. jiraproj = (string) jira project name used for defect creation.
    6. res_startdir = (string) results start directory.
    7. logs_startdir = (string) logs start directory.
    8. ts_onError_action = (string) suite-level onError action.
    9. queue = (multiprocessing.Queue) result queue used when the suite
       runs in a parallel context (may be None otherwise).
    10. ts_parallel = (bool) True when this suite itself runs in parallel.

    Returns:
    1. test_suite_status = (bool/str) status of the testsuite.
    2. suite_repository = (dict) dictionary containing suite details.
    """
    testsuite_status_list = []
    suite_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution starts".format(suite_start_time))
    initialize_suite_fields(data_repository)
    suite_repository = get_suite_details(testsuite_filepath, data_repository,
                                         from_project, res_startdir,
                                         logs_startdir)
    testcase_list = common_execution_utils.get_step_list(
        testsuite_filepath, "Testcases", "Testcase")
    execution_type = suite_repository['suite_exectype'].upper()
    no_of_tests = str(len(testcase_list))
    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    suite_execution_dir = suite_repository['suite_execution_dir']
    data_repository['wt_suite_execution_dir'] = suite_execution_dir
    data_repository['wt_results_execdir'] = suite_repository[
        'ws_results_execdir']
    data_repository['wt_logs_execdir'] = suite_repository['ws_logs_execdir']
    data_repository['wt_suite_name'] = suite_name
    suite_timestamp = testsuite_utils.get_suite_timestamp()
    data_repository['wt_ts_timestamp'] = suite_timestamp
    suite_repository['wt_ts_timestamp'] = suite_timestamp
    # both iterative flavours are reported as plain "iterative" downstream
    data_repository["suite_exectype"] = "iterative" \
        if execution_type == "ITERATIVE_SEQUENTIAL" or \
        execution_type == "ITERATIVE_PARALLEL" else execution_type

    ts_junit_display = "True"
    pj_junit_display = "False"
    if "wt_junit_object" in data_repository:
        # executed under a project: reuse the project's junit object
        ts_junit_object = data_repository["wt_junit_object"]
    else:
        ts_junit_object = junit_class.Junit(
            filename=suite_name, timestamp=suite_timestamp,
            name="customProject_independant_testcase_execution",
            display=pj_junit_display)
        if "jobid" in data_repository:
            ts_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        data_repository["wt_junit_object"] = ts_junit_object
    suite_repository["wt_junit_object"] = ts_junit_object
    ts_junit_object.create_testsuite(
        location=os.path.dirname(testsuite_filepath),
        name=suite_name, timestamp=suite_timestamp,
        suite_location=suite_repository['testsuite_filepath'],
        title=suite_repository['suite_title'],
        display=ts_junit_display,
        **ts_junit_object.init_arg())
    # Adding resultsdir as attributes to testsuite_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of
    # properties tag in testcase
    ts_junit_object.update_attr("resultsdir",
                                suite_repository['suite_execution_dir'],
                                "ts", suite_timestamp)
    ts_junit_object.add_property("resultsdir",
                                 suite_repository['suite_execution_dir'],
                                 "ts", suite_timestamp)
    # FIX: dict.has_key() is Python-2-only; "in" works on both 2 and 3
    if "data_file" in suite_repository:
        data_repository['suite_data_file'] = suite_repository['data_file']
    # jiraproj name
    data_repository['jiraproj'] = jiraproj
    # if not from_project:
    testsuite_utils.pSuite_root(junit_resultfile)
    testsuite_utils.pSuite_testsuite(junit_resultfile, suite_name, errors='0',
                                     skipped='0', tests=no_of_tests,
                                     failures='0', time='0',
                                     timestamp=suite_timestamp)
    testsuite_utils.pSuite_property(junit_resultfile, 'title',
                                    suite_repository['suite_title'])
    testsuite_utils.pSuite_property(junit_resultfile, 'location',
                                    testsuite_filepath)
    if "jobid" in data_repository:
        testsuite_utils.pSuite_property(junit_resultfile, 'resultlocation',
                                        data_repository["jobid"])
        # del data_repository["jobid"]
    print_suite_details_to_console(suite_repository, testsuite_filepath,
                                   junit_resultfile)
    # Prints the path of result summary file at the beginning of execution
    if data_repository['war_file_type'] == "Suite":
        filename = os.path.basename(testsuite_filepath)
        html_filepath = os.path.join(
            suite_repository['suite_execution_dir'],
            Utils.file_Utils.getNameOnly(filename)) + '.html'
        print_info("HTML result file: {0}".format(html_filepath))
    if not from_project:
        data_repository["war_parallel"] = False

    root = Utils.xml_Utils.getRoot(testsuite_filepath)
    suite_global_xml = root.find('Details')
    runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(
        suite_global_xml)
    # get testwrapperfile details
    testwrapperfile, j_data_type, j_runtype, setup_on_error_action = \
        get_testwrapper_file_details(testsuite_filepath, data_repository)
    setup_tc_status, cleanup_tc_status = True, True
    # execute setup steps defined in testwrapper file if testwrapperfile is present
    if testwrapperfile:
        print_info(
            "*****************TESTWRAPPER SETUP EXECUTION START*********************"
        )
        data_repository['suite_testwrapper_file'] = testwrapperfile
        data_repository['wt_data_type'] = j_data_type
        setup_tc_status, data_repository = testcase_driver.execute_testcase(
            testwrapperfile, data_repository, tc_context='POSITIVE',
            runtype=j_runtype, tc_parallel=None, queue=None,
            auto_defects=auto_defects, suite=None, jiraproj=None,
            tc_onError_action='ABORT_AS_ERROR', iter_ts_sys=None,
            steps_tag='Setup')
        print_info(
            "*****************TESTWRAPPER SETUP EXECUTION END**********************"
        )
    # run the testcases only when setup passed (or setup errors are tolerated)
    if setup_on_error_action == 'next' or \
       (setup_on_error_action == 'abort' and setup_tc_status == True):
        if execution_type.upper() == 'PARALLEL_TESTCASES':
            # html object cannot be shared across processes
            ts_junit_object.remove_html_obj()
            data_repository["war_parallel"] = True
            print_info("Executing testcases in parallel")
            test_suite_status = parallel_testcase_driver.main(
                testcase_list, suite_repository, data_repository,
                from_project, tc_parallel=True, auto_defects=auto_defects)
        elif execution_type.upper() == 'SEQUENTIAL_TESTCASES':
            if runmode is None:
                print_info("Executing testcases sequentially")
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list, suite_repository, data_repository,
                    from_project, auto_defects=auto_defects)
            elif runmode.upper() == "RUF":
                # run-until-fail: repeat up to 'value' attempts, stop on failure
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list, suite_repository, data_repository,
                        from_project, auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "FALSE" or\
                       str(test_suite_status).upper() == "ERROR":
                        break
            elif runmode.upper() == "RUP":
                # run-until-pass: repeat up to 'value' attempts, stop on pass
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list, suite_repository, data_repository,
                        from_project, auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "TRUE":
                        break
            elif runmode.upper() == "RMT":
                # run-multiple-times: always run all 'value' attempts
                print_info("Execution type: {0}, Attempts: {1}".format(
                    runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    # We aren't actually summing each test result here...
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list, suite_repository, data_repository,
                        from_project, auto_defects=auto_defects)
                    testsuite_status_list.append(test_suite_status)
        # The below runmode part is not modified/removed to preserve
        # backward compatibility
        elif execution_type.upper() == 'RUN_UNTIL_FAIL' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list, suite_repository, data_repository,
                    from_project, auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "FALSE" or\
                   str(test_suite_status).upper() == "ERROR":
                    break
        elif execution_type.upper() == 'RUN_UNTIL_PASS' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list, suite_repository, data_repository,
                    from_project, auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "TRUE":
                    break
        elif execution_type.upper() == 'RUN_MULTIPLE' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Number_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(
                execution_type, execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                # We aren't actually summing each test result here...
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list, suite_repository, data_repository,
                    from_project, auto_defects=auto_defects)
        elif execution_type.upper() == "ITERATIVE_SEQUENTIAL":
            # if execution type is iterative sequential call
            # WarriorCore.Classes.iterative_testsuite class and execute the
            # testcases in iterative sequential fashion on the systems
            print_info("Iterative sequential suite")
            iter_seq_ts_obj = IterativeTestsuite(testcase_list,
                                                 suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)
            test_suite_status = iter_seq_ts_obj.execute_iterative_sequential()
        elif execution_type.upper() == "ITERATIVE_PARALLEL":
            # if execution type is iterative parallel call
            # WarriorCore.Classes.iterative_testsuite class and execute the
            # testcases in iterative parallel fashion on the systems
            ts_junit_object.remove_html_obj()
            print_info("Iterative parallel suite")
            data_repository["war_parallel"] = True
            iter_seq_ts_obj = IterativeTestsuite(testcase_list,
                                                 suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)
            test_suite_status = iter_seq_ts_obj.execute_iterative_parallel()
        else:
            print_error("unexpected suite_type received...aborting execution")
            test_suite_status = False
        if runmode is not None:
            test_suite_status = common_execution_utils.compute_runmode_status(
                testsuite_status_list, runmode, suite_global_xml)
    else:
        print_error("Test cases in suite are not executed as setup failed to execute,"
                    "setup status : {0}".format(setup_tc_status))
        print_error("Steps in cleanup will be executed on besteffort")
        test_suite_status = "ERROR"
    # execute cleanup steps defined in testwrapper file if testwrapperfile is present
    if testwrapperfile:
        print_info(
            "*****************TESTWRAPPER CLEANUP EXECUTION START*********************"
        )
        data_repository['wt_data_type'] = j_data_type
        cleanup_tc_status, data_repository = testcase_driver.execute_testcase(
            testwrapperfile, data_repository, tc_context='POSITIVE',
            runtype=j_runtype, tc_parallel=None, queue=None,
            auto_defects=auto_defects, suite=None, jiraproj=None,
            tc_onError_action=None, iter_ts_sys=None, steps_tag='Cleanup')
        print_info(
            "*****************TESTWRAPPER CLEANUP EXECUTION END*********************"
        )
    print_info("\n")
    suite_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution completed".format(suite_end_time))
    if test_suite_status == True and cleanup_tc_status == True:
        test_suite_status = True
    # set status to WARN if only cleanup fails
    elif test_suite_status == True and cleanup_tc_status != True:
        print_warning("setting test suite status to WARN as cleanup failed")
        test_suite_status = 'WARN'
    suite_duration = Utils.datetime_utils.get_time_delta(suite_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(suite_duration)
    print_info("Testsuite duration= {0}".format(hms))
    testsuite_utils.update_suite_duration(str(suite_duration))
    if test_suite_status == False and ts_onError_action and\
       ts_onError_action.upper() == 'ABORT_AS_ERROR':
        # FIX: added the missing space between the adjacent string literals
        # (message used to read "...is setto 'abort_as_error'")
        print_info(
            "Testsuite status will be marked as ERROR as onError action is set "
            "to 'abort_as_error'")
        test_suite_status = "ERROR"
    testsuite_utils.report_testsuite_result(suite_repository,
                                            test_suite_status)
    ts_junit_object = data_repository['wt_junit_object']
    ts_junit_object.update_count(test_suite_status, "1", "pj")
    ts_junit_object.update_count("suites", "1", "pj", "not appicable")
    ts_junit_object.update_attr("status", str(test_suite_status), "ts",
                                suite_timestamp)
    ts_junit_object.update_attr("time", str(suite_duration), "ts",
                                suite_timestamp)
    if not from_project:
        # FIX: was "not applicable" here while every other pj-scope call in
        # this file passes the literal token "not appicable" (sic) — the
        # mismatched token meant the pj status attribute was never matched.
        ts_junit_object.update_attr("status", str(test_suite_status), "pj",
                                    "not appicable")
        ts_junit_object.update_attr("time", str(suite_duration), "pj",
                                    "not appicable")
        ts_junit_object.output_junit(data_repository['wt_results_execdir'])
        # Save JUnit/HTML results of the Suite in MongoDB server
        if data_repository.get("db_obj") is not False:
            ts_junit_xml = (data_repository['wt_results_execdir'] + os.sep +
                            ts_junit_object.filename + "_junit.xml")
            data_repository.get("db_obj").add_html_result_to_mongodb(
                ts_junit_xml)
    else:
        # Do not output JUnit result file for parallel suite execution
        if not ts_parallel and not data_repository['war_parallel']:
            # Create and replace existing Project junit file for each suite
            ts_junit_object.output_junit(data_repository['wp_results_execdir'],
                                         print_summary=False)
    if ts_parallel:
        ts_impact = data_repository['wt_ts_impact']
        if ts_impact.upper() == 'IMPACT':
            msg = "Status of the executed suite impacts project result"
        elif ts_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed suite case does not impact project result"
        print_debug(msg)
        # put result into multiprocessing queue and later retrieve in
        # corresponding driver
        queue.put((test_suite_status, ts_impact, suite_timestamp,
                   ts_junit_object))
    return test_suite_status, suite_repository
def execute_testcase(testcase_filepath, data_repository, tc_context, runtype,
                     tc_parallel, queue, auto_defects, suite, jiraproj,
                     tc_onError_action, iter_ts_sys, steps_tag="Steps"):
    """Executes the testcase (provided as a xml file).

    - Takes a testcase xml file as input and executes each step in the
      requested block (Setup/Steps/Cleanup).
    - Runs the testwrapper Setup/Cleanup blocks when a testwrapper file is
      attached to the case (and not already handled by the suite).
    - Computes the testcase status based on the step statuses and the
      impact value of each step; flips the status for a NEGATIVE context.
    - Handles step failures as per the default/specific onError action/value
      and reports the testcase status.

    :Arguments:
        1. testcase_filepath (string) = full path of the testcase xml file.
        2. data_repository (dict) = shared execution data repository
           (mutated in place with case-level keys).
        3. tc_context (string) = 'POSITIVE' or 'NEGATIVE' execution context.
        4. runtype (string) = run type for the steps.
        5. tc_parallel (bool) = True when the case runs in a parallel context.
        6. queue (multiprocessing.Queue) = result queue for parallel runs.
        7. auto_defects (bool) = create defects automatically on failure.
        8. suite (string) = suite name when executed from a suite.
        9. jiraproj (string) = jira project name for defect creation.
        10. tc_onError_action (string) = case-level onError action.
        11. iter_ts_sys = system under iteration (iterative suites).
        12. steps_tag (string) = xml block to execute: Setup/Steps/Cleanup.

    :Returns:
        1. tc_status (bool/str) = status of the testcase.
        2. data_repository (dict) = the (mutated) data repository.
    """
    tc_status = True
    tc_start_time = Utils.datetime_utils.get_current_timestamp()
    tc_timestamp = str(tc_start_time)
    print_info("[{0}] Testcase execution starts".format(tc_start_time))
    get_testcase_details(testcase_filepath, data_repository, jiraproj)
    # get testwrapperfile details like testwrapperfile, data_type and runtype
    testwrapperfile, j_data_type, j_runtype, setup_on_error_action = \
        get_testwrapper_file_details(testcase_filepath, data_repository)
    data_repository['wt_testwrapperfile'] = testwrapperfile
    isRobotWrapperCase = check_robot_wrapper_case(testcase_filepath)

    # These lines are for creating testcase junit file
    from_ts = False
    pj_junit_display = 'False'
    if not 'wt_junit_object' in data_repository:
        # not from testsuite
        tc_junit_object = junit_class.Junit(
            filename=data_repository['wt_name'], timestamp=tc_timestamp,
            name="customProject_independant_testcase_execution",
            display=pj_junit_display)
        if "jobid" in data_repository:
            tc_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        tc_junit_object.create_testcase(
            location=data_repository['wt_filedir'],
            timestamp=tc_timestamp,
            ts_timestamp=tc_timestamp,
            name=data_repository['wt_name'],
            testcasefile_path=data_repository['wt_testcase_filepath'])
        junit_requirements(testcase_filepath, tc_junit_object, tc_timestamp)
        data_repository['wt_ts_timestamp'] = tc_timestamp
    else:
        tag = "testcase" if steps_tag == "Steps" else steps_tag
        tc_junit_object = data_repository['wt_junit_object']
        # creates testcase based on tag given Setup/Steps/Cleanup
        tc_junit_object.create_testcase(
            location="from testsuite",
            timestamp=tc_timestamp,
            ts_timestamp=data_repository['wt_ts_timestamp'],
            classname=data_repository['wt_suite_name'],
            name=data_repository['wt_name'],
            tag=tag,
            testcasefile_path=data_repository['wt_testcase_filepath'])
        from_ts = True
        junit_requirements(testcase_filepath, tc_junit_object,
                           data_repository['wt_ts_timestamp'])
    data_repository['wt_tc_timestamp'] = tc_timestamp
    data_repository['tc_parallel'] = tc_parallel
    data_type = data_repository['wt_data_type']
    if not from_ts:
        data_repository["war_parallel"] = False

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the
    # junit result file. Need to remove these after making resultsdir, logsdir
    # as part of properties tag in testcase
    tc_junit_object.add_property(
        "resultsdir", os.path.dirname(data_repository['wt_resultsdir']),
        "tc", tc_timestamp)
    tc_junit_object.update_attr("title", data_repository['wt_title'], "tc",
                                tc_timestamp)
    data_repository['wt_junit_object'] = tc_junit_object
    print_testcase_details_to_console(testcase_filepath, data_repository,
                                      steps_tag)
    # Prints the path of result summary file at the beginning of execution
    if data_repository['war_file_type'] == "Case":
        filename = os.path.basename(testcase_filepath)
        html_filepath = os.path.join(
            data_repository['wt_resultsdir'],
            Utils.file_Utils.getNameOnly(filename)) + '.html'
        print_info("HTML result file: {0}".format(html_filepath))
    # get the list of steps in the given tag - Setup/Steps/Cleanup
    step_list = common_execution_utils.get_step_list(testcase_filepath,
                                                     steps_tag, "step")
    if not step_list:
        print_warning("Warning! cannot get steps for execution")
        tc_status = "ERROR"
    if step_list and not len(step_list):
        print_warning("step list is empty in {0} block".format(steps_tag))

    tc_state = Utils.xml_Utils.getChildTextbyParentTag(testcase_filepath,
                                                       'Details', 'State')
    if tc_state is not False and tc_state is not None and \
       tc_state.upper() == "DRAFT":
        print_warning("Testcase is in 'Draft' state, it may have keywords "
                      "that have not been developed yet. Skipping the "
                      "testcase execution and it will be marked as 'ERROR'")
        tc_status = "ERROR"
    elif isRobotWrapperCase is True and from_ts is False:
        print_warning("Case which has robot_wrapper steps should be executed "
                      "as part of a Suite. Skipping the case execution and "
                      "it will be marked as 'ERROR'")
        tc_status = "ERROR"
    elif step_list:
        setup_tc_status, cleanup_tc_status = True, True
        # 1.execute setup steps if testwrapperfile is present in testcase
        # and not from testsuite execution
        # 2.execute setup steps if testwrapperfile is present in testcase
        # and from testsuite execution and testwrapperfile is not defined
        # in test suite.
        # FIX: dict.has_key() is Python-2-only; "in" works on both 2 and 3
        if (testwrapperfile and not from_ts) or \
           (testwrapperfile and from_ts and
                'suite_testwrapper_file' not in data_repository):
            setup_step_list = common_execution_utils.get_step_list(
                testwrapperfile, "Setup", "step")
            if not len(setup_step_list):
                print_warning(
                    "step list is empty in {0} block".format("Setup"))
            print_info("****** SETUP STEPS EXECUTION STARTS *******")
            data_repository['wt_step_type'] = 'setup'
            # to consider relative paths provided from wrapperfile instead of
            # testcase file
            original_tc_filepath = data_repository['wt_testcase_filepath']
            data_repository['wt_testcase_filepath'] = testwrapperfile
            setup_tc_status = execute_steps(j_data_type, j_runtype,
                                            data_repository, setup_step_list,
                                            tc_junit_object, iter_ts_sys)
            # reset to original testcase filepath
            data_repository['wt_testcase_filepath'] = original_tc_filepath
            data_repository['wt_step_type'] = 'step'
            print_info("setup_tc_status : {0}".format(setup_tc_status))
            print_info("****** SETUP STEPS EXECUTION ENDS *******")
        if setup_on_error_action == 'next' or \
           (setup_on_error_action == 'abort'
                and isinstance(setup_tc_status, bool) and setup_tc_status):
            if steps_tag == "Steps":
                print_info("****** TEST STEPS EXECUTION STARTS *******")
            data_repository['wt_step_type'] = 'step'
            tc_status = execute_steps(data_type, runtype, data_repository,
                                      step_list, tc_junit_object, iter_ts_sys)
            if steps_tag == "Steps":
                print_info("****** TEST STEPS EXECUTION ENDS *******")
        else:
            print_error("Test steps are not executed as setup steps failed to execute,"
                        "setup status : {0}".format(setup_tc_status))
            print_error("Steps in cleanup will be executed on besteffort")
            tc_status = "ERROR"
        # 1.execute cleanup steps if testwrapperfile is present in testcase
        # and not from testsuite execution
        # 2.execute cleanup steps if testwrapperfile is present in testcase
        # and from testsuite execution and testwrapperfile is not defined
        # in test suite.
        if (testwrapperfile and not from_ts) or \
           (testwrapperfile and from_ts and
                'suite_testwrapper_file' not in data_repository):
            cleanup_step_list = common_execution_utils.get_step_list(
                testwrapperfile, "Cleanup", "step")
            if not len(cleanup_step_list):
                print_warning(
                    "step list is empty in {0} block".format("Cleanup"))
            print_info("****** CLEANUP STEPS EXECUTION STARTS *******")
            data_repository['wt_step_type'] = 'cleanup'
            original_tc_filepath = data_repository['wt_testcase_filepath']
            # to consider relative paths provided from wrapperfile instead of
            # testcase file
            data_repository['wt_testcase_filepath'] = testwrapperfile
            cleanup_tc_status = execute_steps(j_data_type, j_runtype,
                                              data_repository,
                                              cleanup_step_list,
                                              tc_junit_object, iter_ts_sys)
            # reset to original testcase filepath
            data_repository['wt_testcase_filepath'] = original_tc_filepath
            data_repository['wt_step_type'] = 'step'
            print_info("cleanup_tc_status : {0}".format(cleanup_tc_status))
            print_info("****** CLEANUP STEPS EXECUTION ENDS *******")

    if tc_context.upper() == 'NEGATIVE':
        if all([tc_status != 'EXCEPTION', tc_status != 'ERROR']):
            print_debug("Test case status is: '{0}', flip status as context "
                        "is negative".format(tc_status))
            tc_status = not tc_status
    # cleanup_tc_status is referenced only when step_list is truthy, so the
    # short-circuit keeps these safe when the steps block was empty
    if step_list and isinstance(tc_status, bool) and \
            isinstance(cleanup_tc_status, bool) and tc_status and \
            cleanup_tc_status:
        tc_status = True
    # set tc status to WARN if only cleanup fails
    elif step_list and isinstance(tc_status, bool) and tc_status and \
            cleanup_tc_status != True:
        print_warning("setting tc status to WARN as cleanup failed")
        tc_status = "WARN"

    if step_list and tc_status == False and tc_onError_action and \
            tc_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info("Testcase status will be marked as ERROR as onError "
                   "action is set to 'abort_as_error'")
        tc_status = "ERROR"
    defectsdir = data_repository['wt_defectsdir']
    check_and_create_defects(tc_status, auto_defects, data_repository,
                             tc_junit_object)
    print_info("\n")
    tc_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testcase execution completed".format(tc_end_time))
    tc_duration = Utils.datetime_utils.get_time_delta(tc_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(tc_duration)
    print_info("Testcase duration= {0}".format(hms))
    tc_junit_object.update_count(tc_status, "1", "ts",
                                 data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "ts",
                                 data_repository['wt_ts_timestamp'])
    tc_junit_object.update_count("tests", "1", "pj", "not appicable")
    tc_junit_object.update_attr("status", str(tc_status), "tc", tc_timestamp)
    tc_junit_object.update_attr("time", str(tc_duration), "tc", tc_timestamp)
    tc_junit_object.add_testcase_message(tc_timestamp, tc_status)
    if str(tc_status).upper() in ["FALSE", "ERROR", "EXCEPTION"]:
        tc_junit_object.update_attr("defects", defectsdir, "tc", tc_timestamp)

    # Adding resultsdir, logsdir, title as attributes to testcase_tag in the
    # junit result file. Need to remove these after making resultsdir, logsdir
    # as part of properties tag in testcase
    tc_junit_object.update_attr(
        "resultsdir", os.path.dirname(data_repository['wt_resultsdir']),
        "tc", tc_timestamp)
    tc_junit_object.update_attr(
        "logsdir", os.path.dirname(data_repository['wt_logsdir']),
        "tc", tc_timestamp)

    # Best-effort: publish the case result to a kafka server when the input
    # data file declares a "kafka_producer" system.
    data_file = data_repository["wt_datafile"]
    system_name = ""
    try:
        tree = et.parse(data_file)
        for elem in tree.iter():
            if elem.tag == "system":
                for key, value in elem.items():
                    if value == "kafka_producer":
                        system_name = elem.get("name")
                        break
    # FIX: was a bare "except:" which also swallowed SystemExit and
    # KeyboardInterrupt; the data file is optional so any parse failure is
    # still deliberately ignored.
    except Exception:
        pass
    if system_name:
        junit_file_obj = data_repository['wt_junit_object']
        root = junit_file_obj.root
        suite_details = root.findall("testsuite")[0]
        test_case_details = suite_details.findall("testcase")[0]
        print_info("kafka server is presented in Inputdata file..")
        system_details = _get_system_or_subsystem(data_file, system_name)
        data = {}
        for item in system_details.getchildren():
            if item.tag == "kafka_port":
                ssh_port = item.text
                continue
            if item.tag == "ip":
                ip_address = item.text
                continue
            try:
                value = ast.literal_eval(item.text)
            # FIX: literal_eval raises SyntaxError (not only ValueError) on
            # malformed text; fall back to the raw text in both cases.
            except (ValueError, SyntaxError):
                value = item.text
            data.update({item.tag: value})
        # NOTE(review): assumes the kafka system always carries "ip" and
        # "kafka_port" children; a NameError is raised otherwise — confirm.
        ip_port = ["{}:{}".format(ip_address, ssh_port)]
        data.update({"bootstrap_servers": ip_port})
        data.update({"value_serializer": lambda x: dumps(x).encode('utf-8')})
        try:
            producer = WarriorKafkaProducer(**data)
            producer.send_messages('warrior_results', suite_details.items())
            producer.send_messages('warrior_results',
                                   test_case_details.items())
            print_info("message published to topic: warrior_results {}".format(
                suite_details.items()))
            print_info("message published to topic: warrior_results {}".format(
                test_case_details.items()))
        # FIX: was a bare "except:"; kafka publishing stays best-effort.
        except Exception:
            print_warning("Unable to connect kafka server !!")

    report_testcase_result(tc_status, data_repository, tag=steps_tag)
    if not from_ts:
        tc_junit_object.update_count(tc_status, "1", "pj", "not appicable")
        tc_junit_object.update_count("suites", "1", "pj", "not appicable")
        tc_junit_object.update_attr("status", str(tc_status), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("status", str(tc_status), "pj",
                                    "not appicable")
        tc_junit_object.update_attr("time", str(tc_duration), "ts",
                                    data_repository['wt_ts_timestamp'])
        tc_junit_object.update_attr("time", str(tc_duration), "pj",
                                    "not appicable")
        tc_junit_object.output_junit(data_repository['wt_resultsdir'])
        # Save JUnit/HTML results of the Case in MongoDB server
        if data_repository.get("db_obj") is not False:
            tc_junit_xml = data_repository['wt_resultsdir'] + os.sep + \
                tc_junit_object.filename + "_junit.xml"
            data_repository.get("db_obj").add_html_result_to_mongodb(
                tc_junit_xml)
    else:
        # send an email on TC failure(no need to send an email here when
        # executing a single case).
        if str(tc_status).upper() in ["FALSE", "ERROR", "EXCEPTION"]:
            email_setting = None
            # for first TC failure
            if "any_failures" not in data_repository:
                email_params = email.get_email_params("first_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "first_failure"
                data_repository['any_failures'] = True
            # for further TC failures
            if email_setting is None:
                email_params = email.get_email_params("every_failure")
                if all(value != "" for value in email_params[:3]):
                    email_setting = "every_failure"
            if email_setting is not None:
                email.compose_send_email(
                    "Test Case: ", data_repository['wt_testcase_filepath'],
                    data_repository['wt_logsdir'],
                    data_repository['wt_resultsdir'], tc_status,
                    email_setting)
        if not tc_parallel and not data_repository["war_parallel"]:
            if 'wp_results_execdir' in data_repository:
                # Create and replace existing Project junit file for each case
                tc_junit_object.output_junit(
                    data_repository['wp_results_execdir'],
                    print_summary=False)
            else:
                # Create and replace existing Suite junit file for each case
                tc_junit_object.output_junit(
                    data_repository['wt_results_execdir'],
                    print_summary=False)
        if tc_parallel:
            tc_impact = data_repository['wt_tc_impact']
            if tc_impact.upper() == 'IMPACT':
                msg = "Status of the executed test case impacts Testsuite result"
            elif tc_impact.upper() == 'NOIMPACT':
                msg = "Status of the executed test case does not impact Teststuie result"
            print_debug(msg)
            tc_name = Utils.file_Utils.getFileName(testcase_filepath)
            # put result into multiprocessing queue and later retrieve in
            # corresponding driver
            queue.put((tc_status, tc_name, tc_impact, tc_duration,
                       tc_junit_object))
    # Save XML results of the Case in MongoDB server
    if data_repository.get("db_obj") is not False:
        data_repository.get("db_obj").add_xml_result_to_mongodb(
            data_repository['wt_resultfile'])
    # main need tc_status and data_repository values to unpack
    return tc_status, data_repository