def execute_testsuite(testsuite_filepath, data_repository, from_project,
                      auto_defects, jiraproj, res_startdir, logs_startdir,
                      ts_onError_action):
    """Executes the testsuite (provided as an xml file)
    - Takes a testsuite xml file as input and sends each testcase to Basedriver
      for execution.
    - Computes the testsuite status based on the testcase_status and the impact
      value of the testcase
    - Handles testcase failures as per the default/specific onError action/value
    - Calls the function to report the testsuite status

    Arguments:
    1. testsuite_filepath = (string) the full path of the testsuite xml file.
    2. data_repository = (dict) data repository of the current execution.
    3. from_project = (boolean) True when the testsuite is executed as part of a project.
    4. auto_defects = (boolean) True if defects are to be reported automatically for failures.
    5. jiraproj = (string) name of the jira project used for defect reporting.
    6. res_startdir = (string) user-provided directory under which the results directory is created.
    7. logs_startdir = (string) user-provided directory under which the logs directory is created.
    8. ts_onError_action = (string) testsuite level onError action (e.g. 'abort_as_error').
    """
    suite_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution starts".format(suite_start_time))
    # goto_tc = False
    suite_repository = get_suite_details(testsuite_filepath, data_repository,
                                         from_project, res_startdir, logs_startdir)
    testcase_list = get_testcase_list(testsuite_filepath)
    execution_type = suite_repository['suite_exectype'].upper()
    no_of_tests = str(len(testcase_list))

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    suite_execution_dir = suite_repository['suite_execution_dir']

    data_repository['wt_suite_execution_dir'] = suite_execution_dir
    data_repository['wt_results_execdir'] = suite_repository['ws_results_execdir']
    data_repository['wt_logs_execdir'] = suite_repository['ws_logs_execdir']
    data_repository['wt_suite_name'] = suite_name

    suite_timestamp = testsuite_utils.get_suite_timestamp()
    data_repository['wt_ts_timestamp'] = suite_timestamp
    suite_repository['wt_ts_timestamp'] = suite_timestamp

    data_repository["suite_exectype"] = "iterative" if execution_type == "ITERATIVE_SEQUENTIAL" \
        or execution_type == "ITERATIVE_PARALLEL" else execution_type

    ts_junit_display = "True"
    pj_junit_display = "False"
    if "wt_junit_object" in data_repository:
        ts_junit_object = data_repository["wt_junit_object"]
    else:
        ts_junit_object = junit_class.Junit(filename=suite_name, timestamp=suite_timestamp,
                                            name="customProject_independant_testcase_execution",
                                            display=pj_junit_display)
        if "jobid" in data_repository:
            ts_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        data_repository["wt_junit_object"] = ts_junit_object
    suite_repository["wt_junit_object"] = ts_junit_object
    ts_junit_object.create_testsuite(location=os.path.dirname(testsuite_filepath),
                                     name=suite_name, timestamp=suite_timestamp,
                                     suite_location=suite_repository['testsuite_filepath'],
                                     title=suite_repository['suite_title'],
                                     display=ts_junit_display,
                                     **ts_junit_object.init_arg())

    # Adding resultsdir as attributes to testsuite_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    ts_junit_object.update_attr("resultsdir", suite_repository['suite_execution_dir'],
                                "ts", suite_timestamp)
    ts_junit_object.add_property("resultsdir", suite_repository['suite_execution_dir'],
                                 "ts", suite_timestamp)

    if "data_file" in suite_repository:
        data_repository['suite_data_file'] = suite_repository['data_file']

    # jiraproj name
    data_repository['jiraproj'] = jiraproj

    # if not from_project:
    testsuite_utils.pSuite_root(junit_resultfile)

    testsuite_utils.pSuite_testsuite(junit_resultfile, suite_name, errors='0', skipped='0',
                                     tests=no_of_tests, failures='0', time='0',
                                     timestamp=suite_timestamp)
    testsuite_utils.pSuite_property(junit_resultfile, 'title', suite_repository['suite_title'])
    testsuite_utils.pSuite_property(junit_resultfile, 'location', testsuite_filepath)
    if "jobid" in data_repository:
        testsuite_utils.pSuite_property(junit_resultfile, 'resultlocation',
                                        data_repository["jobid"])
        # del data_repository["jobid"]

    print_suite_details_to_console(suite_repository, testsuite_filepath, junit_resultfile)

    data_repository["war_parallel"] = False

    if execution_type.upper() == 'PARALLEL_TESTCASES':
        ts_junit_object.remove_html_obj()
        data_repository["war_parallel"] = True
        print_info("Executing testcases in parallel")
        test_suite_status = parallel_testcase_driver.main(testcase_list, suite_repository,
                                                          data_repository, from_project,
                                                          tc_parallel=True,
                                                          auto_defects=auto_defects)

    elif execution_type.upper() == 'SEQUENTIAL_TESTCASES':
        print_info("Executing testcases sequentially")
        test_suite_status = sequential_testcase_driver.main(testcase_list, suite_repository,
                                                            data_repository, from_project,
                                                            auto_defects=auto_defects)

    elif execution_type.upper() == 'RUN_UNTIL_FAIL':
        execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
            testsuite_filepath, 'Details', 'type', 'Max_Attempts')
        print_info("Execution type: {0}, Attempts: {1}".format(execution_type, execution_value))
        i = 0
        while i < int(execution_value):
            i += 1
            print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
            test_suite_status = sequential_testcase_driver.main(testcase_list, suite_repository,
                                                                data_repository, from_project,
                                                                auto_defects=auto_defects)
            test_count = i * len(testcase_list)
            testsuite_utils.pSuite_update_suite_tests(str(test_count))
            if str(test_suite_status).upper() == "FALSE" or\
               str(test_suite_status).upper() == "ERROR":
                break

    elif execution_type.upper() == 'RUN_UNTIL_PASS':
        execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
            testsuite_filepath, 'Details', 'type', 'Max_Attempts')
        print_info("Execution type: {0}, Attempts: {1}".format(execution_type, execution_value))
        i = 0
        while i < int(execution_value):
            i += 1
            print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
            test_suite_status = sequential_testcase_driver.main(testcase_list, suite_repository,
                                                                data_repository, from_project,
                                                                auto_defects=auto_defects)
            test_count = i * len(testcase_list)
            testsuite_utils.pSuite_update_suite_tests(str(test_count))
            if str(test_suite_status).upper() == "TRUE":
                break

    elif execution_type.upper() == 'RUN_MULTIPLE':
        Max_Attempts = Utils.xml_Utils.getChildAttributebyParentTag(
            testsuite_filepath, 'Details', 'type', 'Max_Attempts')
        Number_Attempts = Utils.xml_Utils.getChildAttributebyParentTag(
            testsuite_filepath, 'Details', 'type', 'Number_Attempts')
        if Max_Attempts == "":
            execution_value = Number_Attempts
        else:
            execution_value = Max_Attempts
        print_info("Execution type: {0}, Max Attempts: {1}".format(execution_type,
                                                                   execution_value))
        i = 0
        while i < int(execution_value):
            i += 1
            print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
            # We aren't actually summing each test result here...
            test_suite_status = sequential_testcase_driver.main(testcase_list, suite_repository,
                                                                data_repository, from_project,
                                                                auto_defects=auto_defects)

    elif execution_type.upper() == "ITERATIVE_SEQUENTIAL":
        # if execution type is iterative sequential call WarriorCore.Classes.iterative_testsuite
        # class and execute the testcases in iterative sequential fashion on the systems
        print_info("Iterative sequential suite")
        iter_seq_ts_obj = IterativeTestsuite(testcase_list, suite_repository,
                                             data_repository, from_project, auto_defects)
        test_suite_status = iter_seq_ts_obj.execute_iterative_sequential()

    elif execution_type.upper() == "ITERATIVE_PARALLEL":
        # if execution type is iterative parallel call WarriorCore.Classes.iterative_testsuite
        # class and execute the testcases in iterative parallel fashion on the systems
        ts_junit_object.remove_html_obj()
        print_info("Iterative parallel suite")
        data_repository["war_parallel"] = True
        iter_seq_ts_obj = IterativeTestsuite(testcase_list, suite_repository,
                                             data_repository, from_project, auto_defects)
        test_suite_status = iter_seq_ts_obj.execute_iterative_parallel()

    else:
        print_error("unexpected suite_type received...aborting execution")
        test_suite_status = False

    print_info("\n")
    suite_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution completed".format(suite_end_time))
    suite_duration = Utils.datetime_utils.get_time_delta(suite_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(suite_duration)
    print_info("Testsuite duration= {0}".format(hms))
    testsuite_utils.update_suite_duration(str(suite_duration))

    if test_suite_status == False and ts_onError_action and\
       ts_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info("Testsuite status will be marked as ERROR as onError action is set "
                   "to 'abort_as_error'")
        test_suite_status = "ERROR"
    testsuite_utils.report_testsuite_result(suite_repository, test_suite_status)

    ts_junit_object = data_repository['wt_junit_object']
    ts_junit_object.update_count(test_suite_status, "1", "pj")
    ts_junit_object.update_count("suites", "1", "pj", "not applicable")
    ts_junit_object.update_attr("status", str(test_suite_status), "ts", suite_timestamp)
    ts_junit_object.update_attr("time", str(suite_duration), "ts", suite_timestamp)

    if not from_project:
        ts_junit_object.update_attr("status", str(test_suite_status), "pj", "not applicable")
        ts_junit_object.update_attr("time", str(suite_duration), "pj", "not applicable")
        ts_junit_object.output_junit(data_repository['wt_results_execdir'])

        # Save JUnit/HTML results of the Suite in MongoDB server
        if data_repository.get("db_obj") is not False:
            ts_junit_xml = (data_repository['wt_results_execdir'] + os.sep +
                            ts_junit_object.filename + "_junit.xml")
            data_repository.get("db_obj").add_html_result_to_mongodb(ts_junit_xml)
    else:
        # Create and replace existing Project junit file for each suite
        ts_junit_object.output_junit(data_repository['wp_results_execdir'],
                                     print_summary=False)

    return test_suite_status, suite_repository
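# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the framework): the eight-argument driver
# above is typically invoked as below. The suite path and the onError action
# are hypothetical placeholders; in real runs the calling Warrior driver
# prepares data_repository before delegating here.
#
#     status, suite_repo = execute_testsuite(
#         "/tmp/suites/demo_suite.xml",   # hypothetical suite xml path
#         data_repository,                # prepared by the calling driver
#         from_project=False,
#         auto_defects=False,
#         jiraproj=None,
#         res_startdir=None,
#         logs_startdir=None,
#         ts_onError_action="NEXT")       # assumed default onError action
# ---------------------------------------------------------------------------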
def execute_testsuite(testsuite_filepath, data_repository, from_project,
                      auto_defects, jiraproj, res_startdir, logs_startdir,
                      ts_onError_action, queue, ts_parallel):
    """Executes the testsuite (provided as an xml file)
    - Takes a testsuite xml file as input and sends each testcase to Basedriver
      for execution.
    - Computes the testsuite status based on the testcase_status and the impact
      value of the testcase
    - Handles testcase failures as per the default/specific onError action/value
    - Calls the function to report the testsuite status

    Arguments:
    1. testsuite_filepath = (string) the full path of the testsuite xml file.
    2. data_repository = (dict) data repository of the current execution.
    3. from_project = (boolean) True when the testsuite is executed as part of a project.
    4. auto_defects = (boolean) True if defects are to be reported automatically for failures.
    5. jiraproj = (string) name of the jira project used for defect reporting.
    6. res_startdir = (string) user-provided directory under which the results directory is created.
    7. logs_startdir = (string) user-provided directory under which the logs directory is created.
    8. ts_onError_action = (string) testsuite level onError action (e.g. 'abort_as_error').
    9. queue = (multiprocessing.Queue) queue on which the suite result tuple is placed
       when the testsuite is executed in parallel.
    10. ts_parallel = (boolean) True when the testsuite itself is executed in parallel.
    """
    testsuite_status_list = []
    suite_start_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution starts".format(suite_start_time))
    initialize_suite_fields(data_repository)
    suite_repository = get_suite_details(testsuite_filepath, data_repository,
                                         from_project, res_startdir, logs_startdir)
    testcase_list = common_execution_utils.get_step_list(testsuite_filepath,
                                                         "Testcases", "Testcase")
    execution_type = suite_repository['suite_exectype'].upper()
    no_of_tests = str(len(testcase_list))

    junit_resultfile = suite_repository['junit_resultfile']
    suite_name = suite_repository['suite_name']
    suite_execution_dir = suite_repository['suite_execution_dir']

    data_repository['wt_suite_execution_dir'] = suite_execution_dir
    data_repository['wt_results_execdir'] = suite_repository['ws_results_execdir']
    data_repository['wt_logs_execdir'] = suite_repository['ws_logs_execdir']
    data_repository['wt_suite_name'] = suite_name

    suite_timestamp = testsuite_utils.get_suite_timestamp()
    data_repository['wt_ts_timestamp'] = suite_timestamp
    suite_repository['wt_ts_timestamp'] = suite_timestamp

    data_repository["suite_exectype"] = "iterative" if execution_type == "ITERATIVE_SEQUENTIAL" \
        or execution_type == "ITERATIVE_PARALLEL" else execution_type

    ts_junit_display = "True"
    pj_junit_display = "False"
    if "wt_junit_object" in data_repository:
        ts_junit_object = data_repository["wt_junit_object"]
    else:
        ts_junit_object = junit_class.Junit(
            filename=suite_name, timestamp=suite_timestamp,
            name="customProject_independant_testcase_execution",
            display=pj_junit_display)
        if "jobid" in data_repository:
            ts_junit_object.add_jobid(data_repository["jobid"])
            del data_repository["jobid"]
        data_repository["wt_junit_object"] = ts_junit_object
    suite_repository["wt_junit_object"] = ts_junit_object
    ts_junit_object.create_testsuite(
        location=os.path.dirname(testsuite_filepath),
        name=suite_name, timestamp=suite_timestamp,
        suite_location=suite_repository['testsuite_filepath'],
        title=suite_repository['suite_title'],
        display=ts_junit_display,
        **ts_junit_object.init_arg())

    # Adding resultsdir as attributes to testsuite_tag in the junit result file
    # Need to remove these after making resultsdir, logsdir as part of properties tag in testcase
    ts_junit_object.update_attr("resultsdir", suite_repository['suite_execution_dir'],
                                "ts", suite_timestamp)
    ts_junit_object.add_property("resultsdir", suite_repository['suite_execution_dir'],
                                 "ts", suite_timestamp)

    if "data_file" in suite_repository:
        data_repository['suite_data_file'] = suite_repository['data_file']

    # jiraproj name
    data_repository['jiraproj'] = jiraproj

    # if not from_project:
    testsuite_utils.pSuite_root(junit_resultfile)

    testsuite_utils.pSuite_testsuite(junit_resultfile, suite_name, errors='0', skipped='0',
                                     tests=no_of_tests, failures='0', time='0',
                                     timestamp=suite_timestamp)
    testsuite_utils.pSuite_property(junit_resultfile, 'title', suite_repository['suite_title'])
    testsuite_utils.pSuite_property(junit_resultfile, 'location', testsuite_filepath)
    if "jobid" in data_repository:
        testsuite_utils.pSuite_property(junit_resultfile, 'resultlocation',
                                        data_repository["jobid"])
        # del data_repository["jobid"]

    print_suite_details_to_console(suite_repository, testsuite_filepath, junit_resultfile)

    # Prints the path of result summary file at the beginning of execution
    if data_repository['war_file_type'] == "Suite":
        filename = os.path.basename(testsuite_filepath)
        html_filepath = os.path.join(suite_repository['suite_execution_dir'],
                                     Utils.file_Utils.getNameOnly(filename)) + '.html'
        print_info("HTML result file: {0}".format(html_filepath))

    if not from_project:
        data_repository["war_parallel"] = False

    root = Utils.xml_Utils.getRoot(testsuite_filepath)
    suite_global_xml = root.find('Details')
    runmode, value, _ = common_execution_utils.get_runmode_from_xmlfile(suite_global_xml)

    # get testwrapperfile details
    testwrapperfile, j_data_type, j_runtype, setup_on_error_action = \
        get_testwrapper_file_details(testsuite_filepath, data_repository)
    setup_tc_status, cleanup_tc_status = True, True

    # execute setup steps defined in testwrapper file if testwrapperfile is present
    if testwrapperfile:
        print_info("*****************TESTWRAPPER SETUP EXECUTION START*********************")
        data_repository['suite_testwrapper_file'] = testwrapperfile
        data_repository['wt_data_type'] = j_data_type
        setup_tc_status, data_repository = testcase_driver.execute_testcase(
            testwrapperfile, data_repository, tc_context='POSITIVE',
            runtype=j_runtype, tc_parallel=None, queue=None,
            auto_defects=auto_defects, suite=None, jiraproj=None,
            tc_onError_action='ABORT_AS_ERROR', iter_ts_sys=None,
            steps_tag='Setup')
        print_info("*****************TESTWRAPPER SETUP EXECUTION END**********************")

    if setup_on_error_action == 'next' or \
       (setup_on_error_action == 'abort' and setup_tc_status == True):
        if execution_type.upper() == 'PARALLEL_TESTCASES':
            ts_junit_object.remove_html_obj()
            data_repository["war_parallel"] = True
            print_info("Executing testcases in parallel")
            test_suite_status = parallel_testcase_driver.main(
                testcase_list, suite_repository, data_repository, from_project,
                tc_parallel=True, auto_defects=auto_defects)

        elif execution_type.upper() == 'SEQUENTIAL_TESTCASES':
            if runmode is None:
                print_info("Executing testcases sequentially")
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list, suite_repository, data_repository,
                    from_project, auto_defects=auto_defects)
            elif runmode.upper() == "RUF":
                print_info("Execution type: {0}, Attempts: {1}".format(runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list, suite_repository, data_repository,
                        from_project, auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "FALSE" or\
                       str(test_suite_status).upper() == "ERROR":
                        break
            elif runmode.upper() == "RUP":
                print_info("Execution type: {0}, Attempts: {1}".format(runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list, suite_repository, data_repository,
                        from_project, auto_defects=auto_defects)
                    test_count = i * len(testcase_list)
                    testsuite_status_list.append(test_suite_status)
                    testsuite_utils.pSuite_update_suite_tests(str(test_count))
                    if str(test_suite_status).upper() == "TRUE":
                        break
            elif runmode.upper() == "RMT":
                print_info("Execution type: {0}, Attempts: {1}".format(runmode, value))
                i = 0
                while i < int(value):
                    i += 1
                    print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                    # We aren't actually summing each test result here...
                    test_suite_status = sequential_testcase_driver.main(
                        testcase_list, suite_repository, data_repository,
                        from_project, auto_defects=auto_defects)
                    testsuite_status_list.append(test_suite_status)

        # The below runmode part is not modified/removed to preserve backward compatibility
        elif execution_type.upper() == 'RUN_UNTIL_FAIL' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(execution_type,
                                                                   execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list, suite_repository, data_repository,
                    from_project, auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "FALSE" or\
                   str(test_suite_status).upper() == "ERROR":
                    break

        elif execution_type.upper() == 'RUN_UNTIL_PASS' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Max_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(execution_type,
                                                                   execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list, suite_repository, data_repository,
                    from_project, auto_defects=auto_defects)
                test_count = i * len(testcase_list)
                testsuite_utils.pSuite_update_suite_tests(str(test_count))
                if str(test_suite_status).upper() == "TRUE":
                    break

        elif execution_type.upper() == 'RUN_MULTIPLE' and runmode is None:
            execution_value = Utils.xml_Utils.getChildAttributebyParentTag(
                testsuite_filepath, 'Details', 'type', 'Number_Attempts')
            execution_value = 1 if execution_value == "" else execution_value
            print_info("Execution type: {0}, Attempts: {1}".format(execution_type,
                                                                   execution_value))
            i = 0
            while i < int(execution_value):
                i += 1
                print_debug("\n\n<======= ATTEMPT: {0} ======>".format(i))
                # We aren't actually summing each test result here...
                test_suite_status = sequential_testcase_driver.main(
                    testcase_list, suite_repository, data_repository,
                    from_project, auto_defects=auto_defects)

        elif execution_type.upper() == "ITERATIVE_SEQUENTIAL":
            # if execution type is iterative sequential call WarriorCore.Classes.iterative_testsuite
            # class and execute the testcases in iterative sequential fashion on the systems
            print_info("Iterative sequential suite")
            iter_seq_ts_obj = IterativeTestsuite(testcase_list, suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)
            test_suite_status = iter_seq_ts_obj.execute_iterative_sequential()

        elif execution_type.upper() == "ITERATIVE_PARALLEL":
            # if execution type is iterative parallel call WarriorCore.Classes.iterative_testsuite
            # class and execute the testcases in iterative parallel fashion on the systems
            ts_junit_object.remove_html_obj()
            print_info("Iterative parallel suite")
            data_repository["war_parallel"] = True
            iter_seq_ts_obj = IterativeTestsuite(testcase_list, suite_repository,
                                                 data_repository, from_project,
                                                 auto_defects)
            test_suite_status = iter_seq_ts_obj.execute_iterative_parallel()

        else:
            print_error("unexpected suite_type received...aborting execution")
            test_suite_status = False

        if runmode is not None:
            test_suite_status = common_execution_utils.compute_runmode_status(
                testsuite_status_list, runmode, suite_global_xml)
    else:
        print_error("Test cases in suite are not executed as setup failed to execute, "
                    "setup status : {0}".format(setup_tc_status))
        print_error("Steps in cleanup will be executed on best effort")
        test_suite_status = "ERROR"

    # execute cleanup steps defined in testwrapper file if testwrapperfile is present
    if testwrapperfile:
        print_info("*****************TESTWRAPPER CLEANUP EXECUTION START*********************")
        data_repository['wt_data_type'] = j_data_type
        cleanup_tc_status, data_repository = testcase_driver.execute_testcase(
            testwrapperfile, data_repository, tc_context='POSITIVE',
            runtype=j_runtype, tc_parallel=None, queue=None,
            auto_defects=auto_defects, suite=None, jiraproj=None,
            tc_onError_action=None, iter_ts_sys=None,
            steps_tag='Cleanup')
        print_info("*****************TESTWRAPPER CLEANUP EXECUTION END*********************")

    print_info("\n")
    suite_end_time = Utils.datetime_utils.get_current_timestamp()
    print_info("[{0}] Testsuite execution completed".format(suite_end_time))

    if test_suite_status == True and cleanup_tc_status == True:
        test_suite_status = True
    # set status to WARN if only cleanup fails
    elif test_suite_status == True and cleanup_tc_status != True:
        print_warning("setting test suite status to WARN as cleanup failed")
        test_suite_status = 'WARN'

    suite_duration = Utils.datetime_utils.get_time_delta(suite_start_time)
    hms = Utils.datetime_utils.get_hms_for_seconds(suite_duration)
    print_info("Testsuite duration= {0}".format(hms))
    testsuite_utils.update_suite_duration(str(suite_duration))

    if test_suite_status == False and ts_onError_action and\
       ts_onError_action.upper() == 'ABORT_AS_ERROR':
        print_info("Testsuite status will be marked as ERROR as onError action is set "
                   "to 'abort_as_error'")
        test_suite_status = "ERROR"
    testsuite_utils.report_testsuite_result(suite_repository, test_suite_status)

    ts_junit_object = data_repository['wt_junit_object']
    ts_junit_object.update_count(test_suite_status, "1", "pj")
    ts_junit_object.update_count("suites", "1", "pj", "not applicable")
    ts_junit_object.update_attr("status", str(test_suite_status), "ts", suite_timestamp)
    ts_junit_object.update_attr("time", str(suite_duration), "ts", suite_timestamp)

    if not from_project:
        ts_junit_object.update_attr("status", str(test_suite_status), "pj", "not applicable")
        ts_junit_object.update_attr("time", str(suite_duration), "pj", "not applicable")
        ts_junit_object.output_junit(data_repository['wt_results_execdir'])

        # Save JUnit/HTML results of the Suite in MongoDB server
        if data_repository.get("db_obj") is not False:
            ts_junit_xml = (data_repository['wt_results_execdir'] + os.sep +
                            ts_junit_object.filename + "_junit.xml")
            data_repository.get("db_obj").add_html_result_to_mongodb(ts_junit_xml)
    else:
        # Do not output JUnit result file for parallel suite execution
        if not ts_parallel and not data_repository['war_parallel']:
            # Create and replace existing Project junit file for each suite
            ts_junit_object.output_junit(data_repository['wp_results_execdir'],
                                         print_summary=False)

    if ts_parallel:
        ts_impact = data_repository['wt_ts_impact']
        if ts_impact.upper() == 'IMPACT':
            msg = "Status of the executed suite impacts project result"
        elif ts_impact.upper() == 'NOIMPACT':
            msg = "Status of the executed suite does not impact project result"
        print_debug(msg)
        # put result into multiprocessing queue and later retrieve in corresponding driver
        queue.put((test_suite_status, ts_impact, suite_timestamp, ts_junit_object))

    return test_suite_status, suite_repository
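# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the framework): how a project/parallel
# driver might invoke the extended signature above. The suite path, onError
# action and the prepared data_repository are hypothetical placeholders; in
# real runs the calling driver populates the repository (including
# 'war_file_type' and 'wt_ts_impact') before delegating here.
# ---------------------------------------------------------------------------
def _example_parallel_suite_invocation(prepared_data_repository):
    """Hypothetical helper: run one suite in parallel mode and fetch the
    (status, impact, timestamp, junit_object) tuple this driver puts on the
    multiprocessing queue."""
    import multiprocessing

    result_queue = multiprocessing.Queue()
    status, suite_repo = execute_testsuite(
        "/tmp/suites/demo_suite.xml",      # hypothetical suite xml path
        prepared_data_repository,          # built by the calling driver
        from_project=True,
        auto_defects=False,
        jiraproj=None,
        res_startdir=None,
        logs_startdir=None,
        ts_onError_action="NEXT",          # assumed default onError action
        queue=result_queue,
        ts_parallel=True)
    suite_status, ts_impact, ts_timestamp, junit_obj = result_queue.get()
    return status, suite_repo, (suite_status, ts_impact, ts_timestamp, junit_obj)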