def process_action(self, configuration):
    replay_id = configuration.replay_id[0]
    print "Using CTS in version %s to replay execution %s" \
        % (ColorPrinter.format_text(BuildInformation.get_version(), bold=True),
           replay_id)

    error, script_execution_id = split_replay_id(replay_id)
    if error:
        return

    # TODO: warn the user when replaying a recording made with a newer CTS
    script_execution = ScriptDAO.get_script_execution_details(script_execution_id)
    if script_execution is None:
        cts_error("Recording for script execution id={id:ignore} not found",
                  id=script_execution_id)
        return

    script_path = script_execution.script_path
    configuration = self._configuration_from_string(script_execution.configuration)
    test_plan = self._prepare_test_plan(script_path)

    environ[ReplayController.CTS_REPLAY_SCRIPT_EXECUTION_ID] = str(script_execution_id)

    self._execute(configuration, test_plan)
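# Usage sketch (hedged): this action is assumed to back a "replay" CLI
# command, along the lines of:
#
#   cts replay <replay_id>
#
# split_replay_id() is expected to return an (error, script_execution_id)
# pair and to report malformed ids itself, which is why the method can
# simply return when error is truthy.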
def process_action(self, configuration): print "Using CTS in version %s" % (ColorPrinter.format_text( BuildInformation.get_version(), bold=True)) print "\nComparing...\n\n" with ErrorMute(): report = self.report_name() with open(report, 'w') as sys.stdout: comparator = MetadataComparator(configuration.METADATA, configuration.qualifiers) result = comparator.run() if result is not None: if result.result == Comparator.EQUAL: msg = "\n\nMetadata sets {metadata} are equal\n\n".format( metadata=" and ".join(configuration.METADATA)) sys.stderr.write(msg) sys.stdout.write(msg) else: msg = "\n\nMetadata sets {metadata} differ\n\n".format( metadata=" and ".join(configuration.METADATA)) sys.stderr.write(msg) sys.stdout.write(msg) print result.get_side_by_side(*configuration.METADATA) sys.stdout = sys.__stdout__ print "Report has been written to: {name}".format(name=report)
def process_action(self, configuration):
    if configuration.update:
        self.update_process()
    elif configuration.generate:
        self.generate_config()
    else:
        print "CTS Version: %s" % BuildInformation.get_version()
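# Dispatch sketch (assumed flag names, inferred from the attributes above):
#
#   cts ... --update     ->  self.update_process()
#   cts ... --generate   ->  self.generate_config()
#   (no flag)            ->  print the installed CTS version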
def process_action(self, configuration): print "Using CTS in version %s" % (ColorPrinter.format_text(BuildInformation.get_version(), bold=True)) tests_manager = TestsManager() configuration_files = configuration.config_files test_scripts_configuration = get_configuration_from_files(configuration_files) if not configuration.use_case: print('\nMissing --use_case argument\n') JsonLoader().print_available_actions() sys.exit(64) # EX_USAGE = 64 command line usage error test_scripts_configuration['UseCase'] = configuration.use_case[0] if test_scripts_configuration is None: sys.exit(-1) if not certificates_files_are_valid(test_scripts_configuration): sys.exit(-1) test_plan = TestsPackagesContainer() test_scripts_found = tests_manager.get_packages() test_scripts_found.filter(scripts='validate_use_case', remove_empty=True) test_plan += test_scripts_found if not test_plan.packages: print('No script selected to execute') exit(64) # EX_USAGE = 64 command line usage error message = "Executing " print('Executing:') for package in test_plan.packages: for suite in package.suites: for script in suite.scripts: print "\t* %s from suite %s from package %s" % (script.name, suite.name, package.name) message += "%s from suite %s from package %s, " % (script.name, suite.name, package.name) self._logger.log_debug(message) if configuration.timeout is not None: message = "setting timeout execution to %s" % configuration.timeout[0] self._logger.log_debug(message) print('\n%s' % message) test_plan.set_scripts_timeout(configuration.timeout[0]) execution_feed = ExecutionFeed(test_plan, test_scripts_configuration) for case in configuration.use_case: if not self.check_use_case_exist(case): print('\nUse Case was not found: %s ' % case) exit(64) # EX_USAGE = 64 command line usage error
def save_script_results(self, script_execution_id, run_id):
    cases_execution_ids = CaseDAO.get_cases_execution_ids_for_script_execution(script_execution_id)
    status, passed, total = ScriptDAO.get_script_execution_statistics(script_execution_id)
    text_status = HtmlFormatter.PASSED if status else HtmlFormatter.FAILED

    test_script_result = ""
    for case_execution_id in cases_execution_ids:
        case_execution_details = CaseDAO.get_case_execution_details(case_execution_id)
        if case_execution_details.status == ResultStatus.PASSED:
            li_style = "success"
            glyphicon = "ok"
        elif case_execution_details.status in [ResultStatus.FAILED, ResultStatus.TIMEOUT]:
            li_style = "warning"
            glyphicon = "exclamation-sign"
        elif case_execution_details.status in [ResultStatus.BLOCKED, ResultStatus.UNKNOWN]:
            li_style = "danger"
            glyphicon = "remove"
        else:
            li_style = "neutral"
            glyphicon = "minus"

        test_script_result += "<li class='list-group-item list-item-%s'><a class='report' " \
                              "href='case_results/%s_case_results.html'><span class='report glyphicon " \
                              "glyphicon-%s'></span> %s</a></li>" % (li_style,
                                                                     case_execution_details.id,
                                                                     glyphicon,
                                                                     case_execution_details.name)
        self.save_case_result(case_execution_id, script_execution_id, run_id)

    result_report = HtmlFormatter.RESULT_SCRIPT_REPORT.format(status=text_status,
                                                              passed=passed,
                                                              total=total,
                                                              test_script_result=test_script_result)

    script_path = '/'.join((HtmlFormatter.REPORT_FOLDER,
                            "%s_run_report" % run_id,
                            "%s_script_report" % script_execution_id))
    script_html_path = '/'.join((script_path, "%s_script_report.html" % script_execution_id))
    self._check_and_make_path(script_path)

    parent_lvl = self._parent_level(2)
    with open(script_html_path, "w") as report:
        report.write("{header}{resultRunReport}{footer}".format(
            header=HtmlFormatter.HEADER.format(parent_level=parent_lvl),
            resultRunReport=result_report,
            footer=HtmlFormatter.FOOTER.format(version=BuildInformation.get_version(),
                                               parent_level=parent_lvl)))
def check_new_version_is_available(self):
    latest_path = "http://{host}/api/v1/latest".format(
        host=self.update_config['host'].replace("http://", ""))
    try:
        response = requests.get(
            latest_path,
            auth=(self.update_config['user'],
                  self.update_config['password'])).json()
    except Exception:
        print('Error while fetching information about new version.'
              '\nUpdate server is not responding or login details are incorrect.')
        exit(1)

    if BuildInformation.get_version() >= response['latest']:
        print('CTS Version: %s\nNewest version is installed'
              % BuildInformation.get_version())
        return None

    print('Installed version of CTS: %s\nA new version is available: %s'
          % (BuildInformation.get_version(), response['latest']))
    return response['latest_link']
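# Note: the comparison above is a plain string comparison, which orders
# versions lexicographically (e.g. "10.0.0" < "9.0.0"). A minimal sketch of
# a numeric-aware comparison, assuming dotted version strings (LooseVersion
# ships with the standard distutils.version module):
#
#   from distutils.version import LooseVersion
#   if LooseVersion(BuildInformation.get_version()) >= LooseVersion(response['latest']):
#       ...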
def process_action(self, configuration): """ The SOS process collects the information needed for easier debugging :return: """ print "Using SOS CTS in version %s" % (ColorPrinter.format_text( BuildInformation.get_version(), bold=True)) SOS_SCRIPT_PATH = "" SOS_SCRIPT_NAME = "cts-sos.sh" resource_package = __name__ resource_path = '/'.join((SOS_SCRIPT_PATH, SOS_SCRIPT_NAME)) script_path = pkg_resources.resource_filename(resource_package, resource_path) subprocess.call(['sh', script_path])
def process_action(self, configuration): print "Using CTS in version %s" % (ColorPrinter.format_text(BuildInformation.get_version(), bold=True)) print "\nComparing...\n\n" if not self.__create_dirs(): exit(1) # if you can not create a dirs dont create a report include_reports = set(configuration.include_reports) if include_reports: reports_details = self.get_information_about_test_case_from_db(include_reports) else: reports_details = {} with ErrorMute(): report = self.report_name(configuration.RackScaleMetadata, configuration.RedfishMetadata) comparator = MetadataComparator([configuration.RackScaleMetadata, configuration.RedfishMetadata]) result = comparator.run() if result is not None: if result.result == Comparator.EQUAL: print "\n\nMetadata sets {metadata} are equal\n\n".format(metadata=" and ".join( (configuration.RackScaleMetadata, configuration.RedfishMetadata))) else: print "\n\nMetadata sets {metadata} differ\n\n".format(metadata=" and ".join( (configuration.RackScaleMetadata, configuration.RedfishMetadata))) if configuration.preview: print result.get_side_by_side(*(configuration.RackScaleMetadata, configuration.RedfishMetadata)) left_text_container = result.left_ext_file.getvalue().split('\n') right_text_container = result.right_ext_file.getvalue().split('\n') missing_elements_on_left = self._generate_statistics_from_text_container(left_text_container, right_text_container) missing_elements_on_right = self._generate_statistics_from_text_container(right_text_container, left_text_container) preformat_left = self._update_view_for_more_human_readable(left_text_container) preformat_right = self._update_view_for_more_human_readable(right_text_container) self._build_html_view(configuration, {'left': preformat_left, 'right': preformat_right, 'missing_left': missing_elements_on_left, 'missing_right': missing_elements_on_right, 'reports_details': reports_details}) print "Report has been written to: {name}".format(name=report)
def save_results(self, run_id):
    status, passed, total = RunDAO.get_overall_run_result(run_id)
    text_status = HtmlFormatter.PASSED if status else HtmlFormatter.FAILED
    script_execution_ids = ScriptDAO.get_scripts_execution_ids_for_run(run_id)

    test_scripts_result = ""
    for script_execution_id in script_execution_ids:
        script_execution_details = ScriptDAO.get_script_execution_details(script_execution_id)
        self.save_script_results(script_execution_id, run_id)

        # use distinct names so the run-level passed/total above are not clobbered
        script_status, script_passed, script_total = \
            ScriptDAO.get_script_execution_statistics(script_execution_id)
        if script_passed == script_total:
            li_style = "success"
            glyphicon = "ok"
        elif script_status:
            li_style = "warning"
            glyphicon = "exclamation-sign"
        else:
            li_style = "danger"
            glyphicon = "remove"

        test_scripts_result += "<li class='list-group-item list-item-%s'><a class='report' " \
                               "href='%s_script_report/%s_script_report.html'><span class='report glyphicon " \
                               "glyphicon-%s'></span> %s</a></li>" % (li_style,
                                                                      script_execution_details.id,
                                                                      script_execution_details.id,
                                                                      glyphicon,
                                                                      script_execution_details.script_path)

    result_report = HtmlFormatter.RESULT_RUN_REPORT.format(status=text_status,
                                                           passed=passed,
                                                           total=total,
                                                           test_script_result=test_scripts_result)

    case_results_dir = '/'.join((HtmlFormatter.REPORT_FOLDER, "%s_run_report" % run_id))
    case_results_html = '/'.join((case_results_dir, "%s_run_report.html" % run_id))
    self._check_and_make_path(case_results_dir)

    parent_lvl = self._parent_level(1)
    with open(case_results_html, "w") as report:
        report.write("{header}{resultRunReport}{footer}".format(
            header=HtmlFormatter.HEADER.format(parent_level=parent_lvl),
            resultRunReport=result_report,
            footer=HtmlFormatter.FOOTER.format(version=BuildInformation.get_version(),
                                               parent_level=parent_lvl)))
def print_results(self, run_id): run_details = RunDAO.get_run_details(run_id) results_header = "Test run ID{horizontal_separator} #{run_id}" "".format( horizontal_separator=HORIZONTAL_SEPARATOR, run_id=run_details.id) print "CTS version\t%s\n%s%s" % (BuildInformation.get_version(), results_header, VERTICAL_SEPARATOR) script_executions_results = ( self.format_script_results(script_execution_id) for script_execution_id in ScriptDAO.get_scripts_execution_ids_for_run(run_id)) for script_results in script_executions_results: for line in script_results: # replace comma to dot to avoid conflict in opening CSV format and drop empty line if line is not VERTICAL_SEPARATOR: sys.stdout.write(line.replace(',', '.')) print VERTICAL_SEPARATOR
def print_results(self, run_id): run_details = RunDAO.get_run_details(run_id) results_header = "Test run ID{horizontal_separator} #{run_id}""".format( horizontal_separator=HORIZONTAL_SEPARATOR, run_id=run_details.id) print "CTS version\t%s\n%s%s" % (BuildInformation.get_version(), results_header, VERTICAL_SEPARATOR) script_executions_results = (self.format_script_results(script_execution_id) for script_execution_id in ScriptDAO.get_scripts_execution_ids_for_run(run_id)) lines = [] for script_results in script_executions_results: for line in script_results: sys.stdout.write(line) lines.append(line) print VERTICAL_SEPARATOR lines.append(VERTICAL_SEPARATOR) return lines
def process_action(self, configuration):
    package_name = configuration.package
    test_script_name = configuration.test_script

    tests_manager = TestsManager()
    packages = tests_manager.get_packages()
    packages.filter(packages=package_name, scripts=test_script_name,
                    remove_empty=True)

    try:
        test_script_info = packages.packages[0].suites[0].scripts[0]

        generated_configuration = [
            "# Configuration file generated by CTS in version %s"
            % BuildInformation.get_version(),
            "",
            "[%s]" % test_script_info.name,
        ]
        generated_configuration += ["# %s" % line for line
                                    in test_script_info.description.split("\n")
                                    if line.strip()]
        generated_configuration.append("")

        for test_script_parameter in test_script_info.parameters:
            generated_configuration.append(
                "# [%s] %s" % ("Required" if test_script_parameter.is_required
                               else "Optional",
                               test_script_parameter.description))
            generated_configuration.append(
                "%s%s=PUT_VALUE_HERE"
                % ("#" if not test_script_parameter.is_required else "",
                   test_script_parameter.name))
            generated_configuration.append("")

        self.save_configuration(generated_configuration,
                                configuration.output_file[0])
    except IndexError:
        print "Test script not found"
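# Shape of the generated file, derived from the list built above (the
# script name, descriptions and parameter names are illustrative):
#
#   # Configuration file generated by CTS in version 1.0.0
#
#   [example_test_script]
#   # First line of the script description
#
#   # [Required] Endpoint of the service under test
#   ApiEndpoint=PUT_VALUE_HERE
#
#   # [Optional] Request timeout in seconds
#   #Timeout=PUT_VALUE_HERE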
def process_action(self, configuration): print "Using CTS in version %s" % (ColorPrinter.format_text( BuildInformation.get_version(), bold=True)) running_list = ConfigParser.ConfigParser() running_list.optionxform = str running_list.read(configuration.test_run_list) test_run = TestRun() for running_list_section in running_list.sections(): self.process_running_list_section(running_list_section, running_list, test_run) test_run.register_all() execution_status = test_run.run_all() if not execution_status and configuration.return_tests_status_as_exit_code: print "Tests finished with status failed, exit code set to 4" exit(4)
def save_case_result(self, case_execution_id, script_execution_id, run_id):
    result = ""
    status_count = {'neutral': 0, 'debug': 0, 'warning': 0, 'error': 0}

    for message in MessageDAO.get_messages_for_case_execution(case_execution_id):
        if LoggingLevel.CONTROL == message.message_level:
            for (status_code, request, response) in \
                    self.request_from_control_channel(message.message_text):
                li_style = "color: grey"
                result += "<li class='list-group-item list-item-{li_style}'> <div><br>Request:<br>".format(
                    li_style=li_style)
                result += "<pre>{message}</pre>\n".format(
                    message=request.replace("\n", "<br>"))
                result += "</div>"
                result += "<div style='{li_style}'><br>Response:<br>".format(li_style=li_style)
                result += "<pre>{message}</pre>\n".format(
                    message=response.replace("\n", "<br>"))
                result += "</div></li>"
        else:
            if message.message_level == MessageLevel.ERROR:
                status_count['error'] += 1
                li_style = "danger"
                glyphicon = "remove"
            elif message.message_level == MessageLevel.WARNING:
                status_count['warning'] += 1
                li_style = "warning"
                glyphicon = "exclamation-sign"
            elif message.message_level == MessageLevel.DEBUG:
                status_count['debug'] += 1
                li_style = "neutral"
                glyphicon = "asterisk"
            else:
                status_count['neutral'] += 1
                li_style = "default"
                glyphicon = "unchecked"

            result += ("<li class='list-group-item list-item-{li_style}'>"
                       "<span class='report glyphicon glyphicon-{glyphicon}'></span>"
                       "{datetime} - {message}</li>\n").format(
                li_style=li_style,
                glyphicon=glyphicon,
                datetime=message.message_datetime,
                message=message.message_text.replace("\n", "<br>"))

    result_report = HtmlFormatter.RESULT_SAVE_CASE_RESULT.format(
        test_script_result=result,
        successBadge=status_count['neutral'],
        debugBadge=status_count['debug'],
        warningsBadge=status_count['warning'],
        errorsBadge=status_count['error'],
        successBadgeStatus=self._set_active_or_disabled_button(status_count['neutral']),
        debugBadgeStatus=self._set_active_or_disabled_button(status_count['debug']),
        warningsBadgeStatus=self._set_active_or_disabled_button(status_count['warning']),
        errorsBadgeStatus=self._set_active_or_disabled_button(status_count['error']))

    case_results_dir = '/'.join((HtmlFormatter.REPORT_FOLDER,
                                 "%s_run_report" % run_id,
                                 "%s_script_report" % script_execution_id,
                                 "case_results"))
    case_results_html = '/'.join((case_results_dir,
                                  "%s_case_results.html" % case_execution_id))
    self._check_and_make_path(case_results_dir)

    parent_lvl = self._parent_level(3)
    with open(case_results_html, "w") as report:
        report.write("{header}{resultRunReport}{footer}".format(
            header=HtmlFormatter.HEADER.format(parent_level=parent_lvl),
            resultRunReport=result_report,
            footer=HtmlFormatter.FOOTER.format(version=BuildInformation.get_version(),
                                               parent_level=parent_lvl)))
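# Report tree produced by save_results(), save_script_results() and
# save_case_result() together, derived from the path joins above:
#
#   <REPORT_FOLDER>/
#       <run_id>_run_report/
#           <run_id>_run_report.html
#           <script_execution_id>_script_report/
#               <script_execution_id>_script_report.html
#               case_results/
#                   <case_execution_id>_case_results.html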
def process_action(self, configuration): print "Using CTS in version %s\n" % (ColorPrinter.format_text( BuildInformation.get_version(), bold=True)) JsonLoader().print_available_actions()
def process_action(self, configuration): print "Using CTS in version %s" % (ColorPrinter.format_text( BuildInformation.get_version(), bold=True)) tests_manager = TestsManager() configuration_files = configuration.config_files test_scripts_configuration = get_configuration_from_files( configuration_files) if test_scripts_configuration is None: sys.exit(-1) if not certificates_files_are_valid(test_scripts_configuration): sys.exit(-1) test_plan = TestsPackagesContainer() if configuration.test_suites or configuration.test_scripts: if configuration.test_suites: test_suites_found = tests_manager.get_packages() test_suites_found.filter(packages=configuration.package, suites=configuration.test_suites, remove_empty=True) test_plan += test_suites_found if configuration.test_scripts: test_scripts_found = tests_manager.get_packages() test_scripts_found.filter(packages=configuration.package, scripts=configuration.test_scripts, remove_empty=True) test_plan += test_scripts_found else: test_plan += tests_manager.get_packages() test_plan.filter(packages=configuration.package) if not test_plan.packages: print "No script selected to execute" exit(64) # EX_USAGE = 64 command line usage error message = "Executing " print "Executing:" for package in test_plan.packages: for suite in package.suites: for script in suite.scripts: print "\t* %s from suite %s from package %s" % ( script.name, suite.name, package.name) message += "%s from suite %s from package %s, " % ( script.name, suite.name, package.name) self._logger.log_debug(message) if configuration.timeout is not None: message = "setting timeout execution to %s" % configuration.timeout[ 0] self._logger.log_debug(message) print "\n%s" % message test_plan.set_scripts_timeout(configuration.timeout[0]) execution_feed = ExecutionFeed(test_plan, test_scripts_configuration) ExecuteTestScriptsAction.execute_configuration_group( execution_feed, test_status_as_exit_code=configuration. return_tests_status_as_exit_code)