def get_scan_result_from_module(xml_path):
    """ Gathers ScanResult information from an already existing XML file.

    It does two checks:
    1. Check the testsuites element for an ErrorCode property; if present,
       set the ScanResult to that error code.
    2. If there is no ErrorCode, determine whether tests passed or failed
       based on attributes in the testsuites element.

    :param xml_path: path to the XML file to read
    :return: ScanResult with the appropriate data, or None if the file cannot
        be read
    """
    try:
        with open(xml_path, 'r') as fx:
            tree = ElementTree.parse(fx)
    except IOError:
        return None

    testsuites = tree.getroot()
    if testsuites is None:
        return None

    # Default to success code
    return_code = 0

    # Search for the ErrorCode property
    properties = testsuites.find("properties")
    if properties is not None:
        for prop in properties.findall("property"):
            if prop.get('name') == "ErrorCode":
                return_code = int(prop.get('value'))

    # If no error code was found, fall back to the testsuites failure count
    if not return_code:
        failures = int(testsuites.get('failures') or 0)
        if failures:
            return_code = 1

    path = get_module_name_from_xml_filename(xml_path)
    error_msg = RunnerReturnCodes.to_string(return_code)
    return ScanResult(path=path, xml_path=xml_path, return_code=return_code,
                      error_msg=error_msg)
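
# For reference, a minimal illustrative shape of the gtest-style XML that
# get_scan_result_from_module() consumes (the element layout and attribute
# values below are hypothetical, not taken from a real run):
#
#   <testsuites tests="10" failures="2" errors="0" time="1.25">
#       <properties>
#           <property name="ErrorCode" value="0"/>
#       </properties>
#       <testsuite name="ExampleSuite" tests="10" failures="2">
#           ...
#       </testsuite>
#   </testsuites>
#
# When no ErrorCode property is present, the failures attribute alone decides
# the result.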

def scan_one(args, extra, type_, scanner, runner_path, bootstrap_config,
             file_name, output_dir):
    """ Scans one module or executable for tests and runs them.

    :param args: command line arguments
    :param extra: extra parameters
    :param int type_: module or executable
    :param scanner: platform-specific scanner instance
    :param runner_path: path to the test runner executable
    :param BootstrapConfig bootstrap_config: configuration object for
        bootstrapping modules
    :param file_name: filename of the module to scan
    :param output_dir: directory for output
    :return: ScanResult, or None if the module has already been tested
    """
    logger.info("{}: {}".format(type_, file_name))

    xml_out = create_xml_output_filename(file_name, output_dir)
    if os.path.exists(xml_out):
        return None  # module has already been tested

    # for a more exhaustive list of options, see:
    # https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md#running-a-subset-of-the-tests
    #   --help               lists command options
    #   --gtest_list_tests   just list what tests are in the module
    #   --gtest_shuffle      shuffle test ordering
    cmd_args = ["--gtest_output=xml:" + xml_out, "--gtest_color=yes"]
    cmd_args += extra

    if args.wait_for_debugger:
        # user has requested to attach a debugger when running
        cmd_args += ["--wait-for-debugger"]

    ret = 0
    if type_ == ModuleType.LIBRARY:
        if args.integ:
            # user wants to run integration tests
            export_symbol = __module_integ_export_symbol__
            cmd_args += ["--integ"]
        else:
            # just run unit tests
            export_symbol = __module_unit_export_symbol__

        # run with a bootstrapper from the config, if one is defined for
        # this module
        ran_with_bootstrapper = False
        if bootstrap_config:
            module_name = os.path.split(file_name)[1]
            bootstrapper = bootstrap_config.get_bootstrapper(module_name)
            if bootstrapper:
                ran_with_bootstrapper = True
                try:
                    working_dir = args.dir
                    app = os.path.join(args.dir, bootstrapper.command_line[0])
                    if not os.path.isfile(app):
                        logger.error(
                            "bootstrap executable not found {}".format(app))
                    full_command_line = bootstrapper.command_line + tuple(
                        cmd_args)
                    ret = scanner.bootstrap(working_dir, full_command_line)
                except KeyboardInterrupt:
                    raise
                except Exception:
                    ret = RunnerReturnCodes.UNEXPECTED_EXCEPTION
                    logger.exception("bootstrap failed")

        # otherwise run with "runner_<platform>" as the implicit bootstrapper
        # (no need to specify this in the bootstrapper config file)
        if not ran_with_bootstrapper:
            if scanner.exports_symbol(file_name, export_symbol):
                try:
                    ret = scanner.call(file_name, export_symbol, runner_path,
                                       args=cmd_args)
                except KeyboardInterrupt:
                    raise
                except Exception:
                    ret = RunnerReturnCodes.UNEXPECTED_EXCEPTION
                    logger.exception("module call failed")
            else:
                ret = RunnerReturnCodes.SYMBOL_NOT_FOUND

    elif type_ == ModuleType.EXECUTABLE:
        if scanner.exports_symbol(file_name, __executable_export_symbol__):
            try:
                cmd_args = ["--unittest"] + cmd_args
                ret = scanner.run(file_name, args=cmd_args)
            except KeyboardInterrupt:
                raise
            except Exception:
                ret = RunnerReturnCodes.UNEXPECTED_EXCEPTION
                logger.exception("executable run failed")
        else:
            logger.error("Executable does not export the expected symbol.")
            ret = RunnerReturnCodes.SYMBOL_NOT_FOUND

    else:
        raise NotImplementedError("module type not supported: " + str(type_))

    err = RunnerReturnCodes.to_string(ret)
    return ScanResult(path=file_name, return_code=ret, xml_path=xml_out,
                      error_msg=err)
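
# Illustrative sketch of the argument lists scan_one() builds (the module and
# output paths here are hypothetical): a library scanned with --integ and
# --wait-for-debugger set is ultimately invoked with
#
#   --gtest_output=xml:<output_dir>/MyModule.xml --gtest_color=yes \
#       <extra...> --wait-for-debugger --integ
#
# while an executable instead receives --unittest prepended to the same gtest
# arguments.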

def scan(args, extra):
    scanner = Scanner()
    output_dir = create_output_directory(args.output_path, args.no_timestamp)

    # setup logging
    log_path = os.path.join(output_dir, "aztest.log")
    setup_logging(log_path, args.verbosity)
    logger.info("AZ Test Scanner")

    if not args.runner_path:
        runner_path = os.path.abspath(os.path.join(args.dir,
                                                   scanner.__runner_exe__))
    else:
        runner_path = os.path.abspath(args.runner_path)
    if not os.path.exists(runner_path):
        logger.error("Invalid test runner path: {}".format(runner_path))
        return

    bootstrap_config = None
    if args.bootstrap_config:
        with open(args.bootstrap_config) as json_file:
            bootstrap_config = BootstrapConfig(flatten=True)
            bootstrap_config.load(json.load(json_file))

    add_dirs_to_path(args.add_path)

    scan_results = []  # list of ScanResult()

    # Find default filter files if they exist and add them to the
    # user-defined lists
    whitelist_files = (args.whitelist_files if args.whitelist_files
                       else []) + [get_default_whitelist()]
    blacklist_files = (args.blacklist_files if args.blacklist_files
                       else []) + [get_default_blacklist()]

    # Create a FileApprover to determine if scanned files can be tested
    file_approver = FileApprover(whitelist_files, blacklist_files)

    module_failures = 0
    library_module_name_list = []
    executable_name_list = []

    # Dynamic Libraries / Modules
    if not __no_dll__:
        logger.info("Scanning for dynamic libraries")
        library_module_name_list = list(scanner.enumerate_modules(args.dir))
        for file_name in library_module_name_list:
            try:
                if args.limit and len(scan_results) >= args.limit:
                    continue  # reached scanning limit
                if args.only and not FileApprover.is_in_list(
                        file_name, args.only.split(',')):
                    continue  # filename does not match any expected pattern
                if not file_approver.is_approved(file_name):
                    continue

                result = scan_one(args, extra, ModuleType.LIBRARY, scanner,
                                  runner_path, bootstrap_config, file_name,
                                  output_dir)
                if result:
                    scan_results += [result]
                    if result.return_code != RunnerReturnCodes.TESTS_SUCCEEDED:
                        if result.return_code == RunnerReturnCodes.MODULE_SKIPPED:
                            logger.info("Module SKIPPED: {}".format(file_name))
                        else:
                            logger.error(
                                "Module FAILED: {}, with exit code: {} ({})".format(
                                    file_name, result.return_code,
                                    RunnerReturnCodes.to_string(
                                        result.return_code)))
                            module_failures += 1
                    if not os.path.exists(result.xml_path):
                        XMLGenerator.create_xml_output_file(
                            result.xml_path, result.return_code,
                            result.error_msg)
            except KeyboardInterrupt:
                logger.exception("Process interrupted by user.")
                break
            except Exception:
                logger.exception("Module scan failed.")

    # Executables
    if args.exe:
        logger.info("Scanning for executables")
        executable_name_list = list(scanner.enumerate_executables(args.dir))
        for file_name in executable_name_list:
            if args.limit and len(scan_results) >= args.limit:
                continue  # reached scanning limit
            if args.only and not FileApprover.is_in_list(
                    file_name, args.only.split(',')):
                continue  # filename does not match any expected pattern
            if not file_approver.is_approved(file_name):
                continue

            result = scan_one(args, extra, ModuleType.EXECUTABLE, scanner,
                              runner_path, bootstrap_config, file_name,
                              output_dir)
            if result:
                scan_results += [result]
                if result.return_code != RunnerReturnCodes.TESTS_SUCCEEDED:
                    logger.error(
                        "Module FAILED: {}, with exit code: {} ({})".format(
                            file_name, result.return_code,
                            RunnerReturnCodes.to_string(result.return_code)))
                    module_failures += 1
                if not os.path.exists(result.xml_path):
                    XMLGenerator.create_xml_output_file(
                        result.xml_path, result.return_code, result.error_msg)

    # Always save ScanResult data in a JSON file so we have access to it later
    scan_results_json = {'scan_results': []}
    for scan_result in scan_results:
        scan_results_json['scan_results'].append(scan_result._asdict())
    json_path = os.path.join(output_dir, 'scan_results.json')
    with open(json_path, 'w') as f:
        json.dump(scan_results_json, f)

    print("---------------- AUTOTEST SUMMARY ----------------")
    print("Log: {}".format(log_path))
    print("JSON results: {}".format(json_path))

    if not args.no_html_report:
        # Convert the set of XML files into an HTML report
        html_report = HTMLReporter.create_html_report(scan_results, output_dir)
        html_failure_report = HTMLReporter.create_html_failure_report(
            scan_results, output_dir)
        print("HTML report: {}".format(html_report))
        print("HTML failure-only report: {}".format(html_failure_report))

    print("Total modules found: {}".format(len(library_module_name_list)))
    if module_failures:
        colorama.init()
        print(Fore.RED + "**** {} modules had failures or errors ****".format(
            module_failures))
        print(Style.RESET_ALL)
    else:
        print("No modules had failures or errors.")
    print("If a module has 1/0 tests failed or errored, this means the "
          "library failed to load.")

    test_summary_results = {
        TESTS_RUN_KEY: 0,
        TESTS_PASSED_KEY: 0,
        TESTS_FAILED_KEY: 0,
        TESTS_ERRORED_KEY: 0,
        TESTS_SKIPPED_KEY: 0,
        TOTAL_TIME_TAKEN_KEY: 0,
    }
    for file_name in library_module_name_list:
        _print_summary_for_file_name(test_summary_results, file_name,
                                     output_dir)
    for file_name in executable_name_list:
        _print_summary_for_file_name(test_summary_results, file_name,
                                     output_dir)

    print("\nTotal tests run: {}".format(test_summary_results[TESTS_RUN_KEY]))
    print("Total tests passed: {}".format(
        test_summary_results[TESTS_PASSED_KEY]))
    print("Total tests failed: {}".format(
        test_summary_results[TESTS_FAILED_KEY]))
    print("Total tests errored: {}".format(
        test_summary_results[TESTS_ERRORED_KEY]))
    print("Total tests skipped: {}".format(
        test_summary_results[TESTS_SKIPPED_KEY]))
    print("Total test time taken: {}\n".format(
        test_summary_results[TOTAL_TIME_TAKEN_KEY]))
    print("---------------- AUTOTEST SUMMARY ----------------")

    return 1 if module_failures > 0 else 0
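
# scan() serializes each ScanResult namedtuple into scan_results.json; a
# sketch of the resulting shape (field values here are hypothetical):
#
#   {"scan_results": [
#       {"path": "Bin64/MyModule.dll",
#        "xml_path": "<output_dir>/MyModule.xml",
#        "return_code": 0,
#        "error_msg": null}
#   ]}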

def test_ToString_UnknownCode_ReturnsDefaultMessage(self):
    return_code = -1  # Unknown error code
    error_string = RunnerReturnCodes.to_string(return_code)
    self.assertEqual(error_string, "Unknown return code")

def test_ToString_FailedCode_ReturnsMessage(self):
    return_code = 1  # TESTS_FAILED
    error_string = RunnerReturnCodes.to_string(return_code)
    self.assertEqual(error_string, "Tests Failed")

def test_ToString_PassCode_ReturnsNone(self):
    return_code = 0  # TESTS_SUCCEEDED
    error_string = RunnerReturnCodes.to_string(return_code)
    self.assertIsNone(error_string)

def test_Constructor_NoParams_ThrowsException(self):
    with self.assertRaises(InvalidUseError):
        RunnerReturnCodes()