def run_pytest(known_args, extra_args):
    """
    Triggers test discovery and execution through pytest

    :param known_args: Arguments recognized by the parser and handled here
    :param extra_args: Additional arguments, passed directly to pytest
    :raises: CalledProcessError if pytest detects failures (returning a non-zero return code)
    :raises: SubprocessTimeoutException if pytest fails to return within timeout
    """
    lg.setup_logging(level=known_args.verbosity)
    timeout_sec = known_args.module_timeout
    xunit_flags = _get_xunit_flags(known_args.output_path)

    argument_call = [
        sys.executable, "-B", "-m", "pytest", "--cache-clear",
        "-c", "lmbr_test_pytest.ini"
    ]
    argument_call.extend(xunit_flags)
    argument_call.extend(extra_args)

    log.info("Invoking pytest with a timeout of {} seconds".format(timeout_sec))
    log.info(argument_call)
    try:
        return_code = subprocess_with_timeout(argument_call, timeout_sec)
    except SubprocessTimeoutException:
        log.error("Pytest execution timed out after {} seconds".format(timeout_sec))
        # re-raise so callers see the timeout, as documented in the docstring;
        # otherwise return_code would be unbound below
        raise

    if return_code != 0:
        log.error("Pytest tests failed with exit code: {}".format(return_code))
        # raise on failure
        raise subprocess.CalledProcessError(return_code, argument_call)
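
# `run_pytest` relies on a `subprocess_with_timeout` helper that is not shown in this
# excerpt. The sketch below is only an assumption about its behavior (launch the command,
# kill it and raise SubprocessTimeoutException once the timeout elapses, otherwise return
# the exit code); the actual helper's name, signature, and exception arguments may differ.
def _subprocess_with_timeout_sketch(argument_call, timeout_sec):
    """Hypothetical stand-in for subprocess_with_timeout; illustrative only."""
    proc = subprocess.Popen(argument_call)
    try:
        # Popen.wait(timeout=...) raises subprocess.TimeoutExpired when exceeded
        return proc.wait(timeout=timeout_sec)
    except subprocess.TimeoutExpired:
        proc.kill()
        proc.wait()
        # SubprocessTimeoutException is the exception type referenced above;
        # taking a message string here is an assumption
        raise SubprocessTimeoutException(
            "Command did not finish within {} seconds".format(timeout_sec))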
def execute(args=None):
    parser = get_parser()
    args, extra = parser.parse_known_args(args)

    # create output directory
    timestamp = datetime.now() if not args.no_timestamp else None
    if args.output_path:
        output_dir = get_output_dir(timestamp, os.path.abspath(args.output_path))
    else:
        output_dir = get_output_dir(timestamp)
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)

    # setup logging
    setup_logging(os.path.join(output_dir, "aztest.log"), lg.DEBUG)
    logger.info("AZ Test Scanner")

    # execute command
    return args.func(args, extra, output_dir)
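
# `execute` calls `get_output_dir` (and the `scan` variants below call
# `create_output_directory`), neither of which is shown here. A minimal sketch of what
# `get_output_dir` could look like, assuming it appends a timestamped folder name under a
# base path; the real signature, default base path, and folder-naming scheme are assumptions.
def _get_output_dir_sketch(timestamp, base_path="TestResults"):
    """Hypothetical helper: build an output directory path, optionally timestamped."""
    if timestamp is None:
        return base_path
    return os.path.join(base_path, timestamp.strftime("%Y-%m-%d_%H-%M-%S"))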
def scan(args, extra):
    scanner = Scanner()
    output_dir = create_output_directory(args.output_path, args.no_timestamp)

    # setup logging
    setup_logging(os.path.join(output_dir, "aztest.log"), args.verbosity)
    logger.info("AZ Test Scanner")

    if not args.runner_path:
        runner_path = os.path.abspath(os.path.join(args.dir, scanner.__runner_exe__))
    else:
        runner_path = os.path.abspath(args.runner_path)
    if not os.path.exists(runner_path):
        logger.error("Invalid test runner path: {}".format(runner_path))
        return

    bootstrap_config = None
    if args.bootstrap_config:
        with open(args.bootstrap_config) as json_file:
            bootstrap_config = BootstrapConfig(flatten=True)
            bootstrap_config.load(json.load(json_file))

    add_dirs_to_path(args.add_path)

    scan_results = []  # list of ScanResult()

    # Find default filter files if they exist and add to user-defined lists
    whitelist_files = (args.whitelist_files if args.whitelist_files else []) + [get_default_whitelist()]
    blacklist_files = (args.blacklist_files if args.blacklist_files else []) + [get_default_blacklist()]

    # Create a FileApprover to determine if scanned files can be tested
    file_approver = FileApprover(whitelist_files, blacklist_files)

    module_failures = 0

    # Dynamic Libraries / Modules
    if not __no_dll__:
        logger.info("Scanning for dynamic libraries")
        for file_name in scanner.enumerate_modules(args.dir):
            try:
                if args.limit and len(scan_results) >= args.limit:
                    continue  # reached scanning limit
                if args.only and not FileApprover.is_in_list(file_name, args.only.split(',')):
                    continue  # filename does not match any expected pattern
                if not file_approver.is_approved(file_name):
                    continue
                result = scan_one(args, extra, ModuleType.LIBRARY, scanner, runner_path,
                                  bootstrap_config, file_name, output_dir)
                if result:
                    scan_results += [result]
                    if result.return_code != RunnerReturnCodes.TESTS_SUCCEEDED:
                        module_failures += 1
                    if not os.path.exists(result.xml_path):
                        XMLGenerator.create_xml_output_file(result.xml_path, result.return_code,
                                                            result.error_msg)
            except KeyboardInterrupt:
                logger.exception("Process interrupted by user.")
                break
            except Exception:
                logger.exception("Module scan failed.")

    # Executables
    if args.exe:
        logger.info("Scanning for executables")
        for file_name in scanner.enumerate_executables(args.dir):
            if args.limit and len(scan_results) >= args.limit:
                continue  # reached scanning limit
            if args.only and not FileApprover.is_in_list(file_name, args.only.split(',')):
                continue  # filename does not match any expected pattern
            if not file_approver.is_approved(file_name):
                continue
            result = scan_one(args, extra, ModuleType.EXECUTABLE, scanner, runner_path,
                              bootstrap_config, file_name, output_dir)
            if result:
                scan_results += [result]
                if result.return_code != RunnerReturnCodes.TESTS_SUCCEEDED:
                    module_failures += 1
                if not os.path.exists(result.xml_path):
                    XMLGenerator.create_xml_output_file(result.xml_path, result.return_code,
                                                        result.error_msg)

    if args.html_report:
        # Convert the set of XML files into an HTML report
        HTMLReporter.create_html_report(scan_results, output_dir)
        HTMLReporter.create_html_failure_report(scan_results, output_dir)

    return 1 if module_failures > 0 else 0
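
# Both scan() variants filter candidate files with FileApprover.is_in_list and
# file_approver.is_approved, which are defined elsewhere. A minimal sketch of the kind of
# pattern matching is_in_list could perform; fnmatch-style glob matching against the base
# name is an assumption, not the confirmed implementation.
import fnmatch


def _is_in_list_sketch(file_name, patterns):
    """Hypothetical: True if file_name's base name matches any of the given glob patterns."""
    base_name = os.path.basename(file_name)
    return any(fnmatch.fnmatch(base_name, pattern) for pattern in patterns)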
def scan(args, extra):
    scanner = Scanner()
    output_dir = create_output_directory(args.output_path, args.no_timestamp)

    # setup logging
    log_path = os.path.join(output_dir, "aztest.log")
    setup_logging(log_path, args.verbosity)
    logger.info("AZ Test Scanner")

    if not args.runner_path:
        runner_path = os.path.abspath(os.path.join(args.dir, scanner.__runner_exe__))
    else:
        runner_path = os.path.abspath(args.runner_path)
    if not os.path.exists(runner_path):
        logger.error("Invalid test runner path: {}".format(runner_path))
        return

    bootstrap_config = None
    if args.bootstrap_config:
        with open(args.bootstrap_config) as json_file:
            bootstrap_config = BootstrapConfig(flatten=True)
            bootstrap_config.load(json.load(json_file))

    add_dirs_to_path(args.add_path)

    scan_results = []  # list of ScanResult()

    # Find default filter files if they exist and add to user-defined lists
    whitelist_files = (args.whitelist_files if args.whitelist_files else []) + [get_default_whitelist()]
    blacklist_files = (args.blacklist_files if args.blacklist_files else []) + [get_default_blacklist()]

    # Create a FileApprover to determine if scanned files can be tested
    file_approver = FileApprover(whitelist_files, blacklist_files)

    module_failures = 0
    library_module_name_list = []
    executable_name_list = []

    # Dynamic Libraries / Modules
    if not __no_dll__:
        logger.info("Scanning for dynamic libraries")
        library_module_name_list = list(scanner.enumerate_modules(args.dir))
        for file_name in library_module_name_list:
            try:
                if args.limit and len(scan_results) >= args.limit:
                    continue  # reached scanning limit
                if args.only and not FileApprover.is_in_list(file_name, args.only.split(',')):
                    continue  # filename does not match any expected pattern
                if not file_approver.is_approved(file_name):
                    continue
                result = scan_one(args, extra, ModuleType.LIBRARY, scanner, runner_path,
                                  bootstrap_config, file_name, output_dir)
                if result:
                    scan_results += [result]
                    if result.return_code != RunnerReturnCodes.TESTS_SUCCEEDED:
                        if result.return_code == RunnerReturnCodes.MODULE_SKIPPED:
                            logger.info("Module SKIPPED: {}".format(file_name))
                        else:
                            logger.error("Module FAILED: {}, with exit code: {} ({})".format(
                                file_name, result.return_code,
                                RunnerReturnCodes.to_string(result.return_code)))
                            module_failures += 1
                    if not os.path.exists(result.xml_path):
                        XMLGenerator.create_xml_output_file(result.xml_path, result.return_code,
                                                            result.error_msg)
            except KeyboardInterrupt:
                logger.exception("Process interrupted by user.")
                break
            except Exception:
                logger.exception("Module scan failed.")

    # Executables
    if args.exe:
        logger.info("Scanning for executables")
        executable_name_list = list(scanner.enumerate_executables(args.dir))
        for file_name in executable_name_list:
            if args.limit and len(scan_results) >= args.limit:
                continue  # reached scanning limit
            if args.only and not FileApprover.is_in_list(file_name, args.only.split(',')):
                continue  # filename does not match any expected pattern
            if not file_approver.is_approved(file_name):
                continue
            result = scan_one(args, extra, ModuleType.EXECUTABLE, scanner, runner_path,
                              bootstrap_config, file_name, output_dir)
            if result:
                scan_results += [result]
                if result.return_code != RunnerReturnCodes.TESTS_SUCCEEDED:
                    logger.error("Module FAILED: {}, with exit code: {} ({})".format(
                        file_name, result.return_code,
                        RunnerReturnCodes.to_string(result.return_code)))
                    module_failures += 1
                if not os.path.exists(result.xml_path):
                    XMLGenerator.create_xml_output_file(result.xml_path, result.return_code,
                                                        result.error_msg)

    # Always save ScanResult data in a JSON file so we have access to it later
    scan_results_json = {'scan_results': []}
    for scan_result in scan_results:
        scan_results_json['scan_results'].append(scan_result._asdict())

    json_path = os.path.join(output_dir, 'scan_results.json')
    with open(json_path, 'w') as f:
        json.dump(scan_results_json, f)

    print("---------------- AUTOTEST SUMMARY -----------------")
    print("Log: {}".format(log_path))
    print("JSON results: {}".format(json_path))

    if not args.no_html_report:
        # Convert the set of XML files into an HTML report
        html_report = HTMLReporter.create_html_report(scan_results, output_dir)
        html_failure_report = HTMLReporter.create_html_failure_report(scan_results, output_dir)
        print("HTML report: {}".format(html_report))
        print("HTML failure-only report: {}".format(html_failure_report))

    print("Total modules found: {}".format(len(library_module_name_list)))
    if module_failures:
        colorama.init()
        print(Fore.RED + "**** {} modules had failures or errors ****".format(module_failures))
        print(Style.RESET_ALL)
    else:
        print("No modules had failures or errors.")
    print("If a module has 1/0 tests failed or errored, this means the library failed to load.")

    test_summary_results = {
        TESTS_RUN_KEY: 0,
        TESTS_PASSED_KEY: 0,
        TESTS_FAILED_KEY: 0,
        TESTS_ERRORED_KEY: 0,
        TESTS_SKIPPED_KEY: 0,
        TOTAL_TIME_TAKEN_KEY: 0,
    }
    for file_name in library_module_name_list:
        _print_summary_for_file_name(test_summary_results, file_name, output_dir)
    for file_name in executable_name_list:
        _print_summary_for_file_name(test_summary_results, file_name, output_dir)

    print("\nTotal tests run: {0}".format(test_summary_results[TESTS_RUN_KEY]))
    print("Total tests passed: {0}".format(test_summary_results[TESTS_PASSED_KEY]))
    print("Total tests failed: {0}".format(test_summary_results[TESTS_FAILED_KEY]))
    print("Total tests errored: {0}".format(test_summary_results[TESTS_ERRORED_KEY]))
    print("Total tests skipped: {0}".format(test_summary_results[TESTS_SKIPPED_KEY]))
    print("Total test time taken: {0}\n".format(test_summary_results[TOTAL_TIME_TAKEN_KEY]))
    print("---------------- AUTOTEST SUMMARY -----------------")

    return 1 if module_failures > 0 else 0
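
# `_print_summary_for_file_name` is referenced above but not shown. A minimal sketch,
# assuming each module's results land in a JUnit-style XML file whose path can be derived
# from the module name; the file-naming convention, root element, and attribute names below
# are assumptions based on common xUnit output, not the confirmed implementation.
import xml.etree.ElementTree as ET


def _print_summary_for_file_name_sketch(test_summary_results, file_name, output_dir):
    """Hypothetical helper: print a per-module line and accumulate xUnit counters."""
    xml_path = os.path.join(output_dir, os.path.basename(file_name) + ".xml")
    if not os.path.exists(xml_path):
        return
    suite = ET.parse(xml_path).getroot()
    print("{}: {} run, {} failed".format(os.path.basename(file_name),
                                         suite.get("tests", 0), suite.get("failures", 0)))
    test_summary_results[TESTS_RUN_KEY] += int(suite.get("tests", 0))
    test_summary_results[TESTS_FAILED_KEY] += int(suite.get("failures", 0))
    test_summary_results[TESTS_ERRORED_KEY] += int(suite.get("errors", 0))
    test_summary_results[TESTS_SKIPPED_KEY] += int(suite.get("skipped", 0))
    test_summary_results[TOTAL_TIME_TAKEN_KEY] += float(suite.get("time", 0))
    test_summary_results[TESTS_PASSED_KEY] = (
        test_summary_results[TESTS_RUN_KEY]
        - test_summary_results[TESTS_FAILED_KEY]
        - test_summary_results[TESTS_ERRORED_KEY]
        - test_summary_results[TESTS_SKIPPED_KEY])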