def __init__(self, user, pswd, enable_pswd):
    """Set up the interactive shell: DB access, a test runner and credentials.

    When no enable password is supplied, the login password is reused for it.
    """
    Cmd.__init__(self)
    # Credentials used by the tests run from this shell.
    self.user = user
    self.pswd = pswd
    self.enable_pswd = enable_pswd or pswd
    # Database controller and the runner that executes tests against it.
    self.controller = DBController()
    self.runner = TestRunner(self.controller)
    # Results of the most recent run; cleared after saving.
    self.current_results = {}
def main():
    """Entry point for the CMS stress test.

    Submits ``--submissions`` identical C batch submissions through a
    TestRunner with ``--workers`` workers, then waits for evaluation.

    Returns:
        0 if every submission evaluated successfully, 1 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Runs the CMS functional test suite.")
    parser.add_argument(
        "-s", "--submissions", action="store", type=int, default=50,
        help="set the number of submissions to submit (default 50)")
    parser.add_argument(
        "-w", "--workers", action="store", type=int, default=4,
        help="set the number of workers to use (default 4)")
    parser.add_argument(
        "-v", "--verbose", action="count", default=0,
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = False

    # Every submission is the same C batch task; only the count varies.
    test_list = [Test('batch', task=batch_50,
                      filenames=['correct-stdio.%l'],
                      languages=(LANG_C,), checks=[])
                 for _ in range(args.submissions)]

    runner = TestRunner(test_list, workers=args.workers)
    # Submit everything first, then evaluate, so the two phases can be
    # timed separately by the log_elapsed_time() calls.
    runner.submit_tests(concurrent_submit_and_eval=False)
    runner.log_elapsed_time()

    failures = runner.wait_for_evaluation()
    runner.log_elapsed_time()

    # FIX: test emptiness idiomatically instead of comparing to a literal
    # list (`failures == []`), which also breaks if a non-list is returned.
    if not failures:
        logger.info("All tests passed!")
        return 0
    else:
        logger.error("Some test failed!")
        return 1
def main():
    """Entry point for the CMS stress test.

    Submits ``--submissions`` identical C batch submissions through a
    TestRunner with ``--workers`` workers, then waits for evaluation.

    Returns:
        0 if every submission evaluated successfully, 1 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Runs the CMS functional test suite.")
    parser.add_argument(
        "-s", "--submissions", action="store", type=int, default=50,
        help="set the number of submissions to submit (default 50)")
    parser.add_argument(
        "-w", "--workers", action="store", type=int, default=4,
        help="set the number of workers to use (default 4)")
    parser.add_argument(
        "-v", "--verbose", action="count", default=0,
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = False

    # Every submission is the same C batch task; only the count varies.
    test_list = [
        Test('batch', task=batch_50, filenames=['correct-stdio.%l'],
             languages=(LANG_C,), checks=[])
        for _ in range(args.submissions)
    ]

    runner = TestRunner(test_list, workers=args.workers)
    # Submit everything first, then evaluate, so the two phases can be
    # timed separately by the log_elapsed_time() calls.
    runner.submit_tests(concurrent_submit_and_eval=False)
    runner.log_elapsed_time()

    failures = runner.wait_for_evaluation()
    runner.log_elapsed_time()

    # FIX: test emptiness idiomatically instead of comparing to a literal
    # list (`failures == []`), which also breaks if a non-list is returned.
    if not failures:
        logger.info("All tests passed!")
        return 0
    else:
        logger.error("Some test failed!")
        return 1
def main():
    """Entry point for the CMS functional test suite.

    Parses the command line, selects which tests to run (optionally only the
    failures recorded by the previous run), runs them with coverage enabled
    and writes the failures back out so they can be retried with
    ``--retry-failed``.

    Returns:
        0 on success (or dry run / empty selection), 1 if any test failed.
    """
    parser = ArgumentParser(description="Runs the CMS functional test suite.")
    parser.add_argument(
        "regex", action="store", type=utf8_decoder, nargs='*',
        metavar="regex", help="a regex to match to run a subset of tests")
    parser.add_argument(
        "-l", "--languages", action="store", type=utf8_decoder, default="",
        help="a comma-separated list of languages to test")
    parser.add_argument(
        "-c", "--contest", action="store", type=utf8_decoder,
        help="use an existing contest (and the tasks in it)")
    parser.add_argument(
        "-r", "--retry-failed", action="store_true",
        help="only run failed tests from the previous run (stored in %s)"
        % FAILED_TEST_FILENAME)
    parser.add_argument(
        "-n", "--dry-run", action="store_true",
        help="show what tests would be run, but do not run them")
    # FIX: action="count" leaves the value as None when the flag is absent;
    # default=0 keeps CONFIG["VERBOSITY"] an int (matches the other mains).
    parser.add_argument(
        "-v", "--verbose", action="count", default=0,
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = True

    # Pre-process our command-line arguments to figure out which tests to run.
    regexes = [re.compile(s) for s in args.regex]
    if args.languages:
        languages = frozenset(args.languages.split(','))
    else:
        languages = frozenset()

    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = ALL_TESTS
    test_list = filter_tests(test_list, regexes, languages)

    if not test_list:
        logger.info(
            "There are no tests to run! (was your filter too restrictive?)")
        return 0

    # Count (and, in dry-run mode, report) every test/language combination,
    # including the extra usertest pass for tests that request it.
    tests = 0
    for test in test_list:
        for language in test.languages:
            if args.dry_run:
                logger.info("Test %s in %s.", test.name, language)
            tests += 1
        if test.user_tests:
            for language in test.languages:
                if args.dry_run:
                    logger.info("Test %s in %s (for usertest).",
                                test.name, language)
                tests += 1
    if args.dry_run:
        return 0

    if args.retry_failed:
        logger.info(
            "Re-running %s failed tests from last run.", len(test_list))

    # Clear out any old coverage data.
    # FIX: use the module logger rather than the root logger (logging.info)
    # for consistency with every other log call in this function.
    logger.info("Clearing old coverage data.")
    sh(sys.executable + " -m coverage erase")

    # Startup the test runner.
    runner = TestRunner(test_list, contest_id=args.contest, workers=4)

    # Submit and wait for all tests to complete.
    runner.submit_tests()
    failures = runner.wait_for_evaluation()
    write_test_case_list(
        [(test, lang) for test, lang, _ in failures],
        FAILED_TEST_FILENAME)

    # And good night!
    runner.shutdown()
    runner.log_elapsed_time()

    combine_coverage()

    logger.info("Executed: %s", tests)
    logger.info("Failed: %s", len(failures))
    if not failures:
        logger.info("All tests passed!")
        return 0
    else:
        logger.error("Some test failed!")
        logger.info("Run again with --retry-failed (or -r) to retry.")
        logger.info("Failed tests:")
        for test, lang, msg in failures:
            logger.info("%s (%s): %s\n", test.name, lang, msg)
        return 1
def __init__(self):
    """Initialize the base TestRunner with the Corstone700FVP target.

    NOTE(review): presumably Corstone700FVP identifies an Arm Corstone-700
    fixed virtual platform — confirm against the class definition.
    """
    TestRunner.__init__(self, Corstone700FVP)
def main():
    """Entry point for the CMS functional test suite.

    Parses the command line, selects which tests to run (optionally only the
    failures recorded by the previous run), runs them with coverage enabled
    and writes the failures back out so they can be retried with
    ``--retry-failed``.

    Returns:
        0 on success (or dry run / empty selection), 1 if any test failed.
    """
    parser = ArgumentParser(description="Runs the CMS functional test suite.")
    parser.add_argument(
        "regex", action="store", type=utf8_decoder, nargs='*',
        metavar="regex", help="a regex to match to run a subset of tests")
    parser.add_argument(
        "-l", "--languages", action="store", type=utf8_decoder, default="",
        help="a comma-separated list of languages to test")
    parser.add_argument(
        "-c", "--contest", action="store", type=utf8_decoder,
        help="use an existing contest (and the tasks in it)")
    parser.add_argument(
        "-r", "--retry-failed", action="store_true",
        help="only run failed tests from the previous run (stored in %s)"
        % FAILED_TEST_FILENAME)
    parser.add_argument(
        "-n", "--dry-run", action="store_true",
        help="show what tests would be run, but do not run them")
    # FIX: action="count" leaves the value as None when the flag is absent;
    # default=0 keeps CONFIG["VERBOSITY"] an int.
    parser.add_argument(
        "-v", "--verbose", action="count", default=0,
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = True

    # Pre-process our command-line arguments to figure out which tests to run.
    regexes = [re.compile(s) for s in args.regex]
    if args.languages:
        languages = frozenset(args.languages.split(','))
    else:
        languages = frozenset()

    if args.retry_failed:
        test_list = load_failed_tests()
    else:
        test_list = ALL_TESTS
    test_list = filter_tests(test_list, regexes, languages)

    if not test_list:
        logger.info(
            "There are no tests to run! (was your filter too restrictive?)")
        return 0

    # Count (and, in dry-run mode, report) every test/language combination,
    # including the extra usertest pass for tests that request it.
    tests = 0
    for test in test_list:
        for language in test.languages:
            if args.dry_run:
                logger.info("Test %s in %s.", test.name, language)
            tests += 1
        if test.user_tests:
            for language in test.languages:
                if args.dry_run:
                    logger.info("Test %s in %s (for usertest).",
                                test.name, language)
                tests += 1
    if args.dry_run:
        return 0

    if args.retry_failed:
        logger.info("Re-running %s failed tests from last run.",
                    len(test_list))

    # Clear out any old coverage data.
    # FIX: use the module logger rather than the root logger (logging.info)
    # for consistency with every other log call in this function.
    logger.info("Clearing old coverage data.")
    sh(sys.executable + " -m coverage erase")

    # Startup the test runner.
    runner = TestRunner(test_list, contest_id=args.contest, workers=4)

    # Submit and wait for all tests to complete.
    runner.submit_tests()
    failures = runner.wait_for_evaluation()
    write_test_case_list([(test, lang) for test, lang, _ in failures],
                         FAILED_TEST_FILENAME)

    # And good night!
    runner.shutdown()
    runner.log_elapsed_time()

    combine_coverage()

    logger.info("Executed: %s", tests)
    logger.info("Failed: %s", len(failures))
    if not failures:
        logger.info("All tests passed!")
        return 0
    else:
        logger.error("Some test failed!")
        logger.info("Run again with --retry-failed (or -r) to retry.")
        logger.info("Failed tests:")
        for test, lang, msg in failures:
            logger.info("%s (%s): %s\n", test.name, lang, msg)
        return 1
def __init__(self):
    """Initialize the base TestRunner with the A5dsFVP target.

    NOTE(review): presumably A5dsFVP identifies a fixed virtual platform
    model — confirm against the class definition.
    """
    TestRunner.__init__(self, A5dsFVP)
class TestsCmd(Cmd):
    """Interactive shell for selecting network targets and running tests.

    Commands (Spanish names) list campuses/faculties/minor organizations
    (OOMM)/datacenter devices from the DB, build a selection on the runner,
    execute the configured tests and save or display the results.
    """

    prompt = 'tests>'

    def __init__(self, user, pswd, enable_pswd):
        """Store credentials and set up DB access plus the test runner.

        Falls back to the login password when no enable password is given.
        """
        Cmd.__init__(self)
        self.controller = DBController()
        self.runner = TestRunner(self.controller)
        self.user = user
        self.pswd = pswd
        self.enable_pswd = enable_pswd if enable_pswd else pswd
        # Results of the most recent run; cleared after saving.
        self.current_results = {}

    def do_listar_campus(self, line):
        "List of available campuses"
        for f in self.controller.get_campus():
            print(f)

    def do_listar_facultades(self, line):
        "List of available faculties"
        for f in self.controller.get_facultades():
            print(f)

    def do_listar_oomm(self, lines):
        "List of minor organizations (OOMM)"
        for f in self.controller.get_oomms():
            print(f)

    def do_listar_equipos_datacenter(self, lines):
        "List of datacenter devices"
        for f in self.controller.get_datacenter():
            print(f)

    def complete_seleccionar_campus(self, text, line, begidx, endidx):
        # Tab-completion: all campuses when nothing typed, the '-todos'
        # flag for a leading dash, otherwise prefix-matched campus names.
        if not text:
            options = self.controller.get_campus()
        else:
            if text.startswith('-'):
                options = ['-todos']
            else:
                options = [f for f in self.controller.get_campus()
                           if f.lower().startswith(text.lower())]
        return options

    def do_seleccionar_campus(self, line):
        # '-todos' selects every faculty; a campus name selects that
        # campus's faculties; no argument prompts interactively.
        if line:
            if line.startswith('-todos'):
                self.runner.add_facultades(self.controller.get_facultades())
            else:
                self.runner.add_facultades(self.controller.parse_campus(line))
        else:
            campus = self.select(self.controller.get_campus(), 'Campus? ')
            self.runner.add_facultades(
                self.controller.get_facultades_campus(campus))

    def do_seleccionar_facultad(self, line):
        # Interactive two-step selection: campus first, then faculty.
        campus = self.select(self.controller.get_campus(), 'Campus? ')
        facultad = self.select(
            self.controller.get_facultades_campus(campus), 'Facultad? ')
        self.runner.add_facultad(facultad)

    def complete_seleccionar_oomm(self, text, line, begidx, endidx):
        # Same completion scheme as complete_seleccionar_campus, for OOMM.
        if not text:
            options = self.controller.get_oomms()
        else:
            if text.startswith('-'):
                options = ['-todos']
            else:
                options = [f for f in self.controller.get_oomms()
                           if f.lower().startswith(text.lower())]
        return options

    def do_seleccionar_oomm(self, line):
        # NOTE(review): unlike do_seleccionar_campus there is no interactive
        # fallback when no argument is given — the command silently does
        # nothing. Confirm whether that is intended.
        if line:
            if line.startswith('-todos'):
                self.runner.add_oomms(self.controller.get_oomms())
            else:
                self.runner.add_oomms(self.controller.parse_oomm(line))

    def do_seleccionar_todo(self, line):
        # Select everything: OOMM, faculties and datacenter devices.
        self.runner.add_oomms(self.controller.get_oomms())
        self.runner.add_facultades(self.controller.get_facultades())
        self.runner.add_datacenters(self.controller.get_datacenter())

    # Argument parser for do_correr_prueba (run the selected tests).
    run_parser = argparse.ArgumentParser()
    run_parser.add_argument('--packets', nargs='?',
                            default=default_values.number_pings)
    run_parser.add_argument('--threads', nargs='?',
                            default=default_values.default_threads)
    run_parser.add_argument('--destination', nargs='?',
                            default=default_values.default_destination)
    run_parser.add_argument('--time', action='store_true')
    run_parser.add_argument('--repeat_failures', nargs=2,
                            metavar=('NUMBER_REPETITIONS', 'WAIT_TIME'),
                            default=[default_values.default_repeats,
                                     default_values.default_wait_time])

    @with_argparser(run_parser)
    def do_correr_prueba(self, args):
        # Build the run configuration from stored credentials and the
        # parsed options, then run the tests over the current selection.
        config = {
            'user': self.user,
            'pswd': self.pswd,
            'enable_pswd': self.enable_pswd,
            'pings': args.packets,
            'threads': args.threads,
            'destination': args.destination,
            'measure_time': args.time,
            # NOTE(review): the default for --repeat_failures is a non-empty
            # list, so this is True even when the flag is not given — verify.
            'repeat_failures': True if args.repeat_failures else False,
            'repetitions_config': {
                'repetitions': int(args.repeat_failures[0]),
                'wait_time': int(args.repeat_failures[1])
            }
        }
        self.current_results = self.runner.run_selected_tests(config)

    # Argument parser for do_guardar_resultados (save results to disk).
    save_parser = argparse.ArgumentParser()
    save_parser.add_argument('--output', nargs='?',
                             default=default_values.default_output)

    @with_argparser(save_parser)
    def do_guardar_resultados(self, args):
        # Write the last run's results under current_path, then clear them.
        output = args.output
        output_path = current_path + '/' + output
        write_results(self.current_results, output_path)
        self.current_results = {}

    def do_ver_seleccionados(self, line):
        # Show the runner's current selection.
        print(self.runner.get_selected())

    def do_borrar(self, line):
        # Clear the runner's current selection.
        self.runner.delete_selected()

    # Argument parser for do_ver_resultados (display results).
    ver_parser = argparse.ArgumentParser()
    ver_parser.add_argument('--not_ok', action='store_true')

    @with_argparser(ver_parser)
    def do_ver_resultados(self, args):
        # Print one line per device with a non-empty status; --not_ok is
        # forwarded to the status formatter.
        for v in self.current_results.values():
            for device in v:
                string = device['status'].print_status(args.not_ok)
                if string:
                    print(device['machine']['name'] + ' ' + string)

    def complete_seleccionar_equipos_datacenter(self, text, line, begidx,
                                                endidx):
        # Same completion scheme as the other complete_* methods, for
        # datacenter devices.
        if not text:
            options = self.controller.get_datacenter()
        else:
            if text.startswith('-'):
                options = ['-todos']
            else:
                options = [f for f in self.controller.get_datacenter()
                           if f.lower().startswith(text.lower())]
        return options

    def do_seleccionar_equipos_datacenter(self, line):
        # NOTE(review): like do_seleccionar_oomm, no interactive fallback
        # when no argument is given.
        if line:
            if line.startswith('-todos'):
                self.runner.add_datacenters(self.controller.get_datacenter())
            else:
                self.runner.add_datacenters(
                    self.controller.parse_datacenter(line))
def main():
    """Entry point for the CMS stress test with optional per-service CPU
    limits.

    Submits ``--submissions`` identical C batch submissions; each ``-l``
    option of the form ``<regex>:<limit>`` caps the CPU usage of matching
    services (requires cputool).

    Returns:
        0 if every submission evaluated successfully, 1 otherwise.
    """
    parser = argparse.ArgumentParser(
        description="Runs the CMS functional test suite.")
    parser.add_argument(
        "-s", "--submissions", action="store", type=int, default=50,
        help="set the number of submissions to submit (default 50)")
    parser.add_argument(
        "-w", "--workers", action="store", type=int, default=4,
        help="set the number of workers to use (default 4)")
    parser.add_argument(
        "-l", "--cpu_limits", action="append", default=[],
        help="set maximum CPU percentage for a set of services, for example: "
        "'-l .*Server:40' limits servers to use 40%% of a CPU or less; "
        "can be specified multiple times (requires cputool)")
    parser.add_argument(
        "-v", "--verbose", action="count", default=0,
        help="print debug information (use multiple times for more)")
    args = parser.parse_args()

    CONFIG["VERBOSITY"] = args.verbose
    CONFIG["COVERAGE"] = False

    # Every submission is the same C batch task; only the count varies.
    test_list = [
        Test('batch', task=batch_50, filenames=['correct-stdio.%l'],
             languages=(LANG_C,), checks=[])
        for _ in range(args.submissions)
    ]

    # Each limit is "<regex>:<limit>"; rpartition splits on the LAST colon
    # so the regex itself may contain colons.
    cpu_limits = []
    for spec in args.cpu_limits:  # FIX: renamed ambiguous loop var `l` (E741)
        if ":" not in spec:
            parser.error("CPU limit must be in the form <regex>:<limit>.")
        regex, _, limit = spec.rpartition(":")
        try:
            limit = int(limit)
        except ValueError:
            parser.error("CPU limit must be an integer.")
        cpu_limits.append((regex, limit))

    runner = TestRunner(test_list, workers=args.workers,
                        cpu_limits=cpu_limits)
    # Submit everything first, then evaluate, so the two phases can be
    # timed separately by the log_elapsed_time() calls.
    runner.submit_tests(concurrent_submit_and_eval=False)
    runner.log_elapsed_time()

    failures = runner.wait_for_evaluation()
    runner.log_elapsed_time()

    # FIX: test emptiness idiomatically instead of comparing to a literal
    # list (`failures == []`).
    if not failures:
        logger.info("All tests passed!")
        return 0
    else:
        logger.error("Some test failed!")
        return 1
def main():
    """Select browser launchers/installers for the current platform, start
    the local test webservers and hand everything to the bootstrapper.

    sys.argv[1] is the gears binaries path (its name selects the build
    flavor, or the literal 'chromium'); sys.argv[2] optionally selects
    'wince' or a Chromium mode.
    """
    # Clean up output directory before doing anything else
    clean()

    binaries_arg = sys.argv[1]
    test_url = 'http://localhost:8001/tester/gui.html'
    reporter = SuitesReport('TESTS-TestSuites.xml.tmpl')
    test_servers = []
    installers = []
    launchers = []

    # Derive the build flavor from the binaries path.
    if 'bin-dbg' in binaries_arg:
        build_flavor = 'dbg'
    elif 'bin-opt' in binaries_arg:
        build_flavor = 'opt'
    else:
        build_flavor = ''

    # WinCE is a special case, because it is compiled
    # and run on different platforms.
    if len(sys.argv) > 2 and sys.argv[2] == 'wince':
        device_ip = WindowsNetworkHelper.GetLocalIp()
        launchers.append(browser_launchers.IExploreWinCeLauncher(device_ip))
        installers.append(installer.WinCeInstaller(device_ip))
        # The device reaches the test server over the network, not localhost.
        test_url = 'http://%s:8001/tester/gui.html' % device_ip
    elif binaries_arg == 'chromium':
        reporter = ChromiumReport()
        if len(sys.argv) > 2:
            chromium_mode = sys.argv[2]
            installers.append(
                installer.ChromiumWin32Installer(mode=chromium_mode))
            launchers.append(
                browser_launchers.ChromiumWin32Launcher(mode=chromium_mode))
        else:
            installers.append(installer.ChromiumWin32Installer())
            launchers.append(browser_launchers.ChromiumWin32Launcher())
    elif osutils.osIsWin():
        launchers.extend([
            browser_launchers.IExploreWin32Launcher(),
            browser_launchers.Firefox3Win32Launcher('ff3profile-win'),
            browser_launchers.Firefox35Win32Launcher('ff31profile-win'),
            browser_launchers.ChromeWin32Launcher(),
        ])
        installers.append(installer.ChromeWin32Installer())
        if osutils.osIsVista():
            installers.append(installer.WinVistaInstaller())
        else:
            installers.append(installer.WinXpInstaller())
    elif osutils.osIsNix():
        if osutils.osIsMac():
            launchers.extend([
                browser_launchers.Firefox3MacLauncher('gears-ff3'),
                browser_launchers.Firefox35MacLauncher('gears-ff35'),
            ])
            installers.extend([
                installer.Firefox3MacInstaller('gears-ff3'),
                installer.Firefox35MacInstaller('gears-ff35'),
            ])
            launchers.append(browser_launchers.SafariMacLauncher())
            installers.append(installer.SafariMacInstaller(build_flavor))
        else:
            launchers.append(
                browser_launchers.Firefox3LinuxLauncher('gears-ff3'))
            installers.append(
                installer.Firefox3LinuxInstaller('gears-ff3'))

    # Adding second and third webservers for cross domain tests.
    for server_port in (8001, 8002, 8003):
        test_servers.append(TestWebserver(serverRootDir(), port=server_port))

    gears_binaries = binaries_arg
    testrunner = TestRunner(launchers, test_servers, test_url)
    bootstrap = Bootstrap(gears_binaries, installers, testrunner, reporter)
    bootstrap.invoke()
def main():
    """Discover toolchain test suites, run them in parallel and report.

    Each sub-directory of ``test_directory`` is treated as one test suite;
    suites run from a fresh temporary working directory.

    Exits with status 1 when any test failed, 0 otherwise.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "test_directory",
        help="The directory where the toolchain tests are located.")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print verbose output")
    parser.add_argument(
        "-j",
        "--jobs",
        type=int,
        # FIX: os.cpu_count() may return None (TypeError with int(... / 2))
        # and half of a single core would be 0 workers; clamp to >= 1.
        default=max(1, (os.cpu_count() or 2) // 2),
        help=
        "Number of threads to use for parallel execution (defaults to half of the system max)",
    )
    parser.add_argument("-t", "--tests", default="all",
                        help="Test/Testsuite to run (defaults to all)")
    parser.add_argument(
        "--format",
        choices=("human", "xunit"),
        default="human",
        help="Format of the test output (defaults to human)",
    )
    parser.add_argument(
        "--cdt",
        default=get_cdt_path(),
        help="Path to CDT (defaults to built CDT in this repo)",
    )
    args = parser.parse_args()
    P.verbose = args.verbose

    abs_test_directory = os.path.abspath(args.test_directory)

    # Work out of a scratch directory so artifacts do not pollute the repo.
    temp_dir = tempfile.mkdtemp()
    P.print(f"Temp files will be written to {temp_dir}", verbose=True)
    os.chdir(temp_dir)

    # Every sub-directory of the test directory is one test suite.
    test_directories: List[str] = [
        os.path.join(abs_test_directory, f)
        for f in os.listdir(abs_test_directory)
        if os.path.isdir(os.path.join(abs_test_directory, f))
    ]
    test_suites = [TestSuite(d, args.cdt) for d in test_directories]

    start = timer()
    test_runner = TestRunner(test_suites, args.tests, args.jobs)
    test_results = test_runner.run_tests()
    end = timer()

    if args.format == "human":
        failures = print_test_results(test_results, end - start)
    else:
        failures = print_test_results_machine(test_results, end - start)

    sys.exit(1 if failures else 0)
plot.lineplot, nhds,"randomdate",groupby="geog_region",line_style=6, filterexpr="diagnosis1 >=: '480' and diagnosis1 <=: '487'",filterlabel="Admissions for pneumonia and influenza"), Test('Panelled time-series line plot', plot.lineplot, nhds,"randomdate","geog_region", filterexpr="diagnosis1 >=: '480' and diagnosis1 <=: '487'",filterlabel="Admissions for pneumonia and influenza"), Test('Panelled time-series line plot with groupby', plot.lineplot, nhds,"randomdate","geog_region",groupby='sex', filterexpr="diagnosis1 >=: '480' and diagnosis1 <=: '487'",filterlabel="Admissions for pneumonia and influenza"), Test('Line plot of a categorical column', plot.lineplot, nhds,"marital_status", filterexpr="diagnosis1 >=: '480' and diagnosis1 <=: '487'",filterlabel="Admissions for pneumonia and influenza"), Test('Line plot of an ordinal column', plot.lineplot, nhds,"agegrp", filterexpr="diagnosis1 >=: '480' and diagnosis1 <=: '487'",filterlabel="Admissions for pneumonia and influenza"), ] runner = TestRunner() # Run tests try: runner.run(tests) finally: runner.close()
filterlabel="Admissions for pneumonia and influenza"), Test('Panelled time-series line plot with groupby', plot.lineplot, nhds, "randomdate", "geog_region", groupby='sex', filterexpr="diagnosis1 >=: '480' and diagnosis1 <=: '487'", filterlabel="Admissions for pneumonia and influenza"), Test('Line plot of a categorical column', plot.lineplot, nhds, "marital_status", filterexpr="diagnosis1 >=: '480' and diagnosis1 <=: '487'", filterlabel="Admissions for pneumonia and influenza"), Test('Line plot of an ordinal column', plot.lineplot, nhds, "agegrp", filterexpr="diagnosis1 >=: '480' and diagnosis1 <=: '487'", filterlabel="Admissions for pneumonia and influenza"), ] runner = TestRunner() # Run tests try: runner.run(tests) finally: runner.close()