def test_not_matches(self):
    """Returns True when the test doesn't match any regex.

    'foobob' contains neither 'fob' nor 'bar', and with inverse=True a
    non-matching name passes the filter.
    """
    test = profile.RegexFilter([r'fob', r'bar'], inverse=True)
    assert test('foobob', None)
def test_empty(self):
    """Returns True when no filters are provided.

    An inverse filter built from an empty regex list passes every test.
    """
    filt = profile.RegexFilter([], inverse=True)
    assert filt('foobob', None)
def test_not_matches(self):
    """Returns False when the test doesn't match any regex.

    'foobob' contains neither 'fob' nor 'bar', so the (non-inverse) filter
    rejects it.
    """
    test = profile.RegexFilter([r'fob', r'bar'])
    assert not test('foobob', None)
def resume(input_):
    """Function for the piglit resume command.

    Reloads an existing results directory, restores the options the original
    run was started with, skips tests that already completed, and re-runs the
    remainder, appending results to the same JSON backend.

    Arguments:
    input_ -- list of command-line argument strings to parse.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    args = parser.parse_args(input_)
    _disable_windows_exception_messages()

    # Restore the global options from the saved run so the resumed run
    # behaves the same way as the original.
    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    # Bug fix: this was previously assigned to a misspelled
    # 'proces_isolation' attribute, which silently created a junk attribute
    # and left the real process_isolation option at its default.
    options.OPTIONS.process_isolation = results.options['process_isolation']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in six.iteritems(results.tests):
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path
        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])
        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])
        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))
        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    profile.run(profiles, results.options['log_level'], backend,
                results.options['concurrent'])

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))
def run(input_):
    """ Function for piglit run command

    This is a function because it allows it to be shared between
    piglit-run.py and piglit run

    Arguments:
    input_ -- list of command-line argument strings for _run_parser.
    """
    args = _run_parser(input_)
    _disable_windows_exception_messages()

    # If dmesg is requested we must have serial run, this is because dmesg
    # isn't reliable with threaded run
    if args.dmesg or args.monitored:
        args.concurrency = "none"

    # Pass arguments into Options (global option state read elsewhere)
    options.OPTIONS.execute = args.execute
    options.OPTIONS.valgrind = args.valgrind
    options.OPTIONS.sync = args.sync
    options.OPTIONS.deqp_mustpass = args.deqp_mustpass
    options.OPTIONS.process_isolation = args.process_isolation

    # Set the platform to pass to waffle
    options.OPTIONS.env['PIGLIT_PLATFORM'] = args.platform

    # Change working directory to the root of the piglit directory
    piglit_dir = path.dirname(path.realpath(sys.argv[0]))
    os.chdir(piglit_dir)

    # If the results directory already exists and if overwrite was set, then
    # clear the directory. If it wasn't set, then raise fatal error.
    try:
        core.check_dir(args.results_path,
                       failifexists=args.overwrite,
                       handler=_results_handler)
    except exceptions.PiglitException:
        raise exceptions.PiglitFatalError(
            'Cannot overwrite existing folder without the -o/--overwrite '
            'option being set.')

    # If a test list is provided then set the forced_test_list value.
    # Forcing a list is only meaningful for a single profile, hence the check.
    forced_test_list = None
    if args.test_list:
        if len(args.test_profile) != 1:
            raise exceptions.PiglitFatalError(
                'Unable to force a test list with more than one profile')

        with open(args.test_list) as test_list:
            # Strip newlines and comments, ignore empty lines
            stripped = (t.split('#')[0].strip() for t in test_list)
            forced_test_list = [t for t in stripped if t]

    # Construct the backend and write the run metadata before any test runs.
    backend = backends.get_backend(args.backend)(
        args.results_path,
        junit_suffix=args.junit_suffix,
        junit_subtests=args.junit_subtests)
    backend.initialize(
        _create_metadata(args,
                         args.name or path.basename(args.results_path),
                         forced_test_list))

    profiles = [profile.load_test_profile(p) for p in args.test_profile]
    for p in profiles:
        p.results_dir = args.results_path

    # Set the forced_test_list, if applicable (only the first profile can
    # have one — enforced above).
    if forced_test_list:
        profiles[0].forced_test_list = forced_test_list

    # Set the dmesg type
    if args.dmesg:
        for p in profiles:
            p.options['dmesg'] = dmesg.get_dmesg(args.dmesg)

    if args.monitored:
        for p in profiles:
            p.options['monitor'] = monitoring.Monitoring(args.monitored)

    # Apply exclude filters before include filters; exclusion uses
    # inverse=True so matching tests are dropped.
    for p in profiles:
        if args.exclude_tests:
            p.filters.append(
                profile.RegexFilter(args.exclude_tests, inverse=True))
        if args.include_tests:
            p.filters.append(profile.RegexFilter(args.include_tests))

    # Record wall-clock duration of the run for the results metadata.
    time_elapsed = TimeAttribute(start=time.time())

    profile.run(profiles, args.log_level, backend, args.concurrency)

    time_elapsed.end = time.time()
    backend.finalize({'time_elapsed': time_elapsed.to_json()})

    print('Thank you for running Piglit!\n'
          'Results have been written to ' + args.results_path)
def resume(input_):
    """Function for the piglit resume command.

    Reloads an existing results directory, restores the options the original
    run was started with, skips tests that already completed, and re-runs the
    remainder, appending results to the same JSON backend.

    Arguments:
    input_ -- list of command-line argument strings to parse.
    """
    # Split off any arguments the shared config parser consumed; only the
    # remainder is parsed here.
    unparsed = parsers.parse_config(input_)[1]

    parser = argparse.ArgumentParser()
    parser.add_argument("results_path",
                        type=path.realpath,
                        metavar="<Results Path>",
                        help="Path to results folder")
    parser.add_argument("-f", "--config",
                        dest="config_file",
                        type=argparse.FileType("r"),
                        help="Optionally specify a piglit config file to use. "
                             "Default is piglit.conf")
    parser.add_argument("-n", "--no-retry",
                        dest="no_retry",
                        action="store_true",
                        help="Do not retry incomplete tests")
    parser.add_argument(
        '-j', '--jobs',
        dest='jobs',
        action='store',
        type=int,
        default=core.PIGLIT_CONFIG.safe_get('core', 'jobs', None),
        help='Set the maximum number of jobs to run concurrently. '
             'By default, the reported number of CPUs is used.')
    args = parser.parse_args(unparsed)
    _disable_windows_exception_messages()

    # Restore the global options from the saved run so the resumed run
    # behaves the same way as the original.
    results = backends.load(args.results_path)
    options.OPTIONS.execute = results.options['execute']
    options.OPTIONS.valgrind = results.options['valgrind']
    options.OPTIONS.sync = results.options['sync']
    options.OPTIONS.deqp_mustpass = results.options['deqp_mustpass']
    options.OPTIONS.process_isolation = results.options['process_isolation']
    # jobs and no_retry come from this invocation, not the saved run.
    options.OPTIONS.jobs = args.jobs
    options.OPTIONS.no_retry = args.no_retry
    options.OPTIONS.force_glsl = results.options['force_glsl']

    core.get_config(args.config_file)

    options.OPTIONS.env['PIGLIT_PLATFORM'] = results.options['platform']

    # Restore the per-test timeout from the original run.
    base.Test.timeout = results.options['timeout']

    results.options['env'] = core.collect_system_info()
    results.options['name'] = results.name

    # Resume only works with the JSON backend
    backend = backends.get_backend('json')(
        args.results_path,
        file_start_count=len(results.tests) + 1)
    # Specifically do not initialize again, everything initialize does is done.

    # Don't re-run tests that have already completed, incomplete status tests
    # have obviously not completed.
    exclude_tests = set()
    for name, result in results.tests.items():
        if args.no_retry or result.result != 'incomplete':
            exclude_tests.add(name)

    profiles = [profile.load_test_profile(p)
                for p in results.options['profile']]
    for p in profiles:
        p.results_dir = args.results_path
        if results.options['dmesg']:
            p.dmesg = dmesg.get_dmesg(results.options['dmesg'])
        if results.options['monitoring']:
            p.options['monitor'] = monitoring.Monitoring(
                results.options['monitoring'])
        if results.options['ignore_missing']:
            p.options['ignore_missing'] = results.options['ignore_missing']
        if exclude_tests:
            p.filters.append(lambda n, _: n not in exclude_tests)
        if results.options['exclude_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['exclude_filter'],
                                    inverse=True))
        if results.options['include_filter']:
            p.filters.append(
                profile.RegexFilter(results.options['include_filter']))
        if results.options['forced_test_list']:
            p.forced_test_list = results.options['forced_test_list']

    # This is resumed, don't bother with time since it won't be accurate anyway
    try:
        profile.run(profiles, results.options['log_level'], backend,
                    results.options['concurrent'], args.jobs)
    except exceptions.PiglitUserError as e:
        # Everything already ran: nothing left to do is not an error here,
        # but any other user error still propagates.
        if str(e) != 'no matching tests':
            raise

    backend.finalize()

    print("Thank you for running Piglit!\n"
          "Results have been written to {0}".format(args.results_path))