def _loop(requests, responses, host, worker_num, callback, context,
          pre_fn, post_fn, should_loop=True):
    """Worker entry point: service messages from `requests` until closed.

    Runs `pre_fn` once, then loops reading (message_type, args) pairs from
    the `requests` queue and posting results to `responses`. A Close
    message triggers `post_fn` and a final Done response. Exceptions and
    keyboard interrupts are reported back to the parent as messages rather
    than raised, so the worker exits cleanly.

    Args:
        requests: queue of incoming (message_type, args) tuples.
        responses: queue for outgoing (message_type, payload) tuples.
        host: Host to pass to pre_fn; a real Host is created if None.
        worker_num: identifier included in Done/Interrupt/Error payloads.
        callback: called as callback(context_after_pre, args) per Request.
        context: opaque state passed through pre_fn.
        pre_fn / post_fn: per-worker setup / teardown hooks.
        should_loop: when False, exit after one request (used by tests).
    """
    host = host or Host()
    try:
        context_after_pre = pre_fn(host, worker_num, context)
        keep_looping = True
        while keep_looping:
            message_type, args = requests.get(block=True)
            if message_type == _MessageType.Close:
                responses.put((_MessageType.Done,
                               (worker_num, post_fn(context_after_pre))))
                break
            assert message_type == _MessageType.Request
            resp = callback(context_after_pre, args)
            responses.put((_MessageType.Response, resp))
            keep_looping = should_loop
    except KeyboardInterrupt as e:
        responses.put((_MessageType.Interrupt, (worker_num, str(e))))
    except Exception:
        # BUG FIX: traceback.format_exc() takes an optional `limit`
        # argument, not an exception object; the old format_exc(e) call
        # raised a TypeError in Python 3 and masked the real traceback.
        responses.put(
            (_MessageType.Error, (worker_num, traceback.format_exc())))
def __init__(self, host=None):
    """Initialize a Runner with empty state and default parsed args."""
    self.args = None
    self.classifier = None
    self.cov = None
    self.context = None
    self.coverage_source = None
    self.host = host or Host()
    self.loader = unittest.loader.TestLoader()
    self.printer = None
    self.setup_fn = None
    self.stats = None
    self.teardown_fn = None
    self.top_level_dir = None
    self.top_level_dirs = []
    # How child processes are created on Windows; spawn is the default.
    self.win_multiprocessing = WinMultiprocessing.spawn
    self.final_responses = []
    # Test-expectations state; populated later if expectations are used.
    self.has_expectations = False
    self.expectations = None
    self.metadata = {}
    # Separator used when joining test-name components in results.
    self.path_delimiter = json_results.DEFAULT_TEST_SEPARATOR
    self.artifact_output_dir = None

    # initialize self.args to the defaults.
    parser = ArgumentParser(self.host)
    self.parse_args(parser, [])
def test_join_discards_messages(self):
    """A response never retrieved before join() ends up discarded."""
    host = Host()
    initial_context = {'pre': False, 'post': False}
    pool = make_pool(host, 2, _echo, initial_context, _pre, _post)
    pool.send('hello')
    pool.close()
    pool.join()
    # The echoed reply was never get()'d, so join() must have set it
    # aside in discarded_responses rather than dropping it silently.
    self.assertEqual(len(pool.discarded_responses), 1)
def test_join_gets_an_error(self):
    """join() must surface an exception raised inside a worker callback."""
    host = Host()
    pool = make_pool(host, 2, _error, None, _stub, _stub)
    pool.send('hello')
    pool.close()
    # BUG FIX: the previous try/except form silently passed when join()
    # did NOT raise. assertRaises guarantees the exception occurs, and
    # we still verify its message afterwards.
    with self.assertRaises(Exception) as cm:
        pool.join()
    self.assertIn('_error() raised Exception', str(cm.exception))
def __init__(self, host, jobs, callback, context, pre_fn, post_fn):
    """In-process pool stand-in: no subprocesses are spawned here.

    NOTE(review): this appears to be the synchronous/fake pool variant —
    pre_fn runs immediately in this process and post_fn is stored for
    later (presumably invoked at join() time; confirm against the class).
    """
    self.host = host or Host()
    self.jobs = jobs
    self.callback = callback
    # Deep-copy so callbacks can never mutate the caller's context.
    self.context = copy.deepcopy(context)
    self.msgs = []
    self.closed = False
    self.post_fn = post_fn
    # pre_fn runs right away, identified as worker number 1.
    self.context_after_pre = pre_fn(self.host, 1, self.context)
    self.final_context = None
def test_pickling_errors(self):
    """make_pool must reject unpicklable context, pre_fn, or post_fn."""
    def unpicklable_fn():  # pragma: no cover
        pass

    host = Host()
    jobs = 2
    # Place the unpicklable function in each of the three pickled slots
    # (context, pre_fn, post_fn) in turn; every variant must raise.
    for slot_args in ((unpicklable_fn, None, None),
                      (None, unpicklable_fn, None),
                      (None, None, unpicklable_fn)):
        self.assertRaises(Exception, make_pool,
                          host, jobs, _stub, *slot_args)
def run_through_loop(self, callback=None, pool=None):
    """Drive a single message through _loop() and return the pool.

    Builds a zero-worker _ProcessPool when none is supplied, queues one
    request, and runs _loop() inline with should_loop=False so exactly
    one iteration executes.
    """
    callback = callback or _stub
    if not pool:
        host = Host()
        pool = _ProcessPool(host, 0, _stub, None, _stub, _stub)
    else:
        host = pool.host
    pool.send('hello')
    # Worker number 1; should_loop=False makes _loop exit after one
    # request instead of blocking for more.
    _loop(pool.requests, pool.responses, host, 1, callback,
          None, _stub, _stub, should_loop=False)
    return pool
def testChildStderrIsInTypFinalOutput(self):
    # Emit a marker before capture starts so something precedes the
    # child's output in the stream.
    sys.stdout.write('Started testChildStderrIsInTypFinalOutput')
    host = Host()
    # Capture this process's stdout/stderr while the child typ run
    # executes a test that writes to stderr.
    host.capture_output()
    self.assertEqual(
        subprocess.call([
            sys.executable, self.script_path,
            ('typ.tests.host_test.TestHostReceivesProcessOutput'
             '.testWriteToStderr'),
            '-vv'
        ],
                        stdout=sys.stdout,
                        stderr=sys.stderr), 0)
    out, _ = host.restore_output()
    # Re-emit the captured text so it is visible in this test's own
    # output, then check the child's stderr marker made it into typ's
    # final (captured) output.
    sys.stderr.write(out)
    self.assertIn(' Started testWriteToStderr', out)
def run_basic_test(self, jobs):
    """Round-trip two messages through a pool and verify final contexts."""
    host = Host()
    pool = make_pool(host, jobs, _echo, {'pre': False, 'post': False},
                     _pre, _post)
    for word in ('hello', 'world'):
        pool.send(word)
    # Replies may arrive in either order, so compare as a set.
    replies = {pool.get(), pool.get()}
    pool.close()
    final_contexts = pool.join()
    self.assertEqual(replies, {'True/False/hello', 'True/False/world'})
    # Every worker should report the same post-run context.
    expected_context = {'pre': True, 'post': True}
    self.assertEqual(final_contexts, [expected_context] * jobs)
def __init__(self, host=None):
    """Initialize a Runner with empty state and default parsed args."""
    self.args = None
    self.classifier = None
    self.cov = None
    self.context = None
    self.coverage_source = None
    self.host = host or Host()
    self.loader = unittest.loader.TestLoader()
    self.printer = None
    self.setup_fn = None
    self.stats = None
    self.teardown_fn = None
    self.top_level_dir = None
    # How child processes are created on Windows; spawn is the default.
    self.win_multiprocessing = WinMultiprocessing.spawn

    # initialize self.args to the defaults.
    parser = ArgumentParser(self.host)
    self.parse_args(parser, [])
def host(self):
    """Return a new real Host instance on every call."""
    return Host()
def __init__(self, host=None, add_help=True, version=True, discovery=True,
             reporting=True, running=True):
    """Build typ's command-line parser.

    Each boolean flag (version/discovery/reporting/running) gates a group
    of options, so embedders can expose only the groups they support.
    """
    super(ArgumentParser, self).__init__(prog='typ', add_help=add_help)
    self._host = host or Host()
    # Set by an exit/error path elsewhere; None means "not exited yet".
    self.exit_status = None

    self.usage = '%(prog)s [options] [tests...]'

    if version:
        self.add_argument('-V', '--version', action='store_true',
                          help='Print the typ version and exit.')

    if discovery:
        # Options controlling how tests are discovered.
        self.add_argument('-f', '--file-list', metavar='FILENAME',
                          action='store',
                          help=('Takes the list of tests from the file '
                                '(use "-" for stdin).'))
        self.add_argument('--all', action='store_true',
                          help=('Run all the tests, including the ones '
                                'normally skipped.'))
        self.add_argument('--isolate', metavar='glob', default=[],
                          action='append',
                          help=('Globs of tests to run in isolation '
                                '(serially).'))
        self.add_argument('--suffixes', metavar='glob', default=[],
                          action='append',
                          help=('Globs of test filenames to look for ('
                                'can specify multiple times; defaults '
                                'to %s).' % DEFAULT_SUFFIXES))

    if reporting:
        # Options controlling how results are reported and uploaded.
        self.add_argument('--builder-name',
                          help=('Builder name to include in the '
                                'uploaded data.'))
        self.add_argument('-c', '--coverage', action='store_true',
                          help='Reports coverage information.')
        self.add_argument('--coverage-source', action='append', default=[],
                          help=('Directories to include when running and '
                                'reporting coverage (defaults to '
                                '--top-level-dirs plus --path)'))
        self.add_argument('--coverage-omit', action='append', default=[],
                          help=('Globs to omit when reporting coverage '
                                '(defaults to %s).' % DEFAULT_COVERAGE_OMIT))
        self.add_argument('--coverage-annotate', action='store_true',
                          help=('Produce an annotate source report.'))
        self.add_argument('--coverage-show-missing', action='store_true',
                          help=('Show missing line ranges in coverage '
                                'report.'))
        self.add_argument('--master-name',
                          help=('Buildbot master name to include in the '
                                'uploaded data.'))
        self.add_argument('--metadata', action='append', default=[],
                          help=('Optional key=value metadata that will '
                                'be included in the results.'))
        self.add_argument('--repository-absolute-path', default='',
                          action='store',
                          help=('Specifies the absolute path of the repository.'))
        self.add_argument('--test-results-server',
                          help=('If specified, uploads the full results '
                                'to this server.'))
        self.add_argument('--test-type',
                          help=('Name of test type to include in the '
                                'uploaded data (e.g., '
                                '"telemetry_unittests").'))
        self.add_argument('--write-full-results-to',
                          '--isolated-script-test-output',
                          type=str, metavar='FILENAME', action='store',
                          help=('If specified, writes the full results to '
                                'that path.'))
        # Accepted for isolated-script compatibility; the value is unused.
        self.add_argument('--isolated-script-test-perf-output',
                          type=str, metavar='FILENAME', action='store',
                          help='(ignored/unsupported)')
        self.add_argument('--write-trace-to', metavar='FILENAME',
                          action='store',
                          help=('If specified, writes the trace to '
                                'that path.'))
        self.add_argument('--disable-resultsink', action='store_true',
                          default=False,
                          help=('Explicitly disable ResultSink integration '
                                'instead of automatically determining '
                                'based off LUCI_CONTEXT.'))
        # Positional test names; hidden from --help output.
        self.add_argument('tests', nargs='*', default=[],
                          help=argparse.SUPPRESS)

    if running:
        # Options controlling how the tests are executed.
        self.add_argument('-d', '--debugger', action='store_true',
                          help='Runs the tests under the debugger.')
        self.add_argument('-j', '--jobs', metavar='N', type=int,
                          default=self._host.cpu_count(),
                          help=('Runs N jobs in parallel '
                                '(defaults to %(default)s).'))
        self.add_argument('-l', '--list-only', action='store_true',
                          help='Lists all the test names found and exits.')
        self.add_argument('-n', '--dry-run', action='store_true',
                          help=argparse.SUPPRESS)
        self.add_argument('-q', '--quiet', action='store_true',
                          default=False,
                          help=('Runs as quietly as possible '
                                '(only prints errors).'))
        self.add_argument('-r', '--repeat',
                          '--isolated-script-test-repeat',
                          default=1, type=int,
                          help='The number of times to repeat running each '
                               'test. Note that if the tests are A, B, C '
                               'and repeat is 2, the execution order would'
                               ' be A B C [possible retries] A B C '
                               '[possible retries].')
        self.add_argument('-s', '--status-format',
                          default=self._host.getenv('NINJA_STATUS',
                                                    DEFAULT_STATUS_FORMAT),
                          help=argparse.SUPPRESS)
        self.add_argument('-t', '--timing', action='store_true',
                          help='Prints timing info.')
        self.add_argument('-v', '--verbose', action='count', default=0,
                          help=('Prints more stuff (can specify multiple '
                                'times for more output).'))
        self.add_argument('-x', '--tag', dest='tags', default=[],
                          action='append',
                          help=('test tags (conditions) that apply to '
                                'this run (can specify multiple times'))
        self.add_argument('-i', '--ignore-tag', dest='ignored_tags',
                          default=[], action='append',
                          help=('test tags (conditions) to treat as '
                                'ignored for the purposes of tag '
                                'validation.'))
        self.add_argument('-X', '--expectations-file',
                          dest='expectations_files', default=[],
                          action='append',
                          help=('test expectations file (can specify '
                                'multiple times'))
        self.add_argument('--passthrough', action='store_true',
                          default=False,
                          help='Prints all output while running.')
        self.add_argument('--total-shards', default=1, type=int,
                          help=('Total number of shards being used for '
                                'this test run. (The user of '
                                'this script is responsible for spawning '
                                'all of the shards.)'))
        self.add_argument('--shard-index', default=0, type=int,
                          help=('Shard index (0..total_shards-1) of this '
                                'test run.'))
        self.add_argument('--retry-limit',
                          '--isolated-script-test-launcher-retry-limit',
                          type=int, default=0,
                          help='Retries each failure up to N times.')
        self.add_argument('--retry-only-retry-on-failure-tests',
                          action='store_true',
                          help=('Retries are only for tests that have the'
                                ' RetryOnFailure tag in the test'
                                ' expectations file'))
        self.add_argument('--terminal-width', type=int,
                          default=self._host.terminal_width(),
                          help=argparse.SUPPRESS)
        self.add_argument('--overwrite', action='store_true',
                          default=None, help=argparse.SUPPRESS)
        self.add_argument('--no-overwrite', action='store_false',
                          dest='overwrite', default=None,
                          help=argparse.SUPPRESS)
        self.add_argument('--test-name-prefix', default='', action='store',
                          help=('Specifies the prefix that will be removed'
                                ' from test names'))
        self.add_argument('--isolated-outdir', type=str, metavar='PATH',
                          help='directory to write output to (ignored)')

    if discovery or running:
        # Options shared by discovery and execution.
        self.add_argument('-P', '--path', action='append', default=[],
                          help=('Adds dir to sys.path (can specify '
                                'multiple times).'))
        self.add_argument('--top-level-dir', action='store', default=None,
                          help=argparse.SUPPRESS)
        self.add_argument('--top-level-dirs', action='append', default=[],
                          help=('Sets the top directory of project '
                                '(used when running subdirs).'))
        self.add_argument('--skip', metavar='glob', default=[],
                          action='append',
                          help=('Globs of test names to skip ('
                                'defaults to %(default)s).'))
        self.add_argument(
            '--test-filter', '--isolated-script-test-filter', type=str,
            default='', action='store',
            help='Pass a double-colon-separated ("::") list of exact test '
                 'names or globs, to run just that subset of tests. fnmatch will '
                 'be used to match globs to test names')
        self.add_argument(
            '--partial-match-filter', type=str, default=[], action='append',
            help='Pass a string and Typ will run tests whose names '
                 'partially match the passed string')
def test_async_close(self):
    # join() is called without an explicit close() and without sending
    # any messages; the pool must still shut down cleanly.
    host = Host()
    pool = make_pool(host, 1, _echo, None, _stub, _stub)
    pool.join()
def test_join_gets_an_interrupt(self):
    """A KeyboardInterrupt raised in a worker propagates out of join()."""
    host = Host()
    pool = make_pool(host, 2, _interrupt, None, _stub, _stub)
    pool.send('hello')
    pool.close()
    with self.assertRaises(KeyboardInterrupt):
        pool.join()
def __init__(self, host=None, add_help=True, version=True, discovery=True,
             reporting=True, running=True):
    """Build typ's command-line parser.

    Each boolean flag (version/discovery/reporting/running) gates a group
    of options, so embedders can expose only the groups they support.
    """
    super(ArgumentParser, self).__init__(prog='typ', add_help=add_help)
    # Set by an exit/error path elsewhere; None means "not exited yet".
    self._host = host or Host()
    self.exit_status = None

    self.usage = '%(prog)s [options] [tests...]'

    if version:
        self.add_argument('-V', '--version', action='store_true',
                          help='Print the typ version and exit.')

    if discovery:
        # Options controlling how tests are discovered.
        self.add_argument('-f', '--file-list', metavar='FILENAME',
                          action='store',
                          help=('Takes the list of tests from the file '
                                '(use "-" for stdin).'))
        self.add_argument('--all', action='store_true',
                          help=('Run all the tests, including the ones '
                                'normally skipped.'))
        self.add_argument('--isolate', metavar='glob', default=[],
                          action='append',
                          help=('Globs of tests to run in isolation '
                                '(serially).'))
        self.add_argument('--skip', metavar='glob', default=[],
                          action='append',
                          help=('Globs of test names to skip ('
                                'defaults to %(default)s).'))
        self.add_argument('--suffixes', metavar='glob', default=[],
                          action='append',
                          help=('Globs of test filenames to look for ('
                                'can specify multiple times; defaults '
                                'to %s).' % DEFAULT_SUFFIXES))

    if reporting:
        # Options controlling how results are reported and uploaded.
        self.add_argument('--builder-name',
                          help=('Builder name to include in the '
                                'uploaded data.'))
        self.add_argument('-c', '--coverage', action='store_true',
                          help='Reports coverage information.')
        self.add_argument('--coverage-source', action='append', default=[],
                          help=('Directories to include when running and '
                                'reporting coverage (defaults to '
                                '--top-level-dirs plus --path)'))
        self.add_argument('--coverage-omit', action='append', default=[],
                          help=('Globs to omit when reporting coverage '
                                '(defaults to %s).' % DEFAULT_COVERAGE_OMIT))
        self.add_argument('--coverage-annotate', action='store_true',
                          help=('Produce an annotate source report.'))
        self.add_argument('--coverage-show-missing', action='store_true',
                          help=('Show missing line ranges in coverage '
                                'report.'))
        self.add_argument('--master-name',
                          help=('Buildbot master name to include in the '
                                'uploaded data.'))
        self.add_argument('--metadata', action='append', default=[],
                          help=('Optional key=value metadata that will '
                                'be included in the results.'))
        self.add_argument('--test-results-server',
                          help=('If specified, uploads the full results '
                                'to this server.'))
        self.add_argument('--test-type',
                          help=('Name of test type to include in the '
                                'uploaded data (e.g., '
                                '"telemetry_unittests").'))
        self.add_argument('--write-full-results-to', metavar='FILENAME',
                          action='store',
                          help=('If specified, writes the full results to '
                                'that path.'))
        self.add_argument('--write-trace-to', metavar='FILENAME',
                          action='store',
                          help=('If specified, writes the trace to '
                                'that path.'))
        # Positional test names; hidden from --help output.
        self.add_argument('tests', nargs='*', default=[],
                          help=argparse.SUPPRESS)

    if running:
        # Options controlling how the tests are executed.
        self.add_argument('-d', '--debugger', action='store_true',
                          help='Runs the tests under the debugger.')
        self.add_argument('-j', '--jobs', metavar='N', type=int,
                          default=self._host.cpu_count(),
                          help=('Runs N jobs in parallel '
                                '(defaults to %(default)s).'))
        self.add_argument('-l', '--list-only', action='store_true',
                          help='Lists all the test names found and exits.')
        self.add_argument('-n', '--dry-run', action='store_true',
                          help=argparse.SUPPRESS)
        self.add_argument('-q', '--quiet', action='store_true',
                          default=False,
                          help=('Runs as quietly as possible '
                                '(only prints errors).'))
        self.add_argument('-s', '--status-format',
                          default=self._host.getenv(
                              'NINJA_STATUS', DEFAULT_STATUS_FORMAT),
                          help=argparse.SUPPRESS)
        self.add_argument('-t', '--timing', action='store_true',
                          help='Prints timing info.')
        self.add_argument('-v', '--verbose', action='count', default=0,
                          help=('Prints more stuff (can specify multiple '
                                'times for more output).'))
        self.add_argument('--passthrough', action='store_true',
                          default=False,
                          help='Prints all output while running.')
        self.add_argument('--total-shards', default=1, type=int,
                          help=('Total number of shards being used for '
                                'this test run. (The user of '
                                'this script is responsible for spawning '
                                'all of the shards.)'))
        self.add_argument('--shard-index', default=0, type=int,
                          help=('Shard index (0..total_shards-1) of this '
                                'test run.'))
        self.add_argument('--retry-limit', type=int, default=0,
                          help='Retries each failure up to N times.')
        self.add_argument('--terminal-width', type=int,
                          default=self._host.terminal_width(),
                          help=argparse.SUPPRESS)
        self.add_argument('--overwrite', action='store_true',
                          default=None, help=argparse.SUPPRESS)
        self.add_argument('--no-overwrite', action='store_false',
                          dest='overwrite', default=None,
                          help=argparse.SUPPRESS)

    if discovery or running:
        # Options shared by discovery and execution.
        self.add_argument('-P', '--path', action='append', default=[],
                          help=('Adds dir to sys.path (can specify '
                                'multiple times).'))
        self.add_argument('--top-level-dir', action='store', default=None,
                          help=argparse.SUPPRESS)
        self.add_argument('--top-level-dirs', action='append', default=[],
                          help=('Sets the top directory of project '
                                '(used when running subdirs).'))
def main(argv=None, host=None, win_multiprocessing=None, **defaults):
    """Module-level entry point: build a Runner and delegate to it.

    Falls back to a real Host when none is given, optionally overrides
    the Windows multiprocessing mode, then forwards argv and any extra
    defaults to Runner.main().
    """
    runner = Runner(host=host or Host())
    if win_multiprocessing is not None:
        runner.win_multiprocessing = win_multiprocessing
    return runner.main(argv, **defaults)
def test_no_close(self):
    """join() on a pool that was never close()d yields no final contexts."""
    host = Host()
    initial_context = {'pre': False, 'post': False}
    pool = make_pool(host, 2, _echo, initial_context, _pre, _post)
    self.assertEqual(pool.join(), [])