def _run(self):
  try:
    while True:
      test_desc = self._description_queue.get()
      if not test_desc:
        self._runner_proc.stdout.close()
        self._runner_proc.stdin.write('\0')
        self._runner_proc.stdin.close()
        self._runner_proc.wait()
        return

      if not write_message(self._runner_proc.stdin, test_desc):
        self._outcome_queue.put(Outcome(internal_error=[
            'Unable to send test description for (%s.%s) from %r' % (
                test_desc.recipe_name, test_desc.test_name, self.name)
        ]))
        return

      result = read_message(self._runner_proc.stdout, Outcome)
      if result is None:
        return

      self._outcome_queue.put(result)
  except KeyboardInterrupt:
    pass
  except gevent.GreenletExit:
    pass
  except Exception as ex:  # pylint: disable=broad-except
    self._outcome_queue.put(Outcome(internal_error=[
        'Uncaught exception in %r: %s' % (self.name, ex)
    ] + traceback.format_exc().splitlines()))
  finally:
    try:
      self._runner_proc.kill()
    except OSError:
      pass
    self._runner_proc.wait()
    # We rely on the thread to dump coverage information to disk; if we don't
    # wait for the process to die, then our main thread will race with the
    # runner thread for the coverage information. On windows this almost
    # always causes an IOError, on *nix this will likely result in flakily
    # truncated coverage files.
    #
    # Sending ourselves down the pipe lets the main process know that we've
    # quit so it can remove us from the live threads.
    self._outcome_queue.put(self)
def _outcome_json(self, per_test=None, coverage=100, uncovered_mods=(),
                  unused_expects=()):
  """Generates a JSON dict representing a runner.Outcome message.

  Args:
    * per_test (Dict[test name: str, Seq[OutcomeType]]) - Mapping of test name
      to a series of OutcomeTypes which that test had. `check` may be repeated
      multiple times to indicate multiple check failures.
    * coverage (float) - Percentage covered.
    * uncovered_mods (Seq[module name]) - Modules which have NO possible test
      coverage.
    * unused_expects (Seq[file name]) - File paths relative to the main repo
      of unused expectation files (i.e. JSON files on disk without a
      corresponding test case).

  Returns a python dict which is the JSONPB representation of the Outcome.
  """
  ret = Outcome()

  if per_test is None:
    per_test = {'foo.basic': []}

  for test_name, outcome_types in (per_test or {}).iteritems():
    results = ret.test_results[test_name]
    for type_ in outcome_types:
      if type_ == self.OutcomeType.diff:
        results.diff.lines[:] = ['placeholder']
      elif type_ == self.OutcomeType.written:
        results.written = True
      elif type_ == self.OutcomeType.removed:
        results.removed = True
      elif type_ == self.OutcomeType.check:
        results.check.add(lines=['placeholder'])
      elif type_ == self.OutcomeType.crash:
        results.crash_mismatch[:] = ['placeholder']
      elif type_ == self.OutcomeType.bad_test:
        results.bad_test[:] = ['placeholder']
      elif type_ == self.OutcomeType.internal_error:
        results.internal_error[:] = ['placeholder']

  ret.coverage_percent = coverage
  ret.uncovered_modules.extend(uncovered_mods)
  ret.unused_expectation_files.extend(unused_expects)
  return jsonpb.MessageToDict(ret, preserving_proto_field_name=True)
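# A rough illustration of the shape this helper produces (field names come
# from the Outcome proto as rendered by jsonpb with
# preserving_proto_field_name=True; the values below are hypothetical and
# empty repeated fields are omitted by MessageToDict):
#
#   self._outcome_json(
#       per_test={'foo.basic': [self.OutcomeType.check]}, coverage=95)
#   # => approximately:
#   # {
#   #   'test_results': {
#   #     'foo.basic': {'check': [{'lines': ['placeholder']}]},
#   #   },
#   #   'coverage_percent': 95.0,
#   # }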
def main(args):
  """Runs simulation tests on a given repo of recipes.

  Args:
    args: the parsed args (see add_subparser).

  Returns:
    Exit code
  """
  is_train = args.subcommand == 'train'
  ret = Outcome()

  def _dump():
    if args.json:
      json.dump(
          json_format.MessageToDict(ret, preserving_proto_field_name=True),
          args.json)

  try:
    _run(ret, args.recipe_deps, args.use_emoji, args.test_filters, is_train)
    _dump()
  except KeyboardInterrupt:
    args.docs = False  # skip docs
  except SystemExit:
    _dump()
    raise

  if is_train and args.docs:
    print 'Generating README.recipes.md'
    regenerate_docs(args.recipe_deps.main_repo)

  return 0
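# Hypothetical invocations (assuming the standard recipes.py wrapper; the
# flag name is inferred from args.json above and may differ in the actual
# argument parser):
#
#   ./recipes.py test run --json /tmp/test_outcome.json
#   ./recipes.py test train    # also regenerates README.recipes.md unless
#                              # interrupted (args.docs is cleared on Ctrl-C)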
def _read_test_desc():
  try:
    return read_message(sys.stdin, Description)
  except Exception as ex:  # pylint: disable=broad-except
    write_message(
        sys.stdout,
        Outcome(internal_error=[
            'while reading: %r' % (ex,)
        ] + traceback.format_exc().splitlines()))
    return None
def test_returning_empty(self):
  d = OrderedDict([
      ('x', {'name': 'x', 'cmd': ['one', 'two', 'three']}),
      ('y', {'name': 'y', 'cmd': []}),
      ('z', {'name': 'z', 'cmd': ['foo', 'bar']}),
  ])
  test_data = self.mkApi().post_process(lambda check, steps: {})
  results = Outcome.Results()
  expectations = post_process(results, d, test_data)
  self.assertIsNone(expectations)
  self.assertEqual(len(results.check), 0)
def test_returning_nonsubset(self):
  d = OrderedDict([
      ('x', {'name': 'x', 'cmd': ['one', 'two', 'three']}),
      ('y', {'name': 'y', 'cmd': []}),
      ('z', {'name': 'z', 'cmd': ['foo', 'bar']}),
  ])
  test_data = self.mkApi().post_process(
      lambda check, steps: OrderedDict(
          (k, dict(cwd='cwd', **v.to_step_dict()))
          for k, v in steps.iteritems()))
  with self.assertRaises(PostProcessError):
    post_process(Outcome.Results(), d, test_data)
def test_key_error_followed_by_attribute(self):
  d = OrderedDict([('x', {'name': 'x'})])
  def body(check, steps):
    foo = steps['y'].env['foo']
  test_data = self.mkApi().post_process(body)
  results = Outcome.Results()
  post_process(results, d, test_data)
  self.assertEqual(len(results.check), 1)
  self.assertHas(
      results.check[0],
      "foo = steps['y'].env['foo']",
      "steps.keys(): ['x']",
      "raised exception: KeyError: 'y'")
def test_key_error_implicit_check(self):
  d = OrderedDict([('x', {'name': 'x'})])
  def body(check, steps):
    foo = steps['x'].env['foo']
  test_data = self.mkApi().post_process(body)
  results = Outcome.Results()
  expectations = post_process(results, d, test_data)
  self.assertEqual(len(results.check), 1)
  self.assertHas(
      results.check[0],
      "foo = steps['x'].env['foo']",
      "steps['x'].env.keys(): []",
      "raised exception: KeyError: 'foo'")
def test_returning_subset(self):
  d = OrderedDict([
      ('x', {'name': 'x', 'cmd': ['one', 'two', 'three']}),
      ('y', {'name': 'y', 'cmd': []}),
      ('z', {'name': 'z', 'cmd': ['foo', 'bar']}),
  ])
  test_data = self.mkApi().post_process(
      lambda check, steps: OrderedDict(
          (k, {'name': v.name}) for k, v in steps.iteritems()))
  results = Outcome.Results()
  expectations = post_process(results, d, test_data)
  self.assertEqual(
      expectations, [{'name': 'x'}, {'name': 'y'}, {'name': 'z'}])
  self.assertEqual(len(results.check), 0)
def test_key_error_in_subscript_expression(self):
  d = OrderedDict([('x', {'name': 'x'})])
  def body(check, steps):
    d2 = {}
    foo = steps[d2['x']].env['foo']
  test_data = self.mkApi().post_process(body)
  results = Outcome.Results()
  expectations = post_process(results, d, test_data)
  self.assertEqual(len(results.check), 1)
  self.assertHas(
      results.check[0],
      "foo = steps[d2['x']].env['foo']",
      'd2.keys(): []',
      "raised exception: KeyError: 'x'")
def test_post_process_failure(self):
  d = OrderedDict([('x', {'name': 'x'})])
  def body(check, steps, *args, **kwargs):
    check('x' not in steps)
  test_data = self.mkApi().post_process(body, 'foo', 'bar', a=1, b=2)
  results = Outcome.Results()
  expectations = post_process(results, d, test_data)
  self.assertEqual(expectations, [{'name': 'x'}])
  self.assertEqual(len(results.check), 1)
  self.assertHas(results.check[0], "body('foo', 'bar', a=1, b=2)")
  self.assertHas(
      results.check[0],
      "check(('x' not in steps))",
      "steps.keys(): ['x']")
def test_key_error_implicit_check_no_checker_in_frame(self):
  d = OrderedDict([('x', {'name': 'x'})])
  def body(check, steps_dict):
    # The failure backtrace for the implicit check should even include frames
    # where check isn't explicitly passed.
    def inner(steps_dict):
      return steps_dict['x'].env['foo'] == 'bar'
    check(inner(steps_dict))
  test_data = self.mkApi().post_process(body)
  results = Outcome.Results()
  post_process(results, d, test_data)
  self.assertEqual(len(results.check), 1)
  self.assertHas(
      results.check[0],
      'check(inner(steps_dict))')
  self.assertHas(
      results.check[0],
      "return (steps_dict['x'].env['foo'] == 'bar')",
      "steps_dict['x'].env.keys(): []",
      "raised exception: KeyError: 'foo'")
def test_removing_name(self):
  d = OrderedDict([
      ('x', {'name': 'x', 'cmd': ['one', 'two', 'three']}),
      ('y', {'name': 'y', 'cmd': []}),
      ('z', {'name': 'z', 'cmd': ['foo', 'bar']}),
  ])
  test_data = self.mkApi().post_process(
      lambda check, steps: OrderedDict(
          (k, {a: value for a, value in v.to_step_dict().iteritems()
               if a != 'name'})
          for k, v in steps.iteritems()))
  results = Outcome.Results()
  expectations = post_process(results, d, test_data)
  self.assertEqual(expectations, [
      {'name': 'x', 'cmd': ['one', 'two', 'three']},
      {'name': 'y', 'cmd': []},
      {'name': 'z', 'cmd': ['foo', 'bar']},
  ])
  self.assertEqual(len(results.check), 0)
def test_post_process_failure_in_multiple_hooks(self):
  d = OrderedDict([('x', {'name': 'x'})])
  def body(check, steps, *args, **kwargs):
    check('x' not in steps)
  def body2(check, steps, *args, **kwargs):
    check('y' in steps)
  api = self.mkApi()
  test_data = (api.post_process(body, 'foo', a=1) +
               api.post_process(body2, 'bar', b=2))
  results = Outcome.Results()
  expectations = post_process(results, d, test_data)
  self.assertEqual(expectations, [{'name': 'x'}])
  self.assertEqual(len(results.check), 2)
  self.assertHas(
      results.check[0],
      "body('foo', a=1)",
      "check(('x' not in steps))",
      "steps.keys(): ['x']")
  self.assertHas(
      results.check[1],
      "body2('bar', b=2)",
      "check(('y' in steps))",
      "steps.keys(): ['x']")
def post_process(d, f, *args, **kwargs):
  test_data = RecipeTestApi().post_process(f, *args, **kwargs)
  results = Outcome.Results()
  expectations = magic_check_fn.post_process(results, d, test_data)
  return expectations, results.check
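# Example usage, mirroring the patterns in the tests above (the hook and step
# data are hypothetical): a hook whose checks pass and which returns None
# leaves the step dicts untouched and produces no check failures.
#
#   steps = OrderedDict([('x', {'name': 'x'})])
#   def hook(check, steps):
#     check('x' in steps)
#   expectations, failures = post_process(steps, hook)
#   # expectations == [{'name': 'x'}]; failures is empty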
def main(recipe_deps, cov_file, is_train, cover_module_imports):
  # TODO(iannucci): Route and log greenlet exception information somewhere
  # useful as part of each test case.
  gevent.get_hub().exception_stream = None

  main_repo = recipe_deps.main_repo

  cov_data = coverage.CoverageData()
  if cover_module_imports:
    cov_data.update(_cover_all_imports(main_repo))

  test_data_cache = {}

  path_cleaner = _make_path_cleaner(recipe_deps)

  fatal = False

  while True:
    test_desc = _read_test_desc()
    if not test_desc:
      break  # EOF or error

    result = Outcome()
    try:
      full_name = '%s.%s' % (test_desc.recipe_name, test_desc.test_name)
      test_result = result.test_results[full_name]

      recipe = main_repo.recipes[test_desc.recipe_name]

      if cov_file:
        # We have to start coverage now because we want to cover the import of
        # the covered recipe and/or covered recipe modules.
        cov = coverage.Coverage(
            config_file=False, include=recipe.coverage_patterns)
        cov.start()  # to cover execfile of recipe/module.__init__

        # However, to accurately track coverage when using gevent greenlets,
        # we need to tell Coverage about this. If the recipe (or the module it
        # covers) uses futures directly, stop the coverage so far and restart
        # it with 'concurrency="gevent"'.
        #
        # TODO(iannucci): We may need to upgrade this in the future to
        # 'transitively uses the futures module' instead of 'directly uses the
        # futures module'.
        uses_gevent = (
            _FUTURES_MODULE in recipe.normalized_DEPS.itervalues())
        if not uses_gevent and recipe.module:
          uses_gevent = (
              _FUTURES_MODULE in recipe.module.normalized_DEPS.itervalues())
        if uses_gevent:
          cov.stop()
          cov_data.update(cov.get_data())
          cov = coverage.Coverage(
              config_file=False, include=recipe.coverage_patterns,
              concurrency='gevent')
          cov.start()

      test_data = _get_test_data(test_data_cache, recipe, test_desc.test_name)
      try:
        _run_test(path_cleaner, test_result, recipe_deps, test_desc,
                  test_data, is_train)
      except Exception as ex:  # pylint: disable=broad-except
        test_result.internal_error.append('Uncaught exception: %r' % (ex,))
        test_result.internal_error.extend(
            traceback.format_exc().splitlines())

      if cov_file:
        cov.stop()
        cov_data.update(cov.get_data())
    except Exception as ex:  # pylint: disable=broad-except
      result.internal_error.append('Uncaught exception: %r' % (ex,))
      result.internal_error.extend(traceback.format_exc().splitlines())
      fatal = True

    if not write_message(sys.stdout, result) or fatal:
      break  # EOF

  if cov_file:
    coverage.data.CoverageDataFiles(basename=cov_file).write(cov_data)
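# Wire protocol between this runner subprocess and its parent (see _run
# above): the parent writes one Description message per test to our stdin and
# we answer each with a single Outcome message on stdout. An empty or failed
# read (EOF, or the parent's closing '\0' sentinel) ends the loop, after which
# the accumulated coverage data is flushed to cov_file.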