def __init__(self, config, features=None, step_registry=None):
    """Initialize the runner with its configuration.

    :param config: Behave configuration object used by this runner.
    :param features: Optional list of feature models to run (default: empty).
    :param step_registry: Optional step registry (default: None; filled later).
    """
    self.config = config
    self.features = features or []
    self.step_registry = step_registry
    self.capture_controller = CaptureController(config)
    # -- RUN STATE: populated/updated while the test run progresses.
    self.hooks = {}
    self.formatters = []
    self.undefined_steps = []
    self.context = None
    self.feature = None
    self.hook_failures = 0
def create_capture_controller(config=None):
    """Build a CaptureController; fabricate a mocked default config when none is supplied."""
    if not config:
        config = Mock()
        default_settings = {
            "stdout_capture": True,
            "stderr_capture": True,
            "log_capture": True,
            "logging_filter": None,
            "logging_level": "INFO",
        }
        for attr_name, attr_value in default_settings.items():
            setattr(config, attr_name, attr_value)
    return CaptureController(config)
def create_capture_controller(config=None):
    """Build a CaptureController; fabricate a mocked default config (including
    logging format settings) when none is supplied."""
    if not config:
        config = Mock()
        default_settings = {
            "stdout_capture": True,
            "stderr_capture": True,
            "log_capture": True,
            "logging_filter": None,
            "logging_level": "INFO",
            "logging_format": "%(levelname)s:%(name)s:%(message)s",
            "logging_datefmt": None,
        }
        for attr_name, attr_value in default_settings.items():
            setattr(config, attr_name, attr_value)
    return CaptureController(config)
def test_run_sets_status_to_failed_on_assertion_error(self):
    """A step whose matched implementation raises AssertionError must end up
    with Status.failed and an error message starting with 'Assertion Failed'.
    """
    step = Step('foo.feature', 17, u'Given', 'given', u'foo')
    self.runner.context = Context(self.runner)
    self.runner.config.stdout_capture = True
    self.runner.config.log_capture = False
    self.runner.capture_controller = CaptureController(self.runner.config)
    self.runner.capture_controller.setup_capture(self.runner.context)
    step.error_message = None
    match = Mock()
    match.run.side_effect = raiser(AssertionError('whee'))
    self.runner.step_registry.find_match.return_value = match

    step.run(self.runner)

    # -- FIX: use plain asserts instead of nose-style eq_() for consistency
    #    with the other variant of this test; nose is unmaintained and pytest
    #    rewrites bare asserts with rich failure messages.
    assert step.status == Status.failed
    assert step.error_message.startswith('Assertion Failed')
def test_run_sets_status_to_failed_on_assertion_error(self):
    """Verify that an AssertionError raised while running a step marks the
    step as failed and stores an 'Assertion Failed' error message."""
    # -- SETUP: runner context with stdout capture on, log capture off.
    self.runner.context = Context(self.runner)
    self.runner.config.stdout_capture = True
    self.runner.config.log_capture = False
    self.runner.capture_controller = CaptureController(self.runner.config)
    self.runner.capture_controller.setup_capture(self.runner.context)

    # -- SETUP: step whose matched implementation raises AssertionError.
    step = Step("foo.feature", 17, u"Given", "given", u"foo")
    step.error_message = None
    failing_match = Mock()
    failing_match.run.side_effect = raiser(AssertionError("whee"))
    self.runner.step_registry.find_match.return_value = failing_match

    step.run(self.runner)

    assert step.status == Status.failed
    assert step.error_message.startswith("Assertion Failed")
def setUp(self):
    """Create a mocked runner wired to a real CaptureController whose
    individual capture streams are replaced by mocks returning empty output.
    """
    self.step_registry = Mock()
    self.runner = Mock()
    self.capture_controller = CaptureController(self.runner.config)
    self.runner.capture_controller = self.capture_controller
    self.runner.step_registry = self.step_registry
    self.config = self.runner.config = Mock()
    self.config.outputs = [None]
    self.context = self.runner.context = Mock()
    # -- FIX: removed leftover debug print of the context and the
    #    commented-out dead code line (fully-mocked capture_controller).
    self.formatters = self.runner.formatters = [Mock()]
    # -- Replace the capture streams with mocks that yield empty output.
    self.stdout_capture = self.capture_controller.stdout_capture = Mock()
    self.stdout_capture.getvalue.return_value = ''
    self.stderr_capture = self.capture_controller.stderr_capture = Mock()
    self.stderr_capture.getvalue.return_value = ''
    self.log_capture = self.capture_controller.log_capture = Mock()
    self.log_capture.getvalue.return_value = ''
    self.run_hook = self.runner.run_hook = Mock()
class ModelRunner(object):
    """
    Test runner for a behave model (features).
    Provides the core functionality of a test runner and
    the functional API needed by model elements.

    .. attribute:: aborted

          This is set to true when the user aborts a test run
          (:exc:`KeyboardInterrupt` exception). Initially: False.
          Stored as derived attribute in :attr:`Context.aborted`.

    .. attribute:: captured

        If any output capture is enabled, provides access to a
        :class:`~behave.capture.Captured` object that contains
        a snapshot of all captured data (stdout/stderr/log).

        .. versionadded:: 1.3.0
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, config, features=None, step_registry=None):
        self.config = config
        self.features = features or []
        self.hooks = {}
        self.formatters = []
        self.undefined_steps = []
        self.step_registry = step_registry
        self.capture_controller = CaptureController(config)
        # -- RUN STATE: set while the model is being executed.
        self.context = None
        self.feature = None
        self.hook_failures = 0

    # @property
    def _get_aborted(self):
        """Derive the aborted flag from the current context (False without one)."""
        value = False
        if self.context:
            value = self.context.aborted
        return value

    # @aborted.setter
    def _set_aborted(self, value):
        """Store the aborted flag as root attribute on the context."""
        # pylint: disable=protected-access
        assert self.context, "REQUIRE: context, but context=%r" % self.context
        self.context._set_root_attribute("aborted", bool(value))

    aborted = property(_get_aborted, _set_aborted,
                       doc="Indicates that test run is aborted by the user.")

    def run_hook(self, name, context, *args):
        """Run the registered hook ``name`` (if any) unless in dry-run mode.

        Hook errors are caught, reported on stdout, counted in
        :attr:`hook_failures` and attached to the affected model element.
        """
        if not self.config.dry_run and (name in self.hooks):
            try:
                with context.use_with_user_mode():
                    self.hooks[name](context, *args)
            # except KeyboardInterrupt:
            #     self.aborted = True
            #     if name not in ("before_all", "after_all"):
            #         raise
            except Exception as e:  # pylint: disable=broad-except
                # -- HANDLE HOOK ERRORS:
                use_traceback = False
                if self.config.verbose:
                    use_traceback = True
                    ExceptionUtil.set_traceback(e)
                extra = u""
                if "tag" in name:
                    extra = "(tag=%s)" % args[0]
                error_text = ExceptionUtil.describe(e, use_traceback).rstrip()
                error_message = u"HOOK-ERROR in %s%s: %s" % (name, extra, error_text)
                print(error_message)
                self.hook_failures += 1
                if "tag" in name:
                    # -- SCENARIO or FEATURE
                    statement = getattr(context, "scenario", context.feature)
                elif "all" in name:
                    # -- ABORT EXECUTION: For before_all/after_all
                    self.aborted = True
                    statement = None
                else:
                    # -- CASE: feature, scenario, step
                    statement = args[0]

                if statement:
                    # -- CASE: feature, scenario, step
                    statement.hook_failed = True
                    if statement.error_message:
                        # -- NOTE: One exception/failure is already stored.
                        #    Append only error message.
                        statement.error_message += u"\n" + error_message
                    else:
                        # -- FIRST EXCEPTION/FAILURE:
                        statement.store_exception_context(e)
                        statement.error_message = error_message

    def setup_capture(self):
        """Prepare output/log capture (creating a context if needed)."""
        if not self.context:
            self.context = Context(self)
        self.capture_controller.setup_capture(self.context)

    def start_capture(self):
        self.capture_controller.start_capture()

    def stop_capture(self):
        self.capture_controller.stop_capture()

    def teardown_capture(self):
        self.capture_controller.teardown_capture()

    @property
    def captured(self):
        """Return the current state of the captured output/logging
        (as captured object).
        """
        return self.capture_controller.captured

    def run_model(self, features=None):
        """Run the given features (default: :attr:`features`).

        Executes before_all/after_all hooks, runs each feature, reports to
        formatters/reporters and returns True if the run failed.
        """
        # pylint: disable=too-many-branches
        if not self.context:
            self.context = Context(self)
        if self.step_registry is None:
            self.step_registry = the_step_registry
        if features is None:
            features = self.features

        # -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
        context = self.context
        self.hook_failures = 0
        self.setup_capture()
        self.run_hook("before_all", context)

        run_feature = not self.aborted
        failed_count = 0
        undefined_steps_initial_size = len(self.undefined_steps)
        for feature in features:
            if run_feature:
                try:
                    self.feature = feature
                    for formatter in self.formatters:
                        formatter.uri(feature.filename)

                    failed = feature.run(self)
                    if failed:
                        failed_count += 1
                        if self.config.stop or self.aborted:
                            # -- FAIL-EARLY: After first failure.
                            run_feature = False
                except KeyboardInterrupt:
                    self.aborted = True
                    failed_count += 1
                    run_feature = False

            # -- ALWAYS: Report run/not-run feature to reporters.
            # REQUIRED-FOR: Summary to keep track of untested features.
            for reporter in self.config.reporters:
                reporter.feature(feature)

        # -- AFTER-ALL:
        # pylint: disable=protected-access, broad-except
        cleanups_failed = False
        self.run_hook("after_all", self.context)
        try:
            self.context._do_cleanups()  # Without dropping the last context layer.
        except Exception:
            cleanups_failed = True

        if self.aborted:
            print("\nABORTED: By user.")
        for formatter in self.formatters:
            formatter.close()
        for reporter in self.config.reporters:
            reporter.end()

        failed = ((failed_count > 0) or self.aborted or (self.hook_failures > 0)
                  or (len(self.undefined_steps) > undefined_steps_initial_size)
                  or cleanups_failed)
        # XXX-MAYBE: or context.failed)
        return failed

    def run(self):
        """
        Implements the run method by running the model.
        """
        self.context = Context(self)
        return self.run_model()
class ModelRunner(object):
    """
    Test runner for a behave model (features).
    Provides the core functionality of a test runner and
    the functional API needed by model elements.

    .. attribute:: aborted

          This is set to true when the user aborts a test run
          (:exc:`KeyboardInterrupt` exception). Initially: False.
          Stored as derived attribute in :attr:`Context.aborted`.
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, config, features=None, step_registry=None):
        self.config = config
        self.features = features or []
        self.hooks = {}
        self.formatters = []
        self.undefined_steps = []
        self.step_registry = step_registry
        self.capture_controller = CaptureController(config)
        # -- RUN STATE: set while the model is being executed.
        self.context = None
        self.feature = None
        self.hook_failures = 0

    # @property
    def _get_aborted(self):
        """Derive the aborted flag from the current context (False without one)."""
        value = False
        if self.context:
            value = self.context.aborted
        return value

    # @aborted.setter
    def _set_aborted(self, value):
        """Store the aborted flag as root attribute on the context."""
        # pylint: disable=protected-access
        assert self.context, "REQUIRE: context, but context=%r" % self.context
        self.context._set_root_attribute("aborted", bool(value))

    aborted = property(_get_aborted, _set_aborted,
                       doc="Indicates that test run is aborted by the user.")

    def run_hook(self, name, context, *args):
        """Run the registered hook ``name`` (if any) unless in dry-run mode.

        Hook errors are caught, reported on stdout, counted in
        :attr:`hook_failures` and attached to the affected model element.
        """
        if not self.config.dry_run and (name in self.hooks):
            try:
                with context.user_mode():
                    self.hooks[name](context, *args)
            # except KeyboardInterrupt:
            #     self.aborted = True
            #     if name not in ("before_all", "after_all"):
            #         raise
            except Exception as e:  # pylint: disable=broad-except
                # -- HANDLE HOOK ERRORS:
                use_traceback = False
                if self.config.verbose:
                    use_traceback = True
                    ExceptionUtil.set_traceback(e)
                extra = u""
                if "tag" in name:
                    extra = "(tag=%s)" % args[0]
                error_text = ExceptionUtil.describe(e, use_traceback).rstrip()
                error_message = u"HOOK-ERROR in %s%s: %s" % (name, extra, error_text)
                print(error_message)
                self.hook_failures += 1
                if "tag" in name:
                    # -- SCENARIO or FEATURE
                    statement = getattr(context, "scenario", context.feature)
                elif "all" in name:
                    # -- ABORT EXECUTION: For before_all/after_all
                    self.aborted = True
                    statement = None
                else:
                    # -- CASE: feature, scenario, step
                    statement = args[0]

                if statement:
                    # -- CASE: feature, scenario, step
                    statement.hook_failed = True
                    if statement.error_message:
                        # -- NOTE: One exception/failure is already stored.
                        #    Append only error message.
                        statement.error_message += u"\n"+ error_message
                    else:
                        # -- FIRST EXCEPTION/FAILURE:
                        statement.store_exception_context(e)
                        statement.error_message = error_message

    def setup_capture(self):
        """Prepare output/log capture (creating a context if needed)."""
        if not self.context:
            self.context = Context(self)
        self.capture_controller.setup_capture(self.context)

    def start_capture(self):
        self.capture_controller.start_capture()

    def stop_capture(self):
        self.capture_controller.stop_capture()

    def teardown_capture(self):
        self.capture_controller.teardown_capture()

    def run_model(self, features=None):
        """Run the given features (default: :attr:`features`).

        Executes before_all/after_all hooks, runs each feature, reports to
        formatters/reporters and returns True if the run failed.
        """
        # pylint: disable=too-many-branches
        if not self.context:
            self.context = Context(self)
        if self.step_registry is None:
            self.step_registry = the_step_registry
        if features is None:
            features = self.features

        # -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
        context = self.context
        self.hook_failures = 0
        self.setup_capture()
        self.run_hook("before_all", context)

        run_feature = not self.aborted
        failed_count = 0
        undefined_steps_initial_size = len(self.undefined_steps)
        for feature in features:
            if run_feature:
                try:
                    self.feature = feature
                    for formatter in self.formatters:
                        formatter.uri(feature.filename)

                    failed = feature.run(self)
                    if failed:
                        failed_count += 1
                        if self.config.stop or self.aborted:
                            # -- FAIL-EARLY: After first failure.
                            run_feature = False
                except KeyboardInterrupt:
                    self.aborted = True
                    failed_count += 1
                    run_feature = False

            # -- ALWAYS: Report run/not-run feature to reporters.
            # REQUIRED-FOR: Summary to keep track of untested features.
            for reporter in self.config.reporters:
                reporter.feature(feature)

        # -- AFTER-ALL:
        if self.aborted:
            print("\nABORTED: By user.")
        for formatter in self.formatters:
            formatter.close()
        self.run_hook("after_all", self.context)
        for reporter in self.config.reporters:
            reporter.end()

        failed = ((failed_count > 0) or self.aborted or (self.hook_failures > 0)
                  or (len(self.undefined_steps) > undefined_steps_initial_size))
        return failed

    def run(self):
        """
        Implements the run method by running the model.
        """
        self.context = Context(self)
        return self.run_model()
class ModelRunner(object):
    """
    Test runner for a behave model (features).
    Provides the core functionality of a test runner and
    the functional API needed by model elements.

    .. attribute:: aborted

          This is set to true when the user aborts a test run
          (:exc:`KeyboardInterrupt` exception). Initially: False.
          Stored as derived attribute in :attr:`Context.aborted`.
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, config, features=None, step_registry=None):
        self.config = config
        self.features = features or []
        self.hooks = {}
        self.formatters = []
        self.undefined_steps = []
        self.step_registry = step_registry
        self.capture_controller = CaptureController(config)
        # -- RUN STATE: set while the model is being executed.
        self.context = None
        self.feature = None
        self.hook_failures = 0

    # @property
    def _get_aborted(self):
        """Derive the aborted flag from the current context (False without one)."""
        value = False
        if self.context:
            value = self.context.aborted
        return value

    # @aborted.setter
    def _set_aborted(self, value):
        """Store the aborted flag as root attribute on the context."""
        # pylint: disable=protected-access
        assert self.context, "REQUIRE: context, but context=%r" % self.context
        self.context._set_root_attribute("aborted", bool(value))

    aborted = property(_get_aborted, _set_aborted,
                       doc="Indicates that test run is aborted by the user.")

    def run_hook(self, name, context, *args):
        """Run the registered hook ``name`` (if any) unless in dry-run mode.

        Hook errors are caught, reported on stdout, counted in
        :attr:`hook_failures` and attached to the affected model element.
        """
        if not self.config.dry_run and (name in self.hooks):
            try:
                with context.use_with_user_mode():
                    self.hooks[name](context, *args)
            # except KeyboardInterrupt:
            #     self.aborted = True
            #     if name not in ("before_all", "after_all"):
            #         raise
            except Exception as e:  # pylint: disable=broad-except
                # -- HANDLE HOOK ERRORS:
                use_traceback = False
                if self.config.verbose:
                    use_traceback = True
                    ExceptionUtil.set_traceback(e)
                extra = u""
                if "tag" in name:
                    extra = "(tag=%s)" % args[0]
                error_text = ExceptionUtil.describe(e, use_traceback).rstrip()
                error_message = u"HOOK-ERROR in %s%s: %s" % (name, extra, error_text)
                print(error_message)
                self.hook_failures += 1
                if "tag" in name:
                    # -- SCENARIO or FEATURE
                    statement = getattr(context, "scenario", context.feature)
                elif "all" in name:
                    # -- ABORT EXECUTION: For before_all/after_all
                    self.aborted = True
                    statement = None
                else:
                    # -- CASE: feature, scenario, step
                    statement = args[0]

                if statement:
                    # -- CASE: feature, scenario, step
                    statement.hook_failed = True
                    if statement.error_message:
                        # -- NOTE: One exception/failure is already stored.
                        #    Append only error message.
                        statement.error_message += u"\n"+ error_message
                    else:
                        # -- FIRST EXCEPTION/FAILURE:
                        statement.store_exception_context(e)
                        statement.error_message = error_message

    def setup_capture(self):
        """Prepare output/log capture (creating a context if needed)."""
        if not self.context:
            self.context = Context(self)
        self.capture_controller.setup_capture(self.context)

    def start_capture(self):
        self.capture_controller.start_capture()

    def stop_capture(self):
        self.capture_controller.stop_capture()

    def teardown_capture(self):
        self.capture_controller.teardown_capture()

    def _forEachItemInFeatures(self, feature, f, param):
        """Apply ``f(item, param)`` to the feature and (recursively) to its
        scenarios, examples, steps, parser and statement, when present."""
        def check(obj, attrib):
            # Attribute exists and is not None.
            return hasattr(obj, attrib) and getattr(obj, attrib) is not None

        def foreachitem(item):
            f(item, param)
            if check(item, 'scenarios'):
                for x in item.scenarios:
                    foreachitem(x)
            if check(item, 'examples'):
                for x in item.examples:
                    f(x, param)
            if check(item, 'steps'):
                for step in item.steps:
                    f(step, param)
            if check(item, 'parser'):
                foreachitem(item.parser)
            if check(item, 'statement'):
                foreachitem(item.statement)

        foreachitem(feature)

    class QueueEvent(Enum):
        # a test scenario is going to start
        SCENARIO_STARTED = 0
        # a test scenario has finished
        SCENARIO_FINISHED = 1
        # a test feature (the file) has finished
        FEATURE_FINISHED = 2

    def _run_feature(self, feature, queue, next):
        """Child-process entry point: run ``feature`` and publish progress
        events on ``queue``; scenarios with counter < ``next`` are skipped.

        Exits the child process with code 255 on any unexpected error.
        """
        try:
            # scenarios/feature can't be added to the queue after running
            def prepare_for_pickle(item):
                def clean(item, param):
                    if hasattr(item, 'exc_traceback'):
                        # pickle will fail if traceback is not removed
                        delattr(item, 'exc_traceback')
                    if hasattr(item, 'tags'):
                        # replace tags array with new strings to avoid <class 'super'> after memory release
                        item.tags = [str(tag) for tag in item.tags]
                # pickle process will file if we dont clear traceback
                self._forEachItemInFeatures(item, clean, 0)

            def before_run_scenario(scenario):
                # Returning True means "skip": this scenario already ran
                # in a previous (crashed/timed-out) child process.
                if next > scenario.counter:
                    return True
                queue.put((ModelRunner.QueueEvent.SCENARIO_STARTED, scenario))
                return False

            def after_run_scenario(scenario):
                prepare_for_pickle(scenario)
                queue.put((ModelRunner.QueueEvent.SCENARIO_FINISHED, scenario))

            ret = feature.run(self, before_run_scenario, after_run_scenario)
            prepare_for_pickle(feature)
            queue.put((ModelRunner.QueueEvent.FEATURE_FINISHED, feature))
            return 0
        except Exception as e:  # pylint: disable=broad-except
            print("_run_feature() -> ERROR {}".format(e))
            return sys.exit(255)

    def _run_one_feature(self, feature):
        """Run a single feature, optionally in a forked child process with a
        per-feature timeout (taken from a ``timeout=N`` tag or the config).

        :return: tuple (succeed, feature) where ``feature`` may be the
            re-pickled copy received from the child process.
        """
        try:
            self.feature = feature
            for formatter in self.formatters:
                formatter.uri(feature.filename)

            def read_timeout(tags):
                # A tag like "timeout=30" (prefix match) yields 30; else 0.
                tag_name = "timeout="
                for tag in tags:
                    res = tag.find(tag_name)
                    if res == 0:
                        return int(tag[len(tag_name):])
                return 0

            timeout = read_timeout(feature.tags)
            if not (timeout and timeout > 0):
                if self.config.timeout and self.config.timeout > 0:
                    timeout = self.config.timeout
                else:
                    timeout = None

            if not self.config.fork:
                failed = feature.run(self)
                # NOTE(review): feature.run() appears to return a "failed"
                # flag elsewhere in this file, but the caller treats the
                # first tuple element as "succeed" — verify this is not
                # inverted for the non-fork path.
                return failed, feature
            else:
                # -- FORK MODE: run the feature in a child process; restart
                #    it after the crashed/timed-out scenario when needed.
                next = 0
                running = 0
                error = StatusError.none
                while next < len(feature.run_items):
                    counter = 0
                    for counter in range(0, len(feature.run_items)):
                        feature.run_items[counter].counter = counter
                    queue = Queue()
                    proc = Process(target=self._run_feature, args=(feature, queue, next))
                    proc.start()
                    timer = Timer()
                    while True:
                        try:
                            e, scenario = queue.get(block=True, timeout=1)
                            if e == ModelRunner.QueueEvent.FEATURE_FINISHED:
                                feature = scenario
                                error = StatusError.none
                                break
                            elif e == ModelRunner.QueueEvent.SCENARIO_STARTED:
                                running = scenario.counter
                                timer.reset()
                            elif e == ModelRunner.QueueEvent.SCENARIO_FINISHED:
                                feature.run_items[scenario.counter] = scenario
                        except Empty:
                            pass
                        if timeout is not None:
                            if timer.elapsed() > timeout:
                                print("killing Child Process after {} seconds timeout".format(timer.elapsed()), flush=True)
                                proc.terminate()
                                error = StatusError.timeout
                                break
                        if not proc.is_alive():
                            # Child ended without FEATURE_FINISHED event.
                            if proc.exitcode < 0:
                                print("Child Process finished with exit code {}".format(proc.exitcode), flush=True)
                                error = StatusError.crash
                            break
                    if error != StatusError.none:
                        # if an error was detected we couldn't read the scenario from the queue,
                        # so we need to set status manually
                        def setStatus(item, status):
                            if hasattr(item, "set_status") and callable(getattr(item, 'set_status')):
                                item.set_status(status[0])
                                item.status_error = status[1]
                            else:
                                item.status = status[0]
                                item.status_error = status[1]
                        self._forEachItemInFeatures(feature.run_items[running], setStatus, [Status.failed, error])
                        # Resume after the scenario that crashed/timed out.
                        next = running + 1
                    else:
                        break
        except KeyboardInterrupt:
            self.aborted = True
            return False, feature

        any_error = False
        for scenario in feature.run_items:
            if scenario.status is Status.failed:
                any_error = True
        return not any_error, feature

    def run_model(self, features=None):
        """Run the given features (default: :attr:`features`).

        Supports just-list modes, executes hooks and features, and returns
        a process return code (0 on success, else the last feature's
        status_error value).
        """
        if self.config.just_list:
            # -- LIST MODE: only print matching feature filenames.
            for feature in self.features:
                if feature.should_run(self.config):
                    print(feature.filename)
            return False
        if self.config.just_list_tags:
            # -- LIST MODE: only print matching scenario tags and names.
            for feature in self.features:
                for scenario in feature.scenarios:
                    if scenario.should_run(self.config):
                        print("{} : {}".format(scenario.tags, scenario.name))
            return False

        # pylint: disable=too-many-branches
        if not self.context:
            self.context = Context(self)
        if self.step_registry is None:
            self.step_registry = the_step_registry
        if features is None:
            features = self.features

        # -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
        context = self.context
        self.hook_failures = 0
        self.setup_capture()
        self.run_hook("before_all", context)

        run_feature = not self.aborted
        failed_count = 0
        undefined_steps_initial_size = len(self.undefined_steps)
        for feature in features:
            if run_feature:
                succeed, feature = self._run_one_feature(feature)

                # -- ALWAYS: Report run/not-run feature to reporters.
                # REQUIRED-FOR: Summary to keep track of untested features.
                for reporter in self.config.reporters:
                    reporter.feature(feature)
                if not succeed:
                    failed_count += 1
                    if self.config.stop or self.aborted:
                        # -- FAIL-EARLY: After first failure.
                        break

        # -- AFTER-ALL:
        # pylint: disable=protected-access, broad-except
        cleanups_failed = False
        self.run_hook("after_all", self.context)
        try:
            self.context._do_cleanups()  # Without dropping the last context layer.
        except Exception:
            cleanups_failed = True

        if self.aborted:
            print("\nABORTED: By user.")
        for formatter in self.formatters:
            formatter.close()
        for reporter in self.config.reporters:
            reporter.end()

        failed = ((failed_count > 0) or self.aborted or (self.hook_failures > 0)
                  or (len(self.undefined_steps) > undefined_steps_initial_size)
                  or cleanups_failed)
        # XXX-MAYBE: or context.failed)
        return_code = 0
        if failed:
            return_code = self.features[-1].status_error.value
        return return_code

    def run(self):
        """
        Implements the run method by running the model.
        """
        self.context = Context(self)
        return self.run_model()