Example #1
    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features etc.
        :param is_started true if test/feature/scenario is started
        :type is_started bool
        :param context behave context
        :type context behave.runner.Context
        :param element feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name))
            duration_ms = element.duration * 1000
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, duration_ms)
            elif element.status == 'failed':
                # The correct way would be to use element.error_message,
                # but assertions do not have a traceback there (due to Behave internals),
                # so we collect it manually
                error_message = element.error_message
                fetch_log = not error_message  # If no error_message provided, need to fetch log manually
                trace = ""
                if isinstance(element.exception, AssertionError):
                    trace = self._collect_trace(element, utils)

                # error_message may be empty for some exceptions, see https://github.com/behave/behave/issues/468
                if not trace and not error_message:
                    try:
                        error_message = traceback.format_exc()
                    except AttributeError:
                        # The exception may have an empty stack trace, in which case
                        # traceback.format_exc() raises AttributeError
                        trace = self._collect_trace(element, utils)
                if not error_message:
                    # Format exception as last resort
                    error_message = element.exception
                message_as_string = utils.to_unicode(error_message)
                if fetch_log and self.__real_runner.config.log_capture:
                    message_as_string += u"\n" + utils.to_unicode(self.__real_runner.log_capture.getvalue())
                self._test_failed(step_name, message_as_string, trace, duration=duration_ms)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in ['passed', 'failed']:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name, element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples), element.location)
        else:
            self._feature_or_scenario(is_started, element.name, element.location)
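Example 1 (and Example 6) delegate trace collection to self._collect_trace(element, utils), whose body is not part of the snippet. Judging from the inline variant in Examples 2 and 4, an equivalent helper would look roughly like the sketch below; the standalone function form and its name are assumptions, while element.exc_traceback and the unicode helper come from the snippets themselves.

import traceback

def collect_trace(element, utils):
    """Sketch of a _collect_trace-style helper: join the formatted traceback
    frames stored on the failed step, decoding each line to unicode."""
    if not getattr(element, "exc_traceback", None):  # nothing captured for this step
        return u""
    return u"".join(utils.to_unicode(line)
                    for line in traceback.format_tb(element.exc_traceback))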
Example #2
    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features etc.
        :param is_started true if test/feature/scenario is started
        :type is_started bool
        :param context behave context
        :type context behave.runner.Context
        :param element feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword),
                                          utils.to_unicode(element.name))
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, element.duration)
            elif element.status == 'failed':
                # The correct way would be to use element.error_message,
                # but assertions do not have a traceback there (due to Behave internals),
                # so we collect it manually
                trace = ""
                if isinstance(element.exception, AssertionError):
                    trace = u"".join([
                        utils.to_unicode(l)
                        for l in traceback.format_tb(element.exc_traceback)
                    ])

                error_message = utils.to_unicode(element.error_message)

                self._test_failed(step_name,
                                  error_message,
                                  trace,
                                  duration=element.duration)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(
                element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in [
                        'passed', 'failed'
                ]:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name,
                                      element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples),
                                      element.location)
        else:
            self._feature_or_scenario(is_started, element.name,
                                      element.location)
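One difference between this variant and Examples 1 and 6 is the duration unit: behave stores Step.duration as a float number of seconds, which the newer variants convert to milliseconds before reporting. A trivial self-contained illustration of that conversion:

def to_millis(duration_seconds):
    """behave's Step.duration is a float number of seconds."""
    return duration_seconds * 1000

print(to_millis(0.25))  # 250.0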
Example #3
 def __process_hook(self, is_started, context, element):
     """
     Hook to be installed. Reports steps, features etc.
     :param is_started true if test/feature/scenario is started
     :type is_started bool
     :param context behave context
     :type context behave.runner.Context
     :param element feature/suite/step
     """
     element.location.file = element.location.filename  # To preserve _bdd_utils contract
     utils = VersionAgnosticUtils()
     if isinstance(element, Step):
         # Process step
         step_name = u"{0} {1}".format(utils.to_unicode(element.keyword),
                                       utils.to_unicode(element.name))
         if is_started:
             self._test_started(step_name, element.location)
         elif element.status == 'passed':
             self._test_passed(step_name, element.duration)
         elif element.status == 'failed':
             try:
                 trace = traceback.format_exc()
             except Exception:
                 trace = "".join(traceback.format_tb(element.exc_traceback))
             error_message = utils.to_unicode(element.error_message)
             if "Traceback " in error_message:
                 error_message = ""  # No reason to duplicate output (see PY-13647)
             self._test_failed(step_name,
                               error_message,
                               trace,
                               duration=element.duration)
         elif element.status == 'undefined':
             self._test_undefined(step_name, element.location)
         else:
             self._test_skipped(step_name, element.status, element.location)
     elif not is_started and isinstance(
             element, Scenario) and element.status == 'failed':
         # To process scenarios with undefined/skipped tests
         for step in element.steps:
             assert isinstance(step, Step), step
             if step.status not in [
                     'passed', 'failed'
             ]:  # Something strange, probably skipped or undefined
                 self.__process_hook(False, context, step)
         self._feature_or_scenario(is_started, element.name,
                                   element.location)
     elif isinstance(element, ScenarioOutline):
         self._feature_or_scenario(is_started, str(element.examples),
                                   element.location)
     else:
         self._feature_or_scenario(is_started, element.name,
                                   element.location)
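This variant first tries traceback.format_exc() and only falls back to formatting the stored exc_traceback. The two helpers do not produce the same text: format_exc() includes the header and the final exception line, while format_tb() returns the stack frames only. A minimal, self-contained illustration (not taken from the snippet):

import sys
import traceback

try:
    assert 1 == 2, "boom"
except AssertionError:
    exc_tb = sys.exc_info()[2]
    full_text = traceback.format_exc()                   # header, frames and "AssertionError: boom"
    frames_only = "".join(traceback.format_tb(exc_tb))   # stack frames only, no exception line

print("AssertionError" in full_text)    # True
print("AssertionError" in frames_only)  # False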
Example #4
    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features etc.
        :param is_started true if test/feature/scenario is started
        :type is_started bool
        :param context behave context
        :type context behave.runner.Context
        :param element feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name))
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, element.duration)
            elif element.status == 'failed':
                # The correct way would be to use element.error_message,
                # but assertions do not have a traceback there (due to Behave internals),
                # so we collect it manually
                error_message = element.error_message
                trace = ""
                if isinstance(element.exception, AssertionError):
                    trace = u"".join([utils.to_unicode(l) for l in traceback.format_tb(element.exc_traceback)])

                # error_message may be empty for some exceptions, see https://github.com/behave/behave/issues/468
                if not trace and not error_message:
                    error_message = traceback.format_exc()
                if not error_message:
                    # Format exception as last resort
                    error_message = element.exception

                self._test_failed(step_name, utils.to_unicode(error_message), trace, duration=element.duration)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in ['passed', 'failed']:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name, element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples), element.location)
        else:
            self._feature_or_scenario(is_started, element.name, element.location)
Example #5
 def __process_hook(self, is_started, context, element):
     """
     Hook to be installed. Reports steps, features etc.
     :param is_started true if test/feature/scenario is started
     :type is_started bool
     :param context behave context
     :type context behave.runner.Context
     :param element feature/suite/step
     """
     element.location.file = element.location.filename  # To preserve _bdd_utils contract
     utils = VersionAgnosticUtils()
     if isinstance(element, Step):
         # Process step
         step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name))
         if is_started:
             self._test_started(step_name, element.location)
         elif element.status == 'passed':
             self._test_passed(step_name, element.duration)
         elif element.status == 'failed':
             try:
                 trace = traceback.format_exc()
             except Exception:
                 trace = "".join(traceback.format_tb(element.exc_traceback))
             error_message = utils.to_unicode(element.error_message)
             if "Traceback " in error_message:
                 error_message = ""  # No reason to duplicate output (see PY-13647)
             self._test_failed(step_name, error_message, trace, duration=element.duration)
         elif element.status == 'undefined':
             self._test_undefined(step_name, element.location)
         else:
             self._test_skipped(step_name, element.status, element.location)
     elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
         # To process scenarios with undefined/skipped tests
         for step in element.steps:
             assert isinstance(step, Step), step
             if step.status not in ['passed', 'failed']:  # Something strange, probably skipped or undefined
                 self.__process_hook(False, context, step)
         self._feature_or_scenario(is_started, element.name, element.location)
     elif isinstance(element, ScenarioOutline):
         self._feature_or_scenario(is_started, str(element.examples), element.location)
     else:
         self._feature_or_scenario(is_started, element.name, element.location)
Example #6
    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features etc.
        :param is_started true if test/feature/scenario is started
        :type is_started bool
        :param context behave context
        :type context behave.runner.Context
        :param element feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword),
                                          utils.to_unicode(element.name))
            duration_ms = element.duration * 1000
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, duration_ms)
            elif element.status == 'failed':
                # The correct way would be to use element.error_message,
                # but assertions do not have a traceback there (due to Behave internals),
                # so we collect it manually
                error_message = element.error_message
                fetch_log = not error_message  # If no error_message provided, need to fetch log manually
                trace = ""
                if isinstance(element.exception, AssertionError):
                    trace = self._collect_trace(element, utils)

                # error_message may be empty for some exceptions, see https://github.com/behave/behave/issues/468
                if not trace and not error_message:
                    try:
                        error_message = traceback.format_exc()
                    except AttributeError:
                        # The exception may have an empty stack trace, in which case
                        # traceback.format_exc() raises AttributeError
                        trace = self._collect_trace(element, utils)
                if not error_message:
                    # Format exception as last resort
                    error_message = element.exception
                message_as_string = utils.to_unicode(error_message)
                if fetch_log and self.__real_runner.config.log_capture:
                    message_as_string += u"\n" + utils.to_unicode(
                        self.__real_runner.log_capture.getvalue())
                self._test_failed(step_name,
                                  message_as_string,
                                  trace,
                                  duration=duration_ms)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(
                element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in [
                        'passed', 'failed'
                ]:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name,
                                      element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples),
                                      element.location)
        else:
            self._feature_or_scenario(is_started, element.name,
                                      element.location)
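Every variant above reads the same set of Step attributes: status, duration, error_message, exception, exc_traceback and location. The same objects are handed to behave's documented environment hooks, so a rough standalone sketch of equivalent per-step reporting from after_step could look like this (the print-based output is purely illustrative, not the snippets' actual reporting protocol; in newer behave versions step.status is an enum that also compares equal to its name, matching the string comparisons above):

# environment.py -- illustration only
import traceback

def after_step(context, step):
    name = u"{0} {1}".format(step.keyword, step.name)
    if step.status == 'failed':
        # Frames of the exception behave stored for this step, if any
        trace = u"".join(traceback.format_tb(step.exc_traceback)) if step.exc_traceback else u""
        message = step.error_message or step.exception
        print(u"FAILED: {0} ({1:.0f} ms)\n{2}\n{3}".format(name, step.duration * 1000, message, trace))
    else:
        print(u"{0}: {1} ({2})".format(step.status, name, step.location))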