Code Example #1
    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features etc.
        :param is_started true if test/feature/scenario is started
        :type is_started bool
        :param context behave context
        :type context behave.runner.Context
        :param element feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name))
            duration_ms = element.duration * 1000
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, duration_ms)
            elif element.status == 'failed':
                # The correct way is to use element.error_message,
                # but assertions do not have a trace there (due to Behave internals),
                # so we collect it manually
                error_message = element.error_message
                fetch_log = not error_message  # If no error_message provided, need to fetch log manually
                trace = ""
                if isinstance(element.exception, AssertionError):
                    trace = self._collect_trace(element, utils)

                # May be empty for some exceptions: https://github.com/behave/behave/issues/468
                if not trace and not error_message:
                    try:
                        error_message = traceback.format_exc()
                    except AttributeError:
                        # The exception may have an empty stack trace, in which case
                        # traceback.format_exc() throws AttributeError
                        trace = self._collect_trace(element, utils)
                if not error_message:
                    # Format exception as last resort
                    error_message = element.exception
                message_as_string = utils.to_unicode(error_message)
                if fetch_log and self.__real_runner.config.log_capture:
                    message_as_string += u"\n" + utils.to_unicode(self.__real_runner.log_capture.getvalue())
                self._test_failed(step_name, message_as_string, trace, duration=duration_ms)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in ['passed', 'failed']:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name, element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples), element.location)
        else:
            self._feature_or_scenario(is_started, element.name, element.location)
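
Code Examples #1 and #11 delegate stack-trace collection to a _collect_trace helper that is not shown on this page. Below is a minimal sketch of what it might look like, modeled on the inline variant in Code Example #3; the exact body is an assumption rather than the original implementation.

    def _collect_trace(self, element, utils):
        # Join the formatted frames of the failed step's traceback into one
        # unicode string; element.exc_traceback is set by behave on failure
        # (assumes `import traceback`, as used in the examples on this page).
        return u"".join(utils.to_unicode(line)
                        for line in traceback.format_tb(element.exc_traceback))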
Code Example #2
File: behave_runner.py Project: nunb/bugvm
 def __process_hook(self, is_started, context, element):
     """
     Hook to be installed. Reports steps, features etc.
     :param is_started true if test/feature/scenario is started
     :type is_started bool
     :param context behave context
     :type context behave.runner.Context
     :param element feature/suite/step
     """
     element.location.file = element.location.filename  # To preserve _bdd_utils contract
     utils = VersionAgnosticUtils()
     if isinstance(element, Step):
         # Process step
         step_name = u"{0} {1}".format(utils.to_unicode(element.keyword),
                                       utils.to_unicode(element.name))
         if is_started:
             self._test_started(step_name, element.location)
         elif element.status == 'passed':
             self._test_passed(step_name, element.duration)
         elif element.status == 'failed':
             try:
                 trace = traceback.format_exc()
             except Exception:
                 trace = "".join(traceback.format_tb(element.exc_traceback))
             error_message = utils.to_unicode(element.error_message)
             if "Traceback " in error_message:
                 error_message = ""  # No reason to duplicate output (see PY-13647)
             self._test_failed(step_name,
                               error_message,
                               trace,
                               duration=element.duration)
         elif element.status == 'undefined':
             self._test_undefined(step_name, element.location)
         else:
             self._test_skipped(step_name, element.status, element.location)
     elif not is_started and isinstance(
             element, Scenario) and element.status == 'failed':
         # To process scenarios with undefined/skipped tests
         for step in element.steps:
             assert isinstance(step, Step), step
             if step.status not in [
                     'passed', 'failed'
             ]:  # Something strange, probably skipped or undefined
                 self.__process_hook(False, context, step)
         self._feature_or_scenario(is_started, element.name,
                                   element.location)
     elif isinstance(element, ScenarioOutline):
         self._feature_or_scenario(is_started, str(element.examples),
                                   element.location)
     else:
         self._feature_or_scenario(is_started, element.name,
                                   element.location)
Code Example #3
    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features etc.
        :param is_started true if test/feature/scenario is started
        :type is_started bool
        :param context behave context
        :type context behave.runner.Context
        :param element feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name))
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, element.duration)
            elif element.status == 'failed':
                # The correct way is to use element.error_message,
                # but assertions do not have a trace there (due to Behave internals),
                # so we collect it manually
                error_message = element.error_message
                trace = ""
                if isinstance(element.exception, AssertionError):
                    trace = u"".join([utils.to_unicode(l) for l in traceback.format_tb(element.exc_traceback)])

                # May be empty for some exceptions: https://github.com/behave/behave/issues/468
                if not trace and not error_message:
                    error_message = traceback.format_exc()
                if not error_message:
                    # Format exception as last resort
                    error_message = element.exception

                self._test_failed(step_name, utils.to_unicode(error_message), trace, duration=element.duration)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in ['passed', 'failed']:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name, element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples), element.location)
        else:
            self._feature_or_scenario(is_started, element.name, element.location)
Code Example #4
def report_data(dumper, commands_to_skip):
    """
    Fetches data from management commands and reports it to dumper.

    :type dumper _xml.XmlDumper
    :type commands_to_skip list
    :param commands_to_skip list of commands to skip
    :param dumper: destination to report
    """
    utility = ManagementUtility()
    for command_name in get_commands().keys():

        if command_name in commands_to_skip:
            sys.stderr.write("Skipping command '{0}' due to config\n".format(command_name))
            continue

        fetcher = _Fetcher(utility, command_name)
        fetcher.daemon = True
        fetcher.start()
        fetcher.join(int(os.getenv("_PYCHARM_DJANGO_DEFAULT_TIMEOUT", "2")))
        command = fetcher.result
        if not command:
            if fetcher.command_lead_to_exception:
                sys.stderr.write("Command '{0}' skipped\n".format(command_name))
                continue
            else:
                sys.stderr.write(
                    "Command '{0}' took too long and may freeze everything. Consider adding it to 'skip commands' list\n".format(
                        command_name))
                sys.exit(1)

        use_argparse = False
        # There is no optparse in 1.10
        if _is_django_10():
            use_argparse = True
        else:
            try:
                use_argparse = command.use_argparse
            except AttributeError:
                pass

        try:
            parser = command.create_parser("", command_name)
        except Exception as e:
            sys.stderr.write("Error parsing command {0}: {1}\n".format(command_name, e))
            continue

        try:  # and there is no "usage()" since 1.10
            usage = command.usage("")
        except AttributeError:
            usage = command.help

        dumper.start_command(command_name=command_name,
                             command_help_text=VersionAgnosticUtils().to_unicode(usage).replace("%prog",
                                                                                                command_name))
        module_to_use = _argparse if use_argparse else _optparse  # Choose appropriate module: argparse, optparse
        module_to_use.process_command(dumper, command, parser)
        dumper.close_command()
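
The _Fetcher used above is not shown on this page; judging by the loop, it runs ManagementUtility.fetch_command in a daemon thread so that a hanging management command can be abandoned after the join() timeout. A minimal sketch of that pattern follows; everything except the attribute names taken from the loop (result, command_lead_to_exception) is an assumption for illustration.

import threading

class _Fetcher(threading.Thread):
    def __init__(self, utility, command_name):
        super(_Fetcher, self).__init__()
        self.result = None                      # the fetched command, if any
        self.command_lead_to_exception = False  # set when fetch_command raises
        self.__utility = utility
        self.__command_name = command_name

    def run(self):
        # Fetch the management command; any exception is recorded instead of
        # propagating, so the calling loop can simply skip the command.
        try:
            self.result = self.__utility.fetch_command(self.__command_name)
        except Exception:
            self.command_lead_to_exception = True

Used as in the loop above: start the thread with daemon = True, join() with a timeout, and if result is still None afterwards the command either raised or is treated as hung.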
Code Example #5
    def xml(self):
        """

        :return: current commands as XML as described in package
        :rtype str
        """
        document = self.__document.toxml(encoding="utf-8")
        return VersionAgnosticUtils().to_unicode(
            document.decode("utf-8") if isinstance(document, bytes
                                                   ) else document)
Code Example #6
 def __process_hook(self, is_started, context, element):
     """
     Hook to be installed. Reports steps, features etc.
     :param is_started true if test/feature/scenario is started
     :type is_started bool
     :param context behave context
     :type context behave.runner.Context
     :param element feature/suite/step
     """
     element.location.file = element.location.filename  # To preserve _bdd_utils contract
     utils = VersionAgnosticUtils()
     if isinstance(element, Step):
         # Process step
         step_name = u"{0} {1}".format(utils.to_unicode(element.keyword), utils.to_unicode(element.name))
         if is_started:
             self._test_started(step_name, element.location)
         elif element.status == 'passed':
             self._test_passed(step_name, element.duration)
         elif element.status == 'failed':
             try:
                 trace = traceback.format_exc()
             except Exception:
                 trace = "".join(traceback.format_tb(element.exc_traceback))
             error_message = utils.to_unicode(element.error_message)
             if "Traceback " in error_message:
                 error_message = ""  # No reason to duplicate output (see PY-13647)
             self._test_failed(step_name, error_message, trace, duration=element.duration)
         elif element.status == 'undefined':
             self._test_undefined(step_name, element.location)
         else:
             self._test_skipped(step_name, element.status, element.location)
     elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
         # To process scenarios with undefined/skipped tests
         for step in element.steps:
             assert isinstance(step, Step), step
             if step.status not in ['passed', 'failed']:  # Something strange, probably skipped or undefined
                 self.__process_hook(False, context, step)
         self._feature_or_scenario(is_started, element.name, element.location)
     elif isinstance(element, ScenarioOutline):
         self._feature_or_scenario(is_started, str(element.examples), element.location)
     else:
         self._feature_or_scenario(is_started, element.name, element.location)
Code Example #7
 def _test_failed(self, name, message, details):
     """
     Report test failure
     :param name: test name
     :type name str
     :param message: failure message
     :type message basestring
     :param details: failure details (probably stacktrace)
     :type details str
     """
     self.tc_messages.testFailed(name, message=VersionAgnosticUtils().to_unicode(message), details=details)
     self.__last_test_name = None
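
self.tc_messages is not defined in this snippet; judging by the testFailed() call it looks like a TeamcityServiceMessages instance from the teamcity-messages package, but that identification is an assumption. A minimal sketch of the kind of service message such a call emits:

from teamcity.messages import TeamcityServiceMessages

messages = TeamcityServiceMessages()
# Writes a ##teamcity[testFailed ...] service message to stdout, which the
# IDE or CI test console parses to mark the test as failed.
messages.testFailed("Given a failing step",
                    message="assertion failed",
                    details="Traceback (most recent call last): ...")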
Code Example #8
    def set_arguments(self, command_args_text):
        """
        Adds "arguments help" to command.

        TODO: Use real list of arguments instead of this text when people migrate to argparse (Dj. 1.8)

        :param command_args_text: command text for args
        :type command_args_text str
        """
        assert bool(self.__command_element), "Not in a command"
        self.__command_element.setAttribute(
            "args",
            VersionAgnosticUtils().to_unicode(command_args_text))
Code Example #9
    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features etc.
        :param is_started true if test/feature/scenario is started
        :type is_started bool
        :param context behave context
        :type context behave.runner.Context
        :param element feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword),
                                          utils.to_unicode(element.name))
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, element.duration)
            elif element.status == 'failed':
                # The correct way is to use element.error_message,
                # but assertions do not have a trace there (due to Behave internals),
                # so we collect it manually
                trace = ""
                if isinstance(element.exception, AssertionError):
                    trace = u"".join([
                        utils.to_unicode(l)
                        for l in traceback.format_tb(element.exc_traceback)
                    ])

                error_message = utils.to_unicode(element.error_message)

                self._test_failed(step_name,
                                  error_message,
                                  trace,
                                  duration=element.duration)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(
                element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in [
                        'passed', 'failed'
                ]:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name,
                                      element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples),
                                      element.location)
        else:
            self._feature_or_scenario(is_started, element.name,
                                      element.location)
Code Example #10
File: parser.py Project: mrofiq/pycharm_helpers
def report_data(dumper):
    """
    Fetches data from management commands and reports it to dumper.

    :type dumper _xml.XmlDumper
    :param dumper: destination to report
    """
    utility = ManagementUtility()
    for command_name in get_commands().keys():
        try:
            command = utility.fetch_command(command_name)
        except Exception as e:
            sys.stderr.write("Error fetching command {0}: {1}\n".format(
                command_name, e))
            continue

        assert isinstance(command, BaseCommand)

        use_argparse = False
        try:
            use_argparse = command.use_argparse
        except AttributeError:
            pass

        try:
            parser = command.create_parser("", command_name)
        except Exception as e:
            sys.stderr.write("Error parsing command {0}: {1}\n".format(
                command_name, e))
            continue

        dumper.start_command(
            command_name=command_name,
            command_help_text=VersionAgnosticUtils().to_unicode(
                command.usage("")).replace("%prog", command_name))
        module_to_use = _argparse if use_argparse else _optparse  # Choose appropriate module: argparse, optparse
        module_to_use.process_command(dumper, command, parser)
        dumper.close_command()
Code Example #11
File: behave_runner.py Project: amaran1988/python3
    def __process_hook(self, is_started, context, element):
        """
        Hook to be installed. Reports steps, features etc.
        :param is_started true if test/feature/scenario is started
        :type is_started bool
        :param context behave context
        :type context behave.runner.Context
        :param element feature/suite/step
        """
        element.location.file = element.location.filename  # To preserve _bdd_utils contract
        utils = VersionAgnosticUtils()
        if isinstance(element, Step):
            # Process step
            step_name = u"{0} {1}".format(utils.to_unicode(element.keyword),
                                          utils.to_unicode(element.name))
            duration_ms = element.duration * 1000
            if is_started:
                self._test_started(step_name, element.location)
            elif element.status == 'passed':
                self._test_passed(step_name, duration_ms)
            elif element.status == 'failed':
                # The correct way is to use element.error_message,
                # but assertions do not have a trace there (due to Behave internals),
                # so we collect it manually
                error_message = element.error_message
                fetch_log = not error_message  # If no error_message provided, need to fetch log manually
                trace = ""
                if isinstance(element.exception, AssertionError):
                    trace = self._collect_trace(element, utils)

                # May be empty for some exceptions: https://github.com/behave/behave/issues/468
                if not trace and not error_message:
                    try:
                        error_message = traceback.format_exc()
                    except AttributeError:
                        # The exception may have an empty stack trace, in which case
                        # traceback.format_exc() throws AttributeError
                        trace = self._collect_trace(element, utils)
                if not error_message:
                    # Format exception as last resort
                    error_message = element.exception
                message_as_string = utils.to_unicode(error_message)
                if fetch_log and self.__real_runner.config.log_capture:
                    message_as_string += u"\n" + utils.to_unicode(
                        self.__real_runner.log_capture.getvalue())
                self._test_failed(step_name,
                                  message_as_string,
                                  trace,
                                  duration=duration_ms)
            elif element.status == 'undefined':
                self._test_undefined(step_name, element.location)
            else:
                self._test_skipped(step_name, element.status, element.location)
        elif not is_started and isinstance(
                element, Scenario) and element.status == 'failed':
            # To process scenarios with undefined/skipped tests
            for step in element.steps:
                assert isinstance(step, Step), step
                if step.status not in [
                        'passed', 'failed'
                ]:  # Something strange, probably skipped or undefined
                    self.__process_hook(False, context, step)
            self._feature_or_scenario(is_started, element.name,
                                      element.location)
        elif isinstance(element, ScenarioOutline):
            self._feature_or_scenario(is_started, str(element.examples),
                                      element.location)
        else:
            self._feature_or_scenario(is_started, element.name,
                                      element.location)