Example #1
    def assertResults(self, expected, actual):
        """
        Assert that output matches expected argument.

        This method has some special handling intended to ease testing of
        the output we get from :py:mod:`jig.output`.

        As an example, it can be used like this::

            self.assertResults(u'''
                ▾  Plugin 1

                ✓  commit

                Ran 1 plugin
                    Info 1 Warn 0 Stop 0
                ''', self.output)

        This method will automatically dedent and strip the expected string
        and remove any console formatting characters (escape sequences that
        set text colors or bold text) from the actual output.
        """
        expected = dedent(expected).strip()

        actual = strip_paint(actual.strip())

        self.assertEqual(expected, actual)
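Both sides of the comparison are normalized first: the expected string is
dedented and stripped so it can be written as an indented triple-quoted
literal, while the actual output is run through strip_paint to drop console
color codes. A minimal sketch of that normalization, assuming strip_paint
simply removes ANSI escape sequences (the regex below is illustrative, not
jig's actual implementation):

    import re
    from textwrap import dedent

    # Illustrative stand-in for jig's strip_paint: drop ANSI color escapes
    ANSI_ESCAPE_RE = re.compile(r'\x1b\[[0-9;]*m')

    def strip_paint(text):
        return ANSI_ESCAPE_RE.sub(u'', text)

    expected = dedent(u'''
        ✓  commit
        ''').strip()
    actual = strip_paint(u'\x1b[32m✓  commit\x1b[0m').strip()

    assert expected == actual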
Example #2
    def assertResultsIn(self, expected, actual):
        """
        Assert that the expected string appears within the actual output.
        """
        expected = dedent(expected).strip()

        actual = strip_paint(actual.strip())

        self.assertIn(expected, actual)
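Unlike assertResults, this variant only checks that the dedented expected
fragment appears somewhere in the paint-stripped actual output, which is
useful when only part of the console output matters. A small usage sketch
(OutputTestCase and the self.output attribute are hypothetical, for
illustration only):

    class PluginWarningTest(OutputTestCase):
        # OutputTestCase is an assumed base class providing assertResultsIn
        # and a self.output attribute holding captured console output

        def test_reports_warning(self):
            # Assert only on the fragment we care about; the plugin header
            # and summary lines around it are ignored
            self.assertResultsIn(u'''
                ⚠  line 5: debug print left in the code
                ''', self.output)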
Example #3
    def run(self, test_range=None):
        """
        Run the tests for this plugin.

        Returns a list of :py:class:`Result` objects which represent the
        results from the test run.

        :param list test_range: None or the parsed range from
            :py:func:`parse_range`
        """
        # Use an empty config, we are not going to save this to disk
        pm = PluginManager(SafeConfigParser())

        # Add the plugin we are testing
        pm.add(self.plugin_dir)

        # The instance of our plugin we will run the pre_commit test on
        plugin = pm.plugins[0]

        # Capture the default plugin config for resets while testing
        default_settings = plugin.config

        results = []

        for exp in self.expectations:
            # Make sure the range spans exactly one commit (end == start + 1)
            assert exp.range[1] == exp.range[0] + 1

            # Is this expectation in the specified test range?
            if test_range and (exp.range not in test_range):
                # Skip this one, it's not one of the tests we should be running
                continue

            # Update the plugin config (settings) if available
            if exp.settings:
                plugin.config = exp.settings
            else:
                plugin.config = default_settings

            # View to help us create the output
            view = ConsoleView(collect_output=True, exit_on_exception=False)

            # Get a GitDiffIndex object from the timeline diff for this
            # expectation's commit
            gdi = InstrumentedGitDiffIndex(
                self.timeline.repo.working_dir,
                self.timeline.diffs()[exp.range[0] - 1])

            # What is the numbered test directory representing our commit?
            wd = abspath(
                join(self.plugin_dir, PLUGIN_TESTS_DIRECTORY,
                     '{0:02d}'.format(exp.range[1])))

            with cwd_bounce(wd):
                # Patch up the filename to be within our numbered directory
                # instead of the Git repository
                gdi.replace_path = (self.timeline.repo.working_dir, wd)

                # Gather up the input to the plugin for logging
                stdin = json.dumps(
                    {'config': plugin.config, 'files': gdi},
                    indent=2, cls=PluginDataJSONEncoder)

                # Now run the actual pre_commit hook for this plugin
                res = plugin.pre_commit(gdi)
                # Break apart into its pieces
                retcode, stdout, stderr = res  # pragma: no branch

            try:
                # Is it JSON data?
                data = json.loads(stdout)
            except ValueError:
                # Not JSON
                data = stdout

            if retcode == 0:
                # Format the results according to what you normally see in the
                # console.
                view.print_results({plugin: (retcode, data, stderr)})
            else:
                results.append(FailureResult(
                    exp,
                    'Exit code: {0}\n\nStd out:\n{1}\n\nStd err:\n{2}'.format(
                        retcode, stdout or '(none)', stderr or '(none)'),
                    plugin))
                continue

            # Now remove the color character sequences to make things a little
            # easier to read, copy, and paste.
            actual = strip_paint(view._collect['stdout'].getvalue()
                                 or view._collect['stderr'].getvalue())

            # Also remove the summary and count at the end, these are not
            # really all that useful to test and just end up making the
            # expect.rst files overly verbose
            actual = RESULTS_SUMMARY_SIGNATURE_RE.sub('', actual)
            actual = RESULTS_SUMMARY_COUNT_RE.sub('', actual)

            resargs = (exp, actual, plugin, stdin, stdout)
            if actual.strip() != exp.output.strip():
                results.append(FailureResult(*resargs))
            else:
                results.append(SuccessResult(*resargs))

        return results
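A typical driver parses a commit range, hands it to run(), and then partitions
the returned Result objects. A minimal sketch, assuming the class defining
run() is exposed under a name like PluginTestRunner and that test_range is a
list of (start, end) tuples matching the membership check above (both are
assumptions, for illustration only):

    # PluginTestRunner is an assumed name for the class that defines run()
    runner = PluginTestRunner('/path/to/my-plugin')

    # Limit the run to the expectation covering commits 1 -> 2; the
    # (start, end) tuple shape mirrors the exp.range values run() compares
    results = runner.run(test_range=[(1, 2)])

    failures = [r for r in results if isinstance(r, FailureResult)]
    print('{0} passed, {1} failed'.format(
        len(results) - len(failures), len(failures)))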
Example #4
    def run(self, test_range=None):
        """
        Run the tests for this plugin.

        Returns a list of :py:class:`Result` objects which represent the
        results from the test run.

        :param list test_range: None or the parsed range from
            :py:func:`parse_range`
        """
        # Use an empty config, we are not going to save this to disk
        pm = PluginManager(SafeConfigParser())

        # Add the plugin we are testing
        pm.add(self.plugin_dir)

        # The instance of our plugin we will run the pre_commit test on
        plugin = pm.plugins[0]

        # Capture the default plugin config for resets while testing
        default_settings = plugin.config

        results = []

        for exp in self.expectations:
            # Make sure the range spans exactly one commit (end == start + 1)
            assert exp.range[1] == exp.range[0] + 1

            # Is this expectation in the specified test range?
            if test_range and (exp.range not in test_range):
                # Skip this one, it's not one of the tests we should be running
                continue

            # Update the plugin config (settings) if available
            if exp.settings:
                plugin.config = exp.settings
            else:
                plugin.config = default_settings

            # View to help us create the output
            view = ConsoleView(collect_output=True, exit_on_exception=False)

            # Get a GitDiffIndex object from the timeline diff for this
            # expectation's commit
            gdi = InstrumentedGitDiffIndex(
                self.timeline.repo.working_dir,
                self.timeline.diffs()[exp.range[0] - 1])

            # What is the numbered test directory representing our commit?
            wd = abspath(join(
                self.plugin_dir, PLUGIN_TESTS_DIRECTORY,
                '{0:02d}'.format(exp.range[1])))

            with cwd_bounce(wd):
                # Patch up the filename to be within our numbered directory
                # instead of the Git repository
                gdi.replace_path = (self.timeline.repo.working_dir, wd)

                # Gather up the input to the plugin for logging
                stdin = json.dumps({
                    'config': plugin.config,
                    'files': gdi},
                    indent=2, cls=PluginDataJSONEncoder)

                # Now run the actual pre_commit hook for this plugin
                res = plugin.pre_commit(gdi)
                # Break apart into its pieces
                retcode, stdout, stderr = res   # pragma: no branch

            try:
                # Is it JSON data?
                data = json.loads(stdout)
            except ValueError:
                # Not JSON
                data = stdout

            if retcode == 0:
                # Format the results according to what you normally see in the
                # console.
                view.print_results({plugin: (retcode, data, stderr)})
            else:
                results.append(FailureResult(
                    exp,
                    'Exit code: {0}\n\nStd out:\n{1}\n\nStd err:\n{2}'.format(
                        retcode, stdout or '(none)', stderr or '(none)'),
                    plugin))
                continue

            # Now remove the color character sequences to make things a little
            # easier to read, copy, and paste.
            actual = strip_paint(
                view._collect['stdout'].getvalue() or
                view._collect['stderr'].getvalue())

            # Also remove the summary and count at the end, these are not
            # really all that useful to test and just end up making the
            # expect.rst files overly verbose
            actual = RESULTS_SUMMARY_SIGNATURE_RE.sub('', actual)
            actual = RESULTS_SUMMARY_COUNT_RE.sub('', actual)

            resargs = (exp, actual, plugin, stdin, stdout)
            if actual.strip() != exp.output.strip():
                results.append(FailureResult(*resargs))
            else:
                results.append(SuccessResult(*resargs))

        return results