Example No. 1
def create_view():
    """
    Creates a view the command can use to output data.

    This function is kept separate from the :py:class:`BaseCommand` class to
    facilitate testing. By mocking it out, commands can use views that
    have been configured to collect output instead of sending it to the
    terminal.
    """
    return ConsoleView()
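This is what makes the pattern in Example No. 2 possible: a test patches create_view and hands back a view that buffers everything. A minimal sketch of the two attributes the rest of these examples rely on (both are set after construction, as in Examples No. 4 and 5):

view = create_view()
view.collect_output = True       # buffer output instead of printing it
view.exit_on_exception = False   # raise instead of calling sys.exit()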
Example No. 2
    def run_command(self, command=None):
        """
        Run a subcommand.
        """
        with patch('jig.commands.base.create_view') as cv:
            # We hijack the create_view function so we can tell it to collect
            # output and not exit on exception.
            view = ConsoleView()

            # Collect, don't print
            view.collect_output = True
            # Don't call sys.exit() on exception
            view.exit_on_exception = False

            cv.return_value = view

            # Keep a reference to this so output() and error() will work
            self.view = view

            return self.command(shlex.split(command or ''))
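A hedged usage sketch of this helper: CommandTestCase and PluginCommand are hypothetical stand-ins for the test base class and the Command subclass under test, and the _collect buffers are assumed to behave as in Example No. 3.

class TestPluginCommand(CommandTestCase):
    command = PluginCommand  # run_command() calls this with the parsed args

    def test_lists_plugins(self):
        self.run_command('list')

        # Output was collected on self.view instead of printed, so the
        # test can inspect it after the command returns
        output = self.view._collect['stdout'].getvalue()
        self.assertTrue(output)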
Example No. 3
    def run(self, test_range=None):
        """
        Run the tests for this plugin.

        Returns a list of :py:class:`Result` objects which represent the
        results from the test run.

        :param list test_range: None or the parsed range from
            :py:func:`parse_range`
        """
        # Use an empty config; we are not going to save this to disk
        pm = PluginManager(SafeConfigParser())

        # Add the plugin we are testing
        pm.add(self.plugin_dir)

        # The instance of our plugin we will run the pre_commit test on
        plugin = pm.plugins[0]

        # Capture the default plugin config for resets while testing
        default_settings = plugin.config

        results = []

        for exp in self.expectations:
            # Make sure the range spans exactly one commit (end is start + 1)
            assert exp.range[1] == exp.range[0] + 1

            # Is this expectation in the specified test range?
            if test_range and (exp.range not in test_range):
                # Skip this one, it's not one of the tests we should be running
                continue

            # Update the plugin config (settings) if available
            if exp.settings:
                plugin.config = exp.settings
            else:
                plugin.config = default_settings

            # View to help us create the output
            view = ConsoleView(collect_output=True, exit_on_exception=False)

            # Get a GitDiffIndex object from the timeline's diff for this commit
            gdi = InstrumentedGitDiffIndex(
                self.timeline.repo.working_dir,
                self.timeline.diffs()[exp.range[0] - 1])

            # What is the numbered test directory representing our commit?
            wd = abspath(
                join(self.plugin_dir, PLUGIN_TESTS_DIRECTORY,
                     '{0:02d}'.format(exp.range[1])))

            with cwd_bounce(wd):
                # Patch up the filename to be within our numbered directory
                # instead of the Git repository
                gdi.replace_path = (self.timeline.repo.working_dir, wd)

                # Gather up the input to the plugin for logging
                stdin = json.dumps({
                    'config': plugin.config,
                    'files': gdi},
                    indent=2, cls=PluginDataJSONEncoder)

                # Now run the actual pre_commit hook for this plugin
                res = plugin.pre_commit(gdi)
                # Break apart into its pieces
                retcode, stdout, stderr = res  # pragma: no branch

            try:
                # Is it JSON data?
                data = json.loads(stdout)
            except ValueError:
                # Not JSON
                data = stdout

            if retcode == 0:
                # Format the results according to what you normally see in the
                # console.
                view.print_results({plugin: (retcode, data, stderr)})
            else:
                results.append(FailureResult(
                    exp,
                    'Exit code: {0}\n\nStd out:\n{1}\n\nStd err:\n{2}'.format(
                        retcode, stdout or '(none)', stderr or '(none)'),
                    plugin))
                continue

            # Now remove the color character sequences to make things a little
            # easier to read, copy, and paste.
            actual = strip_paint(
                view._collect['stdout'].getvalue() or
                view._collect['stderr'].getvalue())

            # Also remove the summary and count at the end, these are not
            # really all that useful to test and just end up making the
            # expect.rst files overly verbose
            actual = RESULTS_SUMMARY_SIGNATURE_RE.sub('', actual)
            actual = RESULTS_SUMMARY_COUNT_RE.sub('', actual)

            resargs = (exp, actual, plugin, stdin, stdout)
            if actual.strip() != exp.output.strip():
                results.append(FailureResult(*resargs))
            else:
                results.append(SuccessResult(*resargs))

        return results
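The data handed to print_results() above can take several shapes, as the tests in Example No. 5 exercise. A sketch inferred from those tests; FakePlugin is a placeholder for the tests' MockPlugin, and the jig.output import path is an assumption:

from collections import OrderedDict

from jig.output import ConsoleView  # import path assumed


class FakePlugin(object):
    """Placeholder for the tests' MockPlugin; only a name is needed here."""
    def __init__(self, name):
        self.name = name


view = ConsoleView()
view.collect_output = True      # buffer instead of printing
view.exit_on_exception = False  # raise instead of sys.exit()

results = OrderedDict()
results[FakePlugin('Plugin 1')] = (0, 'commit', '')    # commit-wide message
results[FakePlugin('Plugin 2')] = (0, ['a', 'b'], '')  # list of messages
results[FakePlugin('Plugin 3')] = (0, {u'a.txt': [[None, u'w', 'file']]}, '')  # file warning
results[FakePlugin('Plugin 4')] = (0, {u'a.txt': [[1, 's', 'stop']]}, '')      # line stop
results[FakePlugin('Plugin 5')] = (1, '', 'boom')      # non-zero exit reports an error

# Returns (info, warn, stop) counts, matching the assertions in Example No. 5
counts = view.print_results(results)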
Example No. 4
    def setUp(self):
        self.view = ConsoleView()

        self.view.collect_output = True
        self.view.exit_on_exception = False
Example No. 5
class TestConsoleView(ViewTestCase):
    """
    With plugin results we can format them to the console.
    """
    def setUp(self):
        self.view = ConsoleView()

        self.view.collect_output = True
        self.view.exit_on_exception = False

    def test_error(self):
        """
        The plugin exits with something other than 0.
        """
        plugin = MockPlugin()
        plugin.name = 'Plugin 1'

        counts = self.view.print_results({
            plugin: (1, '', 'An error occurred')})

        self.assertEqual((0, 0, 0), counts)
        self.assertResults(u'''
            ▾  Plugin 1

            ✕  An error occurred

            {0}  Jig ran 1 plugin
                Info 0 Warn 0 Stop 0
                (1 plugin reported errors)
            '''.format(ATTENTION), self.output)

    def test_commit_specific_message(self):
        """
        Messages generalized for the entire commit.
        """
        plugin = MockPlugin()
        plugin.name = 'Plugin 1'

        counts = self.view.print_results({
            plugin: (0, 'commit', '')})

        self.assertEqual((1, 0, 0), counts)
        self.assertResults(u"""
            ▾  Plugin 1

            ✓  commit

            {0}  Jig ran 1 plugin
                Info 1 Warn 0 Stop 0
            """.format(ATTENTION), self.output)

    def test_file_specific_message(self):
        """
        Messages specific to the file being committed.
        """
        plugin = MockPlugin()
        plugin.name = 'Plugin 1'

        counts = self.view.print_results({
            plugin: (0, {u'a.txt': [[None, u'w', 'file']]}, '')})

        self.assertEqual((0, 1, 0), counts)
        self.assertResults(u"""
            ▾  Plugin 1

            ⚠  a.txt
                file

            {0}  Jig ran 1 plugin
                Info 0 Warn 1 Stop 0
            """.format(ATTENTION), self.output)

    def test_line_specific_message(self):
        """
        Messages specific to a single line.
        """
        plugin = MockPlugin()
        plugin.name = 'Plugin 1'

        counts = self.view.print_results({
            plugin: (0, {u'a.txt': [[1, 's', 'stop']]}, '')})

        self.assertEqual((0, 0, 1), counts)
        self.assertResults(u"""
            ▾  Plugin 1

            ✕  line 1: a.txt
                stop

            {0}  Jig ran 1 plugin
                Info 0 Warn 0 Stop 1
            """.format(EXPLODE), self.output)

    def test_two_plugins(self):
        """
        Formats messages (more than one) correctly.
        """
        plugin1 = MockPlugin()
        plugin1.name = 'Plugin 1'

        plugin2 = MockPlugin()
        plugin2.name = 'Plugin 2'

        results = OrderedDict()

        results[plugin1] = (0, ['a', 'b'], '')
        results[plugin2] = (0, ['a', 'b'], '')

        counts = self.view.print_results(results)

        self.assertEqual((4, 0, 0), counts)
        self.assertResults(u"""
            ▾  Plugin 1

            ✓  a

            ✓  b

            ▾  Plugin 2

            ✓  a

            ✓  b

            {0}  Jig ran 2 plugins
                Info 4 Warn 0 Stop 0
            """.format(ATTENTION), self.output)
Example No. 6
    def __init__(self, view=None):
        self.view = view or ConsoleView()
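Because the view is injectable here, tests can also skip the patching dance from Example No. 2 and pass a collecting view in directly. A minimal sketch; SomeCommand is a hypothetical class that defines this __init__:

view = ConsoleView()
view.collect_output = True      # buffer output instead of printing it
view.exit_on_exception = False  # raise instead of calling sys.exit()

command = SomeCommand(view=view)
# command.view now writes to in-memory buffers rather than the terminal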