Example #1
    def repo_from_fixture(self, repo_name):
        """
        Creates a ``git.Repo`` from the given fixture.

        The fixture should be a directory containing numbered directories
        suitable for creating a ``NumberedDirectoriesToGit``.

        Returns a tuple of 3 objects: repo, working_dir, diffs.
        """
        ndgit = NumberedDirectoriesToGit(
            join(self.fixturesdir, repo_name))

        repo = ndgit.repo

        return (repo, repo.working_dir, ndgit.diffs())
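
For illustration, a test method might consume this helper as in the sketch below; the fixture name 'repo01' and the assertion are assumptions, not part of the project:

    def test_repo_from_fixture(self):
        # 'repo01' is a hypothetical fixture directory of numbered
        # sub-directories under self.fixturesdir
        repo, working_dir, diffs = self.repo_from_fixture('repo01')

        # The working directory belongs to the generated git.Repo
        self.assertEqual(repo.working_dir, working_dir)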
Example #2
    def __init__(self, plugin_dir):
        self.plugin_dir = plugin_dir
        self.timeline = None
        self.expectations = None

        try:
            test_directory = join(plugin_dir, PLUGIN_TESTS_DIRECTORY)
            self.timeline = NumberedDirectoriesToGit(test_directory)
        except ValueError:
            raise ExpectationNoTests(
                'Could not find any tests: {0}.'.format(test_directory))

        try:
            expect_filename = join(
                plugin_dir, PLUGIN_TESTS_DIRECTORY,
                PLUGIN_EXPECTATIONS_FILENAME)

            with open(expect_filename, 'r', CODEC) as fh:
                expectation_text = fh.read()   # pragma: no branch

            self.expectations = list(get_expectations(expectation_text))
        except (IOError, OSError):
            raise ExpectationFileNotFound(
                'Missing expectation file: {0}.'.format(expect_filename))
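
This constructor belongs to PluginTestRunner (shown in full in Example #5). Note that the three-argument open(expect_filename, 'r', CODEC) call reads like codecs.open rather than the builtin open, which would treat CODEC as a buffering value; that is an inference from the call signature, since the imports are not shown. A hedged sketch of how a caller might handle the two failure modes, with a hypothetical plugin path:

try:
    runner = PluginTestRunner('/path/to/my-plugin')  # hypothetical path
except ExpectationNoTests:
    # No tests directory of numbered commit directories was found
    print('The plugin ships no tests.')
except ExpectationFileNotFound:
    # The tests exist but the expectations file is missing
    print('The plugin has tests but no expectation file.')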
Example #3
    def get_nd2g(self, name):
        """
        Gets a ``NumberedDirectoriesToGit`` object.

        Where ``name`` is the basename of a directory in
        :file:`src/jig/tests/fixtures/numbereddirs`.
        """
        nd = join(dirname(__file__), 'fixtures', 'numbereddirs', name)

        return NumberedDirectoriesToGit(nd)
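
A sketch of how a test might lean on this helper; the fixture name 'group-a' is hypothetical, and the commit count check simply uses GitPython's Repo.iter_commits():

    def test_nd2g_builds_history(self):
        # 'group-a' would be a directory under fixtures/numbereddirs
        nd2g = self.get_nd2g('group-a')

        # Each numbered directory should have become a commit
        commits = list(nd2g.repo.iter_commits())
        self.assertTrue(len(commits) >= 1)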
Example #4
    def test_update_existing_plugins(self):
        """
        Can update an existing plugin.
        """
        # Make our remote repository so we have something to pull from
        origin_repo = mkdtemp()
        root_commit_dir = join(origin_repo, '01')
        makedirs(root_commit_dir)

        # Create a plugin in the repo
        create_plugin(root_commit_dir, template='python', bundle='a', name='a')
        create_plugin(root_commit_dir, template='python', bundle='b', name='b')

        # This is the directory we will clone
        ngd = NumberedDirectoriesToGit(origin_repo)
        dir_to_clone = ngd.repo.working_dir

        # This is a trick: when asked to install the plugin, we hand it dir_to_clone
        def clone_local(plugin, to_dir, branch):
            # Instead of going out to the Internet to clone this, we use the
            # local numbered-directory repository we set up above. The update
            # can then happen with a git pull and no network traffic, which
            # keeps the tests fast.
            clone(dir_to_clone, to_dir)

        # The first thing is to install the plugin
        with patch('jig.commands.base.clone') as c:
            c.side_effect = clone_local

            self.run_command('add --gitrepo {0} http://repo'.format(
                self.gitrepodir))

        self.run_command('update --gitrepo {0}'.format(self.gitrepodir))

        self.assertResults(
            """
            Updating plugins

            Plugin a, b in bundle a, b
                Already up-to-date.""", self.output)
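
The patching trick in this test boils down to mock.patch plus side_effect. The patch target 'jig.commands.base.clone' is taken from the test above; the recording stand-in below is only a sketch of the mechanism, assuming the standalone mock package used in these tests:

from mock import patch

cloned = []

def fake_clone(url, to_dir, branch=None):
    # Record the call instead of touching the network
    cloned.append((url, to_dir, branch))

# Same patch target as in the test above
with patch('jig.commands.base.clone') as mocked_clone:
    mocked_clone.side_effect = fake_clone

    # Anything inside this block that calls jig.commands.base.clone runs
    # fake_clone; calling the mock directly demonstrates the effect
    mocked_clone('http://repo', '/tmp/plugins/a', 'master')

assert cloned == [('http://repo', '/tmp/plugins/a', 'master')]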
Example #5
class PluginTestRunner(object):
    """
    Run tests to verify a plugin functions as expected.

    """
    def __init__(self, plugin_dir):
        self.plugin_dir = plugin_dir
        self.timeline = None
        self.expectations = None

        try:
            test_directory = join(plugin_dir, PLUGIN_TESTS_DIRECTORY)
            self.timeline = NumberedDirectoriesToGit(test_directory)
        except ValueError:
            raise ExpectationNoTests(
                'Could not find any tests: {0}.'.format(test_directory))

        try:
            expect_filename = join(plugin_dir, PLUGIN_TESTS_DIRECTORY,
                                   PLUGIN_EXPECTATIONS_FILENAME)

            with open(expect_filename, 'r', CODEC) as fh:
                expectation_text = fh.read()  # pragma: no branch

            self.expectations = list(get_expectations(expectation_text))
        except (IOError, OSError):
            raise ExpectationFileNotFound(
                'Missing expectation file: {0}.'.format(expect_filename))

    def run(self, test_range=None):
        """
        Run the tests for this plugin.

        Returns a list of :py:class:`Result` objects which represent the
        results from the test run.

        :param list test_range: None or the parsed range from
            :func:`parse_range`
        """
        # Use an empty config; we are not going to save this to disk
        pm = PluginManager(SafeConfigParser())

        # Add the plugin we are testing
        pm.add(self.plugin_dir)

        # The instance of our plugin we will run the pre_commit test on
        plugin = pm.plugins[0]

        # Capture the default plugin config for resets while testing
        default_settings = plugin.config

        results = []

        for exp in self.expectations:
            # Each expectation should span exactly one commit (end == start + 1)
            assert exp.range[1] == exp.range[0] + 1

            # Is this expectation in the specified test range?
            if test_range and (exp.range not in test_range):
                # Skip this one; it's not one of the tests we should be running
                continue

            # Update the plugin config (settings) if available
            if exp.settings:
                plugin.config = exp.settings
            else:
                plugin.config = default_settings

            # View to help us create the output
            view = ConsoleView(collect_output=True, exit_on_exception=False)

            # Get a GitDiffIndex object from the diff between the two
            # numbered directories in this range
            gdi = InstrumentedGitDiffIndex(
                self.timeline.repo.working_dir,
                self.timeline.diffs()[exp.range[0] - 1])

            # Which numbered test directory represents the result of this commit?
            wd = abspath(
                join(self.plugin_dir, PLUGIN_TESTS_DIRECTORY,
                     '{0:02d}'.format(exp.range[1])))

            with cwd_bounce(wd):
                # Patch up the filename to be within our numbered directory
                # instead of the Git repository
                gdi.replace_path = (self.timeline.repo.working_dir, wd)

                # Gather up the input to the plugin for logging
                stdin = json.dumps(
                    {'config': plugin.config, 'files': gdi},
                    indent=2, cls=PluginDataJSONEncoder)

                # Now run the actual pre_commit hook for this plugin
                res = plugin.pre_commit(gdi)
                # Break apart into its pieces
                retcode, stdout, stderr = res  # pragma: no branch

            try:
                # Is it JSON data?
                data = json.loads(stdout)
            except ValueError:
                # Not JSON
                data = stdout

            if retcode == 0:
                # Format the results according to what you normally see in the
                # console.
                view.print_results({plugin: (retcode, data, stderr)})
            else:
                results.append(FailureResult(
                    exp,
                    'Exit code: {0}\n\nStd out:\n{1}\n\nStd err:\n{2}'.format(
                        retcode, stdout or '(none)', stderr or '(none)'),
                    plugin))
                continue

            # Now remove the color character sequences to make things a little
            # easier to read, copy, and paste.
            actual = strip_paint(view._collect['stdout'].getvalue()
                                 or view._collect['stderr'].getvalue())

            # Also remove the summary and count at the end; they are not
            # useful to test and only make the expect.rst files overly verbose
            actual = RESULTS_SUMMARY_SIGNATURE_RE.sub('', actual)
            actual = RESULTS_SUMMARY_COUNT_RE.sub('', actual)

            resargs = (exp, actual, plugin, stdin, stdout)
            if actual.strip() != exp.output.strip():
                results.append(FailureResult(*resargs))
            else:
                results.append(SuccessResult(*resargs))

        return results
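
To close the loop, a minimal driver for the class might look like the sketch below; the plugin path is hypothetical, and the expectations file name expect.rst comes from the comments inside run():

# Hypothetical plugin directory; construction may raise the exceptions
# handled in the sketch after Example #2
runner = PluginTestRunner('/path/to/my-plugin')

# Run every expectation parsed from the plugin's expect.rst
results = runner.run()

for result in results:
    # run() only ever appends SuccessResult or FailureResult instances;
    # report which one we got without assuming any further attributes
    print(type(result).__name__)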