def test_symlinks(self):
    """
    Symlinks are ignored because they are not real files.

    Builds a repository with three real text files, stages a symlink
    alongside them, and asserts that GitDiffIndex reports no files for
    the resulting diff (the symlink must not be treated as content).
    """
    # Seed the repository with three committed text files
    self.commit(self.gitrepodir, 'text/a.txt', 'a')
    self.commit(self.gitrepodir, 'text/b.txt', 'b')
    self.commit(self.gitrepodir, 'text/c.txt', 'c')

    # Create the symlink that should be ignored by GitDiffIndex;
    # symlink() takes paths relative to the CWD, hence the bounce
    with cwd_bounce(self.gitrepodir):
        symlink('text', 'also_text')

    # We have to do this without our testcase since it's a special
    # situation: stage the symlink directly through GitPython.
    repo = Repo(self.gitrepodir)
    repo.git.add('also_text')

    # The symlink is staged, time to convert the diff
    gdi = GitDiffIndex(self.gitrepodir, repo.head.commit.diff())

    # If we ignored the symlink, which we should, there should be no files
    self.assertEqual(0, len(list(gdi.files())))
def wrapper(testcase, *args, **kwargs):
    """
    Invoke the wrapped test method with the CWD switched to the
    testcase's Git repository directory.

    ``func`` is the decorated callable captured from the enclosing
    decorator scope. The original wrapper dropped ``func``'s return
    value; propagate it so the decorator is transparent to callers
    (backward-compatible — test methods previously returned None).
    """
    with cwd_bounce(testcase.gitrepodir):
        return func(testcase, *args, **kwargs)
def run(self, test_range=None):
    """
    Run the tests for this plugin.

    Returns a list of :py:class:`Result` objects which represent the
    results from the test run.

    :param list test_range: None or the parsed range from
        :function:`parse_range`
    """
    # Use an empty config, we are not going to save this to disk
    pm = PluginManager(SafeConfigParser())

    # Add the plugin we are testing
    pm.add(self.plugin_dir)

    # The instance of our plugin we will run the pre_commit test on
    plugin = pm.plugins[0]

    # Capture the default plugin config for resets while testing
    default_settings = plugin.config

    results = []

    for exp in self.expectations:
        # Make sure that the range is off by 1
        assert exp.range[1] == exp.range[0] + 1

        # Is this expectation in the specified test range?
        if test_range and (exp.range not in test_range):
            # Skip this one, it's not one of the tests we should be running
            continue

        # Update the plugin config (settings) if available
        if exp.settings:
            plugin.config = exp.settings
        else:
            # Reset to the captured defaults between expectations
            plugin.config = default_settings

        # View to help us create the output
        view = ConsoleView(collect_output=True, exit_on_exception=False)

        # Get a GitDiffIndex object from
        # the timeline commit matching this expectation's range
        gdi = InstrumentedGitDiffIndex(
            self.timeline.repo.working_dir,
            self.timeline.diffs()[exp.range[0] - 1])

        # What is the numbered test directory representing our commit?
        wd = abspath(
            join(self.plugin_dir, PLUGIN_TESTS_DIRECTORY,
                 '{0:02d}'.format(exp.range[1])))

        with cwd_bounce(wd):
            # Patch up the filename to be within our numbered directory
            # instead of the Git repository
            gdi.replace_path = (self.timeline.repo.working_dir, wd)

            # Gather up the input to the plugin for logging
            stdin = json.dumps({
                'config': plugin.config,
                'files': gdi
            }, indent=2, cls=PluginDataJSONEncoder)

            # Now run the actual pre_commit hook for this plugin
            res = plugin.pre_commit(gdi)

        # Break apart into its pieces
        retcode, stdout, stderr = res   # pragma: no branch

        try:
            # Is it JSON data?
            data = json.loads(stdout)
        except ValueError:
            # Not JSON
            data = stdout

        if retcode == 0:
            # Format the results according to what you normally see in the
            # console.
            view.print_results({plugin: (retcode, data, stderr)})
        else:
            # Non-zero exit means the hook itself failed; record it and
            # skip output comparison for this expectation
            results.append(
                FailureResult(
                    exp,
                    'Exit code: {0}\n\nStd out:\n{1}\n\nStd err:\n{2}'.
                    format(retcode, stdout or '(none)', stderr or '(none)'),
                    plugin))
            continue

        # Now remove the color character sequences to make things a little
        # easier to read, copy, and paste.
        actual = strip_paint(view._collect['stdout'].getvalue() or
                             view._collect['stderr'].getvalue())

        # Also remove the summary and count at the end, these are not
        # really all that useful to test and just end up making the
        # expect.rst files overly verbose
        actual = RESULTS_SUMMARY_SIGNATURE_RE.sub('', actual)
        actual = RESULTS_SUMMARY_COUNT_RE.sub('', actual)

        resargs = (exp, actual, plugin, stdin, stdout)

        # Compare the cleaned console output to the expected output
        if actual.strip() != exp.output.strip():
            results.append(FailureResult(*resargs))
        else:
            results.append(SuccessResult(*resargs))

    return results
def run(self, test_range=None):
    """
    Run the tests for this plugin.

    Returns a list of :py:class:`Result` objects which represent the
    results from the test run.

    :param list test_range: None or the parsed range from
        :function:`parse_range`
    """
    # NOTE(review): this appears to duplicate the run() method defined
    # earlier in this file (only the line wrapping differs) — confirm
    # which revision is canonical before keeping both.

    # Use an empty config, we are not going to save this to disk
    pm = PluginManager(SafeConfigParser())

    # Add the plugin we are testing
    pm.add(self.plugin_dir)

    # The instance of our plugin we will run the pre_commit test on
    plugin = pm.plugins[0]

    # Capture the default plugin config for resets while testing
    default_settings = plugin.config

    results = []

    for exp in self.expectations:
        # Make sure that the range is off by 1
        assert exp.range[1] == exp.range[0] + 1

        # Is this expectation in the specified test range?
        if test_range and (exp.range not in test_range):
            # Skip this one, it's not one of the tests we should be running
            continue

        # Update the plugin config (settings) if available
        if exp.settings:
            plugin.config = exp.settings
        else:
            # Reset to the captured defaults between expectations
            plugin.config = default_settings

        # View to help us create the output
        view = ConsoleView(collect_output=True, exit_on_exception=False)

        # Get a GitDiffIndex object from
        # the timeline commit matching this expectation's range
        gdi = InstrumentedGitDiffIndex(
            self.timeline.repo.working_dir,
            self.timeline.diffs()[exp.range[0] - 1])

        # What is the numbered test directory representing our commit?
        wd = abspath(join(
            self.plugin_dir,
            PLUGIN_TESTS_DIRECTORY,
            '{0:02d}'.format(exp.range[1])))

        with cwd_bounce(wd):
            # Patch up the filename to be within our numbered directory
            # instead of the Git repository
            gdi.replace_path = (self.timeline.repo.working_dir, wd)

            # Gather up the input to the plugin for logging
            stdin = json.dumps({
                'config': plugin.config,
                'files': gdi}, indent=2, cls=PluginDataJSONEncoder)

            # Now run the actual pre_commit hook for this plugin
            res = plugin.pre_commit(gdi)

        # Break apart into its pieces
        retcode, stdout, stderr = res   # pragma: no branch

        try:
            # Is it JSON data?
            data = json.loads(stdout)
        except ValueError:
            # Not JSON
            data = stdout

        if retcode == 0:
            # Format the results according to what you normally see in the
            # console.
            view.print_results({plugin: (retcode, data, stderr)})
        else:
            # Non-zero exit means the hook itself failed; record it and
            # skip output comparison for this expectation
            results.append(FailureResult(
                exp,
                'Exit code: {0}\n\nStd out:\n{1}\n\nStd err:\n{2}'.format(
                    retcode, stdout or '(none)', stderr or '(none)'),
                plugin))
            continue

        # Now remove the color character sequences to make things a little
        # easier to read, copy, and paste.
        actual = strip_paint(
            view._collect['stdout'].getvalue() or
            view._collect['stderr'].getvalue())

        # Also remove the summary and count at the end, these are not
        # really all that useful to test and just end up making the
        # expect.rst files overly verbose
        actual = RESULTS_SUMMARY_SIGNATURE_RE.sub('', actual)
        actual = RESULTS_SUMMARY_COUNT_RE.sub('', actual)

        resargs = (exp, actual, plugin, stdin, stdout)

        # Compare the cleaned console output to the expected output
        if actual.strip() != exp.output.strip():
            results.append(FailureResult(*resargs))
        else:
            results.append(SuccessResult(*resargs))

    return results