def test_display_vector_field(file_a=_file_a, file_b=_file_b, test_file=_test_file):
    """Regression test: run a PIV analysis on an image pair, plot the
    resulting vector field on top of the first frame, and compare the
    rendered figure against the stored baseline image ``test_file``."""
    frame_a = imread(file_a)
    frame_b = imread(file_b)

    # Fixed PIV analysis parameters for a reproducible field.
    window_size, overlap, search_area_size = 32, 16, 40

    piv_u, piv_v, _ = extended_search_area_piv(
        frame_a,
        frame_b,
        window_size,
        search_area_size=search_area_size,
        overlap=overlap,
        correlation_method='circular',
        normalized_correlation=False,
    )
    grid_x, grid_y = get_coordinates(
        frame_a.shape, search_area_size=search_area_size, overlap=overlap)
    grid_x, grid_y, piv_u, piv_v = transform_coordinates(
        grid_x, grid_y, piv_u, piv_v)

    # Flag a single vector as invalid so the invalid-vector styling is
    # exercised by the plot.
    invalid_mask = np.zeros_like(grid_x)
    invalid_mask[-1, 1] = 1  # test of invalid vector plot
    save(grid_x, grid_y, piv_u, piv_v, invalid_mask, 'tmp.txt')

    fig, ax = plt.subplots(figsize=(6, 6))
    display_vector_field('tmp.txt', on_img=True, image_name=file_a, ax=ax)
    decorators.remove_ticks_and_titles(fig)
    fig.savefig('./tmp.png')

    # compare_images returns None when the images match within tolerance.
    assert compare.compare_images('./tmp.png', test_file, 0.001) is None
def compare_mpl_plots(fig, force_update=False):
    """
    Compare a matplotlib figure with a per-test baseline image.

    The baseline is an image inside ``mpl_images`` next to the running
    test module, named after the test (derived from the
    ``PYTEST_CURRENT_TEST`` environment variable).  The baseline is
    created automatically if it doesn't exist, but the test still fails
    so the generated image can be reviewed before it becomes the truth.

    :param fig: The produced figure you want to check for regressions
    :param force_update: Handy during development.  Setting this to True
        will always fail the test, but the baseline image will be
        updated, so you can easily iterate over the way you want the
        plotting function called.
    """
    # PYTEST_CURRENT_TEST looks like "dir/test_mod.py::test_name (call)".
    test_name = os.environ["PYTEST_CURRENT_TEST"].replace(
        " (call)", "").replace("::", "__").replace(".py", "")
    test_dir = os.path.dirname(test_name)
    test_name = os.path.basename(test_name)
    baseline_dir = os.path.join(test_dir, "mpl_images")
    baseline = os.path.join(baseline_dir, test_name) + ".png"

    remove_ticks_and_titles(fig)

    if not os.path.isfile(baseline) or force_update:
        # Robustness fix: ensure mpl_images exists before writing into it
        # (first run in a fresh checkout would otherwise crash in savefig).
        os.makedirs(baseline_dir, exist_ok=True)
        fig.savefig(baseline)
        pytest.fail(
            "Baseline image not found. We've created it. Running this test again should succeed"
        )

    # delete=False so the path can be re-opened by savefig/compare_images
    # (on Windows an open NamedTemporaryFile cannot be opened again).
    with tempfile.NamedTemporaryFile(mode="wb+", suffix=".png", delete=False) as fp:
        fig.savefig(fp.name)

    msg = compare_images(baseline, fp.name, tol=2)
    if msg is None:
        # BUGFIX: the temporary PNG used to be leaked on every run.
        # Remove it on success; keep it on failure for inspection.
        os.remove(fp.name)
    assert msg is None, "Images don't match"
def wrapper(*args, **kwargs):
    """Run the wrapped animation-producing test and compare the result
    against the baseline frames, closing all figures afterwards.

    ``func``, ``remove_text``, ``baseline_images``, ``format``,
    ``nframes`` and ``tol`` are free variables captured from the
    enclosing decorator (not visible in this chunk).
    """
    anim = func(*args, **kwargs)
    if remove_text:
        # NOTE(review): assumes the first registered figure belongs to this
        # test; a figure left open by a previous test would be picked up
        # instead (the sibling variant calls plt.close('all') up front) —
        # confirm figures are closed between tests.
        fignum = plt.get_fignums()[0]
        fig = plt.figure(fignum)
        remove_ticks_and_titles(fig)
    try:
        # NOTE(review): the closure variable `format` shadows the builtin;
        # it cannot be renamed here without touching the enclosing scope.
        _compare_animation(anim, baseline_images, format, nframes, tol)
    finally:
        # Always clean up figures, even if the comparison raises.
        plt.close('all')
def item_function_wrapper(*args, **kwargs):
    """Run the wrapped test and either generate baseline data (image
    and/or hash) or compare the returned figure against it.

    Free variables from the enclosing scope: ``self`` (plugin instance),
    ``item`` (pytest item), ``compare`` (the mpl_image_compare marker),
    ``original`` (the wrapped test callable), ``style``, ``backend`` and
    ``remove_text``.
    """
    with plt.style.context(style, after_reset=True), switch_backend(backend):
        # Run test and get figure object
        if inspect.ismethod(original):  # method
            # In some cases, for example if setup_method is used,
            # original appears to belong to an instance of the test
            # class that is not the same as args[0], and args[0] is the
            # one that has the correct attributes set up from setup_method
            # so we ignore original.__self__ and use args[0] instead.
            fig = original.__func__(*args, **kwargs)
        else:  # function
            fig = original(*args, **kwargs)

        if remove_text:
            remove_ticks_and_titles(fig)

        # What we do now depends on whether we are generating the
        # reference images or simply running the test.
        if self.generate_dir is not None:
            self.generate_baseline_image(item, fig)

        if self.generate_hash_library is not None:
            hash_name = self.generate_test_name(item)
            self._generated_hash_library[
                hash_name] = self.generate_image_hash(item, fig)

        # Only test figures if we are not generating hashes or images
        if self.generate_dir is None and self.generate_hash_library is None:
            result_dir = self.make_test_results_dir(item)

            # Compare to hash library
            if self.hash_library or compare.kwargs.get(
                    'hash_library', None):
                msg = self.compare_image_to_hash_library(
                    item, fig, result_dir)

            # Compare against a baseline if specified
            else:
                msg = self.compare_image_to_baseline(
                    item, fig, result_dir)

            close_mpl_figure(fig)

            # On success the per-test results directory is discarded;
            # on mismatch the message from the comparison fails the test.
            if msg is None:
                shutil.rmtree(result_dir)
            else:
                pytest.fail(msg, pytrace=False)

        # NOTE(review): when the comparison branch ran, the figure was
        # already closed above; closing a second time appears harmless,
        # and this call presumably exists for the generate-only paths —
        # confirm against close_mpl_figure's implementation.
        close_mpl_figure(fig)
def test_gettightbbox():
    """Check the tight bounding box of a figure holding a zoomed inset
    axes connected to its parent via ``mark_inset``."""
    fig, parent_ax = plt.subplots(figsize=(8, 6))
    parent_ax.plot([1, 2, 3], [0, 1, 0])
    inset_ax = zoomed_inset_axes(parent_ax, 4)
    inset_ax.plot([1, 2, 3], [0, 1, 0])
    mark_inset(parent_ax, inset_ax, loc1=1, loc2=3, fc="none", ec="0.3")
    remove_ticks_and_titles(fig)
    tight_bbox = fig.get_tightbbox(fig.canvas.get_renderer())
    np.testing.assert_array_almost_equal(
        tight_bbox.extents, [-17.7, -13.9, 7.2, 5.4])
def test_gettightbbox():
    """Verify that a figure with a marked, zoomed inset axes reports the
    expected tight bounding box extents."""
    fig, main_axes = plt.subplots(figsize=(8, 6))
    main_axes.plot([1, 2, 3], [0, 1, 0])

    zoom_axes = zoomed_inset_axes(main_axes, 4)
    zoom_axes.plot([1, 2, 3], [0, 1, 0])
    mark_inset(main_axes, zoom_axes, loc1=1, loc2=3, fc="none", ec='0.3')

    remove_ticks_and_titles(fig)

    renderer = fig.canvas.get_renderer()
    extents = fig.get_tightbbox(renderer).extents
    np.testing.assert_array_almost_equal(extents, [-17.7, -13.9, 7.2, 5.4])
def wrapper(*args, **kwargs):
    """Run the wrapped animation-producing test and compare the result
    against the baseline frames, isolating it from other tests' figures.

    ``func``, ``remove_text``, ``baseline_images``, ``fmt``, ``nframes``
    and ``tol`` are free variables captured from the enclosing decorator
    (not visible in this chunk).
    """
    # First close anything from previous tests
    plt.close('all')
    anim = func(*args, **kwargs)
    if remove_text:
        # After the close above, the first figure number is the one the
        # wrapped test created.
        fignum = plt.get_fignums()[0]
        fig = plt.figure(fignum)
        remove_ticks_and_titles(fig)
    try:
        _compare_animation(anim, baseline_images, fmt, nframes, tol)
    finally:
        # Always clean up figures, even if the comparison raises.
        plt.close('all')
def item_function_wrapper(*args, **kwargs):
    """Run the wrapped test, save the returned figure, and compare it to
    a baseline image (or generate the baseline when a generate dir is
    configured, skipping the test in that case).

    Free variables from the enclosing scope: ``self``, ``item``,
    ``compare`` (marker), ``original`` (wrapped test), ``style``,
    ``backend``, ``remove_text``, ``savefig_kwargs`` and ``tolerance``.
    """
    # Resolve the baseline location: either from the marker's
    # baseline_dir kwarg (possibly a remote http(s) URL) or from the
    # plugin-wide default next to the test module.
    baseline_dir = compare.kwargs.get('baseline_dir', None)
    if baseline_dir is None:
        if self.baseline_dir is None:
            baseline_dir = os.path.join(
                os.path.dirname(item.fspath.strpath), 'baseline')
        else:
            baseline_dir = self.baseline_dir
        baseline_remote = False
    else:
        baseline_remote = baseline_dir.startswith(
            ('http://', 'https://'))
        if not baseline_remote:
            # A relative marker-level dir is taken relative to the test
            # module's directory.
            baseline_dir = os.path.join(
                os.path.dirname(item.fspath.strpath), baseline_dir)

    with plt.style.context(style, after_reset=True), switch_backend(backend):
        # Run test and get figure object
        if inspect.ismethod(original):  # method
            # In some cases, for example if setup_method is used,
            # original appears to belong to an instance of the test
            # class that is not the same as args[0], and args[0] is the
            # one that has the correct attributes set up from setup_method
            # so we ignore original.__self__ and use args[0] instead.
            fig = original.__func__(*args, **kwargs)
        else:  # function
            fig = original(*args, **kwargs)

        if remove_text:
            remove_ticks_and_titles(fig)

        # Find test name to use as plot name
        filename = compare.kwargs.get('filename', None)
        if filename is None:
            filename = item.name + '.png'
            # Sanitize parametrized test names into a flat file name.
            filename = filename.replace('[', '_').replace(']', '_')
            filename = filename.replace('/', '_')
            filename = filename.replace('_.png', '.png')

        # What we do now depends on whether we are generating the
        # reference images or simply running the test.
        if self.generate_dir is None:
            # Save the figure
            result_dir = tempfile.mkdtemp(dir=self.results_dir)
            test_image = os.path.abspath(
                os.path.join(result_dir, filename))

            fig.savefig(test_image, **savefig_kwargs)
            close_mpl_figure(fig)

            # Find path to baseline image
            if baseline_remote:
                baseline_image_ref = _download_file(
                    baseline_dir, filename)
            else:
                baseline_image_ref = os.path.abspath(
                    os.path.join(os.path.dirname(item.fspath.strpath),
                                 baseline_dir, filename))

            if not os.path.exists(baseline_image_ref):
                pytest.fail(
                    "Image file not found for comparison test in: "
                    "\n\t{baseline_dir}"
                    "\n(This is expected for new tests.)\nGenerated Image: "
                    "\n\t{test}".format(baseline_dir=baseline_dir,
                                        test=test_image),
                    pytrace=False)

            # distutils may put the baseline images in non-accessible places,
            # copy to our tmpdir to be sure to keep them in case of failure
            baseline_image = os.path.abspath(
                os.path.join(result_dir, 'baseline-' + filename))
            shutil.copyfile(baseline_image_ref, baseline_image)

            msg = compare_images(baseline_image, test_image, tol=tolerance)

            # On success discard the per-test results dir; otherwise the
            # comparison message fails the test (results kept on disk).
            if msg is None:
                shutil.rmtree(result_dir)
            else:
                pytest.fail(msg, pytrace=False)
        else:
            # Generation mode: write the would-be baseline and skip.
            if not os.path.exists(self.generate_dir):
                os.makedirs(self.generate_dir)
            fig.savefig(
                os.path.abspath(
                    os.path.join(self.generate_dir, filename)),
                **savefig_kwargs)
            close_mpl_figure(fig)
            pytest.skip("Skipping test, since generating data")
def item_function_wrapper(*args, **kwargs):
    """Run the wrapped test (which may return a single figure or a list
    of figures), save the result(s), and compare against a baseline image
    — or generate the baseline and skip when a generate dir is set.

    Free variables from the enclosing scope: ``self``, ``item``,
    ``compare`` (marker), ``original`` (wrapped test), ``style``,
    ``backend``, ``remove_text``, ``savefig_kwargs`` and ``tolerance``.
    """
    baseline_dir = compare.kwargs.get('baseline_dir', None)
    if baseline_dir is None:
        if self.baseline_dir is None:
            baseline_dir = os.path.join(
                os.path.dirname(item.fspath.strpath), 'baseline')
        else:
            baseline_dir = self.baseline_dir
        # NOTE(review): this assignment is immediately overwritten by the
        # unconditional startswith() below — presumably a leftover from a
        # refactor; behaviour is unchanged because local defaults cannot
        # be remote URLs, but confirm.
        baseline_remote = False

    baseline_remote = baseline_dir.startswith(('http://', 'https://'))
    if not baseline_remote:
        # os.path.join with an absolute second argument returns it
        # unchanged, so already-absolute dirs are not double-joined.
        baseline_dir = os.path.join(
            os.path.dirname(item.fspath.strpath), baseline_dir)

    with plt.style.context(style, after_reset=True), switch_backend(backend):
        # Run test and get figure object
        if inspect.ismethod(original):  # method
            # In some cases, for example if setup_method is used,
            # original appears to belong to an instance of the test
            # class that is not the same as args[0], and args[0] is the
            # one that has the correct attributes set up from setup_method
            # so we ignore original.__self__ and use args[0] instead.
            fig_or_figures = original.__func__(*args, **kwargs)
        else:  # function
            fig_or_figures = original(*args, **kwargs)

        # A list return must contain only figure-like objects.
        if isinstance(fig_or_figures, list):
            is_fig = [hasattr(f, "savefig") for f in fig_or_figures]
            if not all(is_fig):
                raise ValueError(
                    "return value is not a list of figures")

        if remove_text:
            if isinstance(fig_or_figures, list):
                for f in fig_or_figures:
                    remove_ticks_and_titles(f)
            else:
                remove_ticks_and_titles(fig_or_figures)

        # Find test name to use as plot name
        filename = compare.kwargs.get('filename', None)
        if filename is None:
            # example to clarify file nameing:
            # tests/test_feature.py::test_works --> filename: tests__test_feature_py__test_works
            # test/test_feature.py::test_class::test_member01 --> filename tests__test_feature_py__test_class__test_member01
            subnames = item.nodeid.split('::')
            pathnames = subnames[0]
            pathnames = '__'.join(
                pathnames.replace('.', '_').split('/'))
            namespace = subnames[1:]
            namespace = '_'.join(namespace)
            filename = pathnames + '__' + namespace + '.png'
            filename = filename.replace('[', '_').replace(']', '_')
            filename = filename.replace('/', '_')
            filename = filename.replace('_.png', '.png')

        # What we do now depends on whether we are generating the
        # reference images or simply running the test.
        if self.generate_dir is None:
            # Save the figure
            result_dir = tempfile.mkdtemp(dir=self.results_dir)
            test_image = os.path.abspath(
                os.path.join(result_dir, filename))

            if isinstance(fig_or_figures, list):
                # Each figure gets an index-suffixed file name.
                for i, f in enumerate(fig_or_figures):
                    path, ext = os.path.splitext(test_image)
                    current_test_image = f'{path}_{i}{ext}'
                    f.savefig(current_test_image, **savefig_kwargs)
                    close_mpl_figure(f)
            else:
                fig_or_figures.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig_or_figures)

            # NOTE(review): in the list case nothing is saved at the
            # unsuffixed `test_image` path, yet the comparison below
            # reads that path — presumably the multi-figure branch was
            # only ever exercised in generation mode; confirm.

            # Find path to baseline image
            if baseline_remote:
                baseline_image_ref = _download_file(
                    baseline_dir, filename)
            else:
                baseline_image_ref = os.path.abspath(
                    os.path.join(os.path.dirname(item.fspath.strpath),
                                 baseline_dir, filename))

            if not os.path.exists(baseline_image_ref):
                pytest.fail(
                    "Image file not found for comparison test in: "
                    "\n\t{baseline_dir}"
                    "\n(This is expected for new tests.)\nGenerated Image: "
                    "\n\t{test}".format(baseline_dir=baseline_dir,
                                        test=test_image),
                    pytrace=False)

            # distutils may put the baseline images in non-accessible places,
            # copy to our tmpdir to be sure to keep them in case of failure
            baseline_image = os.path.abspath(
                os.path.join(result_dir, 'baseline-' + filename))
            shutil.copyfile(baseline_image_ref, baseline_image)

            # Compare image size ourselves since the Matplotlib
            # exception is a bit cryptic in this case and doesn't show
            # the filenames
            expected_shape = imread(baseline_image).shape[:2]
            actual_shape = imread(test_image).shape[:2]
            if expected_shape != actual_shape:
                error = SHAPE_MISMATCH_ERROR.format(
                    expected_path=baseline_image,
                    expected_shape=expected_shape,
                    actual_path=test_image,
                    actual_shape=actual_shape)
                pytest.fail(error, pytrace=False)

            msg = compare_images(baseline_image, test_image, tol=tolerance)

            if msg is None:
                shutil.rmtree(result_dir)
            else:
                pytest.fail(msg, pytrace=False)
        else:
            # Generation mode: write the would-be baseline(s) and skip.
            if not os.path.exists(self.generate_dir):
                os.makedirs(self.generate_dir)
            test_image = os.path.abspath(
                os.path.join(self.generate_dir, filename))

            if isinstance(fig_or_figures, list):
                for i, f in enumerate(fig_or_figures):
                    path, ext = os.path.splitext(test_image)
                    current_test_image = f"{path}_{i}{ext}"
                    f.savefig(current_test_image, **savefig_kwargs)
                    close_mpl_figure(f)
            else:
                fig_or_figures.savefig(test_image, **savefig_kwargs)
                close_mpl_figure(fig_or_figures)
            pytest.skip("Skipping test, since generating data")
def test_closed_path_nan_removal(fig_test, fig_ref):
    """
    Check how NaN vertices interact with CLOSEPOLY and curve codes.

    ``fig_test`` draws paths using CLOSEPOLY / curve codes containing
    NaNs; ``fig_ref`` draws the expected rendering built from explicit
    LINETO segments (with NaN where a segment must disappear).  The two
    figures must render identically — presumably this is driven by a
    ``@check_figures_equal``-style decorator on the enclosing module
    (not visible in this chunk); confirm.
    """
    # 2x2 grid on each figure; each quadrant exercises one family of codes.
    ax_test = fig_test.subplots(2, 2).flatten()
    ax_ref = fig_ref.subplots(2, 2).flatten()

    # NaN on the first point also removes the last point, because it's closed.
    path = Path(
        [[-3, np.nan], [3, -3], [3, 3], [-3, 3], [-3, -3]],
        [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
         Path.CLOSEPOLY])
    ax_test[0].add_patch(patches.PathPatch(path, facecolor='none'))
    path = Path(
        [[-3, np.nan], [3, -3], [3, 3], [-3, 3], [-3, np.nan]],
        [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
         Path.LINETO])
    ax_ref[0].add_patch(patches.PathPatch(path, facecolor='none'))

    # NaN on second-last point should not re-close.
    path = Path(
        [[-2, -2], [2, -2], [2, 2], [-2, np.nan], [-2, -2]],
        [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
         Path.CLOSEPOLY])
    ax_test[0].add_patch(patches.PathPatch(path, facecolor='none'))
    path = Path(
        [[-2, -2], [2, -2], [2, 2], [-2, np.nan], [-2, -2]],
        [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
         Path.LINETO])
    ax_ref[0].add_patch(patches.PathPatch(path, facecolor='none'))

    # Test multiple loops in a single path (with same paths as above).
    path = Path(
        [[-3, np.nan], [3, -3], [3, 3], [-3, 3], [-3, -3],
         [-2, -2], [2, -2], [2, 2], [-2, np.nan], [-2, -2]],
        [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
         Path.CLOSEPOLY,
         Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
         Path.CLOSEPOLY])
    ax_test[1].add_patch(patches.PathPatch(path, facecolor='none'))
    path = Path(
        [[-3, np.nan], [3, -3], [3, 3], [-3, 3], [-3, np.nan],
         [-2, -2], [2, -2], [2, 2], [-2, np.nan], [-2, -2]],
        [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
         Path.LINETO,
         Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
         Path.LINETO])
    ax_ref[1].add_patch(patches.PathPatch(path, facecolor='none'))

    # NaN in first point of CURVE3 should not re-close, and hide entire curve.
    path = Path(
        [[-1, -1], [1, -1], [1, np.nan], [0, 1], [-1, 1], [-1, -1]],
        [Path.MOVETO, Path.LINETO, Path.CURVE3, Path.CURVE3,
         Path.LINETO, Path.CLOSEPOLY])
    ax_test[2].add_patch(patches.PathPatch(path, facecolor='none'))
    path = Path(
        [[-1, -1], [1, -1], [1, np.nan], [0, 1], [-1, 1], [-1, -1]],
        [Path.MOVETO, Path.LINETO, Path.CURVE3, Path.CURVE3,
         Path.LINETO, Path.CLOSEPOLY])
    ax_ref[2].add_patch(patches.PathPatch(path, facecolor='none'))

    # NaN in second point of CURVE3 should not re-close, and hide entire curve
    # plus next line segment.
    path = Path(
        [[-3, -3], [3, -3], [3, 0], [0, np.nan], [-3, 3], [-3, -3]],
        [Path.MOVETO, Path.LINETO, Path.CURVE3, Path.CURVE3,
         Path.LINETO, Path.LINETO])
    ax_test[2].add_patch(patches.PathPatch(path, facecolor='none'))
    path = Path(
        [[-3, -3], [3, -3], [3, 0], [0, np.nan], [-3, 3], [-3, -3]],
        [Path.MOVETO, Path.LINETO, Path.CURVE3, Path.CURVE3,
         Path.LINETO, Path.LINETO])
    ax_ref[2].add_patch(patches.PathPatch(path, facecolor='none'))

    # NaN in first point of CURVE4 should not re-close, and hide entire curve.
    path = Path(
        [[-1, -1], [1, -1], [1, np.nan], [0, 0], [0, 1], [-1, 1],
         [-1, -1]],
        [Path.MOVETO, Path.LINETO, Path.CURVE4, Path.CURVE4,
         Path.CURVE4, Path.LINETO, Path.CLOSEPOLY])
    ax_test[3].add_patch(patches.PathPatch(path, facecolor='none'))
    path = Path(
        [[-1, -1], [1, -1], [1, np.nan], [0, 0], [0, 1], [-1, 1],
         [-1, -1]],
        [Path.MOVETO, Path.LINETO, Path.CURVE4, Path.CURVE4,
         Path.CURVE4, Path.LINETO, Path.CLOSEPOLY])
    ax_ref[3].add_patch(patches.PathPatch(path, facecolor='none'))

    # NaN in second point of CURVE4 should not re-close, and hide entire curve.
    path = Path(
        [[-2, -2], [2, -2], [2, 0], [0, np.nan], [0, 2], [-2, 2],
         [-2, -2]],
        [Path.MOVETO, Path.LINETO, Path.CURVE4, Path.CURVE4,
         Path.CURVE4, Path.LINETO, Path.LINETO])
    ax_test[3].add_patch(patches.PathPatch(path, facecolor='none'))
    path = Path(
        [[-2, -2], [2, -2], [2, 0], [0, np.nan], [0, 2], [-2, 2],
         [-2, -2]],
        [Path.MOVETO, Path.LINETO, Path.CURVE4, Path.CURVE4,
         Path.CURVE4, Path.LINETO, Path.LINETO])
    ax_ref[3].add_patch(patches.PathPatch(path, facecolor='none'))

    # NaN in third point of CURVE4 should not re-close, and hide entire curve
    # plus next line segment.
    path = Path(
        [[-3, -3], [3, -3], [3, 0], [0, 0], [0, np.nan], [-3, 3],
         [-3, -3]],
        [Path.MOVETO, Path.LINETO, Path.CURVE4, Path.CURVE4,
         Path.CURVE4, Path.LINETO, Path.LINETO])
    ax_test[3].add_patch(patches.PathPatch(path, facecolor='none'))
    path = Path(
        [[-3, -3], [3, -3], [3, 0], [0, 0], [0, np.nan], [-3, 3],
         [-3, -3]],
        [Path.MOVETO, Path.LINETO, Path.CURVE4, Path.CURVE4,
         Path.CURVE4, Path.LINETO, Path.LINETO])
    ax_ref[3].add_patch(patches.PathPatch(path, facecolor='none'))

    # Keep everything clean.
    for ax in [*ax_test.flat, *ax_ref.flat]:
        ax.set(xlim=(-3.5, 3.5), ylim=(-3.5, 3.5))
    remove_ticks_and_titles(fig_test)
    remove_ticks_and_titles(fig_ref)
def item_function_wrapper(*args, **kwargs):
    """Run the wrapped test and compare its figure against one or —
    with ``multi`` enabled — several acceptable baseline images; or
    generate the baseline(s) and skip when a generate dir is set.

    Free variables from the enclosing scope: ``self``, ``item``,
    ``compare`` (marker), ``original`` (wrapped test), ``style``,
    ``backend``, ``remove_text``, ``multi``, ``extension``,
    ``savefig_kwargs`` and ``tolerance``.
    """
    baseline_dir = compare.kwargs.get('baseline_dir', None)
    if baseline_dir is None:
        if self.baseline_dir is None:
            baseline_dir = os.path.join(
                os.path.dirname(item.fspath.strpath), 'baseline')
        else:
            if self.baseline_relative_dir:
                # baseline dir is relative to the current test
                baseline_dir = os.path.join(
                    os.path.dirname(item.fspath.strpath),
                    self.baseline_relative_dir)
            else:
                # baseline dir is relative to where pytest was run
                baseline_dir = self.baseline_dir
        # NOTE(review): overwritten by the unconditional startswith()
        # below — presumably a refactor leftover; behaviour unchanged
        # because plugin-level defaults are not remote URLs. Confirm.
        baseline_remote = False

    baseline_remote = baseline_dir.startswith(('http://', 'https://'))
    if not baseline_remote:
        # os.path.join with an absolute second argument returns it
        # unchanged, so absolute dirs are not double-joined.
        baseline_dir = os.path.join(
            os.path.dirname(item.fspath.strpath), baseline_dir)

    if baseline_remote and multi:
        pytest.fail(
            "Multi-baseline testing only works with local baselines.",
            pytrace=False)

    with plt.style.context(style, after_reset=True), switch_backend(backend):
        # Run test and get figure object
        if inspect.ismethod(original):  # method
            # In some cases, for example if setup_method is used,
            # original appears to belong to an instance of the test
            # class that is not the same as args[0], and args[0] is the
            # one that has the correct attributes set up from setup_method
            # so we ignore original.__self__ and use args[0] instead.
            fig = original.__func__(*args, **kwargs)
        else:  # function
            fig = original(*args, **kwargs)

        if remove_text:
            remove_ticks_and_titles(fig)

        # Find test name to use as plot name
        filename = compare.kwargs.get('filename', None)
        if filename is None:
            filename = item.name + '.' + extension
            filename = filename.replace('[', '_').replace(']', '_')
            filename = filename.replace('/', '_')
            filename = filename.replace('_.' + extension,
                                        '.' + extension)

        # What we do now depends on whether we are generating the
        # reference images or simply running the test.
        if self.generate_dir is None:
            # Save the figure
            result_dir = tempfile.mkdtemp(dir=self.results_dir)
            test_image = os.path.abspath(
                os.path.join(result_dir, filename))

            fig.savefig(test_image, **savefig_kwargs)
            close_mpl_figure(fig)

            # Find path to baseline image
            if baseline_remote:
                baseline_image_refs = [
                    _download_file(baseline_dir, filename)
                ]
            else:
                baseline_image_refs = [
                    os.path.abspath(
                        os.path.join(
                            os.path.dirname(item.fspath.strpath),
                            baseline_dir, filename))
                ]

            # If multi is enabled, the given filename, without its extension, is assumed to be a directory in the baseline dir.
            # All files in this directory will be compared against, and if at least one of them matches, the test passes.
            # This conceptually only works with non-remote baselines!
            if multi:
                raw_name, ext = os.path.splitext(
                    baseline_image_refs[0])
                baseline_image_refs = glob.glob(os.path.join(
                    raw_name, "**", "*" + ext), recursive=True)
                if len(baseline_image_refs) == 0:
                    pytest.fail(
                        "Image files not found for multi comparison test in: "
                        "\n\t{baseline_dir}"
                        "\n(This is expected for new tests.)\nGenerated Image: "
                        "\n\t{test}".format(baseline_dir=baseline_dir,
                                            test=test_image),
                        pytrace=False)

            actual_shape = imread(test_image).shape[:2]

            # Try every candidate baseline; the first match passes the
            # test, otherwise all mismatch messages are accumulated.
            has_passed = False
            all_msgs = ""
            i = -1
            for baseline_image_ref in baseline_image_refs:
                if not os.path.exists(baseline_image_ref):
                    pytest.fail(
                        "Image file not found for comparison test in: "
                        "\n\t{baseline_dir}"
                        "\n(This is expected for new tests.)\nGenerated Image: "
                        "\n\t{test}".format(baseline_dir=baseline_dir,
                                            test=test_image),
                        pytrace=False)

                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                i += 1
                baseline_image = os.path.abspath(
                    os.path.join(result_dir,
                                 'baseline-' + str(i) + '-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)

                # Compare image size ourselves since the Matplotlib exception is a bit cryptic in this case
                # and doesn't show the filenames
                expected_shape = imread(baseline_image).shape[:2]
                if expected_shape != actual_shape:
                    error = SHAPE_MISMATCH_ERROR.format(
                        expected_path=baseline_image,
                        expected_shape=expected_shape,
                        actual_path=test_image,
                        actual_shape=actual_shape)
                    all_msgs += error + "\n\n"
                    continue

                msg = compare_images(baseline_image, test_image,
                                     tol=tolerance)
                if msg is None:
                    shutil.rmtree(result_dir)
                    has_passed = True
                    break
                else:
                    all_msgs += msg + "\n\n"

            if not has_passed:
                # Optionally upload the failing image for easy sharing.
                if self.config.getoption("--mpl-upload"):
                    all_msgs += "Test image: " + _upload_to_imgur(
                        test_image) + "\n\n"
                pytest.fail(all_msgs, pytrace=False)
        else:
            # Generation mode: write the would-be baseline and skip.
            if not os.path.exists(self.generate_dir):
                os.makedirs(self.generate_dir)
            fname = os.path.abspath(
                os.path.join(self.generate_dir, filename))
            if multi:
                # In multi mode the baseline is a directory named after
                # the test; the generated image goes inside it.
                raw_name, ext = os.path.splitext(fname)
                if not os.path.exists(raw_name):
                    os.makedirs(raw_name)
                fname = os.path.join(raw_name, "generated" + ext)
            fig.savefig(fname, **savefig_kwargs)
            close_mpl_figure(fig)
            pytest.skip("Skipping test, since generating data")
def pytest_runtest_call(self, item):  # noqa
    """Hook-wrapper around the test call phase for mpl_image_compare
    tests: runs the test via ``yield``, intercepts the returned figure,
    then generates and/or compares baseline images/hashes and fills a
    summary dict for the HTML report.

    NOTE(review): this is a generator hook — presumably decorated with
    ``@pytest.hookimpl(hookwrapper=True)`` outside this chunk; confirm.
    """
    compare = get_compare(item)

    # Not an mpl_image_compare test: just let the call proceed.
    if compare is None:
        yield
        return

    import matplotlib.pyplot as plt
    try:
        from matplotlib.testing.decorators import remove_ticks_and_titles
    except ImportError:
        # Fallback for old matplotlib, where the helper was a method of
        # ImageComparisonTest rather than a module-level function.
        from matplotlib.testing.decorators import ImageComparisonTest as MplImageComparisonTest
        remove_ticks_and_titles = MplImageComparisonTest.remove_text

    style = compare.kwargs.get('style', 'classic')
    remove_text = compare.kwargs.get('remove_text', False)
    backend = compare.kwargs.get('backend', 'agg')

    with plt.style.context(style, after_reset=True), switch_backend(backend):
        # Run test and get figure object
        wrap_figure_interceptor(self, item)
        yield
        test_name = generate_test_name(item)
        if test_name not in self.return_value:
            # Test function did not complete successfully
            return
        fig = self.return_value[test_name]

        if remove_text:
            remove_ticks_and_titles(fig)

        result_dir = self.make_test_results_dir(item)

        # Per-test record for the summary/HTML report; filled in by the
        # generation/comparison helpers below.
        summary = {
            'status': None,
            'image_status': None,
            'hash_status': None,
            'status_msg': None,
            'baseline_image': None,
            'diff_image': None,
            'rms': None,
            'tolerance': None,
            'result_image': None,
            'baseline_hash': None,
            'result_hash': None,
        }

        # What we do now depends on whether we are generating the
        # reference images or simply running the test.
        if self.generate_dir is not None:
            summary['status'] = 'skipped'
            summary['image_status'] = 'generated'
            summary['status_msg'] = 'Skipped test, since generating image.'
            generate_image = self.generate_baseline_image(item, fig)
            if self.results_always:  # Make baseline image available in HTML
                result_image = (result_dir / "baseline.png").absolute()
                shutil.copy(generate_image, result_image)
                summary['baseline_image'] = \
                    result_image.relative_to(self.results_dir).as_posix()

        if self.generate_hash_library is not None:
            summary['hash_status'] = 'generated'
            image_hash = self.generate_image_hash(item, fig)
            self._generated_hash_library[test_name] = image_hash
            summary['baseline_hash'] = image_hash

        # Only test figures if not generating images
        if self.generate_dir is None:
            # Compare to hash library
            if self.hash_library or compare.kwargs.get('hash_library', None):
                msg = self.compare_image_to_hash_library(
                    item, fig, result_dir, summary=summary)

            # Compare against a baseline if specified
            else:
                msg = self.compare_image_to_baseline(
                    item, fig, result_dir, summary=summary)

            close_mpl_figure(fig)

            if msg is None:
                if not self.results_always:
                    shutil.rmtree(result_dir)
                    for image_type in ['baseline_image', 'diff_image',
                                       'result_image']:
                        summary[image_type] = None  # image no longer exists
            else:
                self._test_results[test_name] = summary
                pytest.fail(msg, pytrace=False)

        close_mpl_figure(fig)

        self._test_results[test_name] = summary

        if summary['status'] == 'skipped':
            pytest.skip(summary['status_msg'])