Example #1
 def test_spectogram(self):
     """
     Create spectogram plotting examples in tests/output directory.
     """
     # Create dynamic test_files to avoid dependencies of other modules.
     # set specific seed value such that random numbers are reproduceable
     np.random.seed(815)
     head = {
         'network': 'BW', 'station': 'BGLD',
         'starttime': UTCDateTime(2007, 12, 31, 23, 59, 59, 915000),
         'sampling_rate': 200.0, 'channel': 'EHE'}
     tr = Trace(data=np.random.randint(0, 1000, 824), header=head)
     st = Stream([tr])
     # 1 - using log=True
     with NamedTemporaryFile(suffix='.png') as tf:
         spectrogram.spectrogram(st[0].data, log=True, outfile=tf.name,
                                 samp_rate=st[0].stats.sampling_rate,
                                 show=False)
         # compare images
         expected_image = os.path.join(self.path, 'spectogram_log.png')
         compare_images(tf.name, expected_image, 0.001)
     # 2 - using log=False
     with NamedTemporaryFile(suffix='.png') as tf:
         spectrogram.spectrogram(st[0].data, log=False, outfile=tf.name,
                                 samp_rate=st[0].stats.sampling_rate,
                                 show=False)
         # compare images
         expected_image = os.path.join(self.path, 'spectogram.png')
         compare_images(tf.name, expected_image, 0.001)
Example #2
    def test_collection(self):
        """
        Tests plotting beachballs as a collection into an existing axes
        object. The moment tensor values are taken from the
        test_Beachball unit test. See that test for more information about
        the parameters.
        """
        mt = [[0.91, -0.89, -0.02, 1.78, -1.55, 0.47],
              [274, 13, 55],
              [130, 79, 98],
              [264.98, 45.00, -159.99],
              [160.55, 76.00, -46.78],
              [1.45, -6.60, 5.14, -2.67, -3.16, 1.36],
              [235, 80, 35],
              [138, 56, 168],
              [1, 1, 1, 0, 0, 0],
              [-1, -1, -1, 0, 0, 0],
              [1, -2, 1, 0, 0, 0],
              [1, -1, 0, 0, 0, 0],
              [1, -1, 0, 0, 0, -1],
              [179, 55, -78],
              [10, 42.5, 90],
              [10, 42.5, 92],
              [150, 87, 1],
              [0.99, -2.00, 1.01, 0.92, 0.48, 0.15],
              [5.24, -6.77, 1.53, 0.81, 1.49, -0.05],
              [16.578, -7.987, -8.592, -5.515, -29.732, 7.517],
              [-2.39, 1.04, 1.35, 0.57, -2.94, -0.94],
              [150, 87, 1]]

        # Initialize figure
        fig = plt.figure(figsize=(6, 6), dpi=300)
        ax = fig.add_subplot(111, aspect='equal')

        # Plot the stations or borders
        ax.plot([-100, -100, 100, 100], [-100, 100, -100, 100], 'rv')

        x = -100
        y = -100
        for i, t in enumerate(mt):
            # add the beachball (a collection of two patches) to the axis
            ax.add_collection(Beach(t, width=30, xy=(x, y), linewidth=.6))
            x += 50
            if (i + 1) % 5 == 0:
                x = -100
                y += 50

        # set the x and y limits and save the output
        ax.axis([-120, 120, -120, 120])
        # create and compare image
        with NamedTemporaryFile(suffix='.png') as tf:
            fig.savefig(tf.name)
            # compare images
            expected_image = os.path.join(self.path, 'bb_collection.png')
            compare_images(tf.name, expected_image, 0.001)
Example #3
def test_contourf_c0p1():
    """
    Test the basic  contour plot
    """
    from matplotlib import pyplot  as plt
    vertices, elements, u = getTestMeshData()
    fig,ax = plt.subplots()
    fem_plt.contourf_c0p1(ax,vertices,elements,u)
    #uncomment this  to generate an image with only the diff "IMAGE ERROR"
    #ax.annotate('IMAGE ERROR', fontsize=22,xy=(0.5, 1.), xytext=(0.5,1.))
    my_dir = os.path.dirname(os.path.realpath(__file__))
    actual = os.path.join(my_dir,"contourf_c0p1.png")
    expected = os.path.join(my_dir,"test_images","contourf_c0p1.png")
    plt.savefig(actual)
    compare_images(expected,actual)
Example #4
 def test_plotBenchmark(self):
     """
     Test benchmark plot.
     """
     path = os.path.join(os.path.dirname(__file__), 'data',
                         'seismic01_*_vz.su')
     sufiles = glob.glob(path)
     # new temporary file with PNG extension
     with NamedTemporaryFile(suffix='.png') as tf:
         # generate plot
         plotBenchmark(sufiles, outfile=tf.name, format='PNG')
         # compare images
         expected_image = os.path.join(os.path.dirname(__file__), 'images',
                                       'test_plotBenchmark.png')
         compare_images(tf.name, expected_image, 0.001)
Example #5
def image_comparison_expect_rms(im1, im2, tol, expect_rms):
    """Compare two images, expecting a particular RMS error.

    im1 and im2 are filenames relative to the baseline_dir directory.

    tol is the tolerance to pass to compare_images.

    expect_rms is the expected RMS value, or None. If None, the test will
    succeed if compare_images succeeds. Otherwise, the test will succeed if
    compare_images fails and returns an RMS error almost equal to this value.
    """
    im1 = os.path.join(baseline_dir, im1)
    im2_src = os.path.join(baseline_dir, im2)
    im2 = os.path.join(result_dir, im2)
    # Move im2 from baseline_dir to result_dir. This will ensure that
    # compare_images writes the diff file to result_dir, instead of trying to
    # write to the (possibly read-only) baseline_dir.
    shutil.copyfile(im2_src, im2)
    results = compare_images(im1, im2, tol=tol, in_decorator=True)

    if expect_rms is None:
        assert_equal(None, results)
    else:
        assert_not_equal(None, results)
        assert_almost_equal(expect_rms, results['rms'], places=4)
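A minimal sketch of how this helper might be driven from tests; the filenames and the RMS value below are hypothetical, not taken from an actual baseline set:

def test_image_compare_identical():
    # Identical baseline images: compare_images should succeed outright.
    image_comparison_expect_rms('plot.png', 'plot.png',
                                tol=10, expect_rms=None)

def test_image_compare_rms_value():
    # Slightly different images: the comparison is expected to fail with
    # an RMS error of approximately 6.5.
    image_comparison_expect_rms('plot.png', 'plot-minorchange.png',
                                tol=0, expect_rms=6.5)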
Example #6
def test_plotHeatmap_scale_regions():
    outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
    args = "-m {}/master_scale_reg.mat.gz --outFileName {}".format(ROOT, outfile.name).split()
    deeptools.plotHeatmap.main(args)
    res = compare_images(ROOT + '/master_scale_reg.png', outfile.name, tolerance)
    assert res is None, res
    os.remove(outfile.name)
Example #7
File: utils.py Project: itdxer/neupy
def image_comparison(original_image_path, figsize=(10, 10), tol=1e-3):
    """
    Context manager that initializes a figure whose rendered contents
    should be compared with the expected image.

    Parameters
    ----------
    original_image_path : str
        Path to the original image that will be used for comparison.
    figsize : tuple
        Figure size. Defaults to ``(10, 10)``.
    tol : float
        Comparison tolerance. Defaults to ``1e-3``.

    Raises
    ------
    AssertionError
        Raised when the generated image and the original one differ.
    """
    currentdir = os.path.abspath(os.path.dirname(__file__))
    original_image_path = os.path.join(currentdir, original_image_path)

    with tempfile.NamedTemporaryFile(suffix='.png') as f:
        figure = plt.figure(figsize=figsize)

        yield figure

        figure.savefig(f.name)
        error = compare_images(f.name, original_image_path, tol=tol)

        if error:
            raise AssertionError("Image comparison failed. \n"
                                 "Information: {}".format(error))
Example #8
    def __exit__(self, exception, *_):
        if exception:
            return

        if self.remove_text:
            _remove_text(self.fig)

        with tempfile.TemporaryDirectory() as tmpdir:
            actual_file = path_from_fixture(self.request, prefix=tmpdir, ext=self.ext)
            actual_filename = str(actual_file)
            if not actual_file.parent.exists():
                actual_file.parent.mkdir(parents=True)
            plt.savefig(actual_filename, **self.savefig_kwargs)

            baseline = path_from_fixture(self.request, prefix='baseline_plots', ext=self.ext)
            baseline_filename = str(baseline)

            if baseline.exists():
                try:
                    data = compare_images(baseline_filename, actual_filename,
                                          self.tol, in_decorator=True)
                except ValueError as exc:
                    if 'could not be broadcast' not in str(exc):
                        raise
                    else:
                        data = dict(actual=actual_filename, expected=baseline_filename)

                self.passed = data is None
                self.report(data)
            else:
                shutil.copyfile(actual_filename, baseline_filename)
                self.passed = True

        plt.close()
        self._exit_style()
Example #9
    def test_plot_emsr(self):
        # Arrange
        p2_0 = 0.046
        p2_1 = 0.556
        p2_2 = 0.673
        points = 40

        native_values = np.random.uniform(3., 8.5, points)
        native_sigmas = np.random.uniform(0.02, 0.2, points)
        target_values = (p2_0 + p2_1 * native_values +
                         p2_2 * (native_values ** 2.))
        target_values += np.random.normal(0., 1, points)
        target_sigmas = np.random.uniform(0.025, 0.2, points)
        native_measures = [models.MagnitudeMeasure(
            agency=None, event=None, origin=None,
            scale='Mtest', value=v[0], standard_error=v[1])
            for v in zip(native_values, native_sigmas)]
        target_measures = [models.MagnitudeMeasure(
            agency=None, event=None, origin=None,
            scale='Mtest', value=v[0], standard_error=v[1])
            for v in zip(target_values, target_sigmas)]

        emsr = regression.EmpiricalMagnitudeScalingRelationship(
            native_measures, target_measures)
        emsr.apply_regression_model(regression.LinearModel)
        emsr.apply_regression_model(regression.PolynomialModel,
                                    order=2)

        # Act
        plot(emsr, ACTUAL1)

        # Assert
        self.assertFalse(compare_images(EXPECTED1, ACTUAL1, tol=4))
Example #10
File: test_images.py Project: wmwv/aplpy
    def generate_or_test(self, generate, figure, image, adjust_bbox=True):
        if generate is None:
            result_dir = tempfile.mkdtemp()
            test_image = os.path.abspath(os.path.join(result_dir, image))

            # distutils will put the baseline images in non-accessible places,
            # copy to our tmpdir to be sure to keep them in case of failure
            orig_baseline_image = os.path.abspath(os.path.join(self._baseline_images_dir, image))
            baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-'+image))
            shutil.copyfile(orig_baseline_image, baseline_image)

            figure.save(test_image)

            if not os.path.exists(baseline_image):
                raise Exception("""Image file not found for comparision test
                                Generated Image:
                                \t{test}
                                This is expected for new tests.""".format(
                                    test=test_image))

            msg = compare_images(baseline_image, test_image, tol=self._tolerance)

            if msg is None:
                shutil.rmtree(result_dir)
            else:
                raise Exception(msg)
        else:
            figure.save(os.path.abspath(os.path.join(generate, image)), adjust_bbox=adjust_bbox)
            pytest.skip("Skipping test, since generating data")
Example #11
    def assert_same(self, tmpdir, tol=0.1):

        os.chdir(tmpdir.strpath)

        expected = tmpdir.join('expected.png').strpath
        script = tmpdir.join('actual.py').strpath
        actual = tmpdir.join('glue_plot.png').strpath

        self.viewer.axes.figure.savefig(expected)

        self.viewer.export_as_script(script)
        subprocess.call([sys.executable, script])

        msg = compare_images(expected, actual, tol=tol)

        if msg:

            from base64 import b64encode

            print("SCRIPT:")
            with open(script, 'r') as f:
                print(f.read())

            print("EXPECTED:")
            with open(expected, 'rb') as f:
                print(b64encode(f.read()).decode())

            print("ACTUAL:")
            with open(actual, 'rb') as f:
                print(b64encode(f.read()).decode())

            pytest.fail(msg, pytrace=False)
Example #12
        def decorated_compare_images(*args, **kwargs):
            result = func(*args, **kwargs)
            extension = '.png'  # TODO: test more backends
            for fname in baseline_images:
                actual = fname + extension

                # compute filename for baseline image
                module_name = func.__module__
                if module_name == '__main__':
                    # FIXME: this won't work for nested packages in matplotlib.tests
                    import warnings
                    warnings.warn('test module run as script. guessing baseline image locations')
                    script_name = sys.argv[0]
                    basedir = os.path.abspath(os.path.dirname(script_name))
                    subdir = os.path.splitext(os.path.split(script_name)[1])[0]
                else:
                    mods = module_name.split('.')
                    assert mods.pop(0) == 'matplotlib'
                    assert mods.pop(0) == 'tests'
                    subdir = os.path.join(*mods)
                    basedir = os.path.dirname(matplotlib.tests.__file__)
                baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
                expected = os.path.join(baseline_dir, fname) + extension

                # compare the images
                tol = 1e-3  # default tolerance
                err = compare_images(expected, actual, tol,
                                     in_decorator=True)
                if err:
                    raise ImageComparisonFailure(
                        'images not close: %(actual)s vs. %(expected)s '
                        '(RMS %(rms).3f)' % err)
            return result
Example #13
                def do_test():
                    figure = plt.figure(fignum)

                    if self._remove_text:
                        self.remove_text(figure)

                    figure.savefig(actual_fname, **self._savefig_kwarg)

                    err = compare_images(expected_fname, actual_fname,
                                         self._tol, in_decorator=True)

                    try:
                        if not os.path.exists(expected_fname):
                            raise ImageComparisonFailure(
                                'image does not exist: %s' % expected_fname)

                        if err:
                            raise ImageComparisonFailure(
                                'images not close: %(actual)s vs. %(expected)s '
                                '(RMS %(rms).3f)'%err)
                    except ImageComparisonFailure:
                        if not check_freetype_version(self._freetype_version):
                            raise KnownFailureTest(
                                "Mismatched version of freetype.  Test requires '%s', you have '%s'" %
                                (self._freetype_version, ft2font.__freetype_version__))
                        raise
Example #14
def test_plotProfiler_overlapped_lines():
    outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
    args = "-m {}/master.mat.gz --outFileName {} " \
           "--plotType overlapped_lines --yMin -1".format(ROOT, outfile.name).split()
    deeptools.plotProfile.main(args)
    res = compare_images(ROOT + '/profile_master_overlap_lines.png', outfile.name, tolerance)
    assert res is None, res
    os.remove(outfile.name)
Example #15
def test_plotHeatmap_multiple_colors_muti_scales():
    outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
    args = "-m {}/master_multi.mat.gz --colorList white,blue white,red --zMin 1 0 --zMax 4 5 " \
           "--outFileName {}".format(ROOT, outfile.name).split()
    deeptools.plotHeatmap.main(args)
    res = compare_images(ROOT + '/heatmap_master_multi_color.png', outfile.name, tolerance)
    assert res is None, res
    os.remove(outfile.name)
Example #16
def test_plotProfiler():
    outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
    args = "-m {}/master.mat.gz --outFileName {} --regionsLabel uno dos " \
           "--plotType std".format(ROOT, outfile.name).split()
    deeptools.plotProfile.main(args)
    res = compare_images(ROOT + '/profile_master.png', outfile.name, tolerance)
    assert res is None, res
    os.remove(outfile.name)
Example #17
def test_plotHeatmap_multi_bigwig_pergroup():
    outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
    args = "-m {}/master_multi.mat.gz --perGroup --samplesLabel file1 file2 file3 file4 " \
           "--outFileName {}".format(ROOT, outfile.name).split()
    deeptools.plotHeatmap.main(args)
    res = compare_images(ROOT + '/heatmap_master_multi_pergroup.png', outfile.name, tolerance)
    assert res is None, res
    os.remove(outfile.name)
Example #18
def test_plotHeatmap_multiple_colormap_no_boxes():
    outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
    args = "-m {}/master_multi.mat.gz --colorMap Reds binary terrain --boxAroundHeatmaps no " \
           "--outFileName {}".format(ROOT, outfile.name).split()
    deeptools.plotHeatmap.main(args)
    res = compare_images(ROOT + '/heatmap_master_multi_colormap_no_box.png', outfile.name, tolerance)
    assert res is None, res
    os.remove(outfile.name)
Example #19
def test_plotProfiler_multibigwig():
    outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
    args = "-m {}/master_multi.mat.gz --outFileName {} " \
           "--numPlotsPerRow 2 --yMax 1.5".format(ROOT, outfile.name).split()
    deeptools.plotProfile.main(args)
    res = compare_images(ROOT + '/profile_master_multi.png', outfile.name, tolerance)
    assert res is None, res
    os.remove(outfile.name)
Example #20
def test_plotHeatmap_interpolation():
    outfile = NamedTemporaryFile(suffix='.png', prefix='plotHeatmap_test_', delete=False)
    args = "-m {}/large_matrix.mat.gz --interpolation bilinear " \
           "--outFileName {}".format(ROOT, outfile.name).split()
    deeptools.plotHeatmap.main(args)
    res = compare_images(ROOT + '/heatmap_master_interpolation_bilinear.png', outfile.name, tolerance)
    assert res is None, res
    os.remove(outfile.name)
Example #21
    def check_graphic(self, tol=_DEFAULT_IMAGE_TOLERANCE):
        """Checks the CRC matches for the current matplotlib.pyplot figure, and closes the figure."""

        unique_id = self._unique_id()

        figure = plt.gcf()

        try:
            expected_fname = os.path.join(os.path.dirname(__file__),
                                          'results', 'visual_tests',
                                          unique_id + '.png')

            if not os.path.isdir(os.path.dirname(expected_fname)):
                os.makedirs(os.path.dirname(expected_fname))

            #: The path where the images generated by the tests should go.
            image_output_directory = os.path.join(os.path.dirname(__file__),
                                                  'result_image_comparison')
            if not os.access(image_output_directory, os.W_OK):
                if not os.access(os.getcwd(), os.W_OK):
                    raise IOError('Write access to a local disk is required '
                                  'to run image tests.  Run the tests from a '
                                  'current working directory you have write '
                                  'access to, to avoid this issue.')
                else:
                    image_output_directory = os.path.join(
                        os.getcwd(), 'iris_image_test_output')
            result_fname = os.path.join(image_output_directory,
                                        'result-' + unique_id + '.png')

            if not os.path.isdir(os.path.dirname(result_fname)):
                # Handle race-condition where the directories are
                # created sometime between the check above and the
                # creation attempt below.
                try:
                    os.makedirs(os.path.dirname(result_fname))
                except OSError as err:
                    # Don't care about "File exists"
                    if err.errno != 17:
                        raise

            figure.savefig(result_fname)

            if not os.path.exists(expected_fname):
                warnings.warn('Created image for test %s' % unique_id)
                shutil.copy2(result_fname, expected_fname)

            err = mcompare.compare_images(expected_fname, result_fname, tol=tol)

            if _DISPLAY_FIGURES:
                if err:
                    print('Image comparison would have failed. Message: %s' % err)
                plt.show()
            else:
                assert not err, 'Image comparison failed. Message: %s' % err

        finally:
            plt.close()
Example #22
def compare_figure(fname, savefig_kwargs={}, tol=0):
    actual = os.path.join(result_dir, fname)
    plt.savefig(actual, **savefig_kwargs)

    expected = os.path.join(result_dir, "expected_%s" % fname)
    shutil.copyfile(os.path.join(baseline_dir, fname), expected)
    err = compare_images(expected, actual, tol=tol)
    if err:
        raise ImageComparisonFailure(err)
Example #23
def compare_image_lists(new_result, old_result, decimals):
    fns = ['old.png', 'new.png']
    num_images = len(old_result)
    assert(num_images > 0)
    for i in range(num_images):
        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
        assert compare_images(fns[0], fns[1], 10**(-decimals)) is None
        for fn in fns:
            os.remove(fn)
Example #24
def compare_figure(fname):
    actual = os.path.join(result_dir, fname)
    plt.savefig(actual)

    expected = os.path.join(result_dir, "expected_%s" % fname)
    shutil.copyfile(os.path.join(baseline_dir, fname), expected)
    err = compare_images(expected, actual, tol=5e-3)
    if err:
        raise ImageComparisonFailure('images not close: %s vs. %s' % (actual, expected))
Example #25
 def generate_or_test(self, generate, figure, image, test_image=None, baseline_image=None):
     baseline_image = os.path.abspath(os.path.join(self._baseline_images_dir, image))
     test_image = os.path.abspath(os.path.join(self._result_dir, image))
     if generate:
         figure.savefig(baseline_image)
         pytest.skip("Skipping test, since generating data")
     else:
         figure.savefig(test_image)
         msg = compare_images(baseline_image, test_image, tol=self._tolerance)
         assert msg is None
Example #26
 def compare_figures(self, fname, tol=1, **kwargs):
     """Saves and compares the figure to the reference figure with the same
     name"""
     import matplotlib.pyplot as plt
     from matplotlib.testing.compare import compare_images
     plt.savefig(os.path.join(odir, fname), **kwargs)
     results = compare_images(
         os.path.join(ref_dir, fname), os.path.join(odir, fname),
         tol=tol)
     self.assertIsNone(results, msg=results)
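A hedged sketch of how such a helper might be called from a test method; 'simple_plot.png' is a hypothetical reference name, and odir/ref_dir are the output and reference directories assumed above:

 def test_simple_plot(self):
     import matplotlib.pyplot as plt
     plt.plot([0, 1, 2], [0, 1, 4])
     # saves to odir/simple_plot.png and compares it against
     # ref_dir/simple_plot.png using the default tolerance
     self.compare_figures('simple_plot.png')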
Example #27
File: base.py Project: angelcalayag/obspy
 def compare(self, reltol=1):
     """
     Run :func:`matplotlib.testing.compare.compare_images` and raise a
     unittest.TestCase.failureException with the message string given by
     matplotlib if the comparison exceeds the allowed tolerance.
     """
     from matplotlib.testing.compare import compare_images
     msg = compare_images(self.baseline_image, self.name, tol=self.tol)
     if msg:
         raise ImageComparisonException(msg)
Example #28
def test_pdf_savefig_when_color_is_none(tmpdir):
    fig, ax = plt.subplots()
    plt.axis('off')
    ax.plot(np.sin(np.linspace(-5, 5, 100)), 'v', c='none')
    actual_image = tmpdir.join('figure.pdf')
    expected_image = tmpdir.join('figure.eps')
    fig.savefig(str(actual_image), format='pdf')
    fig.savefig(str(expected_image), format='eps')
    result = compare_images(str(actual_image), str(expected_image), 0)
    assert result is None
Example #29
        def item_function_wrapper(*args, **kwargs):

            generate_path = self.config.getoption("--mpl-generate-path")

            # Run test and get figure object
            import inspect
            if inspect.ismethod(original):  # method
                fig = original(*args[1:], **kwargs)
            else:  # function
                fig = original(*args, **kwargs)

            # Find test name to use as plot name
            filename = compare.kwargs.get('filename', None)
            if filename is None:
                filename = original.__name__ + '.png'

            # What we do now depends on whether we are generating the reference
            # images or simply running the test.
            if generate_path is None:

                # Save the figure
                result_dir = tempfile.mkdtemp()
                test_image = os.path.abspath(os.path.join(result_dir, filename))

                fig.savefig(test_image, **savefig_kwargs)

                # Find path to baseline image
                baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))

                if not os.path.exists(baseline_image_ref):
                    raise Exception("""Image file not found for comparison test
                                    Generated Image:
                                    \t{test}
                                    This is expected for new tests.""".format(
                        test=test_image))

                # distutils may put the baseline images in non-accessible places,
                # copy to our tmpdir to be sure to keep them in case of failure
                baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
                shutil.copyfile(baseline_image_ref, baseline_image)

                msg = compare_images(baseline_image, test_image, tol=tolerance)

                if msg is None:
                    shutil.rmtree(result_dir)
                else:
                    raise Exception(msg)

            else:

                if not os.path.exists(generate_path):
                    os.makedirs(generate_path)

                fig.savefig(os.path.abspath(os.path.join(generate_path, filename)), **savefig_kwargs)
                pytest.skip("Skipping test, since generating data")
Example #30
def compare_figure(fname, savefig_kwargs={}, tol=0):
    # TODO remove this before tagging 2.0
    raise KnownFailureTest('temporarily disabled until 2.0 tag')
    actual = os.path.join(result_dir, fname)
    plt.savefig(actual, **savefig_kwargs)

    expected = os.path.join(result_dir, "expected_%s" % fname)
    shutil.copyfile(os.path.join(baseline_dir, fname), expected)
    err = compare_images(expected, actual, tol=tol)
    if err:
        raise ImageComparisonFailure(err)
Example #31
def test_addplot09(bolldata):

    sdf = bolldata[50:130]

    fname = base+'09.png'
    tname = os.path.join(tdir,fname)
    rname = os.path.join(refd,fname)

    ap = mpf.make_addplot((sdf['PercentB'])-0.45,panel=1,color='g',type='bar', width=0.75, mav=(7,10,15))
    mpf.plot(sdf,addplot=ap,panel_ratios=(1,1),figratio=(1,1),figscale=1.5,savefig=tname)

    tsize = os.path.getsize(tname)
    print(glob.glob(tname),'[',tsize,'bytes',']')

    rsize = os.path.getsize(rname)
    print(glob.glob(rname),'[',rsize,'bytes',']')

    # Using 0.9*IMGCOMP_TOLERANCE here because we discovered that if
    # the only difference is the presence or absence of mav lines,
    # then the default IMGCOMP_TOLERANCE is too lenient:
    result = compare_images(rname, tname, tol=0.9*IMGCOMP_TOLERANCE)
    if result is not None:
        print('result=', result)
    assert result is None
Example #32
def test_addplot10(bolldata):

    sdf = bolldata[50:130]

    fname = base+'10.png'
    tname = os.path.join(tdir,fname)
    rname = os.path.join(refd,fname)

    ap = mpf.make_addplot(sdf,panel=1,type='candle',ylabel='Candle',mav=12)
    mpf.plot(sdf,mav=10,ylabel='OHLC',addplot=ap,panel_ratios=(1,1),figratio=(1,1),figscale=1.5,savefig=tname)

    tsize = os.path.getsize(tname)
    print(glob.glob(tname),'[',tsize,'bytes',']')

    rsize = os.path.getsize(rname)
    print(glob.glob(rname),'[',rsize,'bytes',']')

    # Using 0.9*IMGCOMP_TOLERANCE here because we discovered that if
    # the only difference is the presence or absence of mav lines,
    # then the default IMGCOMP_TOLERANCE is too lenient:
    result = compare_images(rname, tname, tol=0.9*IMGCOMP_TOLERANCE)
    if result is not None:
        print('result=', result)
    assert result is None
Example #33
                def do_test():
                    if self._remove_text:
                        self.remove_text(figure)

                    figure.savefig(actual_fname, **self._savefig_kwarg)

                    err = compare_images(expected_fname, actual_fname,
                                         self._tol, in_decorator=True)

                    try:
                        if not os.path.exists(expected_fname):
                            raise ImageComparisonFailure(
                                'image does not exist: %s' % expected_fname)

                        if err:
                            raise ImageComparisonFailure(
                                'images not close: %(actual)s vs. %(expected)s '
                                '(RMS %(rms).3f)'%err)
                    except ImageComparisonFailure:
                        if not check_freetype_version(self._freetype_version):
                            raise KnownFailureTest(
                                "Mismatched version of freetype.  Test requires '%s', you have '%s'" %
                                (self._freetype_version, ft2font.__freetype_version__))
                        raise
Example #34
 def decorated_compare_images():
     # set the default format of savefig
     matplotlib.rc('savefig', extension=extension)
     # change to the result directory for the duration of the test
     old_dir = os.getcwd()
     os.chdir(result_dir)
     try:
         result = func()  # actually call the test function
     finally:
         os.chdir(old_dir)
     for original, expected in zip(orig_expected_fnames, expected_fnames):
         if not os.path.exists(original):
             raise ImageComparisonFailure(
                 'image does not exist: %s' % original)
         shutil.copyfile(original, expected)
     for actual, expected in zip(actual_fnames, expected_fnames):
         # compare the images
         err = compare_images(expected, actual, tol,
                              in_decorator=True)
         if err:
             raise ImageComparisonFailure(
                 'images not close: %(actual)s vs. %(expected)s '
                 '(RMS %(rms).3f)' % err)
     return result
Example #35
File: framework.py Project: rcjackson/yt
def compare_image_lists(new_result, old_result, decimals):
    fns = []
    for i in range(2):
        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
        os.close(tmpfd)
        fns.append(tmpname)
    num_images = len(old_result)
    assert (num_images > 0)
    for i in range(num_images):
        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[i])))
        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[i])))
        results = compare_images(fns[0], fns[1], 10**(-decimals))
        if results is not None:
            if os.environ.get("JENKINS_HOME") is not None:
                tempfiles = [
                    line.strip() for line in results.split('\n')
                    if line.endswith(".png")
                ]
                for fn in tempfiles:
                    sys.stderr.write("\n[[ATTACHMENT|{}]]".format(fn))
                sys.stderr.write('\n')
        assert_equal(results, None, results)
        for fn in fns:
            os.remove(fn)
Example #36
def test_plot_tracks_bed_vlines():
    extension = '.png'
    outfile = NamedTemporaryFile(suffix=extension,
                                 prefix='pyGenomeTracks_test_',
                                 delete=False)
    bed_file = os.path.join(ROOT, 'regionsXfakeChr.bed')
    for suf in ['', '_incorrect']:
        ini_file = os.path.join(ROOT, f"bed_vlines{suf}.ini")
        args = f"--tracks {ini_file} --BED {bed_file} "\
               "--trackLabelFraction 0.5 --width 38 --dpi 130 "\
               "--trackLabelHAlign center "\
               f"--outFileName {outfile.name}".split()
        pygenometracks.plotTracks.main(args)
        for region in ['X:3000000-3300000', 'fakeChr:0-100']:
            region_str = region.replace(':', '-')
            output_file = outfile.name[:-4] + '_' + region_str + extension
            expected_file = os.path.join(
                ROOT, 'master_bed_vlines_' + region_str + extension)
            res = compare_images(expected_file, output_file, tolerance)
            assert res is None, res

            os.remove(output_file)
        if 'incorrect' in ini_file:
            os.remove(ini_file)
Example #37
def test_plot_tracks_with_hic_rasterize_height_2chr_individual():
    extension = '.pdf'
    ini_file = os.path.join(ROOT, "browser_tracks_hic_rasterize_height.ini")
    for region in ['X:2500000-2600000', 'Y:0-1000000']:
        outfile = NamedTemporaryFile(suffix=extension,
                                     prefix='pyGenomeTracks_test_',
                                     delete=False)
        expected_file = os.path.join(
            ROOT, 'master_plot_hic_rasterize_height_' +
            region.replace(':', '-') + extension)
        # matplotlib compare on pdf will create a png next to it.
        # To avoid issues related to write in test_data folder
        # We copy the expected file into a temporary place
        new_expected_file = NamedTemporaryFile(suffix='.pdf',
                                               prefix='pyGenomeTracks_test_',
                                               delete=False)
        os.system(f'cp {expected_file} {new_expected_file.name}')
        expected_file = new_expected_file.name
        args = f"--tracks {ini_file} --region {region} "\
               "--trackLabelFraction 0.23 --width 38 --dpi 10 "\
               f"--outFileName {outfile.name}".split()
        pygenometracks.plotTracks.main(args)
        res = compare_images(expected_file, outfile.name, tolerance)
        assert res is None, res
Example #38
    def test_ma_plot_data_set(self):
        y = np.array(self.y, dtype=float)
        y[np.arange(5), np.arange(5)] = np.nan
        fig = plot_ma_data_set(self.X,
                               y,
                               self.y_true,
                               fig_size=(12, 3),
                               legend_dict={
                                   'loc': 'lower center',
                                   'bbox_to_anchor': (0.5, 0.1),
                                   'ncol': 3
                               },
                               tick_dict={
                                   'labelbottom': True,
                                   'labelleft': True
                               })

        fig.tight_layout()
        fig.savefig(self.path_prefix + 'data_set_returned_result.pdf')
        comparison = compare_images(
            self.path_prefix + 'data_set_expected_result.pdf',
            self.path_prefix + 'data_set_returned_result.pdf',
            tol=0)
        self.assertIsNone(comparison)
Example #39
    def do_compare(self, result_fname, expected_fname, tol):
        """
        Runs the comparison of the result file with the expected file.

        If an RMS difference greater than ``tol`` is found an assertion
        error is raised with an appropriate message with the paths to
        the files concerned.

        """
        if not os.path.exists(expected_fname):
            warnings.warn('Created image in %s' % expected_fname)
            shutil.copy2(result_fname, expected_fname)

        err = mcompare.compare_images(expected_fname,
                                      result_fname,
                                      tol=tol,
                                      in_decorator=True)

        if err:
            msg = ('Images were different (RMS: %s).\n%s %s %s\nConsider '
                   'running idiff to inspect these differences.'
                   '' %
                   (err['rms'], err['actual'], err['expected'], err['diff']))
            assert False, msg
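A hypothetical invocation from within a test, assuming a result image was just saved and that result_dir and expected_dir are defined by the test harness:

self.do_compare(os.path.join(result_dir, 'plot.png'),
                os.path.join(expected_dir, 'plot.png'),
                tol=0.5)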
Example #40
def test_pnf03(bolldata):

    df = bolldata

    fname = base + '03.png'
    tname = os.path.join(tdir, fname)
    rname = os.path.join(refd, fname)

    mpf.plot(df,
             type='pnf',
             pnf_params=dict(box_size='atr', atr_length=2),
             volume=True,
             savefig=tname)

    tsize = os.path.getsize(tname)
    print(glob.glob(tname), '[', tsize, 'bytes', ']')

    rsize = os.path.getsize(rname)
    print(glob.glob(rname), '[', rsize, 'bytes', ']')

    result = compare_images(rname, tname, tol=IMGCOMP_TOLERANCE)
    if result is not None:
        print('result=', result)
    assert result is None
Example #41
File: plotting.py Project: dilawar/scanpy
def test_rank_genes_groups():
    pbmc = sc.datasets.pbmc68k_reduced()
    tolerance = 15

    # test ranked genes panels
    outfile = NamedTemporaryFile(suffix='.png', prefix='scanpy_test_rank_genes_groups_', delete=False)

    sc.pl.rank_genes_groups(pbmc, n_genes=12, n_panels_per_row=3, show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_ranked_genes_sharey.png', outfile.name, tolerance)
    assert res is None, res

    # test ranked genes panels sharey = False
    sc.pl.rank_genes_groups(pbmc, n_genes=12, n_panels_per_row=3, sharey=False, show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_ranked_genes.png', outfile.name, tolerance)
    assert res is None, res

    # test ranked genes using heatmap
    sc.pl.rank_genes_groups_heatmap(pbmc, n_genes=5, show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_ranked_genes_heatmap.png', outfile.name, tolerance)
    assert res is None, res

    # test ranked genes using stacked violin plots
    sc.pl.rank_genes_groups_stacked_violin(pbmc, n_genes=3, show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_ranked_genes_stacked_violin.png', outfile.name, tolerance)
    assert res is None, res

    # test ranked genes using dotplot
    sc.pl.rank_genes_groups_dotplot(pbmc, n_genes=4, show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_ranked_genes_dotplot.png', outfile.name, tolerance)
    assert res is None, res

    # test ranked genes using matrixplot
    sc.pl.rank_genes_groups_matrixplot(pbmc, n_genes=5, show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_ranked_genes_matrixplot.png', outfile.name, tolerance)
    assert res is None, res

    # # test ranked genes using violin plots
    # sc.pl.rank_genes_groups_violin(pbmc, groups=pbmc.obs.bulk_labels.cat.categories[0], n_genes=5,
    #                                jitter=False, strip=False, show=False)
    # pl.savefig(outfile.name, dpi=80)
    # pl.close()
    #
    # res = compare_images(ROOT + '/master_ranked_genes_violin.png', outfile.name, tolerance)
    # assert res is None, res

    os.remove(outfile.name)
Example #42
 def new_test_func(*args, **kwargs):
     if inspect.ismethod(original):
         result = original.__func__(*args, **kwargs)
     else:
         result = original(*args, **kwargs)
     for baseline_image, test_image, baseline_rcparams, test_rcparams in self._get_baseline_result_pairs(
             test_suite_name, filename, self.extensions):
         # save image
         fig = plt.gcf()
         if fig is not None:
             if self.fig_size is not None:
                 fig.set_size_inches(self.fig_size)
                 fig.set_tight_layout(True)
             fig.savefig(test_image, **self.savefig_kwargs)
             # save rcParams
             with open(test_rcparams, "w") as rcfile:
                 from pprint import pprint
                 rc = matplotlib.rcParams.copy()
                 rc.pop("datapath")  # hide datapath
                 pprint(rc, rcfile)
             import pytest
             if self.is_compare_image and os.path.exists(
                     baseline_image):
                 msg = compare_images(baseline_image,
                                      test_image,
                                      tol=self.tolerance)
                 if msg is not None:
                     msg += "\n"
                     msg += self.compare_rcParam(
                         baseline_rcparams, test_rcparams)
                     # print image in base64
                     # print("====================")
                     # print("Expected Image:")
                     # self._print_image_base64(baseline_image)
                     # print("Actual Image:")
                     # self._print_image_base64(test_image)
                     # print("====================")
                     self.print_image_testing_note(file=sys.stderr)
                     if self.on_compare_fail is not None:
                         self.on_compare_fail()
                     if self.on_fail is not None:
                         self.on_fail()
                     pytest.fail(msg, pytrace=False)
                 else:
                      # clean up the images since they match the baseline
                     os.remove(test_image)
                     os.remove(test_rcparams)
                     if not os.listdir(os.path.dirname(test_image)):
                         os.rmdir(os.path.dirname(test_image))
             else:
                 # checking if the created image is empty
                 verify(test_image)
                 actual_image = _png.read_png_int(test_image)
                  actual_image = actual_image[:, :, :3]  # remove the alpha channel (if it exists)
                 import numpy as np
                 if np.any(actual_image):
                     self.print_image_testing_note(file=sys.stderr)
                     if self.is_compare_image:
                         pytest.skip(
                             "Image file not found for comparison test "
                             "(This is expected for new tests.)\nGenerated Image: "
                             "\n\t{test}".format(test=test_image))
                     else:
                         self._logger.info(
                             "\nGenerated Image: {test}".format(
                                 test=test_image))
                 else:
                     # empty image created
                     if self.on_empty_image is not None:
                         self.on_empty_image()
                     if self.on_fail is not None:
                         self.on_fail()
                     pytest.fail(
                         "Image file not found for comparison test "
                         "(This is expected for new tests.),"
                         " but the new image created is empty.")
     return result
Example #43
        def item_function_wrapper(*args, **kwargs):

            baseline_dir = compare.kwargs.get('baseline_dir', None)
            if baseline_dir is None:
                if self.baseline_dir is None:
                    baseline_dir = os.path.join(
                        os.path.dirname(item.fspath.strpath), 'baseline')
                else:
                    if self.baseline_relative_dir:
                        # baseline dir is relative to the current test
                        baseline_dir = os.path.join(
                            os.path.dirname(item.fspath.strpath),
                            self.baseline_relative_dir)
                    else:
                        # baseline dir is relative to where pytest was run
                        baseline_dir = self.baseline_dir
                baseline_remote = False

            baseline_remote = baseline_dir.startswith(('http://', 'https://'))
            if not baseline_remote:
                baseline_dir = os.path.join(
                    os.path.dirname(item.fspath.strpath), baseline_dir)

            if baseline_remote and multi:
                pytest.fail(
                    "Multi-baseline testing only works with local baselines.",
                    pytrace=False)

            with plt.style.context(style,
                                   after_reset=True), switch_backend(backend):

                # Run test and get figure object
                if inspect.ismethod(original):  # method
                    # In some cases, for example if setup_method is used,
                    # original appears to belong to an instance of the test
                    # class that is not the same as args[0], and args[0] is the
                    # one that has the correct attributes set up from setup_method
                    # so we ignore original.__self__ and use args[0] instead.
                    fig = original.__func__(*args, **kwargs)
                else:  # function
                    fig = original(*args, **kwargs)

                if remove_text:
                    remove_ticks_and_titles(fig)

                # Find test name to use as plot name
                filename = compare.kwargs.get('filename', None)
                if filename is None:
                    filename = item.name + '.' + extension
                    filename = filename.replace('[', '_').replace(']', '_')
                    filename = filename.replace('/', '_')
                    filename = filename.replace('_.' + extension,
                                                '.' + extension)

                # What we do now depends on whether we are generating the
                # reference images or simply running the test.
                if self.generate_dir is None:

                    # Save the figure
                    result_dir = tempfile.mkdtemp(dir=self.results_dir)
                    test_image = os.path.abspath(
                        os.path.join(result_dir, filename))

                    fig.savefig(test_image, **savefig_kwargs)
                    close_mpl_figure(fig)

                    # Find path to baseline image
                    if baseline_remote:
                        baseline_image_refs = [
                            _download_file(baseline_dir, filename)
                        ]
                    else:
                        baseline_image_refs = [
                            os.path.abspath(
                                os.path.join(
                                    os.path.dirname(item.fspath.strpath),
                                    baseline_dir, filename))
                        ]

                    # If multi is enabled, the given filename, without its extension, is assumed to be a directory in the baseline dir.
                    # All files in this directory will be compared against, and if at least one of them matches, the test passes.
                    # This conceptually only works with non-remote baselines!
                    if multi:
                        raw_name, ext = os.path.splitext(
                            baseline_image_refs[0])
                        baseline_image_refs = glob.glob(os.path.join(
                            raw_name, "**", "*" + ext),
                                                        recursive=True)
                        if len(baseline_image_refs) == 0:
                            pytest.fail(
                                "Image files not found for multi comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir,
                                                    test=test_image),
                                pytrace=False)

                    actual_shape = imread(test_image).shape[:2]

                    has_passed = False
                    all_msgs = ""
                    i = -1
                    for baseline_image_ref in baseline_image_refs:
                        if not os.path.exists(baseline_image_ref):
                            pytest.fail(
                                "Image file not found for comparison test in: "
                                "\n\t{baseline_dir}"
                                "\n(This is expected for new tests.)\nGenerated Image: "
                                "\n\t{test}".format(baseline_dir=baseline_dir,
                                                    test=test_image),
                                pytrace=False)

                        # distutils may put the baseline images in non-accessible places,
                        # copy to our tmpdir to be sure to keep them in case of failure
                        i += 1
                        baseline_image = os.path.abspath(
                            os.path.join(result_dir, 'baseline-' + str(i) +
                                         '-' + filename))
                        shutil.copyfile(baseline_image_ref, baseline_image)

                        # Compare image size ourselves since the Matplotlib exception is a bit cryptic in this case
                        # and doesn't show the filenames
                        expected_shape = imread(baseline_image).shape[:2]
                        if expected_shape != actual_shape:
                            error = SHAPE_MISMATCH_ERROR.format(
                                expected_path=baseline_image,
                                expected_shape=expected_shape,
                                actual_path=test_image,
                                actual_shape=actual_shape)
                            all_msgs += error + "\n\n"
                            continue

                        msg = compare_images(baseline_image,
                                             test_image,
                                             tol=tolerance)

                        if msg is None:
                            shutil.rmtree(result_dir)
                            has_passed = True
                            break
                        else:
                            all_msgs += msg + "\n\n"

                    if not has_passed:
                        if self.config.getoption("--mpl-upload"):
                            all_msgs += "Test image: " + _upload_to_imgur(
                                test_image) + "\n\n"
                        pytest.fail(all_msgs, pytrace=False)

                else:

                    if not os.path.exists(self.generate_dir):
                        os.makedirs(self.generate_dir)

                    fname = os.path.abspath(
                        os.path.join(self.generate_dir, filename))
                    if multi:
                        raw_name, ext = os.path.splitext(fname)
                        if not os.path.exists(raw_name):
                            os.makedirs(raw_name)
                        fname = os.path.join(raw_name, "generated" + ext)

                    fig.savefig(fname, **savefig_kwargs)
                    close_mpl_figure(fig)
                    pytest.skip("Skipping test, since generating data")
Example #44
    def check_graphic(self, tol=_DEFAULT_IMAGE_TOLERANCE):
        """Checks the CRC matches for the current matplotlib.pyplot figure, and closes the figure."""

        test_id = self.id()

        figure = plt.gcf()

        try:
            expected_fname = os.path.join(os.path.dirname(__file__),
                                          'reference', 'visual_tests',
                                          test_id + '.png')

            if not os.path.isdir(os.path.dirname(expected_fname)):
                os.makedirs(os.path.dirname(expected_fname))

            #: The path where the images generated by the tests should go.
            image_output_directory = os.path.join(os.path.dirname(__file__),
                                                  'result_image_comparison')
            if not os.access(image_output_directory, os.W_OK):
                if not os.access(os.getcwd(), os.W_OK):
                    raise IOError('Write access to a local disk is required '
                                  'to run image tests.  Run the tests from a '
                                  'current working directory you have write '
                                  'access to, to avoid this issue.')
                else:
                    image_output_directory = os.path.join(os.getcwd(), 'result_image_comparison')
            result_fname = os.path.join(image_output_directory, test_id + '.png')

            if not os.path.isdir(os.path.dirname(result_fname)):
                # Handle race-condition where the directories are
                # created sometime between the check above and the
                # creation attempt below.
                try:
                    os.makedirs(os.path.dirname(result_fname))
                except OSError as err:
                    # Don't care about "File exists"
                    if err.errno != 17:
                        raise

            # Output filename if the test output a file itself (using -o)
            output_fname = os.path.join(os.path.dirname(__file__), test_id+'.png')
            # If the test created an output file itself then move that to the results folder, otherwise create an output
            if os.path.exists(output_fname):
                shutil.move(os.path.join(os.path.dirname(__file__), test_id+'.png'), result_fname)
            else:
                figure.savefig(result_fname)

            if not os.path.exists(expected_fname):
                logging.warn('Created image for test %s' % test_id)
                shutil.copy2(result_fname, expected_fname)

            try:
                err = mcompare.compare_images(expected_fname, result_fname, tol=tol)
            except ValueError:
                failed_name = mcompare.make_test_filename(result_fname, 'failed-diff')
                shutil.copy2(os.path.join(os.path.dirname(__file__),
                                          'reference', 'kitten.png'), failed_name)
                err = "Images differ in size and so are not comparable"

            if _DISPLAY_FIGURES:
                if err:
                    print(('Image comparison would have failed. Message: %s' % err))
                plt.show()
            else:
                assert not err, 'Image comparison failed. Message: %s' % err

        finally:
            plt.close()
Example #45
def check_images(pth1, pth2, *, tol):
    result = compare_images(pth1, pth2, tol=tol)
    assert result is None, result
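One way this thin wrapper might be used from a pytest function; the baseline path is hypothetical:

def test_simple_line(tmp_path):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    actual = str(tmp_path / 'line.png')
    fig.savefig(actual)
    # 'baseline/line.png' is a pre-generated reference image
    check_images('baseline/line.png', actual, tol=0.1)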
Example #46
File: idiff.py Project: etraiger/iris
def step_over_diffs(result_dir, action, display=True):
    processed = False
    dname = os.path.dirname(iris.tests.__file__)
    lock = filelock.FileLock(os.path.join(dname, _POSTFIX_LOCK))
    if action in ['first', 'last']:
        kind = action
    elif action in ['similar', 'different']:
        kind = 'most {}'.format(action)
    else:
        emsg = 'Unknown action: {!r}'
        raise ValueError(emsg.format(action))
    if display:
        msg = ('\nComparing the {!r} expected image with '
               'the test result image.')
        print(msg.format(kind))

    # Remove old image diff results.
    target = os.path.join(result_dir, '*{}'.format(_POSTFIX_DIFF))
    for fname in glob(target):
        os.remove(fname)

    with lock.acquire(timeout=30):
        # Load the imagerepo.
        repo_fname = os.path.join(dname, _POSTFIX_JSON)
        with open(repo_fname, 'rb') as fi:
            repo = json.load(codecs.getreader('utf-8')(fi))

        # Filter out all non-test result image files.
        target_glob = os.path.join(result_dir, 'result-*.png')
        results = []
        for fname in sorted(glob(target_glob)):
            # We only care about PNG images.
            try:
                im = Image.open(fname)
                if im.format != 'PNG':
                    # Ignore - it's not a png image.
                    continue
            except IOError:
                # Ignore - it's not an image.
                continue
            results.append(fname)

        count = len(results)

        for count_index, result_fname in enumerate(results):
            key = os.path.splitext('-'.join(result_fname.split('-')[1:]))[0]
            try:
                # Calculate the test result perceptual image hash.
                phash = imagehash.phash(Image.open(result_fname),
                                        hash_size=iris.tests._HASH_SIZE)
                uris = repo[key]
                hash_index, distance = _calculate_hit(uris, phash, action)
                uri = uris[hash_index]
            except KeyError:
                wmsg = 'Ignoring unregistered test result {!r}.'
                warnings.warn(wmsg.format(key))
                continue
            with temp_png(key) as expected_fname:
                processed = True
                resource = requests.get(uri)
                if resource.status_code == 200:
                    with open(expected_fname, 'wb') as fo:
                        fo.write(resource.content)
                else:
                    # Perhaps the uri has not been pushed into the repo yet,
                    # so check if a local "developer" copy is available ...
                    local_fname = os.path.join(result_dir,
                                               os.path.basename(uri))
                    if not os.path.isfile(local_fname):
                        emsg = 'Bad URI {!r} for test {!r}.'
                        raise ValueError(emsg.format(uri, key))
                    else:
                        # The temporary expected filename has the test name
                        # baked into it, and is used in the diff plot title.
                        # So copy the local file to the expected file to
                        # maintain this helpfulness.
                        shutil.copy(local_fname, expected_fname)
                mcompare.compare_images(expected_fname, result_fname, tol=0)
                diff_fname = os.path.splitext(result_fname)[0] + _POSTFIX_DIFF
                args = expected_fname, result_fname, diff_fname
                if display:
                    msg = 'Image {} of {}: hamming distance = {} [{!r}]'
                    status = msg.format(count_index + 1, count, distance, kind)
                    prefix = repo, key, repo_fname, phash, status
                    yield prefix + args
                else:
                    yield args
        if display and not processed:
            print('\nThere are no iris test result images to process.\n')
Example #47
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.testing.compare import compare_images
import matplotlib.image as mpimg
import os

data = pd.read_csv('data.csv', sep=',', index_col='Region')

fig, axs = plt.subplots(nrows=1, ncols=6, figsize=(13, 6), dpi=72, sharey=True)
fig.subplots_adjust(hspace=0.1, wspace=0.05)

for x, y in enumerate(data.index):
    axs[x].scatter(range(3),
                   data.loc[y],
                   color=['tab:orange', 'tab:blue', 'tab:green'],
                   facecolors='none',
                   linewidth=3,
                   s=200)
    axs[x].set_title(y)
    axs[x].set_xticks(range(3))
    axs[x].set_xticklabels(data.columns, rotation='vertical')
    axs[x].margins(x=0.1, y=0.025)

axs[0].set_ylabel('Poblacion')
fig.tight_layout()

plt.savefig('generadaxComparar.png')
# 255 * 0.12 = 30.6: at least 88% of the pixel values match, so the images
# can be considered equal.
if compare_images('original.png', 'generadaxComparar.png', tol=30.6) is None:
    os.remove('generadaxComparar.png')
    mpimg.imsave('generada.png', mpimg.imread('original.png'))
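For context on the tol=30.6 above: compare_images computes the RMS difference of 8-bit pixel values (0-255) and passes when it does not exceed tol. A rough standalone sketch of that metric, assuming same-sized PNG inputs (plt.imread returns PNG data as floats in [0, 1], so it is rescaled):

import numpy as np
import matplotlib.pyplot as plt

def rms_difference(path_a, path_b):
    # Approximates the RMS metric used by matplotlib's compare_images.
    a = plt.imread(path_a).astype(np.float64) * 255
    b = plt.imread(path_b).astype(np.float64) * 255
    return float(np.sqrt(np.mean((a - b) ** 2)))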
Example #48
def test_data_MC(tmp_path):
    fname = tmp_path / "fig.pdf"
    histo_dict_list = [
        {
            "label": "Background",
            "isData": False,
            "yields": np.asarray([12.5, 14]),
            "variable": "x",
        },
        {
            "label": "Signal",
            "isData": False,
            "yields": np.asarray([2, 5]),
            "variable": "x",
        },
        {
            "label": "Data",
            "isData": True,
            "yields": np.asarray([13, 15]),
            "variable": "x",
        },
    ]
    total_model_unc = np.sqrt([0.17, 0.29])
    bin_edges = np.asarray([1, 2, 3])
    matplotlib_visualize.data_MC(histo_dict_list, total_model_unc, bin_edges,
                                 fname)
    assert compare_images("tests/contrib/reference/data_MC.pdf", str(fname),
                          0) is None

    histo_dict_list_log = copy.deepcopy(histo_dict_list)
    histo_dict_list_log[0]["yields"] = np.asarray([2000, 14])
    histo_dict_list_log[2]["yields"] = np.asarray([2010, 15])
    total_model_unc_log = np.asarray([50, 1.5])
    bin_edges_log = np.asarray([10, 100, 1000])
    fname_log = fname.with_name(fname.stem + "_log" + fname.suffix)

    # automatic log scale; the output is saved with a "_log" suffix (fname_log)
    matplotlib_visualize.data_MC(histo_dict_list_log,
                                 total_model_unc_log,
                                 bin_edges_log,
                                 fname,
                                 log_scale_x=True)
    assert (compare_images("tests/contrib/reference/data_MC_log.pdf",
                           str(fname_log), 0) is None)

    # linear scale forced
    matplotlib_visualize.data_MC(histo_dict_list,
                                 total_model_unc,
                                 bin_edges,
                                 fname,
                                 log_scale=False)
    assert compare_images("tests/contrib/reference/data_MC.pdf", str(fname),
                          0) is None

    # three open figures, does not change when calling with close_figure
    assert len(plt.get_fignums()) == 3

    # log scale forced
    matplotlib_visualize.data_MC(
        histo_dict_list_log,
        total_model_unc_log,
        bin_edges_log,
        fname,
        log_scale=True,
        log_scale_x=True,
        close_figure=True,
    )
    assert (compare_images("tests/contrib/reference/data_MC_log.pdf",
                           str(fname_log), 0) is None)
    assert len(plt.get_fignums()) == 3
    plt.close("all")
Example #49
File: plotting.py Project: dilawar/scanpy
def test_scatterplots():

    pbmc = sc.datasets.pbmc68k_reduced()
    outfile = NamedTemporaryFile(suffix='.png', prefix='scanpy_test_scatter_', delete=False)

    # test pca
    sc.pl.pca(pbmc, color='bulk_labels', show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_pca.png', outfile.name, tolerance)
    assert res is None, res

    # test projection='3d'
    sc.pl.pca(pbmc, color='bulk_labels', projection='3d', show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_3dprojection.png', outfile.name, tolerance)
    assert res is None, res

    sc.pl.pca(pbmc, color=['CD3D', 'CD79A'], components=['1,2', '1,3'],
              vmax=5, use_raw=False, vmin=-5, cmap='seismic', show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_multipanel.png', outfile.name, tolerance)
    assert res is None, res

    # test tsne
    # I am removing this test because slight differences are present even
    # after setting a random_state.
    # sc.tl.tsne(pbmc, random_state=0, n_pcs=30)
    # sc.pl.tsne(pbmc, color=['CD3D', 'louvain'], show=False)
    # pl.savefig(outfile.name, dpi=80)
    # pl.close()

    # res = compare_images(ROOT + '/master_tsne.png', outfile.name, tolerance)
    # assert res is None, res

    # test umap with louvain clusters and palette
    sc.pl.umap(pbmc, color=['louvain'],
               palette=['b', 'g', 'r', 'yellow', 'black', 'gray', 'lightblue'],
               frameon=False, show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_umap.png', outfile.name, tolerance)
    assert res is None, res

    # test umap with gene expression
    sc.pl.umap(pbmc, color=['LYZ', 'CD79A'], s=20, alpha=0.5, frameon=False, show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_umap_gene_expr.png', outfile.name, tolerance)
    assert res is None, res

    # test edges = True
    sc.pp.neighbors(pbmc)
    sc.pl.umap(pbmc, color='louvain', edges=True, edges_width=0.1, s=50, show=False)
    pl.savefig(outfile.name, dpi=80)
    pl.close()

    res = compare_images(ROOT + '/master_umap_with_edges.png', outfile.name, tolerance)
    assert res is None, res
Example #50
prefix = 'addplot'
tdir = 'test_images/'
refd = 'reference_images/'
#os.system('rm -f '+tdir+prefix+'*.jpg')
os.system('rm -f ' + tdir + prefix + '*.png')

IMGCOMP_TOLERANCE = 7.0

# ---- Test 01 -----

fname = prefix + '01.png'
mpf.plot(df, volume=True, savefig=tdir + fname)

os.system('ls -l ' + tdir + fname)

result = compare_images(refd + fname, tdir + fname, tol=IMGCOMP_TOLERANCE)
if result is not None:
    print('result=', result)
assert result is None

# ---- Test 02 -----

fname = prefix + '02.png'
apdict = mpf.make_addplot(df['LowerB'])
mpf.plot(df, volume=True, addplot=apdict, savefig=tdir + fname)

os.system('ls -l ' + tdir + fname)

result = compare_images(refd + fname, tdir + fname, tol=IMGCOMP_TOLERANCE)
if result is not None:
    print('result=', result)
assert result is None
Example #51
File: __init__.py Project: imbasimba/pywwt
def assert_widget_image(tmpdir, widget, filename, fail_now=True):
    """
    Render an image from the given WWT widget and assert that it matches an
    expected version. The expected version might vary depending on the platform
    and/or OpenGL renderer.
    """
    # If requested, save the "actual" images in another directory that will be
    # preserved beyond the test run.

    if IMAGE_OUTPUT_DIR:
        actual = os.path.join(IMAGE_OUTPUT_DIR, filename)
    else:
        actual = tmpdir.join(filename).strpath

    widget.render(actual)

    # Get the path to the "expected" image. There can be a variety of
    # versions, unfortunately, due to differences between different OpenGL
    # renderers.

    expected = None

    if FRAMEWORK_VARIANT is not None:
        p = os.path.join(DATA, FRAMEWORK + FRAMEWORK_VARIANT, filename)
        if os.path.exists(p):
            expected = p

    if expected is None:
        expected = os.path.join(DATA, FRAMEWORK, filename)

    # Do the actual comparison.

    try:
        msg = compare_images(expected, actual, tol=1.6)
    except Exception as e:
        msg = 'Image comparison failed with exception: {}'.format(e)
        print_exc()

    if msg is None:
        return  # success!

    # If we're on a CI environment, output a script to regenerate the images.

    if RUNNING_ON_CI:
        with open(expected, 'rb') as f:
            expected = b64encode(f.read()).decode()

        with open(actual, 'rb') as f:
            actual = b64encode(f.read()).decode()

        print(
            REPRODUCIBILITY_SCRIPT.format(
                actual=actual,
                expected=expected,
                filename=filename,
            ))

    if fail_now:
        pytest.fail(msg, pytrace=False)

    return '{}: {}'.format(filename, msg)
Example #52
 def do_test():
     err = compare_images(expected, actual, tol, in_decorator=True)
     if err:
         raise SkipTest("Error between {} and {} is {:.5f}, which is bigger than the tolerance of {:.5f}".format(actual, expected, err['rms'], tol))
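For reference: with in_decorator=True, compare_images returns None on a match and a dict on failure (keys include 'rms', 'tol', 'expected', 'actual', and 'diff'), which is what the err['rms'] lookup above relies on. A small sketch of reading that dict:

from matplotlib.testing.compare import compare_images

def describe_difference(expected, actual, tol=2.0):
    err = compare_images(expected, actual, tol, in_decorator=True)
    if err is None:
        return "match within tol={}".format(tol)
    return "rms {:.5f} exceeds tol {:.5f}".format(err["rms"], err["tol"])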
Example #53
        def item_function_wrapper(*args, **kwargs):

            baseline_dir = compare.kwargs.get('baseline_dir', None)
            if baseline_dir is None:
                if self.baseline_dir is None:
                    baseline_dir = os.path.join(
                        os.path.dirname(item.fspath.strpath), 'baseline')
                else:
                    baseline_dir = self.baseline_dir
                baseline_remote = False
            else:
                baseline_remote = baseline_dir.startswith(
                    ('http://', 'https://'))
                if not baseline_remote:
                    baseline_dir = os.path.join(
                        os.path.dirname(item.fspath.strpath), baseline_dir)

            with plt.style.context(style,
                                   after_reset=True), switch_backend(backend):

                # Run test and get figure object
                if inspect.ismethod(original):  # method
                    # In some cases, for example if setup_method is used,
                    # original appears to belong to an instance of the test
                    # class that is not the same as args[0], and args[0] is the
                    # one that has the correct attributes set up from setup_method
                    # so we ignore original.__self__ and use args[0] instead.
                    fig_or_figures = original.__func__(*args, **kwargs)
                else:  # function
                    fig_or_figures = original(*args, **kwargs)

                if isinstance(fig_or_figures, list):
                    is_fig = [hasattr(f, "savefig") for f in fig_or_figures]
                    if not all(is_fig):
                        raise ValueError(
                            "return value is not a list of figures")

                if remove_text:
                    if isinstance(fig_or_figures, list):
                        for f in fig_or_figures:
                            remove_ticks_and_titles(f)
                    else:
                        remove_ticks_and_titles(fig_or_figures)

                # Find test name to use as plot name
                filename = compare.kwargs.get('filename', None)
                if filename is None:
                    # example to clarify file naming:
                    # tests/test_feature.py::test_works --> filename: tests__test_feature_py__test_works
                    # tests/test_feature.py::test_class::test_member01 --> filename: tests__test_feature_py__test_class__test_member01
                    subnames = item.nodeid.split('::')
                    pathnames = subnames[0]
                    pathnames = '__'.join(
                        pathnames.replace('.', '_').split('/'))
                    namespace = subnames[1:]
                    namespace = '_'.join(namespace)
                    filename = pathnames + '__' + namespace + '.png'
                    filename = filename.replace('[', '_').replace(']', '_')
                    filename = filename.replace('/', '_')
                    filename = filename.replace('_.png', '.png')

                # What we do now depends on whether we are generating the
                # reference images or simply running the test.
                if self.generate_dir is None:

                    # Save the figure
                    result_dir = tempfile.mkdtemp(dir=self.results_dir)
                    test_image = os.path.abspath(
                        os.path.join(result_dir, filename))

                    if isinstance(fig_or_figures, list):
                        for i, f in enumerate(fig_or_figures):
                            path, ext = os.path.splitext(test_image)
                            current_test_image = f'{path}_{i}{ext}'
                            f.savefig(current_test_image, **savefig_kwargs)
                            close_mpl_figure(f)
                    else:
                        fig_or_figures.savefig(test_image, **savefig_kwargs)
                        close_mpl_figure(fig_or_figures)

                    # Find path to baseline image
                    if baseline_remote:
                        baseline_image_ref = _download_file(
                            baseline_dir, filename)
                    else:
                        baseline_image_ref = os.path.abspath(
                            os.path.join(os.path.dirname(item.fspath.strpath),
                                         baseline_dir, filename))

                    if not os.path.exists(baseline_image_ref):
                        pytest.fail(
                            "Image file not found for comparison test in: "
                            "\n\t{baseline_dir}"
                            "\n(This is expected for new tests.)\nGenerated Image: "
                            "\n\t{test}".format(baseline_dir=baseline_dir,
                                                test=test_image),
                            pytrace=False)

                    # distutils may put the baseline images in non-accessible places,
                    # copy to our tmpdir to be sure to keep them in case of failure
                    baseline_image = os.path.abspath(
                        os.path.join(result_dir, 'baseline-' + filename))
                    shutil.copyfile(baseline_image_ref, baseline_image)

                    # Compare image size ourselves since the Matplotlib
                    # exception is a bit cryptic in this case and doesn't show
                    # the filenames
                    expected_shape = imread(baseline_image).shape[:2]
                    actual_shape = imread(test_image).shape[:2]
                    if expected_shape != actual_shape:
                        error = SHAPE_MISMATCH_ERROR.format(
                            expected_path=baseline_image,
                            expected_shape=expected_shape,
                            actual_path=test_image,
                            actual_shape=actual_shape)
                        pytest.fail(error, pytrace=False)

                    msg = compare_images(baseline_image,
                                         test_image,
                                         tol=tolerance)

                    if msg is None:
                        shutil.rmtree(result_dir)
                    else:
                        pytest.fail(msg, pytrace=False)

                else:

                    if not os.path.exists(self.generate_dir):
                        os.makedirs(self.generate_dir)
                    test_image = os.path.abspath(
                        os.path.join(self.generate_dir, filename))

                    if isinstance(fig_or_figures, list):
                        for i, f in enumerate(fig_or_figures):
                            path, ext = os.path.splitext(test_image)
                            current_test_image = f"{path}_{i}{ext}"
                            f.savefig(current_test_image, **savefig_kwargs)
                            close_mpl_figure(f)
                    else:
                        fig_or_figures.savefig(test_image, **savefig_kwargs)
                        close_mpl_figure(fig_or_figures)
                    pytest.skip("Skipping test, since generating data")
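This wrapper has the shape of pytest-mpl's image-comparison hook; assuming that is the library in play, a typical test it would wrap looks like the sketch below (the decorator arguments shown are the common ones, the plot itself is made up):

import matplotlib.pyplot as plt
import pytest

@pytest.mark.mpl_image_compare(baseline_dir='baseline', tolerance=10)
def test_sine_like_wave():
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0, 1, 0, -1])
    return fig  # the wrapper saves and compares this figure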
Example #54
    def test_save_chart_image(self, request, database_backend, test_full_class,
                              tmpdir):
        """
        Verify API works.

        NB No db verification in API test, as API for verifying does not exist.
        TODO: Verify db saved chart contents when load/edit features added.
        """
        test_database = request.getfixturevalue(database_backend)

        test_existing_class = NewClass.from_dict(test_full_class.json_dict())
        for student in test_existing_class:
            if student.avatar_id:
                Path(test_existing_class.temp_avatars_dir,
                     student.avatar_id).write_text(student.avatar_id)
        # Create class in db:
        test_database.create_class(test_existing_class)

        # Find class id to load:
        classes = test_database.get_classes()
        test_class_id = classes[0].id

        # test_class = Class.from_dict(test_full_class_data_set['json_dict_rep'])
        test_class = test_database.load_class(test_class_id)

        test_data_dict = {
            'class_id': test_class_id,
            'class_name': "test_class_name",
            'chart_name': "test_chart_name",
            'chart_default_filename': "test_chart_default_filename",
            'chart_params': {
                "some": "chart",
                "default": "params"
            },
            'score-students_dict': {
                0: [test_class.students[0]],  # Cali
                1: [
                    test_class.students[1],  # Monty
                    test_class.students[7]
                ],  # Regina
                3: [
                    test_class.students[2],  # Abby
                    test_class.students[9]
                ],  # Alex
                # No score, not returned: None: [test_class.students[3],  # Zach
                #                                test_class.students[11]],  # Edgar
                50: [test_class.students[4]],  # Janell
                99: [test_class.students[5]],  # Matthew
                100: [test_class.students[6]],  # Olivia
                2: [test_class.students[8]],  # Ashley
                4: [test_class.students[10]],  # Melissa
                6: [test_class.students[12]],  # Danielle
                7: [test_class.students[13]],  # Kayla
                8: [test_class.students[14]],  # Jaleigh
            },
        }
        # Create chart in db:
        test_database.create_chart(test_data_dict)

        mock_plt = plt.figure(figsize=(19.20, 10.80))

        test_image = io.BytesIO()
        # Images must both be saved as '.png' for comparison.
        test_image_path = Path(tmpdir, 'test image.png')
        mock_plt.savefig(test_image_path, format='png', dpi=300)
        test_image.seek(0)  # Return pointer to start of binary stream.

        save_chart_path = test_database.save_chart_image(
            test_data_dict, mock_plt)
        # Return pointer:
        test_image.seek(0)  # Return pointer to start of binary stream.
        # Path exists and image at path is expected data:
        assert save_chart_path.exists()

        # compare_images returns None when the images match within tolerance.
        assert compare_images(str(save_chart_path), str(test_image_path), 0.0001) is None
Example #55
        def item_function_wrapper(*args, **kwargs):

            baseline_dir = compare.kwargs.get('baseline_dir', None)
            if baseline_dir is None:
                if self.baseline_dir is None:
                    baseline_dir = os.path.join(
                        os.path.dirname(item.fspath.strpath), 'baseline')
                else:
                    baseline_dir = self.baseline_dir
                baseline_remote = False
            else:
                baseline_remote = baseline_dir.startswith(
                    ('http://', 'https://'))
                if not baseline_remote:
                    baseline_dir = os.path.join(
                        os.path.dirname(item.fspath.strpath), baseline_dir)

            with plt.style.context(style,
                                   after_reset=True), switch_backend(backend):

                # Run test and get figure object
                if inspect.ismethod(original):  # method
                    # In some cases, for example if setup_method is used,
                    # original appears to belong to an instance of the test
                    # class that is not the same as args[0], and args[0] is the
                    # one that has the correct attributes set up from setup_method
                    # so we ignore original.__self__ and use args[0] instead.
                    fig = original.__func__(*args, **kwargs)
                else:  # function
                    fig = original(*args, **kwargs)

                if remove_text:
                    remove_ticks_and_titles(fig)

                # Find test name to use as plot name
                filename = compare.kwargs.get('filename', None)
                if filename is None:
                    filename = item.name + '.png'
                    filename = filename.replace('[', '_').replace(']', '_')
                    filename = filename.replace('/', '_')
                    filename = filename.replace('_.png', '.png')

                # What we do now depends on whether we are generating the
                # reference images or simply running the test.
                if self.generate_dir is None:

                    # Save the figure
                    result_dir = tempfile.mkdtemp(dir=self.results_dir)
                    test_image = os.path.abspath(
                        os.path.join(result_dir, filename))

                    fig.savefig(test_image, **savefig_kwargs)
                    close_mpl_figure(fig)

                    # Find path to baseline image
                    if baseline_remote:
                        baseline_image_ref = _download_file(
                            baseline_dir, filename)
                    else:
                        baseline_image_ref = os.path.abspath(
                            os.path.join(os.path.dirname(item.fspath.strpath),
                                         baseline_dir, filename))

                    if not os.path.exists(baseline_image_ref):
                        pytest.fail(
                            "Image file not found for comparison test in: "
                            "\n\t{baseline_dir}"
                            "\n(This is expected for new tests.)\nGenerated Image: "
                            "\n\t{test}".format(baseline_dir=baseline_dir,
                                                test=test_image),
                            pytrace=False)

                    # distutils may put the baseline images in non-accessible places,
                    # copy to our tmpdir to be sure to keep them in case of failure
                    baseline_image = os.path.abspath(
                        os.path.join(result_dir, 'baseline-' + filename))
                    shutil.copyfile(baseline_image_ref, baseline_image)

                    msg = compare_images(baseline_image,
                                         test_image,
                                         tol=tolerance)

                    if msg is None:
                        shutil.rmtree(result_dir)
                    else:
                        pytest.fail(msg, pytrace=False)

                else:

                    if not os.path.exists(self.generate_dir):
                        os.makedirs(self.generate_dir)

                    fig.savefig(
                        os.path.abspath(
                            os.path.join(self.generate_dir, filename)),
                        **savefig_kwargs)
                    close_mpl_figure(fig)
                    pytest.skip("Skipping test, since generating data")
Example #56
 def test_Beachball(self):
     """
     Create beachball examples in tests/output directory.
     """
     # http://en.wikipedia.org/wiki/File:USGS_sumatra_mts.gif
     mt = [0.91, -0.89, -0.02, 1.78, -1.55, 0.47]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_sumatra_mt.png')
         compare_images(tf.name, expected_image, 0.001)
     np1 = [274, 13, 55]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np1, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_sumatra_np1.png')
         compare_images(tf.name, expected_image, 0.001)
     np2 = [130, 79, 98]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np2, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_sumatra_np2.png')
         compare_images(tf.name, expected_image, 0.001)
     #
     np1 = [264.98, 45.00, -159.99]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np1, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_19950128_np1.png')
         compare_images(tf.name, expected_image, 0.001)
     np2 = [160.55, 76.00, -46.78]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np2, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_19950128_np2.png')
         compare_images(tf.name, expected_image, 0.001)
     #
     mt = [1.45, -6.60, 5.14, -2.67, -3.16, 1.36]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_20090102_mt.png')
         compare_images(tf.name, expected_image, 0.001)
     np1 = [235, 80, 35]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np1, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_20090102_np1.png')
         compare_images(tf.name, expected_image, 0.001)
     np2 = [138, 56, 168]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np2, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_20090102_np2.png')
         compare_images(tf.name, expected_image, 0.001)
     # Explosion
     mt = [1, 1, 1, 0, 0, 0]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_explosion.png')
         compare_images(tf.name, expected_image, 0.001)
     # Implosion
     mt = [-1, -1, -1, 0, 0, 0]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_implosion.png')
         compare_images(tf.name, expected_image, 0.001)
     # CLVD - Compensate Linear Vector Dipole
     mt = [1, -2, 1, 0, 0, 0]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_clvd.png')
         compare_images(tf.name, expected_image, 0.001)
     # Double Couple
     mt = [1, -1, 0, 0, 0, 0]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_double_couple.png')
         compare_images(tf.name, expected_image, 0.001)
     # Lars
     mt = [1, -1, 0, 0, 0, -1]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_lars.png')
         compare_images(tf.name, expected_image, 0.001)
     # http://wwweic.eri.u-tokyo.ac.jp/yuji/Aki-nada/
     np1 = [179, 55, -78]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np1, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_geiyo_np1.png')
         compare_images(tf.name, expected_image, 0.001)
     #
     np1 = [10, 42.5, 90]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np1, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_honshu_np1.png')
         compare_images(tf.name, expected_image, 0.001)
     np2 = [10, 42.5, 92]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np2, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_honshu_np2.png')
         compare_images(tf.name, expected_image, 0.001)
     # http://wwweic.eri.u-tokyo.ac.jp/yuji/tottori/
     np1 = [150, 87, 1]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(np1, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_tottori_np1.png')
         compare_images(tf.name, expected_image, 0.001)
     # http://iisee.kenken.go.jp/staff/thara/2004/09/20040905_1/2nd.html
     mt = [0.99, -2.00, 1.01, 0.92, 0.48, 0.15]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_20040905_1_mt.png')
         compare_images(tf.name, expected_image, 0.001)
     # http://iisee.kenken.go.jp/staff/thara/2004/09/20040905_0/1st.html
     mt = [5.24, -6.77, 1.53, 0.81, 1.49, -0.05]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_20040905_0_mt.png')
         compare_images(tf.name, expected_image, 0.001)
     # http://iisee.kenken.go.jp/staff/thara/miyagi.htm
     mt = [16.578, -7.987, -8.592, -5.515, -29.732, 7.517]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_miyagi_mt.png')
         compare_images(tf.name, expected_image, 0.001)
     # http://iisee.kenken.go.jp/staff/thara/20050613/chile.html
     mt = [-2.39, 1.04, 1.35, 0.57, -2.94, -0.94]
     with NamedTemporaryFile(suffix='.png') as tf:
         Beachball(mt, outfile=tf.name)
         expected_image = os.path.join(self.path, 'bb_chile_mt.png')
         compare_images(tf.name, expected_image, 0.001)
        """
        # test div by int
        assert (wf3/2)(0,0) == .5/(2*π)**0.25/(2*π*4)**.25, "Error dividing by int"    
        # test div by float
        assert (wf3/0.5)(0,0) == 2/(2*π)**0.25/(2*π*4)**.25, "Error dividing by float"
        """

        wf1 = cls_type.init_gaussian((0, 1))*1j
        plot_params = {"x_range": (-4, 4), "N": 40,
                       "method": "pdf", "x_label": "Q"}
        plt.close()
        plot_result = wf1.plot_wf(**plot_params)
        plot_result.savefig("wavefunction_plot_test_file_new.png")
        from matplotlib.testing.compare import compare_images
        try:
            assert not compare_images(
                "wavefunction_plot_test_file.png",
                "wavefunction_plot_test_file_new.png",
                .001), "Error plotting wf"
        except AssertionError:
            print("AssertionError: Error plotting wf")
        finally:
            import os
            os.remove("wavefunction_plot_test_file_new.png")


    """
    test 3D figure plot

    # plot_params["method"] = "cartesian"
    # plot_params["method"] = "polar"
    plot_params["method"] = "3d"
    plt.close()
    wf1.plot_wf(**plot_params)
Example #58
def ggplot_equals(gg, right):
    """
    Compare ggplot object to image determined by `right`

    Parameters
    ----------
    gg : ggplot
        ggplot object
    right : str | tuple
        Identifier. If a tuple, then first element is the
        identifier and the second element is a `dict`.
        The `dict` can have two keys
            - tol - tolerance for the image comparison, a float.
            - savefig_kwargs - Parameter used by MPL to save
                               the figure. This is a `dict`.

    The right looks like any one of the following::

       - 'identifier'
       - ('identifier', {'tol': 17})
       - ('identifier', {'tol': 17, 'savefig_kwargs': {'dpi': 80}})

    This function is meant to monkey patch ggplot.__eq__
    so that tests can use the `assert` statement.
    """
    _setup()
    if isinstance(right, (tuple, list)):
        name, params = right
        tol = params.get('tol', TOLERANCE)
        _savefig_kwargs = params.get('savefig_kwargs', {})
    else:
        name, tol = right, TOLERANCE
        _savefig_kwargs = {}

    savefig_kwargs = {'dpi': DPI}
    savefig_kwargs.update(_savefig_kwargs)

    gg += test_theme
    fig = gg.draw()
    test_file = inspect.stack()[1][1]
    filenames = make_test_image_filenames(name, test_file)

    # savefig ignores the figure face & edge colors
    facecolor = fig.get_facecolor()
    edgecolor = fig.get_edgecolor()
    if facecolor:
        savefig_kwargs['facecolor'] = facecolor
    if edgecolor:
        savefig_kwargs['edgecolor'] = edgecolor
        savefig_kwargs['frameon'] = True

    # Save the figure before testing whether the original image
    # actually exists. This makes creating new tests much easier,
    # as the result image can afterwards just be copied.
    fig.savefig(filenames.result, **savefig_kwargs)
    _teardown()
    if os.path.exists(filenames.baseline):
        shutil.copyfile(filenames.baseline, filenames.expected)
    else:
        # Putting the exception in a short function makes for
        # short pytest error messages
        raise_no_baseline_image(filenames.baseline)

    err = compare_images(filenames.expected,
                         filenames.result,
                         tol,
                         in_decorator=True)
    gg._err = err  # For the pytest error message
    return not err
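The docstring above says this function is meant to monkey patch ggplot.__eq__; a hedged sketch of that wiring in a conftest.py follows (the plotnine import path and the identifier are assumptions for illustration):

from plotnine import ggplot, aes, geom_point
from plotnine.data import mtcars

ggplot.__eq__ = ggplot_equals  # tests can now use plain assert statements

def test_simple_scatter():
    p = ggplot(mtcars, aes('wt', 'mpg')) + geom_point()
    assert p == 'simple_scatter'  # compared against the baseline image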
Example #59
def step_over_diffs(result_dir, action, display=True):
    processed = False
    dname = os.path.dirname(tephi.tests.__file__)
    lock = filelock.FileLock(os.path.join(dname, _POSTFIX_LOCK))
    if action in ["first", "last"]:
        kind = action
    elif action in ["similar", "different"]:
        kind = "most {}".format(action)
    else:
        emsg = "Unknown action: {!r}"
        raise ValueError(emsg.format(action))
    if display:
        msg = ("\nComparing the {!r} expected image with "
               "the test result image.")
        print(msg.format(kind))

    # Remove old image diff results.
    target = os.path.join(result_dir, f"*{_POSTFIX_DIFF}")
    for fname in glob(target):
        os.remove(fname)

    with lock.acquire(timeout=30):
        # Load the imagerepo.
        repo_fname = os.path.join(dname, _POSTFIX_JSON)
        with open(repo_fname, "rb") as fi:
            repo = json.load(codecs.getreader("utf-8")(fi))

        # Filter out all non-test result image files.
        target_glob = os.path.join(result_dir, "result-*.png")
        results = []
        for fname in sorted(glob(target_glob)):
            # We only care about PNG images.
            try:
                im = Image.open(fname)
                if im.format != "PNG":
                    # Ignore - it's not a png image.
                    continue
            except IOError:
                # Ignore - it's not an image.
                continue
            results.append(fname)

        count = len(results)

        for count_index, result_fname in enumerate(results):
            key = os.path.splitext("-".join(result_fname.split("-")[1:]))[0]
            try:
                # Calculate the test result perceptual image hash.
                phash = imagehash.phash(Image.open(result_fname),
                                        hash_size=tephi.tests._HASH_SIZE)
                uris = repo[key]
                hash_index, distance = _calculate_hit(uris, phash, action)
                uri = uris[hash_index]
            except KeyError:
                wmsg = "Ignoring unregistered test result {!r}."
                warnings.warn(wmsg.format(key))
                continue
            with temp_png(key) as expected_fname:
                processed = True
                response = requests.get(uri)
                if response.status_code == requests.codes.ok:
                    with open(expected_fname, "wb") as fo:
                        fo.write(response.content)
                else:
                    # Perhaps the uri has not been pushed into the repo yet,
                    # so check if a local "developer" copy is available ...
                    local_fname = os.path.join(result_dir,
                                               os.path.basename(uri))
                    if not os.path.isfile(local_fname):
                        emsg = "Bad URI {!r} for test {!r}."
                        raise ValueError(emsg.format(uri, key))
                    else:
                        # The temporary expected filename has the test name
                        # baked into it, and is used in the diff plot title.
                        # So copy the local file to the expected file to
                        # maintain this helpfulness.
                        shutil.copy(local_fname, expected_fname)
                try:
                    mcompare.compare_images(expected_fname,
                                            result_fname,
                                            tol=0)
                except Exception as e:
                    if isinstance(e, (ValueError, ImageComparisonFailure)):
                        print("Could not compare {}: {}".format(
                            result_fname, e))
                        continue
                    else:
                        # Propagate the exception, keeping the stack trace
                        raise
                diff_fname = os.path.splitext(result_fname)[0] + _POSTFIX_DIFF
                args = expected_fname, result_fname, diff_fname
                if display:
                    msg = "Image {} of {}: hamming distance = {} " "[{!r}]"
                    status = msg.format(count_index + 1, count, distance, kind)
                    prefix = repo, key, repo_fname, phash, status
                    yield prefix + args
                else:
                    yield args
        if display and not processed:
            print("\nThere are no tephi test result images to process.\n")
Example #60
import os

from matplotlib.testing.compare import compare_images

pngs = [f for f in os.listdir('test-data') if f.endswith('.png')]

for png in pngs:
    print(png)
    res = compare_images(os.path.join('test-data', png),
                         os.path.join('outputs', png), 17)
    print(res)
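The loop above only prints each result. Continuing the same script and directory layout, a sketch of turning it into a pass/fail check that exits non-zero on any mismatch:

failures = [png for png in pngs
            if compare_images(os.path.join('test-data', png),
                              os.path.join('outputs', png), 17) is not None]
if failures:
    raise SystemExit('Image comparison failed for: ' + ', '.join(failures))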