Example #1
def color_check(plugin, fmt="png"):
    """Check roundtrip behavior for color images.

    All major input types should be handled as ubytes and read
    back correctly.
    """
    img = img_as_ubyte(data.chelsea())
    r1 = roundtrip(img, plugin, fmt)
    testing.assert_allclose(img, r1)

    img2 = img > 128
    r2 = roundtrip(img2, plugin, fmt)
    testing.assert_allclose(img2.astype(np.uint8), r2)

    img3 = img_as_float(img)
    with expected_warnings(["precision loss|unclosed file"]):
        r3 = roundtrip(img3, plugin, fmt)
    testing.assert_allclose(r3, img)

    with expected_warnings(["precision loss"]):
        img4 = img_as_int(img)
    if fmt.lower() in ("tif", "tiff"):
        img4 -= 100
        with expected_warnings(["sign loss"]):
            r4 = roundtrip(img4, plugin, fmt)
        testing.assert_allclose(r4, img4)
    else:
        with expected_warnings(["sign loss|precision loss|unclosed file"]):
            r4 = roundtrip(img4, plugin, fmt)
            testing.assert_allclose(r4, img_as_ubyte(img4))

    img5 = img_as_uint(img)
    with expected_warnings(["precision loss|unclosed file"]):
        r5 = roundtrip(img5, plugin, fmt)
    testing.assert_allclose(r5, img)
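Every example in this collection uses the `expected_warnings` context manager from scikit-image's private test helpers. Below is a minimal self-contained sketch of how it behaves (assumptions: the helper lives in `skimage._shared._warnings`, each list entry is a regular expression, `|` separates alternatives, and the `r'\A\Z'` entry seen in later examples makes a pattern optional; the toy `noisy_function` is hypothetical and exists only for illustration):

import warnings

from skimage._shared._warnings import expected_warnings  # assumed import path


def noisy_function():
    # Hypothetical helper that always emits a warning.
    warnings.warn("precision loss when converting image", stacklevel=2)
    return 42


# Each listed pattern must match at least one warning raised in the block.
with expected_warnings(["precision loss"]):
    assert noisy_function() == 42

# "|" gives alternatives, and r"\A\Z" (an empty match) is the idiom used in
# the examples below to say "this warning may or may not be raised".
with expected_warnings([r"precision loss|\A\Z"]):
    noisy_function()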
Example #2
def test_neg_inf():
    expected_costs = np.where(a == 1, np.inf, 0)
    expected_path = [(1, 6),
                     (1, 5),
                     (1, 4),
                     (1, 3),
                     (1, 2),
                     (2, 1),
                     (3, 1),
                     (4, 1),
                     (5, 1),
                     (6, 1)]
    test_neg = np.where(a == 1, -1, 0)
    test_inf = np.where(a == 1, np.inf, 0)
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        m = mcp.MCP(test_neg, fully_connected=True)
    costs, traceback = m.find_costs([(1, 6)])
    return_path = m.traceback((6, 1))
    assert_array_equal(costs, expected_costs)
    assert_array_equal(return_path, expected_path)
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        m = mcp.MCP(test_inf, fully_connected=True)
    costs, traceback = m.find_costs([(1, 6)])
    return_path = m.traceback((6, 1))
    assert_array_equal(costs, expected_costs)
    assert_array_equal(return_path, expected_path)
Example #3
def check_wrap_around(ndim, axis):
    # create a ramp, but with the last pixel along axis equalling the first
    elements = 100
    ramp = np.linspace(0, 12 * np.pi, elements)
    ramp[-1] = ramp[0]
    image = ramp.reshape(tuple([elements if n == axis else 1
                                for n in range(ndim)]))
    image_wrapped = np.angle(np.exp(1j * image))

    index_first = tuple([0] * ndim)
    index_last = tuple([-1 if n == axis else 0 for n in range(ndim)])
    # unwrap the image without wrap around
    # We do not want warnings about length 1 dimensions
    with expected_warnings([r'Image has a length 1 dimension|\A\Z']):
        image_unwrap_no_wrap_around = unwrap_phase(image_wrapped, seed=0)
    print('endpoints without wrap_around:',
          image_unwrap_no_wrap_around[index_first],
          image_unwrap_no_wrap_around[index_last])
    # without wrap around, the endpoints of the image should differ
    assert_(abs(image_unwrap_no_wrap_around[index_first] -
                image_unwrap_no_wrap_around[index_last]) > np.pi)
    # unwrap the image with wrap around
    wrap_around = [n == axis for n in range(ndim)]
    # We do not want warnings about length 1 dimensions
    with expected_warnings([r'Image has a length 1 dimension.|\A\Z']):
        image_unwrap_wrap_around = unwrap_phase(image_wrapped, wrap_around,
                                                seed=0)
    print('endpoints with wrap_around:',
          image_unwrap_wrap_around[index_first],
          image_unwrap_wrap_around[index_last])
    # with wrap around, the endpoints of the image should be equal
    assert_almost_equal(image_unwrap_wrap_around[index_first],
                        image_unwrap_wrap_around[index_last])
Example #4
def test_save_buttons():
    viewer = get_image_viewer()
    sv = SaveButtons()
    viewer.plugins[0] += sv

    import tempfile
    fid, filename = tempfile.mkstemp(suffix='.png')
    os.close(fid)

    timer = QtCore.QTimer()
    timer.singleShot(100, QtGui.QApplication.quit)

    # exercise the button clicks
    sv.save_stack.click()
    sv.save_file.click()

    # call the save functions directly
    sv.save_to_stack()
    with expected_warnings(['precision loss']):
        sv.save_to_file(filename)

    img = data.imread(filename)

    with expected_warnings(['precision loss']):
        assert_almost_equal(img, img_as_uint(viewer.image))

    img = io.pop()
    assert_almost_equal(img, viewer.image)

    os.remove(filename)
Example #5
def test_wavelet_threshold():
    rstate = np.random.RandomState(1234)

    img = astro_gray
    sigma = 0.1
    noisy = img + sigma * rstate.randn(*(img.shape))
    noisy = np.clip(noisy, 0, 1)

    # employ a single, user-specified threshold instead of BayesShrink sigmas
    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        denoised = _wavelet_threshold(noisy, wavelet='db1', method=None,
                                      threshold=sigma)
    psnr_noisy = compare_psnr(img, noisy)
    psnr_denoised = compare_psnr(img, denoised)
    assert_(psnr_denoised > psnr_noisy)

    # either method or threshold must be defined
    with testing.raises(ValueError):
        _wavelet_threshold(noisy, wavelet='db1', method=None, threshold=None)

    # warns if a threshold is provided in a case where it would be ignored
    with expected_warnings(["Thresholding method ",
                            PYWAVELET_ND_INDEXING_WARNING]):
        _wavelet_threshold(noisy, wavelet='db1', method='BayesShrink',
                           threshold=sigma)
Example #6
def test_imsave_incorrect_dimension():
    with temporary_file(suffix='.png') as fname:
        with testing.raises(ValueError):
            with expected_warnings([fname + ' is a low contrast image']):
                imsave(fname, np.zeros((2, 3, 3, 1)))
        with testing.raises(ValueError):
            with expected_warnings([fname + ' is a low contrast image']):
                imsave(fname, np.zeros((2, 3, 2)))
Example #7
    def _test_image(self, image):
        with expected_warnings(['precision loss']):
            result_opening = grey.opening(image, self.disk)
        testing.assert_equal(result_opening, self.expected_opening)

        with expected_warnings(['precision loss']):
            result_closing = grey.closing(image, self.disk)
        testing.assert_equal(result_closing, self.expected_closing)
Example #8
def test_deprecated_params_attributes():
    for t in ('projective', 'affine', 'similarity'):
        tform = estimate_transform(t, SRC, DST)
        with expected_warnings(['`_matrix`.*deprecated']):
            assert_equal(tform._matrix, tform.params)

    tform = estimate_transform('polynomial', SRC, DST, order=3)
    with expected_warnings(['`_params`.*deprecated']):
        assert_equal(tform._params, tform.params)
Example #9
def test_euler_number():
    with expected_warnings(['`background`|CObject type']):
        en = regionprops(SAMPLE)[0].euler_number
    assert en == 0

    SAMPLE_mod = SAMPLE.copy()
    SAMPLE_mod[7, -3] = 0
    with expected_warnings(['`background`|CObject type']):
        en = regionprops(SAMPLE_mod)[0].euler_number
    assert en == -1
Example #10
def test_resize3d_keep():
    # keep 3rd dimension
    x = np.zeros((5, 5, 3), dtype=np.double)
    x[1, 1, :] = 1
    with expected_warnings(['The default mode']):
        resized = resize(x, (10, 10), order=0)
    ref = np.zeros((10, 10, 3))
    ref[2:4, 2:4, :] = 1
    assert_almost_equal(resized, ref)
    with expected_warnings(['The default mode']):
        resized = resize(x, (10, 10, 3), order=0)
    assert_almost_equal(resized, ref)
Example #11
def test_3d_fallback_black_tophat():
    image = np.ones((7, 7, 7), dtype=bool)
    image[2, 2:4, 2:4] = 0
    image[3, 2:5, 2:5] = 0
    image[4, 3:5, 3:5] = 0

    with expected_warnings([r'operator.*deprecated|\A\Z']):
        new_image = grey.black_tophat(image)
    footprint = ndi.generate_binary_structure(3, 1)
    with expected_warnings([r'operator.*deprecated|\A\Z']):
        image_expected = ndi.black_tophat(image, footprint=footprint)
    testing.assert_array_equal(new_image, image_expected)
Example #12
def test_warp_clip():
    x = np.zeros((5, 5), dtype=np.double)
    x[2, 2] = 1

    with expected_warnings(['The default mode', 'The default multichannel']):
        outx = rescale(x, 3, order=3, clip=False)
    assert outx.min() < 0

    with expected_warnings(['The default mode', 'The default multichannel']):
        outx = rescale(x, 3, order=3, clip=True)
    assert_almost_equal(outx.min(), 0)
    assert_almost_equal(outx.max(), 1)
Example #13
def test_spacing_1():
    n = 30
    lx, ly, lz = n, n, n
    data, _ = make_3d_syntheticdata(lx, ly, lz)

    # Rescale `data` along Y axis
    # `resize` is not yet 3D capable, so this must be done by looping in 2D.
    data_aniso = np.zeros((n, n * 2, n))
    for i, yz in enumerate(data):
        data_aniso[i, :, :] = resize(yz, (n * 2, n),
                                     mode='constant',
                                     anti_aliasing=False)

    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso = np.zeros_like(data_aniso)
    labels_aniso[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso[lx // 2 + small_l // 4,
                 ly - small_l // 2,
                 lz // 2 - small_l // 4] = 2

    # Test with `spacing` kwarg
    # First, anisotropic along Y
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
                                     spacing=(1., 2., 1.))
    assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()

    # Rescale `data` along X axis
    # `resize` is not yet 3D capable, so this must be done by looping in 2D.
    data_aniso = np.zeros((n, n * 2, n))
    for i in range(data.shape[1]):
        data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),
                                     mode='constant',
                                     anti_aliasing=False)

    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso2 = np.zeros_like(data_aniso)
    labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso2[lx - small_l // 2,
                  ly // 2 + small_l // 4,
                  lz // 2 - small_l // 4] = 2

    # Anisotropic along X
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        labels_aniso2 = random_walker(data_aniso,
                                      labels_aniso2,
                                      mode='cg', spacing=(2., 1., 1.))
    assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()
Example #14
def test_3d_fallback_white_tophat():
    image = np.zeros((7, 7, 7), dtype=bool)
    image[2, 2:4, 2:4] = 1
    image[3, 2:5, 2:5] = 1
    image[4, 3:5, 3:5] = 1

    with expected_warnings([r'operator.*deprecated|\A\Z']):
        new_image = grey.white_tophat(image)
    footprint = ndi.generate_binary_structure(3, 1)
    with expected_warnings([r'operator.*deprecated|\A\Z']):
        image_expected = ndi.white_tophat(
            image.view(dtype=np.uint8), footprint=footprint)
    assert_array_equal(new_image, image_expected)
Example #15
def test_multispectral_2d():
    lx, ly = 70, 100
    data, labels = make_2d_syntheticdata(lx, ly)
    data = data[..., np.newaxis].repeat(2, axis=-1)  # Expect identical output
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        multi_labels = random_walker(data, labels, mode='cg',
                                     multichannel=True)
    assert data[..., 0].shape == labels.shape
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        single_labels = random_walker(data[..., 0], labels, mode='cg')
    assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
    assert data[..., 0].shape == labels.shape
    return data, multi_labels, single_labels, labels
Example #16
def test_rescale_multichannel_defaults():
    # ensure multichannel=None matches the previous default behaviour

    # 2D: multichannel should default to False
    x = np.zeros((8, 3), dtype=np.double)
    with expected_warnings(['The default mode', 'The default multichannel']):
        scaled = rescale(x, 2, order=0)
    assert_equal(scaled.shape, (16, 6))

    # 3D: multichannel should default to True
    x = np.zeros((8, 8, 3), dtype=np.double)
    with expected_warnings(['The default mode', 'The default multichannel']):
        scaled = rescale(x, 2, order=0)
    assert_equal(scaled.shape, (16, 16, 3))
Example #17
def test_wavelet_denoising():
    rstate = np.random.RandomState(1234)

    # version with one odd-sized dimension
    astro_gray_odd = astro_gray[:, :-1]
    astro_odd = astro[:, :-1]

    for img, multichannel, convert2ycbcr in [(astro_gray, False, False),
                                             (astro_gray_odd, False, False),
                                             (astro_odd, True, False),
                                             (astro_odd, True, True)]:
        sigma = 0.1
        noisy = img + sigma * rstate.randn(*(img.shape))
        noisy = np.clip(noisy, 0, 1)

        # Verify that SNR is improved when true sigma is used
        with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
            denoised = restoration.denoise_wavelet(noisy, sigma=sigma,
                                                   multichannel=multichannel,
                                                   convert2ycbcr=convert2ycbcr)
        psnr_noisy = compare_psnr(img, noisy)
        psnr_denoised = compare_psnr(img, denoised)
        assert_(psnr_denoised > psnr_noisy)

        # Verify that SNR is improved with internally estimated sigma
        with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
            denoised = restoration.denoise_wavelet(noisy,
                                                   multichannel=multichannel,
                                                   convert2ycbcr=convert2ycbcr)
        psnr_noisy = compare_psnr(img, noisy)
        psnr_denoised = compare_psnr(img, denoised)
        assert_(psnr_denoised > psnr_noisy)

        # SNR is improved less with 1 wavelet level than with the default.
        denoised_1 = restoration.denoise_wavelet(noisy,
                                                 multichannel=multichannel,
                                                 wavelet_levels=1,
                                                 convert2ycbcr=convert2ycbcr)
        psnr_denoised_1 = compare_psnr(img, denoised_1)
        assert_(psnr_denoised > psnr_denoised_1)
        assert_(psnr_denoised_1 > psnr_noisy)

        # Test changing noise_std (higher threshold, so less energy in signal)
        with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
            res1 = restoration.denoise_wavelet(noisy, sigma=2 * sigma,
                                               multichannel=multichannel)
        with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
            res2 = restoration.denoise_wavelet(noisy, sigma=sigma,
                                               multichannel=multichannel)
        assert_(np.sum(res1**2) <= np.sum(res2**2))
Example #18
def test_2d_cg():
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        labels_cg = random_walker(data, labels, beta=90, mode='cg')
    assert (labels_cg[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
        full_prob = random_walker(data, labels, beta=90, mode='cg',
                                  return_full_prob=True)
    assert (full_prob[1, 25:45, 40:60] >=
            full_prob[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape
    return data, labels_cg
Example #19
def test_resize3d_keep():
    # keep 3rd dimension
    x = np.zeros((5, 5, 3), dtype=np.double)
    x[1, 1, :] = 1
    with expected_warnings(['The default mode']):
        resized = resize(x, (10, 10), order=0)
        with pytest.raises(ValueError):
            # output_shape too short
            resize(x, (10, ), order=0)
    ref = np.zeros((10, 10, 3))
    ref[2:4, 2:4, :] = 1
    assert_almost_equal(resized, ref)
    with expected_warnings(['The default mode']):
        resized = resize(x, (10, 10, 3), order=0)
    assert_almost_equal(resized, ref)
Example #20
def test_multispectral_3d():
    n = 30
    lx, ly, lz = n, n, n
    data, labels = make_3d_syntheticdata(lx, ly, lz)
    data = data[..., np.newaxis].repeat(2, axis=-1)  # Expect identical output
    with expected_warnings(['"cg" mode']):
        multi_labels = random_walker(data, labels, mode='cg', 
                                     multichannel=True)
    assert data[..., 0].shape == labels.shape
    with expected_warnings(['"cg" mode']):
        single_labels = random_walker(data[..., 0], labels, mode='cg')
    assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
    assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
    assert data[..., 0].shape == labels.shape
    return data, multi_labels, single_labels, labels
Example #21
def test_equalize_ubyte():
    with expected_warnings(['precision loss']):
        img = skimage.img_as_ubyte(test_img)
    img_eq = exposure.equalize_hist(img)

    cdf, bin_edges = exposure.cumulative_distribution(img_eq)
    check_cdf_slope(cdf)
Example #22
def test_low_data_range():
    with expected_warnings(["Low image data range|CObject type is marked",
                            "tight_layout : falling back to Agg|\A\Z"]):
        ax_im = io.imshow(im_lo)
    assert ax_im.get_clim() == (im_lo.min(), im_lo.max())
    # check that a colorbar was created
    assert ax_im.colorbar is not None
Example #23
def test_imexport_imimport():
    shape = (2, 2)
    image = np.zeros(shape)
    with expected_warnings(['precision loss']):
        pil_image = ndarray_to_pil(image)
    out = pil_to_ndarray(pil_image)
    assert out.shape == shape
Example #24
 def test_grey(self):
     with expected_warnings(['precision loss']):
         tmp = np.arange(12, dtype=float).reshape((4, 3)) / 11
         x = prepare_for_display(tmp)
     assert_array_equal(x[..., 0], x[..., 2])
     assert x[0, 0, 0] == 0
     assert x[3, 2, 0] == 255
Example #25
def test_2d_cg_mg():
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    expected = 'scipy.sparse.sparsetools|%s' % PYAMG_SCIPY_EXPECTED
    with expected_warnings([expected]):
        labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
    assert (labels_cg_mg[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    with expected_warnings([expected]):
        full_prob = random_walker(data, labels, beta=90, mode='cg_mg',
                                  return_full_prob=True)
    assert (full_prob[1, 25:45, 40:60] >=
            full_prob[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape
    return data, labels_cg_mg
Example #26
def test_trivial_cases():
    # When all voxels are labeled
    img = np.ones((10, 10))
    labels = np.ones((10, 10))

    with expected_warnings(["Returning provided labels"]):
        pass_through = random_walker(img, labels)
    np.testing.assert_array_equal(pass_through, labels)

    # When all voxels are labeled AND return_full_prob is True
    labels[:, :5] = 3
    expected = np.concatenate(((labels == 1)[..., np.newaxis],
                               (labels == 3)[..., np.newaxis]), axis=2)
    with expected_warnings(["Returning provided labels"]):
        test = random_walker(img, labels, return_full_prob=True)
    np.testing.assert_array_equal(test, expected)
Example #27
    def test_rank_filter(self, filter):
        @test_parallel()
        def check():
            expected = self.refs[filter]
            result = getattr(rank, filter)(self.image, self.selem)
            if filter == "entropy":
                # There may be some arch dependent rounding errors
                # See the discussions in
                # https://github.com/scikit-image/scikit-image/issues/3091
                # https://github.com/scikit-image/scikit-image/issues/2528
                assert_allclose(expected, result, atol=0, rtol=1E-15)
            elif filter == "otsu":
                # Otsu may also have some optimization-dependent failures
                # See the discussions in
                # https://github.com/scikit-image/scikit-image/issues/3091
                # Pixel 3, 5 was found to be problematic. It can take either
                # a value of 41 or 81 depending on the specific optimizations
                # used.
                assert result[3, 5] in [41, 81]
                result[3, 5] = 81
                # Pixel [19, 18] is also found to be problematic for the same
                # reason.
                assert result[19, 18] in [141, 172]
                result[19, 18] = 172
                assert_array_equal(expected, result)
            else:
                assert_array_equal(expected, result)

        with expected_warnings(['precision loss']):
            check()
Example #28
def test_wavelet_denoising_nd():
    rstate = np.random.RandomState(1234)
    for method in ['VisuShrink', 'BayesShrink']:
        for ndim in range(1, 5):
            # Generate a very simple test image
            if ndim < 3:
                img = 0.2*np.ones((128, )*ndim)
            else:
                img = 0.2*np.ones((16, )*ndim)
            img[(slice(5, 13), ) * ndim] = 0.8

            sigma = 0.1
            noisy = img + sigma * rstate.randn(*(img.shape))
            noisy = np.clip(noisy, 0, 1)

            # Mark H. 2018.08:
            #   The issue arises because, when ndim is 1 or 2, ``waverecn``
            #   calls ``_match_coeff_dims``, which includes a NumPy 1.15
            #   deprecation. For a larger number of dimensions,
            #   ``_match_coeff_dims`` is not called, for some reason.
            anticipated_warnings = (PYWAVELET_ND_INDEXING_WARNING
                                    if ndim < 3 else None)
            with expected_warnings([anticipated_warnings]):
                # Verify that SNR is improved with internally estimated sigma
                denoised = restoration.denoise_wavelet(noisy, method=method)
            psnr_noisy = compare_psnr(img, noisy)
            psnr_denoised = compare_psnr(img, denoised)
            assert_(psnr_denoised > psnr_noisy)
Example #29
def test_denoise_bilateral_nan():
    img = np.full((50, 50), np.nan)
    # This is in fact an optional warning for our test suite.
    # Python 3.5 will not trigger a warning.
    with expected_warnings([r'invalid|\A\Z']):
        out = restoration.denoise_bilateral(img, multichannel=False)
    assert_equal(img, out)
Example #30
def test_mask():
    length = 100
    ramps = [np.linspace(0, 4 * np.pi, length),
             np.linspace(0, 8 * np.pi, length),
             np.linspace(0, 6 * np.pi, length)]
    image = np.vstack(ramps)
    mask_1d = np.ones((length,), dtype=bool)
    mask_1d[0] = mask_1d[-1] = False
    for i in range(len(ramps)):
        # mask all ramps but the i'th one
        mask = np.zeros(image.shape, dtype=bool)
        mask |= mask_1d.reshape(1, -1)
        mask[i, :] = False   # unmask i'th ramp
        image_wrapped = np.ma.array(np.angle(np.exp(1j * image)), mask=mask)
        image_unwrapped = unwrap_phase(image_wrapped)
        image_unwrapped -= image_unwrapped[0, 0]    # remove phase shift
        # The end of the unwrapped array should have value equal to the
        # endpoint of the unmasked ramp
        assert_array_almost_equal_nulp(image_unwrapped[:, -1], image[i, -1])
        assert_(np.ma.isMaskedArray(image_unwrapped))

        # Same tests, but forcing use of the 3D unwrapper by reshaping
        with expected_warnings(['length 1 dimension']):
            shape = (1,) + image_wrapped.shape
            image_wrapped_3d = image_wrapped.reshape(shape)
            image_unwrapped_3d = unwrap_phase(image_wrapped_3d)
            # remove phase shift
            image_unwrapped_3d -= image_unwrapped_3d[0, 0, 0]
        assert_array_almost_equal_nulp(image_unwrapped_3d[:, :, -1],
                                       image[i, -1])
Example #31
def test_equals():
    arr = np.zeros((100, 100), dtype=int)
    arr[0:25, 0:25] = 1
    arr[50:99, 50:99] = 2

    regions = regionprops(arr)
    r1 = regions[0]

    regions = regionprops(arr)
    r2 = regions[0]
    r3 = regions[1]

    with expected_warnings(['`background`|CObject type']):
        assert_equal(r1 == r2, True, "Same regionprops are not equal")
        assert_equal(r1 != r3, True, "Different regionprops are equal")
Example #32
def test_iradon_sart_dtype(dtype):
    sinogram = np.zeros((16, 1), dtype=int)
    sinogram[8, 0] = 1.
    sinogram64 = sinogram.astype('float64')
    sinogram32 = sinogram.astype('float32')

    with expected_warnings(['Input data is cast to float']):
        assert iradon_sart(sinogram, theta=[0]).dtype == 'float64'

    assert iradon_sart(sinogram64, theta=[0]).dtype == sinogram64.dtype
    assert iradon_sart(sinogram32, theta=[0]).dtype == sinogram32.dtype

    assert iradon_sart(sinogram, theta=[0], dtype=dtype).dtype == dtype
    assert iradon_sart(sinogram32, theta=[0], dtype=dtype).dtype == dtype
    assert iradon_sart(sinogram64, theta=[0], dtype=dtype).dtype == dtype
Example #33
def test_wavelet_denoising_levels():
    rstate = np.random.RandomState(1234)
    ndim = 2
    N = 256
    wavelet = 'db1'
    # Generate a very simple test image
    img = 0.2*np.ones((N, )*ndim)
    img[(slice(5, 13), ) * ndim] = 0.8

    sigma = 0.1
    noisy = img + sigma * rstate.randn(*(img.shape))
    noisy = np.clip(noisy, 0, 1)

    with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
        denoised = restoration.denoise_wavelet(noisy, wavelet=wavelet)
    denoised_1 = restoration.denoise_wavelet(noisy, wavelet=wavelet,
                                             wavelet_levels=1)
    psnr_noisy = compare_psnr(img, noisy)
    psnr_denoised = compare_psnr(img, denoised)
    psnr_denoised_1 = compare_psnr(img, denoised_1)

    # multi-level case should outperform single level case
    assert_(psnr_denoised > psnr_denoised_1 > psnr_noisy)

    # invalid number of wavelet levels results in a ValueError
    max_level = pywt.dwt_max_level(np.min(img.shape),
                                   pywt.Wavelet(wavelet).dec_len)
    with testing.raises(ValueError):
        with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
            restoration.denoise_wavelet(
                noisy, wavelet=wavelet, wavelet_levels=max_level + 1)
    with testing.raises(ValueError):
        with expected_warnings([PYWAVELET_ND_INDEXING_WARNING]):
            restoration.denoise_wavelet(
                noisy,
                wavelet=wavelet, wavelet_levels=-1)
Example #34
def test_ransac_non_valid_best_model():
    """Example from GH issue #5572"""
    def is_model_valid(model, *random_data) -> bool:
        """Allow models with a maximum of 10 degree tilt from the vertical

        """
        tilt = abs(np.arccos(np.dot(model.params[1], [0, 0, 1])))
        return tilt <= (10 / 180 * np.pi)

    rnd = np.random.RandomState(1)
    data = np.linspace([0, 0, 0], [0.3, 0, 1], 1000) + rnd.rand(1000, 3) - 0.5
    with expected_warnings(["Estimated model is not valid"]):
        ransac(data, LineModelND, min_samples=2,
               residual_threshold=0.3, max_trials=50, random_state=0,
               is_model_valid=is_model_valid)
Example #35
def test_imsave_filelike():
    shape = (2, 2)
    image = np.zeros(shape)
    s = BytesIO()

    # save to file-like object
    with expected_warnings(['precision loss',
                            'is a low contrast image']):
        imsave(s, image)

    # read from file-like object
    s.seek(0)
    out = imread(s)
    assert_equal(out.shape, shape)
    assert_allclose(out, image)
Example #36
def test_montage_simple_rgb():
    n_images, n_rows, n_cols, n_channels = 2, 2, 2, 2
    arr_in = np.arange(
        n_images * n_rows * n_cols * n_channels,
        dtype=float,
    )
    arr_in = arr_in.reshape(n_images, n_rows, n_cols, n_channels)

    with expected_warnings(["`multichannel` is a deprecated argument"]):
        arr_out = montage(arr_in, multichannel=True)
    arr_ref = np.array([[[0, 1], [2, 3], [8, 9], [10, 11]],
                        [[4, 5], [6, 7], [12, 13], [14, 15]],
                        [[7, 8], [7, 8], [7, 8], [7, 8]],
                        [[7, 8], [7, 8], [7, 8], [7, 8]]])
    assert_array_equal(arr_out, arr_ref)
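Example #36 exercises the deprecated `multichannel` keyword of `montage`. For comparison, here is a minimal sketch of the non-deprecated call, assuming scikit-image >= 0.19 where `channel_axis` replaces `multichannel`:

import numpy as np
from skimage.util import montage

arr_in = np.arange(2 * 2 * 2 * 2, dtype=float).reshape(2, 2, 2, 2)
# channel_axis=-1 marks the last axis as color channels, so no deprecation
# warning is expected; unused grid cells are filled with the mean value.
arr_out = montage(arr_in, channel_axis=-1)
assert arr_out.shape == (4, 4, 2)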
Example #37
def test_16bit():
    image = np.zeros((21, 21), dtype=np.uint16)
    selem = np.ones((3, 3), dtype=np.uint8)

    for bitdepth in range(17):
        value = 2 ** bitdepth - 1
        image[10, 10] = value
        if bitdepth > 11:
            expected = ['Bitdepth of %s' % (bitdepth - 1)]
        else:
            expected = []
        with expected_warnings(expected):
            assert rank.minimum(image, selem)[10, 10] == 0
            assert rank.maximum(image, selem)[10, 10] == value
            assert rank.mean(image, selem)[10, 10] == int(value / selem.size)
Example #38
def test_denoise_nl_means_2d_multichannel_deprecated():
    # reduce image size because nl means is slow
    img = np.copy(astro[:50, :50])

    # add some random noise
    sigma = 0.1
    imgn = img + sigma * np.random.standard_normal(img.shape)
    imgn = np.clip(imgn, 0, 1)

    psnr_noisy = peak_signal_noise_ratio(img, imgn)
    with expected_warnings(["`multichannel` is a deprecated argument"]):
        denoised = restoration.denoise_nl_means(imgn,
                                                3, 5, h=0.75 * sigma,
                                                multichannel=True,
                                                sigma=sigma)
    psnr_denoised = peak_signal_noise_ratio(denoised, img)

    # make sure noise is reduced
    assert psnr_denoised > psnr_noisy

    # providing multichannel argument positionally also warns
    with expected_warnings(["Providing the `multichannel` argument"]):
        restoration.denoise_nl_means(imgn, 3, 5, 0.75 * sigma, True,
                                     sigma=sigma)
Example #39
def test_3d_inactive():
    n = 30
    lx, ly, lz = n, n, n
    data, labels = make_3d_syntheticdata(lx, ly, lz)
    old_labels = np.copy(labels)
    labels[5:25, 26:29, 26:29] = -1
    after_labels = np.copy(labels)
    with expected_warnings([
            '"cg" mode|CObject type' + '|' + SCIPY_RANK_WARNING,
            NUMPY_MATRIX_WARNING
    ]):
        labels = random_walker(data, labels, mode='cg')
    assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
    assert data.shape == labels.shape
    return data, labels, old_labels, after_labels
Example #40
def test_unsharp_masking_with_different_ranges_deprecated(
        shape, offset, multichannel, preserve):
    radius = 2.0
    amount = 1.0
    dtype = np.int16
    array = (np.random.random(shape) * 5 + offset).astype(dtype)
    negative = np.any(array < 0)
    with expected_warnings(["`multichannel` is a deprecated argument"]):
        output = unsharp_mask(array,
                              radius,
                              amount,
                              multichannel=multichannel,
                              preserve_range=preserve)
    if preserve is False:
        assert np.any(output <= 1)
        assert np.any(output >= -1)
        if negative is False:
            assert np.any(output >= 0)
    assert output.dtype in [np.float32, np.float64]
    assert output.shape == shape

    # providing multichannel positionally also raises a warning
    with expected_warnings(["Providing the `multichannel`"]):
        output = unsharp_mask(array, radius, amount, multichannel, preserve)
Example #41
def test_basic():
    with expected_warnings(['Upgrading NumPy' + warning_optional]):
        m = mcp.MCP(a, fully_connected=True)
    costs, traceback = m.find_costs([(1, 6)])
    return_path = m.traceback((7, 2))
    assert_array_equal(
        costs,
        [[1., 1., 1., 1., 1., 1., 1., 1.], [1., 0., 0., 0., 0., 0., 0., 1.],
         [1., 0., 1., 1., 1., 1., 1., 1.], [1., 0., 1., 2., 2., 2., 2., 2.],
         [1., 0., 1., 2., 3., 3., 3., 3.], [1., 0., 1., 2., 3., 4., 4., 4.],
         [1., 0., 1., 2., 3., 4., 5., 5.], [1., 1., 1., 2., 3., 4., 5., 6.]])

    assert_array_equal(return_path, [(1, 6), (1, 5), (1, 4), (1, 3), (1, 2),
                                     (2, 1), (3, 1), (4, 1), (5, 1), (6, 1),
                                     (7, 2)])
Example #42
def test_compare_ubyte_vs_float():

    # Create a uint8 image and an equivalent float image
    image_uint = img_as_ubyte(data.camera()[:50, :50])
    image_float = img_as_float(image_uint)

    methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'threshold',
               'subtract_mean', 'enhance_contrast', 'pop', 'tophat']

    for method in methods:
        func = getattr(rank, method)
        out_u = func(image_uint, disk(3))
        with expected_warnings(['precision loss']):
            out_f = func(image_float, disk(3))
        assert_equal(out_u, out_f)
Example #43
def test_nD_gray_conversion(func, shape):
    img = cp.random.rand(*shape)

    msg_list = []
    if img.ndim == 3 and func == gray2rgb:
        msg_list.append("Pass-through of possibly RGB images in gray2rgb")
    elif img.ndim == 2 and func == rgb2gray:
        msg_list.append("The behavior of rgb2gray will change")

    with expected_warnings(msg_list):
        out = func(img)

    common_ndim = min(out.ndim, len(shape))

    assert out.shape[:common_ndim] == shape[:common_ndim]
Example #44
def test_multispectral_2d(dtype, channel_axis):
    lx, ly = 70, 100
    data, labels = make_2d_syntheticdata(lx, ly)
    data = data.astype(dtype, copy=False)
    data = data[..., np.newaxis].repeat(2, axis=-1)  # Expect identical output

    data = np.moveaxis(data, -1, channel_axis)
    with expected_warnings([
            '"cg" mode' + '|' + SCIPY_RANK_WARNING, NUMPY_MATRIX_WARNING,
            'The probability range is outside'
    ]):
        multi_labels = random_walker(data,
                                     labels,
                                     mode='cg',
                                     channel_axis=channel_axis)
    data = np.moveaxis(data, channel_axis, -1)

    assert data[..., 0].shape == labels.shape
    with expected_warnings(
        ['"cg" mode' + '|' + SCIPY_RANK_WARNING, NUMPY_MATRIX_WARNING]):
        single_labels = random_walker(data[..., 0], labels, mode='cg')
    assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
    assert data[..., 0].shape == labels.shape
    return data, multi_labels, single_labels, labels
Example #45
def test_adapthist_alpha():
    """Test an RGBA color image
    """
    img = skimage.img_as_float(data.astronaut())
    alpha = np.ones((img.shape[0], img.shape[1]), dtype=float)
    img = np.dstack((img, alpha))
    with expected_warnings(['precision loss']):
        adapted = exposure.equalize_adapthist(img)
    assert adapted.shape != img.shape
    img = img[:, :, :3]
    full_scale = skimage.exposure.rescale_intensity(img)
    assert img.shape == adapted.shape
    assert_almost_equal = np.testing.assert_almost_equal
    assert_almost_equal(peak_snr(full_scale, adapted), 109.60, 2)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.0235, 3)
Example #46
def test_ssim_dynamic_range_and_data_range():
    # Tests deprecation of "dynamic_range" in favor of "data_range"
    N = 30
    X = np.random.rand(N, N) * 255
    Y = np.random.rand(N, N) * 255

    with expected_warnings(
            ['`dynamic_range` has been deprecated in favor of '
             '`data_range`. The `dynamic_range` keyword argument '
             'will be removed in v0.14']):
        out2 = ssim(X, Y, dynamic_range=255)

    out1 = ssim(X, Y, data_range=255)

    assert_equal(out1, out2)
Example #47
File: test_pil.py Project: fmg30/diss
def test_imsave_boolean_input():
    shape = (2, 2)
    image = np.eye(*shape, dtype=bool)
    s = BytesIO()

    # save to file-like object
    with expected_warnings(
        ['is a boolean image: setting True to 255 and False to 0']):
        imsave(s, image)

    # read from file-like object
    s.seek(0)
    out = imread(s)
    assert_equal(out.shape, shape)
    assert_allclose(out.astype(bool), image)
Example #48
    def test_inverse(self):
        with expected_warnings([SCIPY_ND_INDEXING_WARNING]):
            F = self.f(self.img)
            g = inverse(F, predefined_filter=self.f)
            assert_equal(g.shape, self.img.shape)

            g1 = inverse(F[::-1, ::-1], predefined_filter=self.f)
            assert_((g - g1[::-1, ::-1]).sum() < 55)

            # test cache
            g1 = inverse(F[::-1, ::-1], predefined_filter=self.f)
            assert_((g - g1[::-1, ::-1]).sum() < 55)

            g1 = inverse(F[::-1, ::-1], self.filt_func)
            assert_((g - g1[::-1, ::-1]).sum() < 55)
Example #49
 def test_ndarray_exclude_border(self):
     nd_image = np.zeros((5, 5, 5))
     nd_image[[1, 0, 0], [0, 1, 0], [0, 0, 1]] = 1
     nd_image[3, 0, 0] = 1
     nd_image[2, 2, 2] = 1
     expected = np.zeros_like(nd_image, dtype=bool)
     expected[2, 2, 2] = True
     expectedNoBorder = np.zeros_like(nd_image, dtype=bool)
     expectedNoBorder[2, 2, 2] = True
     expectedNoBorder[0, 0, 1] = True
     expectedNoBorder[3, 0, 0] = True
     with expected_warnings(["indices argument is deprecated"]):
         result = peak.peak_local_max(nd_image,
                                      min_distance=2,
                                      exclude_border=2,
                                      indices=False)
         assert_equal(result, expected)
         # Check that bools work as expected
         assert_equal(
             peak.peak_local_max(nd_image,
                                 min_distance=2,
                                 exclude_border=2,
                                 indices=False),
             peak.peak_local_max(nd_image,
                                 min_distance=2,
                                 exclude_border=True,
                                 indices=False))
         assert_equal(
             peak.peak_local_max(nd_image,
                                 min_distance=2,
                                 exclude_border=0,
                                 indices=False),
             peak.peak_local_max(nd_image,
                                 min_distance=2,
                                 exclude_border=False,
                                 indices=False))
         # Check both versions with  no border
         assert_equal(
             peak.peak_local_max(nd_image,
                                 min_distance=2,
                                 exclude_border=0,
                                 indices=False),
             expectedNoBorder,
         )
         assert_equal(
             peak.peak_local_max(nd_image,
                                 exclude_border=False,
                                 indices=False), nd_image.astype(bool))
Example #50
def test_unsupervised_wiener_deprecated_user_param():
    psf = np.ones((5, 5), dtype=float) / 25
    data = convolve2d(test_img, psf, 'same')
    otf = uft.ir2tf(psf, data.shape, is_real=False)
    _, laplacian = uft.laplacian(2, data.shape)
    with expected_warnings(
        ["`max_iter` is a deprecated key", "`min_iter` is a deprecated key"]):
        restoration.unsupervised_wiener(data,
                                        otf,
                                        reg=laplacian,
                                        is_real=False,
                                        user_params={
                                            "max_iter": 200,
                                            "min_iter": 30
                                        },
                                        random_state=5)
Example #51
def test_validate_interpolation_order(dtype, order):
    if order is None:
        # Default order
        assert _validate_interpolation_order(dtype, None) == (
            0 if dtype == bool else 1)
    elif order < 0 or order > 5:
        # Order not in valid range
        with testing.raises(ValueError):
            _validate_interpolation_order(dtype, order)
    elif dtype == bool and order != 0:
        # Deprecated order for bool array
        with expected_warnings(["Input image dtype is bool"]):
            assert _validate_interpolation_order(bool, order) == order
    else:
        # Valid use case
        assert _validate_interpolation_order(dtype, order) == order
Example #52
def test_rescale_nan_warning(in_range, out_range):
    image = np.arange(12, dtype=float).reshape(3, 4)
    image[1, 1] = np.nan

    msg = (r"One or more intensity levels are NaN\."
           r" Rescaling will broadcast NaN to the full image\.")

    # 2019/11/10 Passing NaN to np.clip raises a DeprecationWarning for
    # versions above 1.17
    # TODO: Remove once NumPy removes this DeprecationWarning
    numpy_warning_1_17_plus = (
        r"Passing `np.nan` to mean no clipping in np.clip "
        r"has always been unreliable|\A\Z")

    with expected_warnings([msg, numpy_warning_1_17_plus]):
        exposure.rescale_intensity(image, in_range, out_range)
Example #53
 def test_adjacent_and_same(self):
     image = cp.zeros((10, 20))
     labels = cp.zeros((10, 20), int)
     image[5, 5:6] = 1
     labels[5, 5:6] = 1
     with expected_warnings(["indices argument is deprecated"]):
         result = peak.peak_local_max(
             image,
             labels=labels,
             footprint=cp.ones((3, 3), bool),
             min_distance=1,
             threshold_rel=0,
             indices=False,
             exclude_border=False,
         )
     assert cp.all(result == (labels == 1))
Example #54
 def test_not_adjacent_and_different(self):
     image = np.zeros((10, 20))
     labels = np.zeros((10, 20), int)
     image[5, 5] = 1
     image[5, 8] = .5
     labels[image > 0] = 1
     expected = (labels == 1)
     with expected_warnings(["indices argument is deprecated"]):
         result = peak.peak_local_max(image,
                                      labels=labels,
                                      footprint=np.ones((3, 3), bool),
                                      min_distance=1,
                                      threshold_rel=0,
                                      indices=False,
                                      exclude_border=False)
     assert np.all(result == expected)
Example #55
    def test_background(self):
        x = np.zeros((2, 3, 3), int)
        x[0] = np.array([[1, 0, 0], [1, 0, 0], [0, 0, 0]])
        x[1] = np.array([[0, 0, 0], [0, 1, 5], [0, 0, 0]])

        lnb = x.copy()
        lnb[0] = np.array([[0, 1, 1], [0, 1, 1], [1, 1, 1]])
        lnb[1] = np.array([[1, 1, 1], [1, 0, 2], [1, 1, 1]])
        lb = x.copy()
        lb[0] = np.array([[0, BG, BG], [0, BG, BG], [BG, BG, BG]])
        lb[1] = np.array([[BG, BG, BG], [BG, 0, 1], [BG, BG, BG]])

        with expected_warnings(['`background`']):
            assert_array_equal(label(x), lnb)

        assert_array_equal(label(x, background=0), lb)
Example #56
    def test_16bit(self):
        image = np.zeros((21, 21), dtype=np.uint16)
        footprint = np.ones((3, 3), dtype=np.uint8)

        for bitdepth in range(17):
            value = 2**bitdepth - 1
            image[10, 10] = value
            if bitdepth >= 11:
                expected = ['Bad rank filter performance']
            else:
                expected = []
            with expected_warnings(expected):
                assert rank.minimum(image, footprint)[10, 10] == 0
                assert rank.maximum(image, footprint)[10, 10] == value
                mean_val = rank.mean(image, footprint)[10, 10]
                assert mean_val == int(value / footprint.size)
Example #57
def test_uint_image_holes():
    labeled_holes_image = np.array(
        [[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
         [0, 1, 0, 0, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 2, 2, 2],
         [0, 0, 0, 0, 0, 0, 0, 2, 0, 2], [0, 0, 0, 0, 0, 0, 0, 2, 2, 2]],
        dtype=np.uint8)
    expected = np.array(
        [[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
         [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]],
        dtype=bool)
    with expected_warnings(['returned as a boolean array']):
        observed = remove_small_holes(labeled_holes_image, area_threshold=3)
    assert_array_equal(observed, expected)
Example #58
    def test_bitdepth(self):
        # test the different bit depth for rank16

        elem = np.ones((3, 3), dtype=np.uint8)
        out = np.empty((100, 100), dtype=np.uint16)
        mask = np.ones((100, 100), dtype=np.uint8)

        for i in range(5):
            image = np.ones((100, 100), dtype=np.uint16) * 255 * 2 ** i
            if i > 3:
                expected = ["Bitdepth of"]
            else:
                expected = []
            with expected_warnings(expected):
                rank.mean_percentile(image=image, selem=elem, mask=mask,
                                     out=out, shift_x=0, shift_y=0, p0=.1, p1=.9)
Example #59
def test_range(dtype, f_and_dt):
    imin, imax = dtype_range[dtype]
    x = np.linspace(imin, imax, 10).astype(dtype)

    f, dt = f_and_dt

    with expected_warnings([r'precision loss|sign loss|\A\Z']):
        y = f(x)

    omin, omax = dtype_range[dt]

    if imin == 0 or omin == 0:
        omin = 0
        imin = 0

    _verify_range("From %s to %s" % (np.dtype(dtype), np.dtype(dt)), y, omin,
                  omax, np.dtype(dt))
Example #60
def test_denoise_sigma_range():
    img = checkerboard_gray.copy()[:50, :50]
    # add some random noise
    img += 0.5 * img.std() * np.random.rand(*img.shape)
    img = np.clip(img, 0, 1)
    out1 = restoration.denoise_bilateral(img,
                                         sigma_color=0.1,
                                         sigma_spatial=10,
                                         multichannel=False)
    with expected_warnings(
            ['`sigma_range` has been deprecated in favor of `sigma_color`. '
             'The `sigma_range` keyword argument will be removed in v0.14']):
        out2 = restoration.denoise_bilateral(img,
                                             sigma_range=0.1,
                                             sigma_spatial=10,
                                             multichannel=False)
    assert_equal(out1, out2)