Code Example #1
def test_wiener(dtype):
    psf = np.ones((5, 5), dtype=dtype) / 25
    data = convolve2d(test_img, psf, 'same')
    np.random.seed(0)
    data += 0.1 * data.std() * np.random.standard_normal(data.shape)
    data = data.astype(dtype, copy=False)
    deconvolved = restoration.wiener(data, psf, 0.05)
    assert deconvolved.dtype == _supported_float_type(dtype)

    rtol, atol = _get_rtol_atol(dtype)
    path = fetch('restoration/tests/camera_wiener.npy')
    np.testing.assert_allclose(deconvolved,
                               np.load(path),
                               rtol=rtol,
                               atol=atol)

    _, laplacian = uft.laplacian(2, data.shape)
    otf = uft.ir2tf(psf, data.shape, is_real=False)
    assert otf.real.dtype == _supported_float_type(dtype)
    deconvolved = restoration.wiener(data,
                                     otf,
                                     0.05,
                                     reg=laplacian,
                                     is_real=False)
    assert deconvolved.real.dtype == _supported_float_type(dtype)
    np.testing.assert_allclose(np.real(deconvolved),
                               np.load(path),
                               rtol=rtol,
                               atol=atol)
Code Example #2
def test_wiener():
    psf = np.ones((5, 5)) / 25
    data = convolve2d(test_img, psf, "same")
    np.random.seed(0)
    data += 0.1 * data.std() * np.random.standard_normal(data.shape)
    deconvolved = restoration.wiener(data, psf, 0.05)

    path = pjoin(dirname(abspath(__file__)), "camera_wiener.npy")
    np.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)

    _, laplacian = uft.laplacian(2, data.shape)
    otf = uft.ir2tf(psf, data.shape, is_real=False)
    deconvolved = restoration.wiener(data, otf, 0.05, reg=laplacian, is_real=False)
    np.testing.assert_allclose(np.real(deconvolved), np.load(path), rtol=1e-3)
Code Example #3
    def _wiener_deconvolve(self, image, psf, balance=5e8):
        '''Perform Wiener-Hunt deconvolution given an impulse response'''

        deconvolved = restoration.wiener(image, psf, balance)
        deconvolved = deconvolved - np.min(deconvolved)

        return deconvolved
Code Example #4
def deblur_image(image):
    psf = np.ones((5, 5)) / 25
    image = conv2(image, psf, 'same')
    image += 0.1 * image.std() * np.random.standard_normal(image.shape)

    deconvolved = restoration.wiener(image, psf, 1, clip=False)
    return deconvolved
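The snippet above uses conv2, np, and restoration without showing their imports; a plausible set (an assumption, not part of the original file, with conv2 taken to be scipy's 2-D convolution as in the similar Code Example #18) would be:

# Assumed imports for deblur_image(); illustrative only.
import numpy as np
from scipy.signal import convolve2d as conv2
from skimage import restoration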
Code Example #5
def denoiser(denoiser_name, img, sigma):
    '''
    :param denoiser_name: str| 'wavelet' or 'TVM' or 'bilateral' or 'deconv' or 'NLM'
    :param img: (H,W,C) | np.array | [0,1]
    :param sigma: noise standard deviation (used by the wavelet denoiser)
    :return: (H,W,C) | np.array | [0,1]
    '''
    from skimage.restoration import (denoise_tv_chambolle, denoise_bilateral,
                                     denoise_wavelet, denoise_nl_means, wiener)
    if denoiser_name == 'wavelet':
        return denoise_wavelet(img,
                               sigma=sigma,
                               mode='soft',
                               multichannel=True,
                               convert2ycbcr=True,
                               method='BayesShrink')
    elif denoiser_name == 'TVM':
        return denoise_tv_chambolle(img, multichannel=True)
    elif denoiser_name == 'bilateral':
        return denoise_bilateral(img, bins=1000, multichannel=True)
    elif denoiser_name == 'deconv':
        # NOTE: psf and balance are required arguments of skimage.restoration.wiener;
        # this call fails as written.
        return wiener(img)
    elif denoiser_name == 'NLM':
        return denoise_nl_means(img, multichannel=True)
    else:
        raise Exception(
            'Incorrect denoiser mentioned. Options: wavelet, TVM, bilateral, deconv, NLM'
        )
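A minimal usage sketch for this dispatcher, assuming a random (H, W, C) image in [0, 1] and an skimage version that still accepts the multichannel keyword; the array shape and sigma below are illustrative only:

import numpy as np

img = np.random.rand(64, 64, 3)            # (H, W, C) array with values in [0, 1]
out = denoiser('wavelet', img, sigma=0.1)  # other options: 'TVM', 'bilateral', 'deconv', 'NLM'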
Code Example #6
def wiener_restoration(noisePicture):
    # Wiener filter restoration
    psf = np.ones((5, 5)) / 25
    img = convolve2d(noisePicture, psf, 'same')
    img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    deconvolved_img = restoration.wiener(image=img, psf=psf, balance=1100)
    return deconvolved_img
Code Example #7
def wiener(img):
    psf = np.ones((5, 5)) / 25
    img = convolve2d(img, psf, 'same')
    img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    deconvolved_img = restoration.wiener(img, psf, 1100)
    # deconvolved_img = restoration.unsupervised_wiener(img, psf)
    return deconvolved_img
Code Example #8
def deblur(image,n=5,m=25):
    psf = np.ones((n, n)) / m
    B = image.reshape(image.shape[0],image.shape[1],3)[:,:,0]
    G = image.reshape(image.shape[0],image.shape[1],3)[:,:,1]
    R = image.reshape(image.shape[0],image.shape[1],3)[:,:,2]
    deconvolved_B = restoration.wiener(B, psf,1,clip=False)
    deconvolved_G = restoration.wiener(G, psf,1,clip=False)
    deconvolved_R = restoration.wiener(R, psf,1,clip=False)
    rgbArray = np.zeros((image.shape[0], image.shape[1], 3))
    rgbArray[..., 0] = deconvolved_B
    rgbArray[..., 1] = deconvolved_G
    rgbArray[..., 2] = deconvolved_R
    #print(rgbArray)
    #rgbArray[rgbArray < 0] = 0
    #rgbArray[rgbArray >= 1] = 1 - EPSILON
    return rgbArray
Code Example #9
def reconstruct(sample, Cx, Cz, half_width=140, deconvolve=False):
    """
    Reconstruct the multiview data.
    Parameters:
        sample :    data (ordered as angle, z, x)
        Cx:         x coordinate of the rotation axis
        Cz:         z coordinate of the rotation axis
        half_width: half-size of the reconstructed square
        deconvolve: if True, apply xz deconvolution to each view before reconstruction
    Return:
        reconstructed: reconstructed section
    """
    sample_selection = sample[:, Cz - half_width:Cz + half_width,
                              Cx - half_width:Cx + half_width]

    sum_im = np.zeros((2 * half_width, 2 * half_width))

    for angle_index, angle in enumerate(np.arange(0, 360, rotation_angle)):

        if deconvolve:
            view = wiener(sample_selection[angle_index, :, :],
                          psf,
                          balance=0.1)
        else:
            view = sample_selection[angle_index, :, :]

        rotated = rotate(view, angle, preserve_range=True, mode='reflect')
        sum_im += rotated
    reconstructed = sum_im / nangles

    return (reconstructed)
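reconstruct() depends on module-level names that are not shown here (rotation_angle, nangles, psf, plus wiener and rotate). A minimal setup sketch with purely illustrative values:

import numpy as np
from skimage.restoration import wiener
from skimage.transform import rotate

rotation_angle = 45                                 # angular step between views, in degrees
nangles = len(np.arange(0, 360, rotation_angle))    # number of views averaged in the loop
psf = np.ones((5, 5)) / 25                          # point-spread function used when deconvolve=True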
Code Example #10
File: __init__.py Project: barrettsa/maskgen
def transform(img, source, target, **kwargs):
    kernelSize = int(kwargs['kernelSize']) if 'kernelSize' in kwargs else 25
    rgb = img.convert('RGB')
    cv_image = numpy.array(rgb)
    if 'inputmaskname' in kwargs:
        mask = numpy.asarray(
            tool_set.openImageFile(kwargs['inputmaskname']).to_mask())
        mask[mask > 0] = 1
    else:
        mask = numpy.ones(
            (cv_image.shape[0], cv_image.shape[1])).astype('uint8')
    inverted_mask = numpy.ones(
        (cv_image.shape[0], cv_image.shape[1])).astype('uint8')
    inverted_mask[mask == 1] = 0
    side = int(kernelSize**(1 / 2.0))
    psf = numpy.ones((side, side)) / kernelSize
    img = color.rgb2grey(cv_image)
    deconvolved_img = restoration.wiener(img, psf, 1)[0]
    for c in range(3):
        cv_image[:, :, c] = (deconvolved_img * cv_image[:, :, c] * mask +
                             cv_image[:, :, c] * inverted_mask)
    Image.fromarray(cv_image, 'RGB').save(target)
    return {'Blur Type': 'Wiener'}, None
Code Example #11
def blind_lucy_wrapper(image, max_its=8, its=5, N_filter=3, weiner=False,
                       estimation_noise=0, filter_estimation=1,
                       observation_noise=0):
    f = io.imread('../data/'+image+'.png', dtype=float)
    if len(f.shape) == 3: f = f.mean(axis=2)
    f /= f.max()
    print(f.shape)

    g = helper.gaussian(sigma=N_filter/3, N=N_filter)
    g_k = helper.gaussian(sigma=N_filter/3 * filter_estimation, N=N_filter)
    g_0 = g_k.copy()

    c = fftconvolve(f, g, mode='same')
    #c += observation_noise*np.random.randn(*c.shape)

    f_k = f + estimation_noise*np.random.randn(*f.shape)
    #f_k = c.copy()

    for k in range(int(max_its)):
        g_k = richardson_lucy(g_k, f_k, iterations=int(its), clip=True)
        if weiner: f_k = wiener(f_k, g_k, 1e-5)
        else: f_k = richardson_lucy(f_k, g_k, iterations=int(its), clip=True)

        print("on {}, f.max() = {:0.3e}, g.max() = {:0.3e}".format(k, np.abs(f_k.max()),
                                                         np.abs(g_k.max())))

    f_k, g_k = np.abs(f_k), np.abs(g_k)
    helper.show_images({'estimation':f_k, 'original':f, 'observations':c})
Code Example #12
def test_wiener():
    psf = np.ones((5, 5)) / 25
    data = convolve2d(test_img, psf, 'same')
    np.random.seed(0)
    data += 0.1 * data.std() * np.random.standard_normal(data.shape)
    deconvolved = restoration.wiener(data, psf, 0.05)

    path = fetch('restoration/tests/camera_wiener.npy')
    np.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)

    _, laplacian = uft.laplacian(2, data.shape)
    otf = uft.ir2tf(psf, data.shape, is_real=False)
    deconvolved = restoration.wiener(data,
                                     otf,
                                     0.05,
                                     reg=laplacian,
                                     is_real=False)
    np.testing.assert_allclose(np.real(deconvolved), np.load(path), rtol=1e-3)
Code Example #13
 def deblurring(self):
     psf = np.ones((25, 25))
     m_temporaryImageList = []
     m_temporaryImageList[:] = ImageConverter.imageFIFO[:]
     ImageConverter.imageFIFO[:] = []
     for image in m_temporaryImageList:
         # deconvolve the image data (Wiener deconvolution; adapted from a Stack Overflow snippet)
         deconvolved_image = restoration.wiener(image, psf, balance=2110)
         ImageConverter.imageFIFO.append(deconvolved_image)
     del m_temporaryImageList
Code Example #14
def weiner_noise_reduction(img):
    # data.astronaut()
    img = color.rgb2gray(img)
    from scipy.signal import convolve2d
    psf = np.ones((5, 5)) / 25
    img = convolve2d(img, psf, 'same')
    img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    deconvolved_img = restoration.wiener(img, psf, 1100)

    return deconvolved_img
Code Example #15
def wiener():
    img = color.rgb2gray(data.astronaut())
    imgO = img.copy()
    psf = np.ones((5, 5)) / 25
    img = convolve2d(img, psf, 'same')
    img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    imgN = img.copy()
    deconvolved_img = restoration.wiener(img, psf, 1100)
    imgR = deconvolved_img.copy()

    return [imgO, imgN, imgR]
Code Example #16
def deconvolve(image, psf, iterations=7, clip=False):
    '''
    Deconvolve the image with a Wiener filter
    (`iterations` is kept for API compatibility but is unused here).
    '''
    # return res.unsupervised_wiener(image, psf)[0]
    return res.wiener(image,
                      psf,
                      balance=0.001,
                      clip=clip,
                      is_real=True,
                      reg=0 * np.sqrt(image))
Code Example #17
def img_deblur_gaussian(img, blurMap, k_size, step_size, coef=50, plot=False):
    """   
    Operation: returns the result of deblurring img according to blurMap
    Inputs:
        2d image array
        2d blur map array of the image
        integer width of the square Gaussian kernel
        integer distance between segments (must be less than or equal to k_size)
        float coefficient giving the linear ratio between the blur level and the kernel's standard deviation
        boolean flag controlling whether to plot the input and deblurred images
    Outputs:
        2d deblurred image array
    """
    x_end = img.shape[1] - k_size + 1
    y_end = img.shape[0] - k_size + 1
    deblur = np.zeros_like(img, dtype=np.float64)
    aveMap = np.zeros_like(img, dtype=np.float64)

    for stride_x in range(0, x_end, step_size):
        xStart = stride_x
        xStop = stride_x + k_size
        xCentre = stride_x + k_size // 2
        for stride_y in range(0, y_end, step_size):
            yStart = stride_y
            yStop = stride_y + k_size
            yCentre = stride_y + k_size // 2

            b = img[yStart:yStop, xStart:xStop]
            # generate a Gaussian kernel according to the blur-map value
            g = fspecial((k_size, k_size), coef * blurMap[yCentre, xCentre])
            # deblur the patch using Wiener deconvolution
            d = ski.wiener(b, g, balance=0.1, clip=False)
            deblur[yStart:yStop, xStart:xStop] += d
            aveMap[yStart:yStop, xStart:xStop] += 1

    # Normalize the deblur to be between 0 and the max value of the img
    deblur_norm = deblur / aveMap
    deblur_norm = deblur_norm - deblur_norm.min()
    deblur_norm = (deblur_norm * img.max() / deblur_norm.max()).astype(
        np.uint8)

    if plot:
        fig, ax = plt.subplots(1, 2, figsize=(20, 20))
        display_image(img, axes=ax[0])
        ax[0].set_title('Input Image')
        display_image(deblur_norm, axes=ax[1])
        ax[1].set_title('Deblurred')

    return deblur_norm
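The fspecial helper used above is not part of the snippet; a plausible sketch in the spirit of MATLAB's fspecial('gaussian') (an assumption about what the original helper does) is:

import numpy as np

def fspecial(shape, sigma):
    """Return a normalized 2-D Gaussian kernel with the given shape and standard deviation."""
    sigma = max(float(sigma), 1e-3)                  # guard against a zero blur-map value
    m, n = [(s - 1) / 2.0 for s in shape]
    y, x = np.ogrid[-m:m + 1, -n:n + 1]
    kernel = np.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
    kernel /= kernel.sum()
    return kernel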
Code Example #18
File: deblur.py Project: JyotsanaS/off
def main():
    image = imread("/Users/gsamaras/Downloads/boat.tif")
    #plt.imshow(arr, cmap='gray')
    #plt.show()
    #blurred_arr = imfilter(arr, "blur")
    psf = np.ones((5, 5)) / 25
    image = conv2(image, psf, 'same')
    image += 0.1 * image.std() * np.random.standard_normal(image.shape)

    deconvolved = restoration.wiener(image, psf, 1, clip=False)
    #print deconvolved
    plt.imshow(deconvolved, cmap='gray')
    plt.show()
Code Example #19
def simple_deblur(blurred_noised):
    """Deblur a blurred image with Wiener filter

    Args:
        blurred_noised: blurred image with noise

    Returns:
        Deblurred image
    """
    img = np.copy(blurred_noised)
    psf = np.ones((5, 5)) / 25
    # img = signal.convolve2d(img, psf, 'same')
    # img += 0.1 * img.std() * np.random.standard_normal(img.shape)
    deblurred = restoration.wiener(img, psf, 1100)
    return deblurred
Code Example #20
File: convolution.py Project: ashinecas/prysm
    def deconv(self,
               other,
               balance=1000,
               reg=None,
               is_real=True,
               clip=False,
               postnormalize=True):
        """Perform the deconvolution of this convolvable object by another.

        Parameters
        ----------
        other : `Convolvable`
            another convolvable object, used as the PSF in a Wiener deconvolution
        balance : `float`, optional
            regularization parameter; passed through to skimage
        reg : `numpy.ndarray`, optional
            regularization operator, passed through to skimage
        is_real : `bool`, optional
            True if self and other are both real
        clip : `bool`, optional
            passed to skimage; if True, values of the result above 1 or below -1 are clipped
        postnormalize : `bool`, optional
            normalize the result such that it falls in [0,1]


        Returns
        -------
        `Convolvable`
            a new Convolvable object

        Notes
        -----
        See skimage:
        http://scikit-image.org/docs/dev/api/skimage.restoration.html#skimage.restoration.wiener

        """
        from skimage.restoration import wiener

        result = wiener(self.data,
                        other.data,
                        balance=balance,
                        reg=reg,
                        is_real=is_real,
                        clip=clip)
        if postnormalize:
            result -= result.min()
            result /= result.max()
        return Convolvable(result, self.x, self.y, False)
Code Example #21
 def denoise(denoiser_name, img, sigma):
     from skimage.restoration import (denoise_tv_chambolle, denoise_bilateral, denoise_wavelet, denoise_nl_means, wiener)
     if denoiser_name == 'wavelet':
         """Input scale - [0, 1]
         """
         return denoise_wavelet(img, sigma=sigma, mode='soft', multichannel=True, convert2ycbcr=True, method='BayesShrink')
     elif denoiser_name == 'TVM':
         return denoise_tv_chambolle(img, multichannel=True)
     elif denoiser_name == 'bilateral':
         return denoise_bilateral(img, bins=1000, multichannel=True)
     elif denoiser_name == 'deconv':
         # NOTE: psf and balance are required arguments of skimage.restoration.wiener;
         # this call fails as written.
         return wiener(img)
     elif denoiser_name == 'NLM':
         return denoise_nl_means(img, multichannel=True)
     else:
         raise Exception('Incorrect denoiser mentioned. Options: wavelet, TVM, bilateral, deconv, NLM')
Code Example #22
def clean_noise(image: np.ndarray)->np.ndarray:
    image = np.where(image<-1, -1, image)
    image = np.where(image>1, 1, image)

    p2, p98 = np.percentile(image, (2, 98))
    image_rescale = exposure.rescale_intensity(image, in_range=(p2, p98))
    
    # Adaptive Equalization
    image_adapteq = exposure.equalize_adapthist(image, clip_limit=0.03)
    
    psf = np.ones((5, 5)) / 25
    img = convolve2d(image_adapteq, psf, 'same')
    img += 0.1*img.std() * np.random.standard_normal(img.shape)
    
    deconvolved_image = restoration.wiener(img, psf, 1100)
    
    return deconvolved_image
Code Example #23
File: filters.py Project: JonasFreibur/ledebruiteur
def wiener_filter(img,
                  unsupervised=True,
                  wiener_balance=1100,
                  psf_size=5,
                  psf_numerator=25):
    """Wiener filter to sharpen an image

    This filter is used to estimate the desired value of a noisy signal.
    The Wiener filter minimizes the root mean square error between the estimated random process and the desired process.

    Arguments:
        img {array} -- Image array [Non-normalize (0-255)]

    Keyword Arguments:
        unsupervised {bool} -- True to use unsupervised_wiener, False to use the supervised Wiener filter (default: {True})
        wiener_balance {int} -- Wiener balance parameter (default: {1100})
        psf_size {int} -- PSF kernel size (default: {5})
        psf_numerator {int} -- PSF kernel numerator (default: {25})

    Returns:
        array -- Filtered image [Non-normalize (0-255)]
    """

    img = np.array(img, np.float32)
    img = cv2.normalize(img,
                        None,
                        alpha=0,
                        beta=1,
                        norm_type=cv2.NORM_MINMAX,
                        dtype=cv2.CV_32F)  # Allow to normalize image

    psf = np.ones((psf_size, psf_size)) / psf_numerator
    convolved_img = convolve2d(img, psf, 'same')
    convolved_img += 0.1 * convolved_img.std() * \
        np.random.standard_normal(convolved_img.shape)

    deconvolved = None
    if unsupervised:
        deconvolved, _ = unsupervised_wiener(convolved_img, psf)
    else:
        deconvolved = wiener(convolved_img, psf, wiener_balance)


    return deconvolved * 255
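A minimal usage sketch, assuming the snippet's own imports (cv2, numpy, scipy.signal.convolve2d, and skimage's wiener / unsupervised_wiener) are already in scope; the test image is illustrative:

from skimage import data

camera = data.camera()                                              # uint8 grayscale, values 0-255
restored = wiener_filter(camera, unsupervised=False, wiener_balance=1100)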
Code Example #24
def denoiser(denoiser_name, img, sigma):
    # For bilateral: sigma is sigma_spatial : float
    #     Standard deviation for range distance.
    #     A larger value results in averaging of pixels with larger spatial differences.
    # For wavelet: sigma is The noise standard deviation used
    #     when computing the wavelet detail coefficient threshold(s)
    from skimage.restoration import (denoise_tv_chambolle, denoise_bilateral,
                                     denoise_wavelet, denoise_nl_means, wiener,
                                     estimate_sigma)
    batch_shape = img.shape
    images = np.zeros(batch_shape)
    batch_size = batch_shape[0]
    for idx in range(batch_size):
        # change to [0 1] for image processing
        img[idx, :, :, :] = (img[idx, :, :, :] + 1) / 2.0
        sigma1 = estimate_sigma(img[idx, :, :, :],
                                average_sigmas=False,
                                multichannel=True)
        if denoiser_name == 'wavelet':
            images[idx, :, :, :] = denoise_wavelet(img[idx, :, :, :],
                                                   sigma=sigma1,
                                                   mode='soft',
                                                   multichannel=True,
                                                   convert2ycbcr=True,
                                                   method='BayesShrink')
        elif denoiser_name == 'TVM':
            images[idx, :, :, :] = denoise_tv_chambolle(img[idx, :, :, :],
                                                        multichannel=True)
        elif denoiser_name == 'bilateral':
            images[idx, :, :, :] = denoise_bilateral(img[idx, :, :, :],
                                                     sigma_spatial=sigma,
                                                     bins=1000,
                                                     multichannel=True)
        elif denoiser_name == 'deconv':
            # NOTE: psf and balance are required arguments of skimage.restoration.wiener;
            # this call fails as written.
            images[idx, :, :, :] = wiener(img[idx, :, :, :])
        elif denoiser_name == 'NLM':
            images[idx, :, :, :] = denoise_nl_means(img[idx, :, :, :],
                                                    multichannel=True)
        else:
            raise Exception(
                'Incorrect denoiser mentioned. Options: wavelet, TVM, bilateral, deconv, NLM'
            )
        # change back to [-1 1]
        images[idx, :, :, :] = images[idx, :, :, :] * 2.0 - 1.0
    return images
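A usage sketch for this batch variant, assuming NHWC images scaled to [-1, 1]; the batch shape and sigma are illustrative only:

import numpy as np

batch = np.random.uniform(-1.0, 1.0, size=(4, 64, 64, 3))  # (N, H, W, C) images in [-1, 1]
cleaned = denoiser('bilateral', batch, sigma=0.1)           # returned images are mapped back to [-1, 1]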
Code Example #25
File: Recon_tomopy_fxi.py Project: sankhesh/tomviz
def wiener_denoise(prj_norm, wiener_param, denoise_flag):
    import skimage.restoration as skr
    if not denoise_flag or not len(wiener_param):
        return prj_norm

    ss = prj_norm.shape
    psf = wiener_param['psf']
    reg = wiener_param['reg']
    balance = wiener_param['balance']
    is_real = wiener_param['is_real']
    clip = wiener_param['clip']
    for j in range(ss[0]):
        prj_norm[j] = skr.wiener(prj_norm[j],
                                 psf=psf,
                                 reg=reg,
                                 balance=balance,
                                 is_real=is_real,
                                 clip=clip)
    return prj_norm
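A minimal usage sketch for wiener_denoise; the parameter values mirror those in Code Example #28 and the projection stack is illustrative:

import numpy as np

prj_norm = np.random.rand(8, 64, 64).astype(np.float32)  # stack of normalized projections
wiener_param = {
    'psf': np.ones((2, 2)) / 4,   # small box point-spread function
    'reg': None,
    'balance': 0.3,
    'is_real': True,
    'clip': True,
}
prj_norm = wiener_denoise(prj_norm, wiener_param, denoise_flag=True)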
Code Example #26
def test_image_shape():
    """Test that shape of output image in deconvolution is same as input.

    This addresses issue #1172.
    """
    point = np.zeros((5, 5), dtype=float)
    point[2, 2] = 1.0
    psf = nd.gaussian_filter(point, sigma=1.0)
    # image shape: (45, 45), as reported in #1172
    image = skimage.img_as_float(camera()[110:155, 225:270])  # just the face
    image_conv = nd.convolve(image, psf)
    deconv_sup = restoration.wiener(image_conv, psf, 1)
    deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]
    # test the shape
    np.testing.assert_equal(image.shape, deconv_sup.shape)
    np.testing.assert_equal(image.shape, deconv_un.shape)
    # test the reconstruction error
    sup_relative_error = np.abs(deconv_sup - image) / image
    un_relative_error = np.abs(deconv_un - image) / image
    np.testing.assert_array_less(np.median(sup_relative_error), 0.1)
    np.testing.assert_array_less(np.median(un_relative_error), 0.1)
Code Example #27
def test_image_shape():
    """Test that shape of output image in deconvolution is same as input.

    This addresses issue #1172.
    """
    point = np.zeros((5, 5), dtype=float)
    point[2, 2] = 1.
    psf = ndi.gaussian_filter(point, sigma=1.)
    # image shape: (45, 45), as reported in #1172
    image = util.img_as_float(camera()[65:165, 215:315])  # just the face
    image_conv = ndi.convolve(image, psf)
    deconv_sup = restoration.wiener(image_conv, psf, 1)
    deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]
    # test the shape
    np.testing.assert_equal(image.shape, deconv_sup.shape)
    np.testing.assert_equal(image.shape, deconv_un.shape)
    # test the reconstruction error
    sup_relative_error = np.abs(deconv_sup - image) / image
    un_relative_error = np.abs(deconv_un - image) / image
    np.testing.assert_array_less(np.median(sup_relative_error), 0.1)
    np.testing.assert_array_less(np.median(un_relative_error), 0.1)
Code Example #28
def denoise(prj, denoise_flag):
    if denoise_flag == 1:  # Wiener denoise
        import skimage.restoration as skr

        ss = prj.shape
        psf = np.ones([2, 2]) / (2**2)
        reg = None
        balance = 0.3
        is_real = True
        clip = True
        for j in range(ss[0]):
            prj[j] = skr.wiener(prj[j],
                                psf=psf,
                                reg=reg,
                                balance=balance,
                                is_real=is_real,
                                clip=clip)
    elif denoise_flag == 2:  # Gaussian denoise
        from skimage.filters import gaussian as gf

        prj = gf(prj, [0, 1, 1])
    return prj
Code Example #29
File: MotionBlur.py Project: Jishnu-git/IP-Project
def removeMotionBlur(image, kernelSize, angle):
    kernel = makeKernel(kernelSize, angle)
    return restoration.wiener(image, kernel, 0.1)
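makeKernel is not shown with this snippet; one plausible sketch of a normalized motion-blur (line) kernel builder, offered as an assumption about what the original helper produces, is:

import numpy as np
from scipy.ndimage import rotate as ndrotate

def makeKernel(kernelSize, angle):
    """Return a normalized line kernel of length kernelSize oriented at `angle` degrees."""
    kernel = np.zeros((kernelSize, kernelSize))
    kernel[kernelSize // 2, :] = 1.0                 # horizontal line through the center row
    kernel = ndrotate(kernel, angle, reshape=False)  # rotate the line to the requested angle
    kernel = np.clip(kernel, 0, None)                # spline interpolation can produce small negatives
    return kernel / kernel.sum()                     # normalize so the kernel sums to 1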
Code Example #30
convolver = 10*np.ones((30,30))
convolver[10:20,10:20]=0



data2D = data_prospa.read_2d_file(datafolder+"dataIMG.2d")
data3D=data_prospa.read_3d_file(datafolder+"data.3d")
#params=ConfigParser.ConfigParser()
#params.read(datafolder+"acqu.par")

#implement gating on 3d dataset
data2D*=0.01
data3D*=0.01
processedout=np.sum(data3D.real[:,:,0:],2)
processedout*= (data2D.real.max() / processedout.max())
deconv80=restoration.wiener(processedout,convolver,200,clip=False)


data2D=data_prospa.read_2d_file("data/6 - ring 2mm/dataIMG.2d")
data2D=0.01*data2D.real
deconv40=restoration.wiener(data2D,convolver[::2,::2],200,clip=False)


fig1,ax=plt.subplots(nrows=1,ncols=5,squeeze=False,figsize=(15,5))
ax[0,0].matshow(convolver)
ax[0,0].set_title('original')
ms11=ax[0,1].matshow(processedout)
ax[0,1].set_title('imagedata80')
fixclim=ms11.get_clim()

ms12=ax[0,2].matshow(data2D)
Code Example #31
File: util.py Project: MK8J/PV_analysis
def deconvolve(image, psf, iterations=7, clip=False):
    '''
    Deconvolve the image with a Wiener filter
    (`iterations` is kept for API compatibility but is unused here).
    '''
    # return res.unsupervised_wiener(image, psf)[0]
    return res.wiener(image, psf, balance=0.001, clip=clip, is_real=True, reg=0 * np.sqrt(image))
Code Example #32
#!/usr/bin/env python
import numpy as np
from skimage import color, data, restoration
from matplotlib.pyplot import subplots, show
from scipy.signal import convolve2d

img = color.rgb2gray(data.astronaut())
psf = np.ones((5, 5)) / 25
noisy = convolve2d(img, psf, 'same')
noisy += 0.1 * noisy.std() * np.random.standard_normal(noisy.shape)
deconvolved_img = restoration.wiener(noisy, psf, 1100)

fg, ax = subplots(1, 2, figsize=(12, 5))

ax[0].imshow(img)
ax[1].imshow(deconvolved_img)

show()
Code Example #33
convolver=np.load('psf40.npy')
convolver=5.0*img.imread(datafolder+"pattern2mm.png",flatten=True)/255.0
convolver+=5

data2D = data_prospa.read_2d_file(datafolder+"dataIMG.2d")
data3D=data_prospa.read_3d_file(datafolder+"data.3d")
#params=ConfigParser.ConfigParser()
#params.read(datafolder+"acqu.par")

#implement gating on 3d dataset
data2D*=0.01
data3D*=0.01
processedout=np.sum(data3D.real[:,:,0:],2)
processedout*= (data2D.real.max() / processedout.max())
deconv=restoration.wiener(processedout,convolver,3e-1,clip=False)
deconv=restoration.wiener(processedout,convolver,200,clip=False)

#np.save('psf40.npy', deconv)

fig1,ax=plt.subplots(nrows=1,ncols=4,squeeze=False,figsize=(15,5))
ms11=ax[0,0].matshow(data2D.real)
ax[0,0].set_title('Sumimage')
fixclim=ms11.get_clim()

ms12=ax[0,1].matshow(processedout)
ms12.set_clim(fixclim)
ax[0,1].set_title('Gateimage')

ms13=ax[0,2].matshow(convolver)
ax[0,2].set_title('Convolver')
Code Example #34
#padsmp[10:-10,-10:-1]=trainsmp[:,0:9]

FO=fft.fftshift(fft.fft2(padsmp))

FTSlev=np.percentile(np.abs(FTS),80)
FOlev=np.percentile(np.abs(FO),80)

threshFTS=np.where(np.abs(FTS)<FTSlev,0,FTS)
threshFO=np.where(np.abs(FO)<FOlev,0,FO)

threshFSS=np.where(np.abs(threshFO)>0,threshFTS/threshFO,0)#threshFTS)
iFSS=fft.ifftshift(fft.ifft2(threshFSS))

psfstart=(iFSS.shape[0]//2)-9
psf = np.abs(iFSS)[psfstart:psfstart+20,psfstart:psfstart+20]
psf=restoration.wiener(trainsig,trainsmp,balance=500)[psfstart:psfstart+20,psfstart:psfstart+20]
##%% Make psf2
#reTrain=restoration.wiener(trainsig,psf,balance=90)
#FRT=fft.fftshift(fft.fft2(reTrain))
#FRTlev=np.percentile(np.abs(FRT),90)
#threshFRT=np.where(np.abs(FRT)<FRTlev, 0, FRT)
#threshFSS2=np.where(np.abs(threshFO)>0, threshFRT/threshFO, threshFRT)
#iFSS2=fft.ifftshift(fft.ifft2(threshFSS2))
#plt.matshow(np.abs(iFSS2))
#plt.title('SweetSpot 2')
#FRT2=restoration.wiener(reTrain,np.abs(iFSS2)[14:19,14:19],balance=90)
#plt.matshow(reTrain)
#plt.title('reconstructed training sample')
#plt.matshow(FRT2)
#plt.title('rereconstructed training sample')
#%%Display fourier
Code Example #35
plt.show()

############       Wiener     #####################

# create the motion blur kernel
im = cv2.imread('../images/lena_gray_512.tif', 0)
size = 5

# generating the kernel
kernel_motion_blur = np.zeros((size, size))
kernel_motion_blur[int((size - 1) / 2), :] = np.ones(size)
kernel_motion_blur = kernel_motion_blur / size
H_kernel = np.pad(kernel_motion_blur,
                  (((im.shape[0] - size) // 2, (im.shape[0] - size) // 2),
                   ((im.shape[1] - size) // 2, (im.shape[1] - size) // 2)))
output = cv2.filter2D(im, -1, H_kernel)
Degradate_image = random_noise(output, mode='gaussian', seed=None, clip=True)
psf = kernel_motion_blur
deconvolved_img = wiener(Degradate_image, psf, 10)
deconvolvedW, _ = unsupervised_wiener(Degradate_image, psf)

plt.subplot(221), plt.imshow(im, cmap='gray'), plt.title('Origin')
plt.subplot(222), plt.imshow(Degradate_image,
                             cmap='gray'), plt.title('G_image')
plt.subplot(223), plt.imshow(deconvolved_img, cmap='gray'), plt.title('Wiener')
plt.subplot(224), plt.imshow(deconvolvedW,
                             cmap='gray'), plt.title('unsupervised_wiener')

plt.show()

######
Code Example #36
Image._show(ImageChops.subtract(im, im8), title="im-ImageFilter.Kernel")
Image._show(ImageChops.subtract(im, im9), title="im-ImageFilter.RankFilter")

# In[10]:

img = np.float32(io.imread(img_name, as_gray=True))
# img = color.rgb2gray(io.imread('image.jpg'))
img = color.rgb2gray(img)

psf = np.ones((7, 7)) / 49
# img = convolve2d(img, psf, 'same')

# Add noise
# img += 0.01 * img.std() * np.random.standard_normal(img.shape)

deconv_img = restoration.wiener(img, psf, 1100)
deconv_img2, _ = restoration.unsupervised_wiener(img, psf)

# ImageViewer(img).show()
# ImageViewer(deconv_img).show()
# ImageViewer(deconv_img2).show()

cv2.imshow("Input Image", img)
cv2.imshow("Deconv1 Image", deconv_img)
cv2.imshow("Deconv2 Image", deconv_img2)
cv2.waitKey(0)
cv2.destroyAllWindows()

# In[11]: