Example #1
def test_denoise_tv_bregman_3d_multichannel():
    img_astro = astro.copy()
    denoised0 = restoration.denoise_tv_bregman(img_astro[..., 0], weight=60.0)
    denoised = restoration.denoise_tv_bregman(img_astro, weight=60.0,
                                              multichannel=True)

    assert_equal(denoised0, denoised[..., 0])
Example #2
def test_denoise_tv_bregman_3d_multichannel_deprecation():
    img_astro = astro.copy()
    denoised0 = restoration.denoise_tv_bregman(img_astro[..., 0], weight=60.0)
    with expected_warnings(["`multichannel` is a deprecated argument"]):
        denoised = restoration.denoise_tv_bregman(img_astro, weight=60.0,
                                                  multichannel=True)

    assert_equal(denoised0, denoised[..., 0])
Example #3
def test_denoise_bregman_types(dtype):
    img = checkerboard_gray.copy()[:50, :50]
    # add some random noise
    img += 0.5 * img.std() * np.random.rand(*img.shape)
    img = np.clip(img, 0, 1).astype(dtype)

    # check that we can process multiple float types
    restoration.denoise_tv_bregman(img, weight=5)
Example #4
def test_denoise_tv_bregman_3d_multichannel(channel_axis):
    img_astro = astro.copy()
    denoised0 = restoration.denoise_tv_bregman(img_astro[..., 0], weight=60.0)
    img_astro = np.moveaxis(img_astro, -1, channel_axis)
    denoised = restoration.denoise_tv_bregman(img_astro, weight=60.0,
                                              channel_axis=channel_axis)
    _at = functools.partial(slice_at_axis,
                            axis=channel_axis % img_astro.ndim)
    assert_equal(denoised0, denoised[_at(0)])
Example #5
def test_denoise_tv_bregman_multichannel():
    img = checkerboard_gray.copy()[:50, :50]
    # add some random noise
    img += 0.5 * img.std() * np.random.rand(*img.shape)
    img = np.clip(img, 0, 1)

    out1 = restoration.denoise_tv_bregman(img, weight=60.0)
    out2 = restoration.denoise_tv_bregman(img, weight=60.0, multichannel=True)

    assert_equal(out1, out2)
Example #6
def test_denoise_tv_bregman_3d():
    img = lena
    # add some random noise
    img += 0.5 * img.std() * np.random.random(img.shape)
    img = np.clip(img, 0, 1)

    out1 = restoration.denoise_tv_bregman(img, weight=10)
    out2 = restoration.denoise_tv_bregman(img, weight=5)

    # make sure noise is reduced
    assert img.std() > out1.std()
    assert out1.std() > out2.std()
Example #7
def test_denoise_tv_bregman_3d():
    img = lena
    # add some random noise
    img += 0.5 * img.std() * np.random.random(img.shape)
    img = np.clip(img, 0, 1)

    out1 = restoration.denoise_tv_bregman(img, weight=10)
    out2 = restoration.denoise_tv_bregman(img, weight=5)

    # make sure noise is reduced
    assert img.std() > out1.std()
    assert out1.std() > out2.std()
Example #8
def test_denoise_tv_bregman_3d():
    img = checkerboard.copy()
    # add some random noise
    img += 0.5 * img.std() * np.random.rand(*img.shape)
    img = np.clip(img, 0, 1)

    out1 = restoration.denoise_tv_bregman(img, weight=10)
    out2 = restoration.denoise_tv_bregman(img, weight=5)

    # make sure noise is reduced in the checkerboard cells
    assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
    assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
Example #9
def test_denoise_tv_bregman_3d():
    img = checkerboard.copy()
    # add some random noise
    img += 0.5 * img.std() * np.random.rand(*img.shape)
    img = np.clip(img, 0, 1)

    out1 = restoration.denoise_tv_bregman(img, weight=10)
    out2 = restoration.denoise_tv_bregman(img, weight=5)

    # make sure noise is reduced in the checkerboard cells
    assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
    assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
Example #10
def _deepdream(graph, sess, op_tensor, X, feed_dict, layer, path_outdir, path_logdir):
	tensor_shape = op_tensor.get_shape().as_list()

	with graph.as_default() as g:
		n = (config["N"] + 1) // 2
		feature_map = tf.placeholder(dtype = tf.int32)
		tmp1 = tf.reduce_mean(tf.multiply(tf.gather(tf.transpose(op_tensor),feature_map),tf.diag(tf.ones_like(feature_map, dtype = tf.float32))), axis = 0)
		tmp2 = 1e-3 * tf.reduce_mean(tf.square(X), axis = (1, 2 ,3))
		tmp = tmp1 - tmp2
		t_grad = tf.gradients(ys = tmp, xs = X)[0]

		lap_in = tf.placeholder(np.float32, name='lap_in')
		laplacian_pyramid = lap_normalize(lap_in, scale_n = config["NUM_LAPLACIAN_LEVEL"])

		image_to_resize = tf.placeholder(np.float32, name='image_to_resize')
		size_to_resize = tf.placeholder(np.int32, name='size_to_resize')
		resize_image = tf.image.resize_bilinear(image_to_resize, size_to_resize)

		with sess.as_default() as sess:
			tile_size = sess.run(tf.shape(X), feed_dict = feed_dict)[1 : 3]

			end = len(units)
			for k in range(0, end, n):
				c = n
				if k + n >= end:
					c = end - ((end // n) * n)
				img = np.random.uniform(size = (c, tile_size[0], tile_size[1], 3)) + 117.0
				feed_dict[feature_map] = units[k : k + c]

				for octave in range(config["NUM_OCTAVE"]):
					if octave > 0:
						hw = np.float32(img.shape[1:3])*config["OCTAVE_SCALE"]
						img = sess.run(resize_image, {image_to_resize : img, size_to_resize : np.int32(hw)})

						for i, im in enumerate(img):
							min_img = im.min()
							max_img = im.max()
							temp = denoise_tv_bregman((im - min_img) / (max_img - min_img), weight = config["TV_DENOISE_WEIGHT"])
							img[i] = temp * (max_img - min_img) + min_img

					for j in range(config["NUM_ITERATION"]):
						sz = tile_size
						h, w = img.shape[1:3]
						sx = np.random.randint(sz[1], size=1)
						sy = np.random.randint(sz[0], size=1)
						img_shift = np.roll(np.roll(img, sx, 2), sy, 1)
						grad = np.zeros_like(img)
						for y in range(0, max(h-sz[0]//2,sz[0]), sz[0] // 2):
							for x in range(0, max(h-sz[1]//2,sz[1]), sz[1] // 2):
									feed_dict[X] = img_shift[:, y:y+sz[0],x:x+sz[1]]
									try:
										grad[:, y:y+sz[0],x:x+sz[1]] = sess.run(t_grad, feed_dict=feed_dict)
									except:
										pass

						lap_out = sess.run(laplacian_pyramid, feed_dict={lap_in:np.roll(np.roll(grad, -sx, 2), -sy, 1)})
						img = img + lap_out
				is_success = write_results(img, (layer, units, k), path_outdir, path_logdir, method = "deepdream")
				print("%s -> featuremap completed." % (", ".join(str(num) for num in units[k:k+c])))
	return is_success
Example #11
def threshold(image,
              *,
              sigma=0.,
              radius=0,
              offset=0.,
              method='sauvola',
              smooth_method='Gaussian'):
    """Use scikit-image filters to "intelligently" threshold an image.

    Parameters
    ----------
    image : array, shape (M, N, ...[, 3])
        Input image, conformant with scikit-image data type
        specification [1]_.
    sigma : float, optional
        If positive, use Gaussian filtering to smooth the image before
        thresholding.
    radius : int, optional
        If given, use local median thresholding instead of global.
    offset : float, optional
        If given, reduce the threshold by this amount. Higher values
        result in fewer pixels above the threshold.
    method: {'sauvola', 'niblack', 'median'}
        Which method to use for thresholding. Sauvola is 100x faster, but
        median might be more accurate.
    smooth_method: {'Gaussian', 'TV'}
        Which method to use for smoothing. Choose from Gaussian smoothing
        and total variation denoising.

    Returns
    -------
    thresholded : image of bool, same shape as `image`
        The thresholded image.

    References
    ----------
    .. [1] http://scikit-image.org/docs/dev/user_guide/data_types.html
    """
    if sigma > 0:
        if smooth_method.lower() == 'gaussian':
            image = filters.gaussian(image, sigma=sigma)
        elif smooth_method.lower() == 'tv':
            image = restoration.denoise_tv_bregman(image, weight=sigma)
    if radius == 0:
        t = filters.threshold_otsu(image) + offset
    else:
        if method == 'median':
            footprint = hyperball(image.ndim, radius=radius)
            t = ndi.median_filter(image, footprint=footprint) + offset
        elif method == 'sauvola':
            w = 2 * radius + 1
            t = threshold_sauvola(image, window_size=w, k=offset)
        elif method == 'niblack':
            w = 2 * radius + 1
            t = threshold_niblack(image, window_size=w, k=offset)
        else:
            raise ValueError('Unknown method %s. Valid methods are median,'
                             'niblack, and sauvola.' % method)
    thresholded = image > t
    return thresholded
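A minimal usage sketch for the `threshold` helper above (not part of the original snippet). It assumes the function and its scikit-image dependencies are importable in the current module, and uses the `camera` test image with an arbitrary noise level purely for illustration.

from skimage import data, util

# assumes `threshold` (and the imports it relies on) from the snippet above are in scope
img = util.img_as_float(data.camera())
noisy = util.random_noise(img, mode='gaussian', var=0.01)

# TV smoothing (weight = 2.0) followed by a global Otsu threshold
binary_global = threshold(noisy, sigma=2.0, smooth_method='TV')

# local Sauvola threshold in a 31x31 window (radius=15); offset is passed through as k
binary_local = threshold(noisy, radius=15, offset=0.05)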
Example #12
def make_prediction():
    import sys
    import numpy as np
    import pandas as pd

    from skimage.data import imread
    from skimage.filters import threshold_adaptive
    from skimage.restoration import denoise_tv_bregman

    from sklearn.cross_validation import train_test_split
    from sklearn.grid_search import GridSearchCV

    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier

    from model_design import model_design
    classifier = model_design()

    X, IDs = [], range(6284, 12504)
    for ID in IDs:
        original = imread('../data/testResized/' + str(ID) +'.Bmp', as_grey=True)
        denoised = denoise_tv_bregman(original, 3)
        binarilized = threshold_adaptive(denoised, block_size=13, method='gaussian')
        feature = binarilized.reshape(1,400)[0]
        X.append(feature)
    X = np.array(X)

    y = classifier.predict(X)
    result = pd.DataFrame({'Id': IDs, 'Class': y})
    result.to_csv('../result/06-09-2015_AdaBoostXTC.csv', sep=',', index=None, columns=['Id', 'Class'])
Example #13
def processSignature(image) :
    sign_image = restor.denoise_tv_bregman(image, weight = 4)
    sign_image = toUINT8(sign_image)    
    th = filters.threshold_otsu(sign_image)
    sign_image[sign_image > th] = 255 
    sign_image = normalizeSignatureIm(sign_image)
    return sign_image 
Example #14
def tvd(x0, rho, gamma):
    """
    Proximal operator for the total variation denoising penalty

    Requires scikit-image be installed

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)

    gamma : float
        A constant that weights how strongly to enforce the constraint

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step

    Raises
    ------
    ImportError
        If scikit-image fails to be imported
    """
    try:
        from skimage.restoration import denoise_tv_bregman
    except ImportError:
        print('Error: scikit-image not found. TVD will not work.')
        return x0

    return denoise_tv_bregman(x0, rho / gamma)
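To make the proximal-operator semantics concrete, here is a hedged usage sketch (the test image and the `rho`/`gamma` values are illustrative, not from the original code): `denoise_tv_bregman` receives `rho / gamma` as its `weight`, so a larger ratio keeps the result closer to `x0`, while a smaller ratio smooths more aggressively.

from skimage import data, util

# assumes the `tvd` function above is in scope
x0 = util.random_noise(util.img_as_float(data.camera()), mode='gaussian', var=0.01)

theta_mild = tvd(x0, rho=10.0, gamma=1.0)    # weight 10.0: stays close to x0
theta_smooth = tvd(x0, rho=1.0, gamma=1.0)   # weight 1.0: much stronger smoothing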
Example #15
def load_image_data(heigth, width):

    base_image = file.import_image('lightning')
    base_image = resize(base_image, (
        heigth,
        width,
    ), order=1)
    base_image_blurred = gaussian(base_image, sigma=4)
    # base_image_blurred = gaussian(base_image,sigma=15)
    base_image_tv = denoise_tv_bregman(base_image, weight=10)
    # base_image = gaussian(base_image,sigma=10)

    labels_blurred = base_image_blurred[:, :, 0]
    labels_blurred = data.normalize_01(labels_blurred)
    # labels -= 0.5
    m = np.mean(labels_blurred)
    labels_blurred -= m

    labels_tv = base_image_tv[:, :, 0]
    labels_tv = data.normalize_01(labels_tv)
    labels_tv -= m

    # return labels_blurred.reshape(-1,1),labels_tv.reshape(-1,1)
    # return [labels_tv.reshape(-1,1)]
    return [labels_blurred.reshape(-1, 1)]
Example #16
def blur_predict(model, X, type="median", filter_size=3, sigma=1.0):
  
    if type == "median":
        blured_X = np.array(list(map(lambda x: ndimage.median_filter(x, filter_size), 
                                     X)))
    elif type == "gaussian":
        blured_X = np.array(list(map(lambda x: ndimage.gaussian_filter(x, filter_size),
                                     X)))
    elif type == "f_gaussian":
        blured_X = np.array(list(map(lambda x: filters.gaussian_filter(x.reshape((28, 28)), sigma=sigma).reshape(784),
                                     X))) 
    elif type == "tv_chambolle":
        blured_X = np.array(list(map(lambda x: restoration.denoise_tv_chambolle(x.reshape((28, 28)), weight=0.2).reshape(784),
                                     X)))
    elif type == "tv_bregman":
        blured_X = np.array(list(map(lambda x: restoration.denoise_tv_bregman(x.reshape((28, 28)), weight=5.0).reshape(784),
                                     X)))
    elif type == "bilateral":
        blured_X = np.array(list(map(lambda x: restoration.denoise_bilateral(np.abs(x).reshape((28, 28))).reshape(784),
                                     X)))
    elif type == "nl_means":
        blured_X = np.array(list(map(lambda x: restoration.nl_means_denoising(x.reshape((28, 28))).reshape(784),
                                     X)))
        
    elif type == "none":
        blured_X = X 

    else:
        raise ValueError("unsupported filter type", type)

    return predict(model, blured_X)
Example #17
def sart_tv(tils,
            angles,
            sart_iters,
            tv_w,
            tv_maxit,
            tv_eps=1e-3,
            relaxation=0.3,
            show=False):
    image = None
    for i in tqdm(range(sart_iters)):
        image = iradon_sart(tils, angles, image=image, relaxation=relaxation)
        if show:
            plt.imshow(image, cmap="gray")
            plt.show()

        image = denoise_tv_bregman(image,
                                   tv_w,
                                   eps=tv_eps,
                                   max_iter=tv_maxit,
                                   isotropic=False)
        if show:
            plt.imshow(image, cmap="gray")
            plt.show()
    image = iradon_sart(tils, angles, image=image, relaxation=relaxation)
    return image
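For context, a self-contained sketch of the same SART-plus-TV alternation on synthetic data (the phantom, angles, iteration count and TV weight are illustrative choices; `sart_tv` itself is not reused, so the sketch runs without `tqdm` or `matplotlib`):

import numpy as np
from skimage.data import shepp_logan_phantom
from skimage.transform import radon, iradon_sart
from skimage.restoration import denoise_tv_bregman

phantom = shepp_logan_phantom()
angles = np.linspace(0., 180., 60, endpoint=False)
sinogram = radon(phantom, theta=angles)

image = None
for _ in range(3):  # a few SART sweeps
    image = iradon_sart(sinogram, theta=angles, image=image, relaxation=0.3)
    # anisotropic TV regularization between sweeps, as in the function above
    image = denoise_tv_bregman(image, 4.0, isotropic=False)
image = iradon_sart(sinogram, theta=angles, image=image, relaxation=0.3)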
Example #18
def tvd(x0, rho, gamma):
    """
    Proximal operator for the total variation denoising penalty

    Requires scikit-image be installed

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)

    gamma : float
        A constant that weights how strongly to enforce the constraint

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step

    Raises
    ------
    ImportError
        If scikit-image fails to be imported

    """

    return denoise_tv_bregman(x0, rho / gamma)
Example #19
def augmentData2(image):
    """
    this is simple modification of the image
    1) restoration by denoising
    2) Add gaussian noise with scales 10 and 20
    3) Each noisy image is softened by gaussian filters sigmas = [0.5, 1, 2]
    """
    list_of_images = [image]            
    g_image = image.copy()
    #g_image = sutils.toUINT8((filters.gaussian(g_image, sigma=1))
    g_image = sutils.toUINT8(restor.denoise_tv_bregman(g_image, 5))
    list_of_images.append(g_image)
    
    image_noise = addGaussianNoise(image, 10)
    list_g = [0.5, 1, 2]
    for sigma in list_g :
        g_image = sutils.toUINT8(filters.gaussian(image_noise, sigma=sigma))
        list_of_images.append(g_image) 
        
    image_noise = addGaussianNoise(image, 20)
    for sigma in list_g :
        g_image = sutils.toUINT8(filters.gaussian(image_noise, sigma=sigma))
        list_of_images.append(g_image) 
        
    return list_of_images
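A rough, self-contained equivalent of the augmentation recipe described in the docstring above, using only NumPy and scikit-image. The project-specific helpers (`sutils.toUINT8`, `addGaussianNoise`) are replaced by direct calls, and the uint8 round-trip is an assumption about what those helpers do.

import numpy as np
from skimage import data, filters, restoration, util

def augment_sketch(image_u8, rng=np.random.default_rng(0)):
    # original image, one TV-denoised copy, then Gaussian-smoothed noisy copies
    # (noise std 10 and 20 in uint8 units, smoothing sigmas 0.5, 1 and 2)
    out = [image_u8]
    img = util.img_as_float(image_u8)
    out.append(util.img_as_ubyte(np.clip(restoration.denoise_tv_bregman(img, 5), 0, 1)))
    for scale in (10, 20):
        noisy = np.clip(img + rng.normal(0., scale / 255., img.shape), 0, 1)
        for sigma in (0.5, 1, 2):
            out.append(util.img_as_ubyte(filters.gaussian(noisy, sigma=sigma)))
    return out

augmented = augment_sketch(data.camera())  # 8 images: 1 original, 1 denoised, 6 noisy/smoothed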
Example #20
    def __call__(self, x0, rho):
        """
        Proximal operator for the total variation denoising penalty

        Requires scikit-image be installed

        Parameters
        ----------
        x0 : array_like
            The starting or initial point used in the proximal update step

        rho : float
            Momentum parameter for the proximal step (larger value -> stays closer to x0)

        Raises
        ------
        ImportError
            If scikit-image fails to be imported

        """
        try:
            from skimage.restoration import denoise_tv_bregman
        except ImportError:
            print('Error: scikit-image not found. TVD will not work.')
            return x0

        return denoise_tv_bregman(x0, rho / self.penalty)
Example #21
 def calc(self, projs, theta, sart_plot=False):
     image_r = super(SartTVReconstructor, self).calc(projs,
                                                     theta,
                                                     sart_plot=sart_plot)
     #denoise with tv
     self.image_r = denoise_tv_bregman(image_r, self.tv_weight,
                                       self.tv_n_iter)
     return self.image_r
Example #22
 def filter_frame(self, data):
     logging.debug("Running Denoise")
     weight = self.parameters['weight']
     max_iter = self.parameters['max_iterations']
     eps = self.parameters['error_threshold']
     isotropic = self.parameters['isotropic']
     return denoise_tv_bregman(data[0, ...], weight, max_iter=max_iter,
                               eps=eps, isotropic=isotropic)
Example #23
 def _filter_denoise(imgs, weight=0.003):
     """
     TV denoising
     :param imgs: slice to denoise [2D]
     :param weight: TV weight
     :return:
     """
     return restoration.denoise_tv_bregman(imgs, weight=weight)
Example #24
 def denoise_bregman(image):
     denoised_image = denoise_tv_bregman(image[0, :, :, 0],
                                         weight=100000000.0,
                                         max_iter=100,
                                         eps=1e-3)
     denoised_image = np.expand_dims(np.expand_dims(denoised_image,
                                                    axis=2),
                                     axis=0)
     return denoised_image.astype(np.float32)
Example #25
def denoise(path):
    image = io.imread(path)
    filename = os.path.splitext(os.path.basename(path))[0]
    
    new_path_bregman = 'denoised_images/bregman/' + filename + '.jpg'
    io.imsave(new_path_bregman, denoise_tv_bregman(image, weight=10))

    new_path_bregman = 'denoised_images/chambolle/' + filename + '.jpg'
    io.imsave(new_path_bregman, denoise_tv_chambolle(image, weight=0.1, multichannel=True))
Example #26
def denoise(img_dir, path):
    image = io.imread(path)
    filename = os.path.splitext(os.path.basename(path))[0]
    
    new_path_bregman = 'denoised_images/orig/bregman/' + img_dir + '/' + filename + '.png'
    io.imsave(new_path_bregman, np.clip(denoise_tv_bregman(image, weight=10), -1, 1))

    new_path_bregman = 'denoised_images/orig/chambolle/' + img_dir + '/' + filename + '.png'
    io.imsave(new_path_bregman, np.clip(denoise_tv_chambolle(image, weight=0.1, multichannel=True), -1, 1))
Example #27
def refine_worm(image, initial_area, candidate_edges):
    # find strong worm edges (roughly equivalent to the edges found by find_initial_worm,
    # which are in candidate_edges): smooth the image, do canny edge-finding, and
    # then keep only those edges near candidate_edges
    smooth_image = restoration.denoise_tv_bregman(image, 140).astype(numpy.float32)
    smoothed, gradient, sobel = canny.prepare_canny(smooth_image, 8, initial_area)
    local_maxima = canny.canny_local_maxima(gradient, sobel)
    candidate_edge_region = ndimage.binary_dilation(candidate_edges, iterations=4)
    strong_edges = local_maxima & candidate_edge_region

    # Now threshold the image to find dark blobs as our initial worm region
    # First, find areas in the initial region unlikely to be worm pixels
    mean, std = mcd.robust_mean_std(smooth_image[initial_area][::4], 0.85)
    non_worm = (smooth_image > mean - std) & initial_area
    # now fit a smoothly varying polynomial to the non-worm pixels in the initial
    # region of interest, and subtract that from the actual image to generate
    # an image with a flat illumination field
    background = polyfit.fit_polynomial(smooth_image, mask=non_worm, degree=2)
    minus_bg = smooth_image - background
    # now recalculate a threshold from the background-subtracted pixels
    mean, std = mcd.robust_mean_std(minus_bg[initial_area][::4], 0.85)
    initial_worm = (minus_bg < mean - std) & initial_area
    # Add any pixels near the strong edges to our candidate worm position
    initial_worm |= ndimage.binary_dilation(strong_edges, iterations=3)
    initial_worm = mask.fill_small_radius_holes(initial_worm, 5)

    # Now grow/shrink the initial_worm region so that as many of the strong
    # edges from the canny filter are in contact with the region edges as possible.
    ac = active_contour.EdgeClaimingAdvection(initial_worm, strong_edges,
        max_region_mask=initial_area)
    stopper = active_contour.StoppingCondition(ac, max_iterations=100)
    while stopper.should_continue():
        ac.advect(iters=1)
        ac.smooth(iters=1, depth=2)
    worm_mask = mask.fill_small_radius_holes(ac.mask, 7)

    # Now, get edges from the image at a finer scale
    smoothed, gradient, sobel = canny.prepare_canny(smooth_image, 0.3, initial_area)
    local_maxima = canny.canny_local_maxima(gradient, sobel)
    strong_sum = strong_edges.sum()
    highp = 100 * (1 - 1.5*strong_sum/local_maxima.sum())
    lowp = max(100 * (1 - 3*strong_sum/local_maxima.sum()), 0)
    low_worm, high_worm = numpy.percentile(gradient[local_maxima], [lowp, highp])
    fine_edges = canny.canny_hysteresis(local_maxima, gradient, low_worm, high_worm)

    # Expand out the identified worm area to include any of these finer edges
    closed_edges = ndimage.binary_closing(fine_edges, structure=S)
    worm = ndimage.binary_propagation(worm_mask, mask=worm_mask|closed_edges, structure=S)
    worm = ndimage.binary_closing(worm, structure=S, iterations=2)
    worm = mask.fill_small_radius_holes(worm, 5)
    worm = ndimage.binary_opening(worm)
    worm = mask.get_largest_object(worm)
    # Last, smooth the shape a bit to reduce sharp corners, but not too much to
    # sand off the tail
    ac = active_contour.CurvatureMorphology(worm, max_region_mask=initial_area)
    ac.smooth(depth=2, iters=2)
    return strong_edges, ac.mask
Example #28
def denoiseTV_Bregman(imagen,isotropic):
    """
    -isotropic is the flag that switches between isotropic and anisotropic filtering
    """
    noisy = img_as_float(imagen)

    denoise = denoise_tv_bregman(noisy, 7, 9, 0.08, isotropic)

    return denoise
Example #29
def reconstruct(img,
                drop_rate,
                recons,
                weight,
                drop_rate_post=0,
                lab=False,
                verbose=False,
                input_filepath=''):
    assert torch.is_tensor(img)
    temp = np.rollaxis(img.numpy(), 0, 3)
    w = np.ones_like(temp)
    if drop_rate > 0:
        # independent channel/pixel salt and pepper
        temp2 = random_noise(temp, 's&p', amount=drop_rate, salt_vs_pepper=0)
        # per-pixel all channel salt and pepper
        r = temp2 - temp
        w = (np.absolute(r) < 1e-6).astype('float')
        temp = temp + r
    if lab:
        temp = color.rgb2lab(temp)
    if recons == 'none':
        temp = temp
    elif recons == 'chambolle':
        temp = denoise_tv_chambolle(temp, weight=weight, multichannel=True)
    elif recons == 'bregman':
        if drop_rate == 0:
            temp = denoise_tv_bregman(temp, weight=1 / weight, isotropic=True)
        else:
            temp = minimize_tv_bregman(temp,
                                       w,
                                       weight=1 / weight,
                                       gsiter=10,
                                       eps=0.01,
                                       isotropic=True)
    elif recons == 'tvl2':
        temp = minimize_tv(temp,
                           w,
                           lam=weight,
                           p=2,
                           solver='L-BFGS-B',
                           verbose=verbose)
    elif recons == 'tvinf':
        temp = minimize_tv_inf(temp,
                               w,
                               tau=weight,
                               p=2,
                               solver='L-BFGS-B',
                               verbose=verbose)
    else:
        print('unsupported reconstruction method ' + recons)
        exit()
    if lab:
        temp = color.lab2rgb(temp)
    # temp = random_noise(temp, 's&p', amount=drop_rate_post, salt_vs_pepper=0)
    temp = torch.from_numpy(np.rollaxis(temp, 2, 0)).float()
    return temp
Example #30
def test_denoise_tv_bregman_float_result_range():
    # lena image
    img = lena_gray
    int_lena = np.multiply(img, 255).astype(np.uint8)
    assert np.max(int_lena) > 1
    denoised_int_lena = restoration.denoise_tv_bregman(int_lena, weight=60.0)
    # test if the value range of output float data is within [0.0:1.0]
    assert denoised_int_lena.dtype == np.float
    assert np.max(denoised_int_lena) <= 1.0
    assert np.min(denoised_int_lena) >= 0.0
Example #31
def predict_data():
    X, IDs = [], range(6284, 12504)
    for ID in IDs:
        original = imread('../data/testResized/' + str(ID) +'.Bmp', as_grey=True)
        denoised = denoise_tv_bregman(original, 3)
        binarilized = threshold_adaptive(denoised, block_size=13, method='gaussian')
        feature = binarilized.reshape(1,400)[0]
        X.append(feature)
    X = np.array(X)
    return X
Example #32
def tvd(x, rho, penalty):
    """
    Total variation denoising proximal operator

    Parameters
    ----------
    penalty : float
    """

    return denoise_tv_bregman(x, rho / penalty)
Example #33
def denoiseTvBregman():
    a = np.zeros((40, 40))
    a[10:-10, 10:-10] = 1.
    imgO = a.copy()
    a += 0.3 * np.random.randn(*a.shape)
    imgN = a.copy()
    denoised_a = denoise_tv_bregman(a, 7, 5, 0.1)
    imgR = denoised_a.copy()

    return [imgO, imgN, imgR]
Example #34
def TV_Bregman(images, factor):
    augmented_images = []
    for i in range(len(images)):
        sigma_est = estimate_sigma(
            images[i], multichannel=True, average_sigmas=True) / 100
        tv_denoised = denoise_tv_bregman(images[i], sigma_est * factor)
        tv_denoised = (255 * tv_denoised).astype(np.uint8)
        augmented_images.append(np.expand_dims(tv_denoised, axis=2))
        del tv_denoised
    return np.asarray(augmented_images)
Example #35
def den_Breg(img, fname, stfolder, sfn, wt, itn, epsv, iso, ext, prev):
    tmp = restoration.denoise_tv_bregman(img,
                                         wt,
                                         max_iter=itn,
                                         eps=epsv,
                                         isotropic=int(iso))
    corr = misc.toimage(255 * tmp, cmin=0, cmax=255)
    if prev == 'N':
        save_tif(stfolder, sfn, fname, corr, 'Den_TV_Br', ext)
    return corr
Example #36
def tvd(x, rho, penalty):
    """
    Total variation denoising proximal operator

    Parameters
    ----------
    penalty : float
    """

    return denoise_tv_bregman(x, rho / penalty)
Example #37
def test_denoise_tv_bregman_float_result_range():
    # lena image
    img = lena_gray.copy()
    int_lena = np.multiply(img, 255).astype(np.uint8)
    assert np.max(int_lena) > 1
    denoised_int_lena = restoration.denoise_tv_bregman(int_lena, weight=60.0)
    # test if the value range of output float data is within [0.0:1.0]
    assert denoised_int_lena.dtype == np.float
    assert np.max(denoised_int_lena) <= 1.0
    assert np.min(denoised_int_lena) >= 0.0
Example #38
 def tv_bregman(self, weight, max_iter=100, eps=0.001, isotropic=True):
     denoised = [
         R.denoise_tv_bregman(np.array(item, np.float32),
                              weight=weight,
                              max_iter=max_iter,
                              eps=eps,
                              isotropic=isotropic) for item in self.img
     ]
     return [[cv2.cvtColor(np.array(item, np.uint8), cv2.COLOR_RGB2BGR)]
             for item in denoised]
Example #39
def test_denoise_tv_bregman_float_result_range():
    # astronaut image
    img = astro_gray.copy()
    int_astro = np.multiply(img, 255).astype(np.uint8)
    assert_(np.max(int_astro) > 1)
    denoised_int_astro = restoration.denoise_tv_bregman(int_astro, weight=60.0)
    # test if the value range of output float data is within [0.0:1.0]
    assert_(denoised_int_astro.dtype == np.float)
    assert_(np.max(denoised_int_astro) <= 1.0)
    assert_(np.min(denoised_int_astro) >= 0.0)
Example #40
def test_denoise_tv_bregman_float_result_range():
    # astronaut image
    img = astro_gray.copy()
    int_astro = np.multiply(img, 255).astype(np.uint8)
    assert_(np.max(int_astro) > 1)
    denoised_int_astro = restoration.denoise_tv_bregman(int_astro, weight=60.0)
    # test if the value range of output float data is within [0.0:1.0]
    assert_(denoised_int_astro.dtype == np.float)
    assert_(np.max(denoised_int_astro) <= 1.0)
    assert_(np.min(denoised_int_astro) >= 0.0)
Example #41
def make_step(net, xy, step_size=1.5, end='fc8', clip=True, unit=None, denoise_weight=0.1, margin=0, w=224, h=224):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    
    dst = net.blobs[end]
    acts = net.forward(end=end)

    if end in fc_layers:
        fc = acts[end][0]
        best_unit = fc.argmax()
        best_act = fc[best_unit]
        obj_act = fc[unit]
        # print "unit: %s [%.2f], obj: %s [%.2f]" % (best_unit, fc[best_unit], unit, obj_act)
    
    one_hot = np.zeros_like(dst.data)
    
    if end in fc_layers:
      one_hot.flat[unit] = 1.
    elif end in conv_layers:
      one_hot[:, unit, xy, xy] = 1.
    else:
      raise Exception("Invalid layer type!")

    dst.diff[:] = one_hot

    net.backward(start=end)
    g = src.diff[0]

    # Mask out gradient to limit the drawing region
    if margin != 0:
      mask = np.zeros_like(g)

      for dx in range(0 + margin, w - margin):
        for dy in range(0 + margin, h - margin):
          mask[:, dx, dy] = 1
      g *= mask

    src.data[:] += step_size/np.abs(g).mean() * g

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias) 

    # Run a separate TV denoising process on the resultant image
    asimg = deprocess( net, src.data[0] ).astype(np.float64)
    denoised = denoise_tv_bregman(asimg, weight=denoise_weight, max_iter=100, eps=1e-3)

    src.data[0] = preprocess( net, denoised )
    
    # reset objective for next step
    dst.diff.fill(0.)

    return best_unit, best_act, obj_act
Example #42
 def filter_frames(self, data):
     data = data[0]
     logging.debug("Running Denoise")
     weight = self.parameters['weight']
     max_iter = self.parameters['max_iterations']
     eps = self.parameters['error_threshold']
     isotropic = self.parameters['isotropic']
     data = np.nan_to_num(data[0, ...])
     result = denoise_tv_bregman(data, weight, max_iter=max_iter,
                                 eps=eps, isotropic=isotropic)
     return result
Example #43
 def filter_frames(self, data):
     data = data[0]
     logging.debug("Running Denoise")
     weight = self.parameters['weight']
     max_iter = self.parameters['max_iterations']
     eps = self.parameters['error_threshold']
     isotropic = self.parameters['isotropic']
     data = np.nan_to_num(data[0, ...])
     result = denoise_tv_bregman(data, weight, max_iter=max_iter,
                                 eps=eps, isotropic=isotropic)
     return result
Example #44
def preprocess():
    labels = pd.read_csv('../data/trainLabels.csv', sep=',')
    X, y = [], np.array(labels.Class)

    for ID in labels.ID:
        original = imread('../data/trainResized/' + str(ID) +'.Bmp', as_grey=True)
        denoised = denoise_tv_bregman(original, 3)
        binarilized = threshold_adaptive(denoised, block_size=13, method='gaussian')
        feature = binarilized.reshape(1,400)[0]
        X.append(feature)
    X = np.array(X)
    return X, y
Example #45
def model_design(run_as_main=False):

    from skimage.data import imread
    from skimage.filters import threshold_adaptive
    from skimage.restoration import denoise_tv_bregman

    from sklearn.cross_validation import train_test_split, StratifiedKFold

    from sklearn.svm import SVC
    from sklearn.linear_model import LogisticRegression
    from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
    from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier

    labels = pd.read_csv('../data/trainLabels.csv', sep=',')
    X, y = [], np.array(labels.Class)


    for ID in labels.ID:
        original = imread('../data/trainResized/' + str(ID) +'.Bmp', as_grey=True)
        denoised = denoise_tv_bregman(original, 3)
        binarilized = threshold_adaptive(denoised, block_size=13, method='gaussian')
        feature = binarilized.reshape(1,400)[0]
        X.append(feature)
    X = np.array(X)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

    clf = AdaBoostClassifier(base_estimator=
                             ExtraTreesClassifier(
                                 n_estimators=500,
                                 criterion='entropy',
                                 class_weight='auto',
                                 n_jobs=-1
                             ), n_estimators=50)

    # clf = AdaBoostClassifier(base_estimator=
    #                          RandomForestClassifier(
    #                              n_estimators=500,
    #                              criterion='entropy',
    #                              class_weight='auto',
    #                              n_jobs=-1
    #                          ), n_estimators=20)

    clf.fit(X_train, y_train)
    print clf.score(X_test, y_test)
Example #46
cameraman_uni = sk.img_as_float(io.imread('../../cameraman_uni.bmp'))
cameraman_nor = sk.img_as_float(io.imread('../../cameraman_nor.bmp'))
cameraman_ric = sk.img_as_float(io.imread('../../cameraman_ric.bmp'))
cameraman_sp = sk.img_as_float(io.imread('../../cameraman_sp.bmp'))

baboon = sk.img_as_float(io.imread('../../baboon.bmp'))
baboon_uni = sk.img_as_float(io.imread('../../baboon_uni.bmp'))
baboon_nor = sk.img_as_float(io.imread('../../baboon_nor.bmp'))
baboon_ric = sk.img_as_float(io.imread('../../baboon_ric.bmp'))
baboon_sp = sk.img_as_float(io.imread('../../baboon_sp.bmp'))

# Define weight
w = 12

# Apply filters
lena_uni_f = sk.img_as_float(res.denoise_tv_bregman(lena_uni,12))
lena_nor_f = sk.img_as_float(res.denoise_tv_bregman(lena_nor,12))
lena_ric_f = sk.img_as_float(res.denoise_tv_bregman(lena_ric,12))
lena_sp_f = sk.img_as_float(res.denoise_tv_bregman(lena_sp,12))

cameraman_uni_f = sk.img_as_float(res.denoise_tv_bregman(cameraman_uni,12))
cameraman_nor_f = sk.img_as_float(res.denoise_tv_bregman(cameraman_nor,12))
cameraman_ric_f = sk.img_as_float(res.denoise_tv_bregman(cameraman_ric,12))
cameraman_sp_f = sk.img_as_float(res.denoise_tv_bregman(cameraman_sp,12))

baboon_uni_f = sk.img_as_float(res.denoise_tv_bregman(baboon_uni,12))
baboon_nor_f = sk.img_as_float(res.denoise_tv_bregman(baboon_nor,12))
baboon_ric_f = sk.img_as_float(res.denoise_tv_bregman(baboon_ric,12))
baboon_sp_f = sk.img_as_float(res.denoise_tv_bregman(baboon_sp,12))

# Calculate MSE
Example #47
img = color.rgb2gray(data.astronaut())
img += 0.5 * img.std() * np.random.randn(*img.shape)
img.clip(0., 1., img)
# %%
tic = time()
denoise_wie = wiener(img)
print('Wiener {:.1f} sec'.format(time()-tic))
tic = time()
denoise_med = medfilt2d(img)
print('Median {:.1f} sec'.format(time()-tic))
tic = time()
denoise_tvc = skres.denoise_tv_chambolle(img)
print('TV Chambolle {:.1f} sec'.format(time()-tic))
tic = time()
denoise_tvb = skres.denoise_tv_bregman(img, weight=10.)
print('TV Bregman {:.1f} sec'.format(time()-tic))
tic = time()
denoise_bil = skres.denoise_bilateral(img, multichannel=False)
print('Bilateral {:.1f} sec'.format(time()-tic))
# %%
fg, axs = subplots(2, 3, sharex=True, sharey=True, figsize=(12, 10))
axs = axs.ravel()
axs[0].imshow(img, cmap='gray')
axs[1].imshow(denoise_wie, cmap='gray')
axs[2].imshow(denoise_med, cmap='gray')
axs[3].imshow(denoise_tvc, cmap='gray')
axs[4].imshow(denoise_tvb, cmap='gray')
axs[5].imshow(denoise_bil, cmap='gray')

axs[0].set_title('Original noisy')