Example #1
def raw2phasecorr(arr_list,clip=0): #cv
    import cv2
    cx = 0.0
    cy = 0.0
    stb_arr_list=[]
    prev_frame = arr_list[0]
    prev_image = np.float32(restoration.denoise_tv_chambolle(prev_frame.astype('uint16'), weight=0.1, multichannel=True)) #ref
    for frame in arr_list:           
        image = np.float32(restoration.denoise_tv_chambolle(frame.astype('uint16'), weight=0.1, multichannel=True))
        # TODO: set window around phase correlation
        # NOTE: OpenCV >= 3 returns ((dx, dy), response) here, so the dp[0]/dp[1]
        # indexing below assumes the older OpenCV 2.4 API that returns (dx, dy).
        dp = cv2.phaseCorrelate(prev_image, image)
        cx = cx - dp[0]
        cy = cy - dp[1]
        xform = np.float32([[1, 0, cx], [0, 1, cy]])
        stable_image = cv2.warpAffine(frame.astype('float32'), xform, dsize=(image.shape[1], image.shape[0]))
        prev_image = image
        #clip sides
        ht,wd=np.shape(stable_image)
#         clip=0.125 #0.25
        lt=int(wd*clip)
        rt=int(wd-wd*clip)
        up=int(ht*clip)
        dw=int(ht-ht*clip)
        stable_image_clipped=stable_image[up:dw,lt:rt]
        stb_arr_list.append(stable_image_clipped)
    return stb_arr_list
Example #2
def phasecorr(imlist,imlist2=None,clip=0): #cv  [rowini,rowend,colini,colend]
    import cv2
    cx = 0.0
    cy = 0.0            
    imlist_stb=[]
    if imlist2 is not None:
        imlist2_stb=[]

    imi=0
    im_prev = imlist[0]
    im_denoised_prev = np.float32(restoration.denoise_tv_chambolle(im_prev.astype('uint16'), weight=0.1, multichannel=True)) #ref
    for im in imlist:           
        im_denoised = np.float32(restoration.denoise_tv_chambolle(im.astype('uint16'), weight=0.1, multichannel=True))
        # TODO: set window around phase correlation
        dp = cv2.phaseCorrelate(im_denoised_prev, im_denoised)
        cx = cx - dp[0]
        cy = cy - dp[1]
        xform = np.float32([[1, 0, cx], [0, 1, cy]])
        im_stb = cv2.warpAffine(im.astype('float32'), xform, dsize=(im_denoised.shape[1], im_denoised.shape[0]))
        imlist_stb.append(imclipper(im_stb,clip))

        if imlist2 is not None:
            im2=imlist2[imi]
            im2_stb=cv2.warpAffine(im2.astype('float32'), xform, dsize=(im_denoised.shape[1], im_denoised.shape[0]))
            imlist2_stb.append(imclipper(im2_stb,clip))

        im_denoised_prev = im_denoised
        imi+=1
    if imlist2 is not None:
        return imlist_stb,imlist2_stb
    else:
        return imlist_stb
Example #3
def denoising(astro):
	noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
	noisy = np.clip(noisy, 0, 1)
	fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 5), sharex=True,
						   sharey=True, subplot_kw={'adjustable': 'box-forced'})

	plt.gray()

	ax[0, 0].imshow(noisy)
	ax[0, 0].axis('off')
	ax[0, 0].set_title('noisy')
	ax[0, 1].imshow(denoise_tv_chambolle(noisy, weight=0.1, multichannel=True))
	ax[0, 1].axis('off')
	ax[0, 1].set_title('TV')
	ax[0, 2].imshow(denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15))
	ax[0, 2].axis('off')
	ax[0, 2].set_title('Bilateral')

	ax[1, 0].imshow(denoise_tv_chambolle(noisy, weight=0.2, multichannel=True))
	ax[1, 0].axis('off')
	ax[1, 0].set_title('(more) TV')
	ax[1, 1].imshow(denoise_bilateral(noisy, sigma_range=0.1, sigma_spatial=15))
	ax[1, 1].axis('off')
	ax[1, 1].set_title('(more) Bilateral')
	ax[1, 2].imshow(astro)
	ax[1, 2].axis('off')
	ax[1, 2].set_title('original')

	fig.tight_layout()

	plt.show()
Example #4
def test_denoise_tv_chambolle_multichannel():
    denoised0 = restoration.denoise_tv_chambolle(astro[..., 0], weight=0.1)
    denoised = restoration.denoise_tv_chambolle(astro, weight=0.1,
                                                multichannel=True)
    assert_equal(denoised[..., 0], denoised0)

    # tile astronaut subset to generate 3D+channels data
    astro3 = np.tile(astro[:64, :64, np.newaxis, :], [1, 1, 2, 1])
    # modify along tiled dimension to give non-zero gradient on 3rd axis
    astro3[:, :, 0, :] = 2*astro3[:, :, 0, :]
    denoised0 = restoration.denoise_tv_chambolle(astro3[..., 0], weight=0.1)
    denoised = restoration.denoise_tv_chambolle(astro3, weight=0.1,
                                                multichannel=True)
    assert_equal(denoised[..., 0], denoised0)
    def plot_preprocessed_image(self):
        """
        plots pre-processed image. The plotted image is the same as obtained at the end
        of the get_text_candidates method.
        """
        image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
        thresh = threshold_otsu(image)
        bw = closing(image > thresh, square(2))
        cleared = bw.copy()

        label_image = measure.label(cleared)
        borders = np.logical_xor(bw, cleared)

        label_image[borders] = -1
        image_label_overlay = label2rgb(label_image, image=image)

        fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
        ax.imshow(image_label_overlay)

        for region in regionprops(label_image):
            if region.area < 10:
                continue

            minr, minc, maxr, maxc = region.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='red', linewidth=2)
            ax.add_patch(rect)

        plt.show()
def detect_edges(image_array):
    """ Detect edges in a given image
    Takes a numpy.array representing an image,
    apply filters and edge detection and return a numpy.array

    Parameters
    ----------
    image_array : ndarray (2D)
        Image data to be processed. Detect edges on this 2D array representing the image

    Returns
    -------
    edges : ndarray (2D)
        Edges of an image.
    """
    #Transform image into grayscale
    img = rgb2gray(image_array)
    #Remove some noise from the image
    img = denoise_tv_chambolle(img, weight=0.55)
    #Apply canny
    edges = filter.canny(img, sigma=3.2)
    #Clear the borders
    clear_border(edges, 15)
    #Dilate edges to make them more visible and connected
    edges = binary_dilation(edges, selem=diamond(3))
    return edges
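For reference, the same steps can be written against current scikit-image module paths (canny moved from skimage.filter to skimage.feature; clear_border lives in skimage.segmentation). This is only a sketch, with skimage.data.astronaut() standing in for the input image:

from skimage import data
from skimage.color import rgb2gray
from skimage.restoration import denoise_tv_chambolle
from skimage.feature import canny
from skimage.segmentation import clear_border
from skimage.morphology import binary_dilation, diamond

image = data.astronaut()                      # RGB stand-in image
img = rgb2gray(image)                         # grayscale in [0, 1]
img = denoise_tv_chambolle(img, weight=0.55)  # remove some noise before edge detection
edges = canny(img, sigma=3.2)                 # boolean edge map
edges = clear_border(edges, buffer_size=15)   # drop edges touching the border
edges = binary_dilation(edges, diamond(3))    # thicken/connect the edges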
    def compute(self, src):
        image = img_as_ubyte(src)

        # denoise image
        denoised = denoise_tv_chambolle(image, weight=0.05)
        denoised_equalize= exposure.equalize_hist(denoised)

        # find continuous region (low gradient) --> markers
        markers = rank.gradient(denoised_equalize, disk(5)) < 10
        markers = ndi.label(markers)[0]

        # local gradient
        gradient = rank.gradient(denoised, disk(2))

        # labels
        labels = watershed(gradient, markers)

        # display results
        fig, axes = plt.subplots(2,3)
        axes[0, 0].imshow(image)#, cmap=plt.cm.spectral, interpolation='nearest')
        axes[0, 1].imshow(denoised, cmap=plt.cm.spectral, interpolation='nearest')
        axes[0, 2].imshow(markers, cmap=plt.cm.spectral, interpolation='nearest')
        axes[1, 0].imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest')
        axes[1, 1].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest', alpha=.7)
        plt.show()
Example #8
def test_denoise_tv_chambolle_weighting():
    # make sure a specified weight gives consistent results regardless of
    # the number of input image dimensions
    rstate = np.random.RandomState(1234)
    img2d = astro_gray.copy()
    img2d += 0.15 * rstate.standard_normal(img2d.shape)
    img2d = np.clip(img2d, 0, 1)

    # generate 4D image by tiling
    img4d = np.tile(img2d[..., None, None], (1, 1, 2, 2))

    w = 0.2
    denoised_2d = restoration.denoise_tv_chambolle(img2d, weight=w)
    denoised_4d = restoration.denoise_tv_chambolle(img4d, weight=w)
    assert_(measure.compare_ssim(denoised_2d,
                                 denoised_4d[:, :, 0, 0]) > 0.99)
Example #9
def denoise_image(input, output):
    kidney_image = io.imread(input)
    # estimate the noise in the image
    # do a test denoising using a total variation filter
    kidney_image_denoised_tv = restoration.denoise_tv_chambolle(
        kidney_image, weight=0.1)
    io.imsave(output, kidney_image_denoised_tv)
Example #10
def blur_predict(model, X, type="median", filter_size=3, sigma=1.0):
  
    if type == "median":
        blured_X = np.array(list(map(lambda x: ndimage.median_filter(x, filter_size), 
                                     X)))
    elif type == "gaussian":
        blured_X = np.array(list(map(lambda x: ndimage.gaussian_filter(x, filter_size),
                                     X)))
    elif type == "f_gaussian":
        blured_X = np.array(list(map(lambda x: filters.gaussian_filter(x.reshape((28, 28)), sigma=sigma).reshape(784),
                                     X))) 
    elif type == "tv_chambolle":
        blured_X = np.array(list(map(lambda x: restoration.denoise_tv_chambolle(x.reshape((28, 28)), weight=0.2).reshape(784),
                                     X)))
    elif type == "tv_bregman":
        blured_X = np.array(list(map(lambda x: restoration.denoise_tv_bregman(x.reshape((28, 28)), weight=5.0).reshape(784),
                                     X)))
    elif type == "bilateral":
        blured_X = np.array(list(map(lambda x: restoration.denoise_bilateral(np.abs(x).reshape((28, 28))).reshape(784),
                                     X)))
    elif type == "nl_means":
        blured_X = np.array(list(map(lambda x: restoration.nl_means_denoising(x.reshape((28, 28))).reshape(784),
                                     X)))
        
    elif type == "none":
        blured_X = X 

    else:
        raise ValueError("unsupported filter type", type)

    return predict(model, blured_X)
Example #11
def denoise_image(data, type=None):
    from skimage.restoration import denoise_tv_chambolle, denoise_bilateral

    if type == "tv":
        return denoise_tv_chambolle(data, weight=0.2, multichannel=True)

    return denoise_bilateral(data, sigma_range=0.1, sigma_spatial=15)
Example #12
File: projeto.py    Project: geogob/Python
def preProcessing(imGrayLevel):
    
    #Scope of some basic operations used in preprocessing:
    #..............................................
    h = ia.iahistogram(imGrayLevel) #Equalization...
    n = imGrayLevel.size
    T = 255./n * np.cumsum(h)
    T = T.astype(np.uint8)
    #..............................................
    T1 = np.arange(256)  # identity function
    T2 = ia.ianormalize(np.log(T1+30)) # logarithmic - enhances dark regions
    
    
    #T5 = ia.ianormalize(T1/50) # reduces the number of gray levels
    #..................................................
    
    ax1.imshow(imRGB)
    ax1.set_title('rgb')
    
    ax2.imshow(imGrayLevel, vmin=0, vmax=255, cmap=plt.cm.gray)
    ax2.set_title('gray level')
    
    imGrayLevel =  denoise_tv_chambolle(imGrayLevel, weight=0.1, multichannel=True)
    imGrayLevel = img_as_ubyte(imGrayLevel)  # Convert from float to uint8
    ax3.imshow(imGrayLevel, vmin=0, vmax=255, cmap=plt.cm.gray) # Texture-smoothing filter
    ax3.set_title('tv signal filter')
    
    realceNucleos = T2[T[imGrayLevel]] # Enhance dark regions of the equalized image
    ax4.imshow(realceNucleos, vmin=0, vmax=255, cmap=plt.cm.gray) 
    ax4.set_title('logaritimica')
    return realceNucleos
Example #13
def test_denoise_tv_chambolle_1d():
    """Apply the TV denoising algorithm on a 1D sinusoid."""
    x = 125 + 100 * np.sin(np.linspace(0, 8 * np.pi, 1000))
    x += 20 * np.random.rand(x.size)
    x = np.clip(x, 0, 255)
    res = restoration.denoise_tv_chambolle(x.astype(np.uint8), weight=0.1)
    assert_(res.dtype == np.float)
    assert_(res.std() * 255 < x.std())
def SaveImage(img, filename):
    logging.info('save output to %s', filename)
    out = PostprocessImage(img)
    if args.remove_noise != 0.0:
        out = denoise_tv_chambolle(out,
                                   weight=args.remove_noise,
                                   multichannel=True)
    io.imsave(filename, out)
def preprocess(X):
    progbar = Progbar(X.shape[0])  # progress bar for pre-processing status tracking

    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            X[i, j] = denoise_tv_chambolle(X[i, j], weight=0.1, multichannel=False)
        progbar.add(1)
    return X
Example #16
def separable_iterated_tv_chambolle(im, sigma_x=1, sigma_y=1, niters=5):
    if (sigma_x <= 0) and (sigma_y <= 0):
        return im
    im_w = np.copy(im)

    for i in range(niters):
        if sigma_y > 0:
            im_w = np.array([
                denoise_tv_chambolle(cv,
                                     mad_std(cv) * sigma_y) for cv in im_w.T
            ]).T  # columns
        if sigma_x > 0:
            im_w = np.array([
                denoise_tv_chambolle(rv,
                                     mad_std(rv) * sigma_x) for rv in im_w
            ])  # rows
    return im_w
Example #17
def test_denoise_tv_chambolle_1d():
    """Apply the TV denoising algorithm on a 1D sinusoid."""
    x = 125 + 100*np.sin(np.linspace(0, 8*np.pi, 1000))
    x += 20 * np.random.rand(x.size)
    x = np.clip(x, 0, 255)
    res = restoration.denoise_tv_chambolle(x.astype(np.uint8), weight=0.1)
    assert_(res.dtype == np.float)
    assert_(res.std() * 255 < x.std())
Example #18
def denoise_roi(roi, channel=None):
    """Apply further saturation, denoising, unsharp filtering, and erosion
    as image preprocessing for blob detection.

    Each step can be configured including turned off by
    :attr:`config.process_settings`.
    
    Args:
        roi: Region of interest as a 3D (z, y, x) array. Note that 4D arrays 
            with channels are not allowed as the Scikit-Image gaussian filter 
            only accepts specifically 3 channels, presumably for RGB.
        channel (List[int]): Sequence of channel indices in ``roi`` to
            saturate. Defaults to None to use all channels.
    
    Returns:
        Denoised region of interest.
    """
    multichannel, channels = setup_channels(roi, channel, 3)
    roi_out = None
    for chl in channels:
        roi_show = roi[..., chl] if multichannel else roi
        settings = config.get_roi_profile(chl)
        # find gross density
        saturated_mean = np.mean(roi_show)

        # further saturation
        denoised = np.clip(roi_show, settings["clip_min"],
                           settings["clip_max"])

        tot_var_denoise = settings["tot_var_denoise"]
        if tot_var_denoise:
            # total variation denoising
            denoised = restoration.denoise_tv_chambolle(denoised,
                                                        weight=tot_var_denoise)

        # sharpening
        unsharp_strength = settings["unsharp_strength"]
        if unsharp_strength:
            blur_size = 8
            # turn off multichannel since assume operation on single channel at
            # a time and to avoid treating as multichannel if 3D ROI happens to
            # have x size of 3
            blurred = filters.gaussian(denoised, blur_size, multichannel=False)
            high_pass = denoised - unsharp_strength * blurred
            denoised = denoised + high_pass

        # further erode denser regions to decrease overlap among blobs
        thresh_eros = settings["erosion_threshold"]
        if thresh_eros and saturated_mean > thresh_eros:
            #print("denoising for saturated mean of {}".format(saturated_mean))
            denoised = morphology.erosion(denoised, morphology.octahedron(1))
        if multichannel:
            if roi_out is None:
                roi_out = np.zeros(roi.shape, dtype=denoised.dtype)
            roi_out[..., chl] = denoised
        else:
            roi_out = denoised
    return roi_out
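The unsharp-masking step above (blur, subtract a scaled blur, add the high-pass back) can be illustrated in isolation. A minimal sketch, with skimage.data.camera() and the strength value as assumed stand-ins:

from skimage import data, img_as_float
from skimage.filters import gaussian

img = img_as_float(data.camera())          # grayscale stand-in in [0, 1]
unsharp_strength = 0.3                     # hypothetical strength setting
blurred = gaussian(img, 8)                 # low-pass version (blur_size = 8, as above)
high_pass = img - unsharp_strength * blurred
sharpened = img + high_pass                # edges boosted relative to the blurred image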
Example #19
def TVFilter_I(Im, tv_weight):
    """ In-place total variation filter for Image3D's """

    Im_np = Im.data.cpu().numpy().squeeze(0)
    Im_np = res.denoise_tv_chambolle(Im_np, weight=tv_weight)

    Im_data = torch.tensor(Im_np, device=Im.device, dtype=Im.dtype)
    Im.data = Im_data.unsqueeze(0).clone()
    return Im
Example #20
def denoise(img_dir, path):
    image = io.imread(path)
    filename = os.path.splitext(os.path.basename(path))[0]
    
    new_path_bregman = 'denoised_images/orig/bregman/' + img_dir + '/' + filename + '.png'
    io.imsave(new_path_bregman, np.clip(denoise_tv_bregman(image, weight=10), -1, 1))

    new_path_bregman = 'denoised_images/orig/chambolle/' + img_dir + '/' + filename + '.png'
    io.imsave(new_path_bregman, np.clip(denoise_tv_chambolle(image, weight=0.1, multichannel=True), -1, 1))
Example #21
def denoise(path):
    image = io.imread(path)
    filename = os.path.splitext(os.path.basename(path))[0]
    
    new_path_bregman = 'denoised_images/bregman/' + filename + '.jpg'
    io.imsave(new_path_bregman, denoise_tv_bregman(image, weight=10))

    new_path_bregman = 'denoised_images/chambolle/' + filename + '.jpg'
    io.imsave(new_path_bregman, denoise_tv_chambolle(image, weight=0.1, multichannel=True))
 def preprocess_image(self):
     """
     Denoises and increases contrast.
     """
     image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
     thresh = threshold_otsu(image)
     self.bw = closing(image > thresh, square(2))
     self.cleared = self.bw.copy()
     return self.cleared
Example #23
    def preProcessMLimg(self, image, smallest_size, lowest_region_intensity):
        """
        # =======Some preprocessing to get rid of junk parts and make image clearer.=========
        #
        # --smallest_size/biggest_size: cells size out of this range are ignored.
        # --lowest_region_intensity: cells with mean region intensity below this are ignored.
        # --cell_region_opening_factor: degree of opening operation on individual cell mask.
        # --cell_region_closing_factor: degree of closing operation on individual cell mask.
        #====================================================================================
        """
        openingfactor=2
        closingfactor=3
        binary_adaptive_block_size=335
        cell_region_opening_factor=1
        cell_region_closing_factor=2
        
        image = denoise_tv_chambolle(image, weight=0.01) # Denoise the image.
      
        # -----------------------------------------------Set background to 0-----------------------------------------------
        AdaptiveThresholding = threshold_local(image, binary_adaptive_block_size, offset=0)
        BinaryMask = image >= AdaptiveThresholding
        OpeningBinaryMask = opening(BinaryMask, square(int(openingfactor)))
        RegionProposal_Mask = closing(OpeningBinaryMask, square(int(closingfactor)))
        
        clear_border(RegionProposal_Mask)
        # label image regions, prepare for regionprops
        label_image = label(RegionProposal_Mask)  
        FinalpreProcessROIMask = np.zeros((image.shape[0], image.shape[1]))
        
        for region in regionprops(label_image,intensity_image = image): 
            
            # skip small images
            if region.area > smallest_size and region.mean_intensity > lowest_region_intensity:
                
                # draw rectangle around segmented coins
                minr, minc, maxr, maxc = region.bbox

                bbox_area = (maxr-minr)*(maxc-minc)
                # Based on the boundingbox for each cell from first image in the stack, raw image of slightly larger region is extracted from each round.
                RawRegionImg = image[minr:maxr, minc:maxc] # Raw region image 

                #---------Get the cell filled mask-------------
                filled_mask_bef, MeanIntensity_Background = imageanalysistoolbox.get_cell_filled_mask(RawRegionImg = RawRegionImg, region_area = bbox_area*0.2, 
                                                                                                      cell_region_opening_factor = cell_region_opening_factor, 
                                                                                                      cell_region_closing_factor = cell_region_closing_factor)

                filled_mask_convolve2d = imageanalysistoolbox.smoothing_filled_mask(RawRegionImg, filled_mask_bef = filled_mask_bef, region_area = bbox_area*0.2, threshold_factor = 1.1)
                #----------Put region mask back to original image.-----------
                preProcessROIMask = np.zeros((image.shape[0], image.shape[1]))
                preProcessROIMask[minr:maxr, minc:maxc] = filled_mask_convolve2d
                
                FinalpreProcessROIMask += preProcessROIMask
                
        FinalpreProcessROIMask = np.where(FinalpreProcessROIMask > 1, 1, FinalpreProcessROIMask)
        ClearedImg = FinalpreProcessROIMask * image
        
        return ClearedImg
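A self-contained sketch of the adaptive-threshold region-proposal idea the docstring describes, using skimage.data.coins() as a stand-in image and made-up cut-offs in place of smallest_size and lowest_region_intensity:

from skimage import data, img_as_float
from skimage.restoration import denoise_tv_chambolle
from skimage.filters import threshold_local
from skimage.morphology import opening, closing, square
from skimage.measure import label, regionprops

image = img_as_float(data.coins())                    # stand-in for a cell image
image = denoise_tv_chambolle(image, weight=0.01)      # light TV denoising, as above
adaptive = threshold_local(image, block_size=35, offset=0)
mask = image >= adaptive                              # adaptive binarization
mask = closing(opening(mask, square(2)), square(3))   # clean up the mask
labels = label(mask)
# keep only regions passing hypothetical size / mean-intensity cut-offs
kept = [r for r in regionprops(labels, intensity_image=image)
        if r.area > 300 and r.mean_intensity > 0.3]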
Example #24
 def preprocess_image(self):
     """
     Denoises and increases contrast. 
     """
     image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
     thresh = threshold_otsu(image)
     self.bw = closing(image > thresh, square(2))
     self.cleared = self.bw.copy()
     return self.cleared 
 def preprocess_image(self):
     # Total-variation denoising
     image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
     # Return threshold value based on Otsu's method
     thresh = threshold_otsu(image)
     # Increases contrast
     self.bw = closing(image <= thresh, square(1))
     self.cleared = self.bw.copy()
     return self.cleared
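The same denoise / Otsu-threshold / closing sequence as a self-contained sketch, with skimage.data.page() standing in for self.image:

from skimage import data, img_as_float
from skimage.restoration import denoise_tv_chambolle
from skimage.filters import threshold_otsu
from skimage.morphology import closing, square

image = img_as_float(data.page())                  # grayscale stand-in image
image = denoise_tv_chambolle(image, weight=0.1)    # total-variation denoising
thresh = threshold_otsu(image)                     # global Otsu threshold
bw = closing(image > thresh, square(2))            # binarize and close small gaps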
Example #26
def get_image10():

    noisy = denoise_tv_chambolle(io.imread(request.args.get('link')), weight=float(request.args.get('weight')), multichannel=True)


    skimage.io.imsave('test_noise.png', noisy)
    filename = 'test_noise.png'

    return send_file(filename, mimetype='image/gif')
Example #27
def deoniseTvChambolle():
    img = color.rgb2gray(data.astronaut())[:50, :50]
    imgO = img.copy()
    img += 0.5 * img.std() * np.random.randn(*img.shape)
    imgN = img.copy()
    denoised_img = denoise_tv_chambolle(img, weight=60)
    imgR = denoised_img.copy()

    return [imgO, imgN, imgR]
Example #28
def saveBadScene(scene, weight):
    gt, formula = loadScene(scene); 
    Dd = denoise_tv_chambolle(formula, weight, multichannel=False)
    vmin, vmax = np.min(gt), np.max(gt); 
    plt.figure(figsize=(14,3)); 
    plt.subplot(131); plt.imshow(formula, interpolation='nearest', vmin=vmin, vmax=vmax); plt.title("Baseline"); plt.colorbar(); plt.axis('off'); 
    plt.subplot(132); plt.imshow(Dd, interpolation='nearest', vmin=vmin, vmax=vmax); plt.title("Total Variation"); plt.colorbar(); plt.axis('off'); 
    plt.subplot(133); plt.imshow(gt, interpolation='nearest', vmin=vmin, vmax=vmax); plt.title("Ground Truth %s"%scene); plt.axis('off'); plt.colorbar(); 
    plt.savefig('%s_TV_bad.png'%scene, bbox_inches='tight')
Example #29
 def updateImg(self):
     self.Tile = self.Image[self.index[0]:self.index[1],self.index[2]:self.index[3]]
     if len(self.Tile.shape)==3:
         self.Tile=npy.sum(self.Tile,axis=2)/3
     self.Tile = (self.Tile-npy.min(self.Tile))*255.0/(npy.max(self.Tile)-npy.min(self.Tile))
     self.Tile = self.Tile.astype(npy.uint8)
     self.Tile = npy.transpose(self.Tile,axes=[1,0])
     #self.Tile = restoration.denoise_bilateral(self.Tile,win_size=5,sigma_spatial=100,sigma_range=0.5)
     self.Tile = restoration.denoise_tv_chambolle(self.Tile, weight=0.1, multichannel=False,n_iter_max=20)
Example #30
def preprocess(X):
    "Pre-process images that are fed to neural network"
    progbar = Progbar(X.shape[0])  # progress bar for pre-processing status tracking

    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            # The denoising weight is the regularization parameter
            X[i, j] = denoise_tv_chambolle(X[i, j], weight=0.1, multichannel=False)
        progbar.add(1)
    return X
Example #31
    def _watershed(img, number, region, denoising_weight, sigma, truncate,
                   min_distance, compactness):
        """
        Private function to run the watershed segmentation for a prepared image.

        :param img: The image to run the watershed segmentation
        :type img: Numpy array
        :param number: The unique tile number used for logging
        :type number: Integer
        :param region: The region for with the watershed segmentation is executed. (Used for logging)
        :type region: String
        :param denoising_weight: The weight factor for denoising the image before watershed segmentation. See: https://scikit-image.org/docs/dev/api/skimage.restoration.html#skimage.restoration.denoise_tv_chambolle
        :type denoising_weight: Float
        :param sigma: Sigma value for gaussian blurring the image before watershed segmentation. See: https://scikit-image.org/docs/dev/api/skimage.filters.html#skimage.filters.gaussian
        :type sigma: Float
        :param truncate: Truncation value for gaussian blurring the image before watershed segmentation. See: https://scikit-image.org/docs/dev/api/skimage.filters.html#skimage.filters.gaussian
        :type truncate: Float
        :param min_distance: Minimum distance for local maxima representing tree tops.
        :type min_distance: Float
        :param compactness: Compactness of a watershed basin: See https://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.watershed
        :type compactness: Float
        :return: The image containing the labeled areas for individual trees.
        :rtype: Numpy array
        """
        if denoising_weight != 0:
            denoised = denoise_tv_chambolle(img, weight=denoising_weight)
            log.debug("Denoised {} area {}".format(region, number))
        else:
            denoised = img

        # Gauss filter
        gauss = gaussian(denoised,
                         sigma,
                         mode='constant',
                         cval=0,
                         preserve_range=True,
                         truncate=truncate)
        log.debug("Gaussian smoothed {} area {}".format(region, number))

        # Create a mask so the segmentation will only occur on the trees
        mask = numpy.copy(img)
        mask[mask != 0] = 1

        # Local maxima
        local_max = peak_local_max(gauss,
                                   indices=False,
                                   min_distance=min_distance,
                                   exclude_border=False)
        markers = label(local_max)
        log.debug("Peaked local maxima in {} area {}".format(region, number))

        # watershed
        labels = watershed(gauss, markers, mask=mask, compactness=compactness)
        log.debug("Applied watershed delineation in {} area {}".format(
            region, number))
        return labels
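A small self-contained sketch of the denoise / gaussian / peak_local_max / watershed chain the docstring describes, run on a synthetic two-peak image. It assumes a recent scikit-image where watershed lives in skimage.segmentation and peak_local_max returns peak coordinates; all numeric values are illustrative only:

import numpy as np
from skimage.restoration import denoise_tv_chambolle
from skimage.filters import gaussian
from skimage.feature import peak_local_max
from skimage.measure import label
from skimage.segmentation import watershed

rng = np.random.default_rng(0)
yy, xx = np.mgrid[0:200, 0:200]
img = (np.exp(-((yy - 60) ** 2 + (xx - 60) ** 2) / (2 * 15 ** 2))
       + np.exp(-((yy - 140) ** 2 + (xx - 150) ** 2) / (2 * 15 ** 2)))
img += 0.05 * rng.standard_normal(img.shape)        # synthetic noisy "height" image
img = np.clip(img, 0, None)

denoised = denoise_tv_chambolle(img, weight=0.05)
gauss = gaussian(denoised, sigma=2, mode='constant', cval=0,
                 preserve_range=True, truncate=4.0)

coords = peak_local_max(gauss, min_distance=10, threshold_abs=0.3,
                        exclude_border=False)        # (row, col) of local maxima
local_max = np.zeros(gauss.shape, dtype=bool)
local_max[tuple(coords.T)] = True
markers = label(local_max)                           # one marker per peak

mask = gauss > 0.05                                  # segment only where there is signal
labels = watershed(gauss, markers, mask=mask, compactness=0.01)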
Example #32
    def plot_preprocessed_image(self):
        '''plots pre-processed image and returns crops of chars.'''
        
        rects = []
        image = restoration.denoise_tv_chambolle(self.image, weight=0.2)
        thresh = threshold_otsu(image)
        bw = closing(image > thresh, square(2))
        cleared = bw.copy()
        
        label_image = measure.label(cleared)
        #borders = np.logical_xor(bw, cleared)
       
        #label_image[borders] = -1
        #image_label_overlay = label2rgb(label_image, image=image)
        
        #fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
        #ax.imshow(image_label_overlay)
        
        for region in regionprops(label_image):
            if region.area < 10:
                continue
        
            minr, minc, maxr, maxc = region.bbox

            if minr >= 2:
                minr -= 2
            if minc >= 2:
                minc -= 3
            maxr += 1
            maxc += 2
            
            rects.append([minc, minr, maxc, maxr]) #EXTREMELY IMPORTANT LINE, [x1, y1, x2, y2]
            
            #rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      #fill=False, edgecolor='red', linewidth=2)
            #ax.add_patch(rect)
        
        #plt.show()

        def is_nested(rects, i, j):
            return (rects[j][0] >= rects[i][0] and rects[j][2] <= rects[i][2] and rects[j][1] >= rects[i][1] and rects[j][3] <= rects[i][3]) or (rects[j][0] <= rects[i][0] and rects[j][2] >= rects[i][2] and rects[j][1] <= rects[i][1] and rects[j][3] >= rects[i][3])
        
        # makes list of indices of nested rectangles in rects
        deletes = []
        for i in range(len(rects)-1):
            for j in range(i+1, len(rects)):
                if (is_nested(rects, i, j)):
                    deletes.append(j)
        # deletes nested rectangles in rects
        num_dels = 0
        for d in deletes:
            del rects[d-num_dels]
            num_dels += 1
            print('successful deletion')
        
        return rects
def reconstruct(img,
                drop_rate,
                recons,
                weight,
                drop_rate_post=0,
                lab=False,
                verbose=False,
                input_filepath=''):
    assert torch.is_tensor(img)
    temp = np.rollaxis(img.numpy(), 0, 3)
    w = np.ones_like(temp)
    if drop_rate > 0:
        # independent channel/pixel salt and pepper
        temp2 = random_noise(temp, 's&p', amount=drop_rate, salt_vs_pepper=0)
        # per-pixel all channel salt and pepper
        r = temp2 - temp
        w = (np.absolute(r) < 1e-6).astype('float')
        temp = temp + r
    if lab:
        temp = color.rgb2lab(temp)
    if recons == 'none':
        temp = temp
    elif recons == 'chambolle':
        temp = denoise_tv_chambolle(temp, weight=weight, multichannel=True)
    elif recons == 'bregman':
        if drop_rate == 0:
            temp = denoise_tv_bregman(temp, weight=1 / weight, isotropic=True)
        else:
            temp = minimize_tv_bregman(temp,
                                       w,
                                       weight=1 / weight,
                                       gsiter=10,
                                       eps=0.01,
                                       isotropic=True)
    elif recons == 'tvl2':
        temp = minimize_tv(temp,
                           w,
                           lam=weight,
                           p=2,
                           solver='L-BFGS-B',
                           verbose=verbose)
    elif recons == 'tvinf':
        temp = minimize_tv_inf(temp,
                               w,
                               tau=weight,
                               p=2,
                               solver='L-BFGS-B',
                               verbose=verbose)
    else:
        print('unsupported reconstruction method ' + recons)
        exit()
    if lab:
        temp = color.lab2rgb(temp)
    # temp = random_noise(temp, 's&p', amount=drop_rate_post, salt_vs_pepper=0)
    temp = torch.from_numpy(np.rollaxis(temp, 2, 0)).float()
    return temp
Example #34
def test_denoise_tv_chambolle_float_result_range():
    # lena image
    img = lena_gray
    int_lena = np.multiply(img, 255).astype(np.uint8)
    assert np.max(int_lena) > 1
    denoised_int_lena = restoration.denoise_tv_chambolle(int_lena, weight=60.0)
    # test if the value range of output float data is within [0.0:1.0]
    assert denoised_int_lena.dtype == np.float
    assert np.max(denoised_int_lena) <= 1.0
    assert np.min(denoised_int_lena) >= 0.0
Example #35
def TV_Chambolle(images, factor):
    augmented_images = []
    for i in range(len(images)):
        sigma_est = estimate_sigma(
            images[i], multichannel=True, average_sigmas=True) / 100
        tv_denoised = denoise_tv_chambolle(images[i], sigma_est * factor)
        tv_denoised = (255 * tv_denoised).astype(np.uint8)
        augmented_images.append(tv_denoised)
        del tv_denoised
    return np.asarray(augmented_images)
Example #36
    def denoiseImage(self, sg, thr=1.2):
        from skimage.restoration import (denoise_tv_chambolle,
                                         denoise_bilateral, denoise_wavelet,
                                         estimate_sigma)
        sigma_est = estimate_sigma(sg, multichannel=False, average_sigmas=True)
        sgnew = denoise_tv_chambolle(sg, weight=0.2, multichannel=False)
        #sgnew = denoise_bilateral(sg, sigma_color=0.05, sigma_spatial=15, multichannel=False)
        #sgnew = denoise_wavelet(sg, multichannel=False)

        return sgnew
Example #37
 def tv_chambolle(self, weight=0.1, eps=0.0002, max_iter=200):
     denoised = [
         R.denoise_tv_chambolle(np.array(item, np.float32),
                                weight=weight,
                                eps=eps,
                                n_iter_max=max_iter,
                                multichannel=True) for item in self.img
     ]
     return [[cv2.cvtColor(np.array(item, np.uint8), cv2.COLOR_RGB2BGR)]
             for item in denoised]
def denoiseTV_Chambolle(imagen,multichannel):
    """
    -Tends to produce cartoon-like images.
    -Minimizes the total variation of the image.
    """
    noisy = img_as_float(imagen)

    # Positional arguments map to weight=7, eps=9, n_iter_max=0.08 in the
    # scikit-image signature; keyword arguments would make the intent clearer.
    denoise = denoise_tv_chambolle(noisy, 7, 9, 0.08,multichannel)

    return denoise
Example #39
def metric_denoise_tv_chambolle(input, truth):

    filtered = denoise_tv_chambolle(input)
    filtered = filtered.astype(np.float32)
    mse = np.sum((filtered - truth)**2) / input.size
    struct_sim = ssim(filtered,
                      truth,
                      data_range=filtered.max() - filtered.min())

    return mse, struct_sim
Example #41
    def __call__(self, sample):
        image, mask = sample

        if np.random.rand() > self.prob:
            return image, mask

        weight = np.random.uniform(low=0, high=self.denoise_weight)
        image = denoise_tv_chambolle(image, weight=weight, multichannel=True)

        return image, mask
Example #42
def iterativeBackPropagation(hrImage, lrImages, lrMasks, transforms, H,
                             itermax, interpOrder):

    #Convert LR images to a list of vectors
    y = []
    for i in range(len(lrImages)):
        y.append(
            convert_image_to_vector(lrImages[i]) *
            convert_image_to_vector(lrMasks[i]))

    #Convert HR Image to vector
    x = convert_image_to_vector(hrImage)
    outputImage = nibabel.Nifti1Image(hrImage.get_data(), hrImage.affine)

    #Compute HR mask
    hrMaskSum = np.zeros(hrImage.get_data().shape, dtype=np.float32)
    for i in range(len(lrImages)):
        tmp1 = apply_affine_itk_transform_on_image(input_image=lrMasks[i],
                                                   transform=transforms[i][0],
                                                   center=transforms[i][1],
                                                   reference_image=hrImage,
                                                   order=0)
        hrMaskSum += tmp1.get_data()

    #index = np.nonzero(hrMaskSum)

    for j in range(itermax):

        error = ibpComputeError(x, H, y,
                                nibabel.Nifti1Image(hrMaskSum, hrImage.affine),
                                lrImages, transforms, interpOrder)
        #    #simulation and error computation
        #    hrError = np.zeros(hrImage.get_data().shape, dtype=np.float32)
        #
        #    for i in range(len(lrImages)):
        #      lrError = convert_vector_to_image(H[i].dot(x)-y[i], lrImages[i])
        #      tmp2 = apply_affine_itk_transform_on_image(input_image = lrError, transform=transforms[i][0], center=transforms[i][1], reference_image=hrImage, order=interpOrder)
        #      hrError += tmp2.get_data()
        #
        #    hrError2 = np.zeros(hrImage.get_data().shape, dtype=np.float32)
        #    hrError2[index] = hrError[index] / hrMaskSum[index]
        #
        #filter error map
        from skimage.restoration import denoise_tv_chambolle
        hrError2 = denoise_tv_chambolle(error, weight=5)

        #update hr image and x
        outputImage = nibabel.Nifti1Image(outputImage.get_data() - hrError2,
                                          hrImage.affine)
        nibabel.save(nibabel.Nifti1Image(hrError2, hrImage.affine),
                     'error_iter' + str(j) + '.nii.gz')
        nibabel.save(outputImage, 'ibp_iter' + str(j) + '.nii.gz')
        x = convert_image_to_vector(outputImage)

    return outputImage
Example #43
def pipeline(image,
             shape: tuple = None,
             smoothing: float = 0.0,
             denoising: float = 0.0,
             with_hog_attached: bool = False,
             with_dominant_color_attached: bool = False,
             pixels_per_cell: tuple = (3, 3),
             cells_per_block: tuple = (5, 5),
             orientations: int = 9,
             converter=None,
             debug=False):
    if shape and len(shape) > 1:
        image = resize(image, shape, anti_aliasing=True, mode='reflect')

    original = image
    if image.shape[-1] == 4:
        image = image[:, :, :3]
    if smoothing:
        image = gaussian(image, sigma=smoothing, multichannel=False)
    if denoising:
        image = denoise_tv_chambolle(image,
                                     weight=denoising,
                                     multichannel=True)
    converted = None
    if callable(converter):
        image = converter(image)
        converted = image
    if with_dominant_color_attached:
        multichannel = True if len(
            image.shape) == 3 and image.shape[-1] > 1 else False
        fd, show = hog(image,
                       orientations=orientations,
                       pixels_per_cell=pixels_per_cell,
                       cells_per_block=cells_per_block,
                       visualize=True,
                       multichannel=multichannel,
                       block_norm="L2-Hys")
        dom_color = dominant_color(image, k=3 if multichannel else 2)
        if debug:
            plot_debug(converted, converter, dom_color, original, shape, show)
        image = np.concatenate((dom_color, fd), axis=None)
    elif with_hog_attached:
        fd, show = hog(image,
                       orientations=orientations,
                       pixels_per_cell=pixels_per_cell,
                       cells_per_block=cells_per_block,
                       visualize=True,
                       multichannel=True,
                       block_norm="L2-Hys")
        if debug:
            plt.imshow(show)
        image = np.concatenate((image.flatten(), fd), axis=None)
    image = image.flatten()
    return image
def total_variation_filtering(file_path, output_name, weight):
    img = img_as_float(io.imread(file_path))
    img_denoised = denoise_tv_chambolle(img, weight, multichannel=True)
    img_denoised_ubyte = img_as_ubyte(img_denoised)

    if len(img_denoised_ubyte.shape) > 2 and img_denoised_ubyte.shape[2] == 4:
        img_denoised_ubyte = cv2.cvtColor(img_denoised_ubyte,
                                          cv2.COLOR_BGRA2BGR)

    output_folder_path = OUTPUT_FOLDER_NAME + "\\tv_filter\\"
    plt.imsave(output_folder_path + output_name, img_denoised_ubyte)
Example #45
def test_denoise_tv_chambolle_float_result_range():
    # astronaut image
    img = astro_gray
    int_astro = np.multiply(img, 255).astype(np.uint8)
    assert_(np.max(int_astro) > 1)
    denoised_int_astro = restoration.denoise_tv_chambolle(int_astro,
                                                          weight=0.1)
    # test if the value range of output float data is within [0.0:1.0]
    assert_(denoised_int_astro.dtype == np.float)
    assert_(np.max(denoised_int_astro) <= 1.0)
    assert_(np.min(denoised_int_astro) >= 0.0)
Example #47
 def denoise_image(self, img_noise, method='nlm'):
     if method == 'nlm':
         img_denoised = restoration.denoise_nl_means(
             img_noise.reshape(self.data_info['origin_shape'])).reshape(
                 img_noise.shape)
     elif method == 'tv':
         img_denoised = restoration.denoise_tv_chambolle(
             img_noise.reshape(self.data_info['origin_shape']))
     elif method == 'mpo':
         img_denoised = self.denoise_image_mpo(img_noise)
     return img_denoised
Example #48
def BF_proc(newimg, backimg, FilterSts, FilterOrder, FilterSelect, back_sbs):

    if back_sbs == 2 and FilterSts == 2:
        if FilterSelect == 'Median':
            ImageFinal = ndimage.median_filter(np.subtract(newimg, backimg),
                                               FilterOrder)
        elif FilterSelect == 'Gaussian':
            ImageFinal = ndimage.gaussian_filter(np.subtract(newimg, backimg),
                                                 FilterOrder)
        elif FilterSelect == 'Uniform':
            ImageFinal = ndimage.uniform_filter(np.subtract(newimg, backimg),
                                                FilterOrder)
        elif FilterSelect == 'Denoise TV':
            ImageFinal = denoise_tv_chambolle(np.subtract(newimg, backimg),
                                              weight=FilterOrder,
                                              multichannel=True)
        elif FilterSelect == 'Bilateral':
            ImageFinal = denoise_bilateral(np.subtract(newimg, backimg),
                                           sigma_range=FilterOrder,
                                           sigma_spatial=15)
    elif back_sbs == 2 and FilterSts == 0:
        ImageFinal = np.subtract(newimg, backimg)
    elif back_sbs == 0 and FilterSts == 2:
        if FilterSelect == 'Median':
            ImageFinal = ndimage.median_filter(newimg, FilterOrder)
        elif FilterSelect == 'Gaussian':
            ImageFinal = ndimage.gaussian_filter(newimg, FilterOrder)
        elif FilterSelect == 'Uniform':
            ImageFinal = ndimage.uniform_filter(newimg, FilterOrder)
        elif FilterSelect == 'Denoise TV':
            ImageFinal = denoise_tv_chambolle(newimg,
                                              weight=FilterOrder,
                                              multichannel=True)
        elif FilterSelect == 'Bilateral':
            ImageFinal = denoise_bilateral(newimg,
                                           sigma_range=FilterOrder,
                                           sigma_spatial=15)
    elif back_sbs == 0 and FilterSts == 0:
        ImageFinal = newimg

    return ImageFinal
Example #49
def test_denoise_tv_chambolle_3d():
    """Apply the TV denoising algorithm on a 3D image representing a sphere."""
    x, y, z = np.ogrid[0:40, 0:40, 0:40]
    mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
    mask = 100 * mask.astype(np.float)
    mask += 60
    mask += 20 * np.random.rand(*mask.shape)
    mask[mask < 0] = 0
    mask[mask > 255] = 255
    res = restoration.denoise_tv_chambolle(mask.astype(np.uint8), weight=0.1)
    assert_(res.dtype == np.float)
    assert_(res.std() * 255 < mask.std())
Example #50
File: ibp.py    Project: rousseau/fbrain
def iterativeBackPropagation(hrImage, lrImages, lrMasks, transforms, H, itermax, interpOrder):

    # Convert LR images to a list of vectors
    y = []
    for i in range(len(lrImages)):
        y.append(convert_image_to_vector(lrImages[i]) * convert_image_to_vector(lrMasks[i]))

    # Convert HR Image to vector
    x = convert_image_to_vector(hrImage)
    outputImage = nibabel.Nifti1Image(hrImage.get_data(), hrImage.affine)

    # Compute HR mask
    hrMaskSum = np.zeros(hrImage.get_data().shape, dtype=np.float32)
    for i in range(len(lrImages)):
        tmp1 = apply_affine_itk_transform_on_image(
            input_image=lrMasks[i],
            transform=transforms[i][0],
            center=transforms[i][1],
            reference_image=hrImage,
            order=0,
        )
        hrMaskSum += tmp1.get_data()

    # index = np.nonzero(hrMaskSum)

    for j in range(itermax):

        error = ibpComputeError(
            x, H, y, nibabel.Nifti1Image(hrMaskSum, hrImage.affine), lrImages, transforms, interpOrder
        )
        #    #simulation and error computation
        #    hrError = np.zeros(hrImage.get_data().shape, dtype=np.float32)
        #
        #    for i in range(len(lrImages)):
        #      lrError = convert_vector_to_image(H[i].dot(x)-y[i], lrImages[i])
        #      tmp2 = apply_affine_itk_transform_on_image(input_image = lrError, transform=transforms[i][0], center=transforms[i][1], reference_image=hrImage, order=interpOrder)
        #      hrError += tmp2.get_data()
        #
        #    hrError2 = np.zeros(hrImage.get_data().shape, dtype=np.float32)
        #    hrError2[index] = hrError[index] / hrMaskSum[index]
        #
        # filter error map
        from skimage.restoration import denoise_tv_chambolle

        hrError2 = denoise_tv_chambolle(error, weight=5)

        # update hr image and x
        outputImage = nibabel.Nifti1Image(outputImage.get_data() - hrError2, hrImage.affine)
        nibabel.save(nibabel.Nifti1Image(hrError2, hrImage.affine), "error_iter" + str(j) + ".nii.gz")
        nibabel.save(outputImage, "ibp_iter" + str(j) + ".nii.gz")
        x = convert_image_to_vector(outputImage)

    return outputImage
Example #52
def preprocess1(X,weight=0.1):
    """
    Pre-process images that are fed to neural network.
    :param X: X
    """
    progbar = Progbar(X.shape[0])  # progress bar for pre-processing status tracking

    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            X[i, j] = denoise_tv_chambolle(X[i, j], weight=weight, multichannel=False)
        progbar.add(1)
    return X
Example #53
	def handle_task(self, gearman_worker, gearman_job):
		model_name = (gearman_job.data[:100].split('_')[0]).strip()
		data = gearman_job.data[100:]

		stream = io.BytesIO(data)
		
		logging.info('model({}) is callbacked'.format(model_name))
		start = time.time()
		image = self.xp.asarray(Image.open(stream).convert('RGB'),
					dtype=self.xp.float32).transpose(2, 0, 1)
		image = image.reshape((1,) + image.shape)
		x = Variable(image)

		if (self.models.has_key(model_name)):
			# y = self.model(x)
			y = self.models[model_name](x)
		else:
			return ''

		result = cuda.to_cpu(y.data)
		result = result.transpose(0, 2, 3, 1)
		result = result.reshape((result.shape[1:]))
		#result = np.uint8(result)
		result = result/255.0

		logging.info('{} runtime: {}s'.format(model_name, time.time() - start))

		# Apply optional denoising
		denoise_rate = self.denoise.get(model_name, self.args.denoise)
		logging.info('{} denoise {}'.format(model_name, denoise_rate))
		if denoise_rate > 0.001:
			denoise_time = time.time()
			denoise_rate = 1.0 if denoise_rate>1.0 else denoise_rate
			result = denoise_tv_chambolle(result, weight=denoise_rate, 
							multichannel=True, 
							n_iter_max = 4)
			logging.info('{} denoise runtime {}'.format(model_name, time.time()-denoise_time))
		result = np.uint8(result*255)

		import ipdb;ipdb.set_trace()
		# Write the image to a temp file, then read it back
		# file_name = '/tmp/' + str(os.getuid()) + str(int(time.time())) + '.jpg'
		file_name = '/tmp/' + str(self.args.gpu) + str(int(time.time() * 10000)) + '.jpg'
		Image.fromarray(result).save(file_name)
		
		with open(file_name, 'r') as pf:
			content = pf.read()
		
		cmd = 'rm -f {file_name}'.format(file_name=file_name)
		commands.getstatusoutput(cmd)
		
		return content
Example #54
def readChestCTFile(f):

    # Read the file into an array
    array = np.frombuffer(f.getvalue(), dtype='uint8') # or use uint16?
    img = cv2.imdecode(array, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    imarray = np.array(img)

    imarray = denoise_tv_chambolle(imarray, weight=0.001, multichannel=False)
    imarray = (imarray*255).astype('uint8')

    slice3D = np.expand_dims(imarray, axis=2)
    
    return slice3D
Example #55
def findGoldMask(data, options):
	data = denoise_tv_chambolle(data, weight=0.8, multichannel=False)
	data = exposure.rescale_intensity(data, out_range=(-1, 1))
	thresh = threshold_otsu(data)
	sigma1 = numpy.std(data[ numpy.where(data<thresh) ])
	sigma2 = numpy.std(data[ numpy.where(data>thresh) ])
	markers = numpy.zeros(data.shape, dtype=numpy.uint)
	markers[data < thresh-0.5*sigma1] = 1
	markers[data > thresh+0.5*sigma2] = 2
	labels = random_walker(data, markers, beta=10, mode='bf')
	labels[labels != 2]=0
	labels[labels == 2]=1
	
	return labels
Example #56
def test_denoise_tv_chambolle_3d():
    """Apply the TV denoising algorithm on a 3D image representing a sphere."""
    x, y, z = np.ogrid[0:40, 0:40, 0:40]
    mask = (x - 22) ** 2 + (y - 20) ** 2 + (z - 17) ** 2 < 8 ** 2
    mask = 100 * mask.astype(np.float)
    mask += 60
    mask += 20 * np.random.random(mask.shape)
    mask[mask < 0] = 0
    mask[mask > 255] = 255
    res = restoration.denoise_tv_chambolle(mask.astype(np.uint8), weight=100)
    assert res.dtype == np.float
    assert res.std() * 255 < mask.std()

    # test wrong number of dimensions
    assert_raises(ValueError, restoration.denoise_tv_chambolle, np.random.random((8, 8, 8, 8)))
Example #57
def denoiseMaps(imgs, wt = 0.05, ep = 0.02, cycles = 200):
    """denoiseMaps(imgs, wt = 0.05, ep = 0.02, cycles = 200)
    
    A wrapper for denoise_tv_chambolle
    
    returns
    
    A list of denoised images
    """
    from skimage.restoration import denoise_tv_chambolle
    denoised = []
    for img in imgs:
        dn = denoise_tv_chambolle(img, weight=wt, eps=ep, n_iter_max=cycles, multichannel=False)
        denoised.append(dn)

    return denoised
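A minimal usage sketch for the wrapper above, on two synthetic noisy maps (the array contents are made up for illustration):

import numpy as np

rng = np.random.default_rng(42)
clean = np.zeros((64, 64))
clean[16:48, 16:48] = 1.0
maps = [clean + 0.2 * rng.standard_normal(clean.shape) for _ in range(2)]

smoothed = denoiseMaps(maps, wt=0.05, ep=0.02, cycles=200)
print(len(smoothed), smoothed[0].shape)   # 2 (64, 64)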
Example #58
def run():
    print('toto')
    import io
    import os
    import sys
    import argparse
    from skimage import io
    from skimage import filter
    from skimage import restoration
    from skimage import measure

    kidney_image = io.imread('manu.jpg')
    # estimate the noise in the image
    # do a test denoising using a total variation filter
    kidney_image_denoised_tv = restoration.denoise_tv_chambolle( kidney_image, weight=0.1)
    io.imsave('denoise_image.jpg', kidney_image_denoised_tv)
Example #59
def test_denoise_tv_chambolle_2d():
    # astronaut image
    img = astro_gray.copy()
    # add noise to astronaut
    img += 0.5 * img.std() * np.random.rand(*img.shape)
    # clip noise so that it does not exceed allowed range for float images.
    img = np.clip(img, 0, 1)
    # denoise
    denoised_astro = restoration.denoise_tv_chambolle(img, weight=0.1)
    # which dtype?
    assert_(denoised_astro.dtype in [np.float, np.float32, np.float64])
    from scipy import ndimage as ndi
    grad = ndi.morphological_gradient(img, size=((3, 3)))
    grad_denoised = ndi.morphological_gradient(denoised_astro, size=((3, 3)))
    # test if the total variation has decreased
    assert_(grad_denoised.dtype == np.float)
    assert_(np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum()))
Example #60
def test_denoise_tv_chambolle_2d():
    # lena image
    img = lena_gray
    # add noise to lena
    img += 0.5 * img.std() * np.random.random(img.shape)
    # clip noise so that it does not exceed allowed range for float images.
    img = np.clip(img, 0, 1)
    # denoise
    denoised_lena = restoration.denoise_tv_chambolle(img, weight=60.0)
    # which dtype?
    assert denoised_lena.dtype in [np.float, np.float32, np.float64]
    from scipy import ndimage

    grad = ndimage.morphological_gradient(img, size=((3, 3)))
    grad_denoised = ndimage.morphological_gradient(denoised_lena, size=((3, 3)))
    # test if the total variation has decreased
    assert grad_denoised.dtype == np.float
    assert np.sqrt((grad_denoised ** 2).sum()) < np.sqrt((grad ** 2).sum()) / 2