Example #1
def sharpen(Xs,ys,sigma1=3,sigma2=1,alpha=30,ratio=0.6,t=None):
    print "sharpening images..."

    if ratio > 1.0:
        print "Do you really want a ratio of %f for sharpen?" % ratio
        print "Every images produced will always be similar"

    rand_list = randomindex(Xs.shape[0]*ratio,Xs.shape[0])
    Xs = Xs[rand_list]
    ys = ys[rand_list]

    tx = []
    ty = []

    for X, y in zip(Xs,ys):
        blurred_l = ndimage.gaussian_filter(X, sigma1)

        filter_blurred_l = ndimage.gaussian_filter(blurred_l, sigma2)
        sharpened = blurred_l + alpha * (blurred_l - filter_blurred_l)

        tx.append(sharpened)
        ty.append(y)

        if t: t.print_update(1)

    return np.array(tx), np.array(ty)
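For reference, the unsharp-masking step used above can be exercised on its own. The sketch below is not part of the original pipeline; it uses a made-up toy image but mirrors the default sigma1/sigma2/alpha values.

import numpy as np
from scipy import ndimage

# Toy input: a soft-edged bright square (illustrative only).
img = np.zeros((64, 64))
img[16:48, 16:48] = 1.0
img = ndimage.gaussian_filter(img, 2)

# Unsharp masking as in sharpen(): blur, blur again, add back the amplified difference.
sigma1, sigma2, alpha = 3, 1, 30
blurred = ndimage.gaussian_filter(img, sigma1)
filter_blurred = ndimage.gaussian_filter(blurred, sigma2)
sharpened = blurred + alpha * (blurred - filter_blurred)
print(np.ptp(img), np.ptp(sharpened))  # the amplified detail widens the value range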
Example #2
def _compute_auto_correlation(image, sigma):
    """Compute auto-correlation matrix using sum of squared differences.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.

    Returns
    -------
    Axx : ndarray
        Element of the auto-correlation matrix for each pixel in input image.
    Axy : ndarray
        Element of the auto-correlation matrix for each pixel in input image.
    Ayy : ndarray
        Element of the auto-correlation matrix for each pixel in input image.

    """

    if image.ndim == 3:
        image = img_as_float(rgb2grey(image))

    imx, imy = _compute_derivatives(image)

    # structure tensor
    Axx = ndimage.gaussian_filter(imx * imx, sigma, mode='constant', cval=0)
    Axy = ndimage.gaussian_filter(imx * imy, sigma, mode='constant', cval=0)
    Ayy = ndimage.gaussian_filter(imy * imy, sigma, mode='constant', cval=0)

    return Axx, Axy, Ayy
Example #3
def roysam_watershed(dna,thresh=None,blur_factor=3):
    '''
    Run watershed on mixed gradient & intensity image as suggested by Lin et al.

    -Input
    dna:            DNA image
    thresh:         Gray value threshold (default: computed using Murphy's RC)
    blur_factor:    Blur factor (default: 3)
    
    
    REFERENCE
    Gang Lin, Umesh Adiga, Kathy Olson, John F. Guzowski, Carol A. Barnes, and Badrinath Roysam
    "A Hybrid 3-D Watershed Algorithm Incorporating Gradient Cues & Object Models for Automatic
        Segmentation of Nuclei in Confocal Image Stacks"
    Cytometry Part A, Vol. 56A, No. 1, pp. 23-36, November 2003.
    '''
    if thresh is None:
        thresh = 'murphy_rc'
    M = (ndimage.gaussian_filter(dna,4) > thresholding.threshold(dna,thresh))
    G = pymorph.gradm(dna)
    D = ndimage.distance_transform_edt(M)
    D *= np.exp(1-G/float(G.max()))
    T = ndimage.gaussian_filter(D.max() - D,blur_factor)
    if T.max() < 256:
        T = pymorph.to_uint8(T)
    else:
        T = pymorph.to_uint8(T*(256.0/T.max()))
    T *= M
    R = pymorph.regmin(T)
    R *= M
    R,N = ndimage.label(R)
    R[(R==0)&(M==0)] = N+1
    W,WL = mahotas.cwatershed(T,R,return_lines=True)
    W *= M
    return W,WL
def whiten(image):
    # NOTE: the early return below short-circuits the function; the per-channel
    # processing and difference-of-Gaussians code that follows is never reached.
    tmp = image - image.mean()
    return tmp / numpy.std(tmp)
    if image.ndim > 3:
        raise TypeError('Not more than 3 dimensions supported')

    if image.ndim == 3:
        tmp = numpy.empty_like(image)
        for c in range(image.shape[2]):
            tmp[:, :, c] = whiten(image[:, :, c])

        result = numpy.zeros_like(image)
        for c1 in range(image.shape[2]):
            for c2 in range(image.shape[2]):
                if c1 == c2:
                    result[:, :, c1] += tmp[:, :, c2]
                else:
                    result[:, :, c1] -= tmp[:, :, c2]

        return result

    sigma1 = 0.2
    img1 = ndimage.gaussian_filter(image, sigma1)
    sigma2 = 5 * sigma1
    img2 = ndimage.gaussian_filter(image, sigma2)
    result = img1 - img2
    return result
Example #5
def test_multiple_modes():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying a single mode.
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    mode1 = 'reflect'
    mode2 = ['reflect', 'reflect']

    assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
                 sndi.gaussian_filter(arr, 1, mode=mode2))
    assert_equal(sndi.prewitt(arr, mode=mode1),
                 sndi.prewitt(arr, mode=mode2))
    assert_equal(sndi.sobel(arr, mode=mode1),
                 sndi.sobel(arr, mode=mode2))
    assert_equal(sndi.laplace(arr, mode=mode1),
                 sndi.laplace(arr, mode=mode2))
    assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
                 sndi.gaussian_laplace(arr, 1, mode=mode2))
    assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
                 sndi.maximum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
                 sndi.minimum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
                 sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
    assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
                 sndi.uniform_filter(arr, 5, mode=mode2))
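A quick self-contained sketch of what the per-axis mode list buys you: identical modes reproduce the scalar-mode result (as the assertions above check), while mixing modes changes the boundary handling and therefore the output.

import numpy as np
from scipy import ndimage as sndi

arr = np.array([[1., 0., 0.],
                [1., 1., 0.],
                [0., 0., 0.]])

same = sndi.gaussian_filter(arr, 1, mode=['reflect', 'reflect'])
mixed = sndi.gaussian_filter(arr, 1, mode=['reflect', 'constant'])
print(np.allclose(same, sndi.gaussian_filter(arr, 1, mode='reflect')))  # True
print(np.allclose(same, mixed))                                         # False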
Example #6
def test_gaussian_filter():
    # Test gaussian filter with np.float16
    # gh-8207
    data = np.array([1],dtype = np.float16)
    sigma = 1.0
    with assert_raises(RuntimeError):
        sndi.gaussian_filter(data,sigma)
def gaussian_filter(image,sigma,derivative_sequence=None):
	# Use a float image when a non-zero derivative order is requested.
	if derivative_sequence is not None and numpy.array(derivative_sequence).any():
		array = image.getArray(data.image_types.FLOAT)
		order = derivative_sequence
	else:
		array = image.getArray()
		order = derivative_sequence if derivative_sequence is not None else 0
	return data.Image(ndimage.gaussian_filter(array, sigma, order=order), image)
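The wrapper above relies on the order argument of scipy.ndimage.gaussian_filter, which returns Gaussian-smoothed derivatives along the requested axes. A minimal standalone sketch, independent of the data.Image wrapper (which is not shown here):

import numpy as np
from scipy import ndimage

x = np.linspace(0.0, 2.0 * np.pi, 400)
rows = np.tile(np.sin(x), (5, 1))      # 2-D array varying along the last axis

# order=(0, 1): plain smoothing along axis 0, smoothed first derivative along axis 1.
deriv = ndimage.gaussian_filter(rows, sigma=3, order=(0, 1))
dx = x[1] - x[0]
# Away from the boundaries the derivative approximates cos(x) times the sample spacing.
print(np.allclose(deriv[2, 50:-50] / dx, np.cos(x[50:-50]), atol=0.05))  # True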
Example #8
def mkData():
    global data, cache, ui
    dtype = (ui.dtypeCombo.currentText(), ui.rgbCheck.isChecked())
    if dtype not in cache:
        if dtype[0] == 'uint8':
            dt = np.uint8
            loc = 128
            scale = 64
            mx = 255
        elif dtype[0] == 'uint16':
            dt = np.uint16
            loc = 4096
            scale = 1024
            mx = 2**16
        elif dtype[0] == 'float':
            dt = np.float64
            loc = 1.0
            scale = 0.1
        
        if ui.rgbCheck.isChecked():
            data = np.random.normal(size=(20,512,512,3), loc=loc, scale=scale)
            data = ndi.gaussian_filter(data, (0, 6, 6, 0))
        else:
            data = np.random.normal(size=(20,512,512), loc=loc, scale=scale)
            data = ndi.gaussian_filter(data, (0, 6, 6))
        if dtype[0] != 'float':
            data = np.clip(data, 0, mx)
        data = data.astype(dt)
        cache[dtype] = data
        
    data = cache[dtype]
    updateLUT()
Example #9
def gmm_fit_kb(data_raw, init_sigma):
    data = np.copy(data_raw)
    data_smooth_b = gaussian_filter(data, 10)
    B_e_x = data_smooth_b.mean()
    data_smooth_s = gaussian_filter(data, 1)
    sort_indexes = data_smooth_s.argsort()
    data[data < B_e_x] = B_e_x
    data = data - B_e_x
    data[data < 0] = 0
    bin_centres_x = np.array(list(range(0, len(data_raw))))
    sub_bin_centres_x = np.linspace(0, len(data_raw), 2 * len(data_raw))

    Mu_e_x_1 = sort_indexes[0]
    A_e_x_1 = data[Mu_e_x_1]
    Sigma_e_x_1 = np.abs(init_sigma)

    Mu_e_x_2 = sort_indexes[1]
    A_e_x_2 = data[Mu_e_x_2]
    Sigma_e_x_2 = np.abs(init_sigma)

    p0_x = np.array([A_e_x_1, Mu_e_x_1, Sigma_e_x_1, A_e_x_2, Mu_e_x_2, Sigma_e_x_2])
    opt_fun = lambda x, *p: (p[0] * np.exp(-(x - p[1]) ** 2 / (2. * p[2] ** 2))
                             + p[3] * np.exp(-(x - p[4]) ** 2 / (2. * p[5] ** 2)))
    try:
        coeff_x, var_matrix_x = curve_fit(opt_fun, bin_centres_x, data, p0=p0_x)
    except RuntimeError:
        print("optimal parameters not found")
        coeff_x = p0_x
    data_fit = opt_fun(sub_bin_centres_x, *coeff_x)
    print("Fit centromere position (bp) = ", (coeff_x[1] + coeff_x[4]) / 2.)
    print("Fit std 1 = ", coeff_x[2])
    print("Fit std 2 = ", coeff_x[5])
    return coeff_x, data_fit, sub_bin_centres_x, data
Example #10
    def _process(self, img):
        for c in range(3):
            channel = img[:, :, c]
            gaussian_filter(channel, output=channel, sigma=self._sigma)
            img[:, :, c] = channel

        return img
Example #11
 def __init__(self, sigma, mode="wrap"):
     self.sigma = sigma
     self.mode = mode
     size = 1 + 2 * int(round(3 * self.sigma))  # six sigmas of support
     self.kernel = np.zeros(size, np.float64)
     self.kernel[size // 2] = 1.
     ndimage.gaussian_filter(self.kernel, self.sigma, mode="constant", output=self.kernel)
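The impulse trick above can be checked directly: filtering a unit impulse with mode="constant" yields the discrete Gaussian kernel itself, and convolving with that kernel reproduces gaussian_filter. The sketch below passes truncate=3.0 so scipy's built-in kernel radius matches the hand-built three-sigma window (the class above keeps scipy's default truncation, so its kernel sums to slightly less than 1).

import numpy as np
from scipy import ndimage

sigma = 2.0
size = 1 + 2 * int(round(3 * sigma))
impulse = np.zeros(size)
impulse[size // 2] = 1.0
kernel = ndimage.gaussian_filter(impulse, sigma, mode="constant", truncate=3.0)
print(round(kernel.sum(), 6))            # 1.0: a normalized discrete Gaussian

signal = np.random.default_rng(0).normal(size=100)
direct = ndimage.gaussian_filter(signal, sigma, mode="wrap", truncate=3.0)
via_kernel = ndimage.convolve(signal, kernel, mode="wrap")
print(np.allclose(direct, via_kernel))   # True: filtering equals convolving with the kernel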
Example #12
def smooth_corrected(z, mask, w_smooth):
     mask1=snd.gaussian_filter(np.float32(mask), w_smooth, mode="constant", cval=0)
     ztemp=np.nan_to_num(z)
     ztemp[mask==0]=0.0
     zs=snd.gaussian_filter(ztemp, w_smooth, mode="constant", cval=0)
     zs[mask1>0]=zs[mask1>0]/mask1[mask1>0]
     return zs, mask1
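A small usage sketch of the mask-corrected smoothing above (assumes numpy as np and scipy.ndimage as snd are imported as in the function): values under the mask are zeroed before smoothing, and dividing by the smoothed mask removes the droop that the zeros and the constant-mode borders would otherwise introduce.

z = np.ones((6, 6))
z[:, :3] = 999.0                      # garbage that must not leak into the result
mask = np.ones((6, 6))
mask[:, :3] = 0                       # left half is invalid

zs, weight = smooth_corrected(z, mask, w_smooth=1.5)
print(zs[:, 3:].round(2))             # ~1.0 everywhere on the valid side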
Example #13
def cont(imagen, depth=2**16, gaussian=3, screenpercent=0.7,t=0):

    imagen = gaussian_filter(imagen, gaussian)

    if t==0:
        otsu = threshold_otsu(imagen, depth)
    elif t==1:
        otsu = filters.threshold_isodata(imagen, depth)
    else:
        otsu = filters.threshold_li(imagen)
    imagen = binarizar(imagen, otsu)
    imagen = gaussian_filter(imagen, gaussian)

    contours = measure.find_contours(imagen, 1)
    centro = np.asanyarray([1280*0.5, 960*0.5])
    while len(contours) > 1:
        if sum(np.abs(centro - contours[1].mean(axis=0)) < [1280*screenpercent*0.5, 960*screenpercent*0.5]) != 2:
            del contours[1]
        elif sum(np.abs(centro - contours[0].mean(axis=0)) < [1280*screenpercent*0.5, 960*screenpercent*0.5]) != 2:
            del contours[0]
        else:
            if contours[1].size < contours[0].size:
                del contours[1]
            else:
                del contours[0]
    return imagen, contours[0]
Example #14
 def loss(self, H): 
     if not hasattr(self, 'sigma'): 
         self.sigma = .05*np.array(H.shape)
     npts = nonzero(self.npoints(H))
     Hs = H/npts
     gaussian_filter(Hs, sigma=self.sigma, mode='constant', output=Hs)
     return dist2loss(Hs)
 def __init__(self, location, fig, ax):
     self.location = location
     self.fig = fig
     self.ax = ax
     # get alignment data for given location #
     try:
         self.afm_data_exists = 1
         self.afm_data = afm_alignment_data(location)
     except IOError:
         self.afm_data_exists = 0
     if self.afm_data_exists:
         # smooth data #
         self.afm_data.amplitude = ndimage.gaussian_filter(self.afm_data.amplitude, 0.7)
         self.afm_data.phase = ndimage.gaussian_filter(self.afm_data.phase, 0.7)
         
         # make afm data subplots #
         gs = gridspec.GridSpecFromSubplotSpec(1, 2, self.ax.get_subplotspec(), wspace=0.3)
         r_ax = plt.subplot(gs[0])
         theta_ax = plt.subplot(gs[1])
         # plot on subplot axes #
         self.add_alignment_subplot(self.afm_data.amplitude, r_ax, x=None, y=None, name='$r$')
         self.add_alignment_subplot(self.afm_data.phase, theta_ax, x=None, y=None, name=r'$\theta$')
         for tl in theta_ax.get_yticklabels():
             tl.set_visible(False)
         r_ax.set_ylabel('y position (nm)')
         r_ax.set_xlabel('x position (nm)')
def density_to_sim_data(density, out=None):
    """
    Takes a 2D image input, returns a 4D SIM dataset output
    """
    if out is None:
        sim_data = np.zeros(
            (num_rotations, num_phases) + density.shape, dtype=np.float64)
    else:
        sim_data = out

    """
    Construct the illumination pattern
    """
    illumination = generate_illumination(density)
    sim_data_to_visualization(illumination, 'illumination.tif')

    """
    Simulate the imaging process: multiply by the illumination, and blur
    """
    for t in range(num_rotations):
        for p in range(num_phases):
            sim_data[t, p, :, :] = illumination[t, p, :, :] * density
            gaussian_filter(sim_data[t, p, :, :],
                            sigma=emission_sigma,
                            output=sim_data[t, p, :, :])
    return sim_data
def rgb2grey(imgrgb):
    imgrey = np.zeros((imgrgb.shape[0],imgrgb.shape[1]), dtype=np.uint8)
    for i in range(imgrgb.shape[0]):
       for j in range(imgrgb.shape[1]):
           imgrey[i,j] = (imgrgb[i,j,0] /3  + imgrgb[i,j,1] /3 + imgrgb[i,j,2]/3)
    imgrey = ndimage.gaussian_filter(imgrey, sigma=5)  # smooth the greyscale result
    return imgrey
def PlotProfile(label1, label2, data1, data2, filename, smooth, binsz):
    plt.clf()
    plt.figure(figsize=(5,4))
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    
    counts1 = np.append(data1['FLUX'].data/(EXPOSURE*PIXEL_SA), 0)
    glats1 = np.append(data1['GLAT_MIN'][0], data1['GLAT_MAX'].data)

    profile1 = np.histogram(glats1, bins=np.sort(glats1), weights=counts1)
    xstep=binsz
    y_smooth_1 = gaussian_filter(profile1[0], smooth / xstep)
    print(y_smooth_1.mean())
    
    counts2 = np.append(data2['FLUX'].data/(EXPOSURE*PIXEL_SA*(data2['FLUX'].data.size)), 0)
    glats2 = np.append(data2['GLAT_MIN'][0], data2['GLAT_MAX'].data)

    profile2 = np.histogram(glats2, bins=np.sort(glats2), weights=counts2)
    xstep=binsz
    y_smooth_2 = gaussian_filter(profile2[0], smooth / xstep)
    print(y_smooth_2.mean())
    
    x1 = 0.5 * (glats1[1:] + glats1[:-1])
    plt.plot(x1, y_smooth_1, label='{0}'.format(label1))
    plt.plot(x1, y_smooth_2, label='{0}'.format(label2))
    plt.hlines(0, LatLow+binsz, LatHigh-binsz)
    plt.xlabel(r'Galactic Latitude/$deg$', fontsize=10)
    plt.ylabel(r'Surface Brightness/ph cm$^{-2}$ s$^{-1} sr^{-1}$', fontsize=10)
    plt.xlim([LatLow+binsz, LatHigh-binsz])
    plt.tick_params(axis='x', labelsize=10)
    plt.grid(b=True, which='major', color='0.75', linewidth=0.5)
    plt.legend(prop={'size':8})
    plt.savefig(filename)
Example #19
    def LocalSNR(self,pSrc, sigma,bksigma = None, uiobject=None):
        """
        Local SNR: [ mean(x,y) ]/stdev(x,y)
        Can be approximated as Gauss{I,sigma} / sqrt[ Gauss{ (I-Gauss{I,sigma})^2, sigma } ]
        """
        if uiobject:
            uiobject.pbar.startProgress(3,"Calculating LocSNR")
        blurIm = scind.gaussian_filter(pSrc,sigma,order=[0,0])
        if uiobject:
            print "[LocalSNR] 1/4 blur'd"
        devIm = pSrc-blurIm
        if uiobject:
            uiobject.pbar.doProgress("dev'd")
            print "[LocalSNR] 2/4 dev'd"
        sdIm  = np.sqrt(scind.gaussian_filter(devIm**2,sigma,order=[0,0]))
        sdIm[sdIm<1.e-6]=1. # prevent div by zero
        if uiobject:
            uiobject.pbar.doProgress("std'd")
            print "[LocalSNR] 3/4 std'd"
        locnormIm = blurIm/sdIm
        if uiobject:
            print "[LocalSNR] 4/4 snr'd"
        if bksigma is not None:
            blurIm = scind.gaussian_filter(locnormIm,bksigma,order=[0,0])
            locnormIm -= blurIm
            if uiobject:
                print "[LocalSNR] 5/4 trend removed"
        if uiobject:
            uiobject.pbar.endProgress()
#        blurIm = scind.gaussian_filter(locnormIm,sigma,order=[2,0])**2+scind.gaussian_filter(locnormIm,sigma,order=[0,2])**2
#        return blurIm
        return locnormIm
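The approximation in the docstring can also be sketched standalone, without the uiobject/progress-bar plumbing: on a flat field with Gaussian noise, the local SNR should come out near mean/stdev.

import numpy as np
from scipy import ndimage as scind

rng = np.random.default_rng(0)
img = 100.0 + 5.0 * rng.standard_normal((128, 128))   # flat field, noise sigma = 5

sigma = 8
mean_im = scind.gaussian_filter(img, sigma)
sd_im = np.sqrt(scind.gaussian_filter((img - mean_im) ** 2, sigma))
local_snr = mean_im / np.maximum(sd_im, 1.e-6)
print(round(local_snr.mean(), 1))     # roughly 100 / 5 = 20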
Example #20
 def process_frames(self, data):        
     if len(data[0].shape)>2:
         sino = np.mean(data[0],axis=1)
     else:
         sino = data[0]
     (nrow, ncol) = sino.shape
     dsp_row = 1
     dsp_col = 1                    
     if ncol>2000:
         dsp_col = 4             
     if nrow>2000:
         dsp_row = 2        
     # Denoising 
     # There's a critical reason to use different window sizes
     # between coarse and fine search.
     sino_csearch = ndi.gaussian_filter(sino, (3,1), mode='reflect')
     sino_fsearch = ndi.gaussian_filter(sino, (2,2), mode='reflect')
     sino_dsp = self._downsample(sino_csearch, dsp_row, dsp_col)
     fine_srange = max(self.search_radius, dsp_col)
     off_set = 0.5*dsp_col if dsp_col>1 else 0.0
     if self.est_cor is None:
         self.est_cor = (ncol-1.0)/2.0
     else:
         self.est_cor = np.float32(self.est_cor)
     start_cor = np.int16(
         np.floor(1.0 * (self.est_cor + self.smin) / dsp_col))        
     stop_cor = np.int16(
         np.ceil(1.0 * (self.est_cor + self.smax) / dsp_col))
     raw_cor = self._coarse_search(sino_dsp, start_cor, stop_cor,
                                    self.ratio, self.drop)
     cor = self._fine_search(
         sino_fsearch, raw_cor*dsp_col + off_set, fine_srange,
          self.search_step, self.ratio, self.drop)
     return [np.array([cor]), np.array([cor])]
    def __sharpner(self, image_data):
        blurred_f = ndimage.gaussian_filter(image_data, 3)
        filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)
        alpha = 30
        image_sharpened = blurred_f + alpha * (blurred_f - filter_blurred_f)

        return image_sharpened
Example #22
def faceNorm(im, contrast = 0.2) :
	""" Calculates a lighting neutral image
	    Input: im [numpy.ndarray] Image in whatever format
		Output: [numpy.ndarray] UINT version of image
	"""
	alpha = 0.1
	tau = 10.0
	gamma = 0.2
	sigma1 = 1.0
	sigma2 = 3.0

	im = stretch(im)
	c = ndimage.gaussian_filter(im, sigma1)
	s = ndimage.gaussian_filter(im, sigma2)
	q = numpy.asarray(c - s)
	w = numpy.asarray(c+s+0.000001)
	nDoG = q/w

	A = contrast # The smaller the greater the enhancement
	B = 1
	w = nDoG*(A+B)
	ww = numpy.abs(nDoG)+A
	cenDoG = w/ww

	return toUInt(cenDoG)
Example #23
    def _plot_path_energy(self, experiment_results):
        Tmax = 6000
        for repetition in experiment_results:
            nodes = experiment_results.nodes_have_metric("routeEnergy")
            for node in nodes:
                data = experiment_results.get_tuple_metric_per_node("routeEnergy", node, repetition)

                T = [float(pair[0]) for pair in data]
                R = [float(pair[1]) for pair in data]
                bw = 0.5

                trange = [0, Tmax]
                bins = 5000

                dx = (trange[1] - trange[0]) / bins

                # compute sum_i K(x - x_i) y_i
                hist_R, edges = np.histogram(T, range=trange, bins=bins, weights=R)
                kde_R = gaussian_filter(hist_R, bw / dx)

                # compute sum_i K(x - x_i)
                hist_T, edges = np.histogram(T, range=trange, bins=bins)
                kde_T = gaussian_filter(hist_T, bw / dx)

                # compute the Nadaraya-Watson estimate
                interpolated_R = kde_R / kde_T

                # compute the x-axis
                domain = (edges[1:] + edges[:-1]) / 2.0

                if self.draw:
                    plt.plot(domain, interpolated_R)
                    file_name = "alternative_path-energy_node-" + str(node)
                    plt.savefig(os.path.join(self.location, self.scenario + "_" + file_name + ".png"))
                    plt.close()
Example #24
def preproc_dog(bg):
    """test low level image filter
    """

    #special dependencies
    from scipy.ndimage import gaussian_filter
    from pyrankfilter import rankfilter,__version__

    bg = bg.astype(float)
    f1 = gaussian_filter(bg,sigma=14.5)
    f2 = gaussian_filter(bg,sigma=15)

    f = ((f1-f2+3)*10).astype('uint8')

    loc_max = rankfilter(f,'highest',20,infSup=[0,0])

    r = bg.copy()
    r[loc_max>0]=0

    plt.figure()
    plt.subplot(2,2,1)
    plt.imshow(bg)
    plt.subplot(2,2,2)
    plt.imshow(f)
    plt.colorbar()
    plt.subplot(2,2,3)
    plt.imshow(loc_max)
    plt.subplot(2,2,4)
    plt.imshow(r)
    plt.show()
def blur_image(img):
    '''Return the blurred image that's used when sampling'''
    blur = np.zeros(list(img.shape)+[2], img.dtype)
    for z in range(img.shape[2]):
        blur[:,:,z, 0] = laplace(gaussian_filter(img[:,:,z], 3))
        blur[:,:,z, 1] = gaussian_filter(img[:,:,z], 5)
    return blur
Example #26
    def tantriggs(self, x, alpha=0.1,gamma=0.2,sigma0=1,sigma1=2,tau=10.0):
        x = np.array(x, dtype=np.float32)
        x = np.power(x, gamma)
        s0 = 3*sigma0
        s1 = 3*sigma1
        if ((s0%2)==0):
            s0+=1
        if ((s1%2)==0):
            s1+=1

        x = np.asarray(
            ndimage.gaussian_filter(x, sigma0) - ndimage.gaussian_filter(x, sigma1)
            )

        x = x / np.power(
            np.mean(np.power(np.abs(x), alpha)),
            1.0 / alpha
            )
        x = x / np.power(
                np.mean(
                    np.power(
                        np.minimum(np.abs(x), tau),
                        alpha
                    )
                ),
                1.0 / alpha
            )

        x = np.tanh(x / tau) * tau
        x = cv2.normalize(x,x,-220,0,cv2.NORM_MINMAX)
        return np.array(x, np.uint8)
def generate_surface_from_stack(stack, sd=(10, 10, 10), surface_blur_sd=5):
    """Return a 2D image encoding a height map, generated from the input stack.
    The image is generated by first blurring the stack, then taking the z index
    of the brightest point for each X, Y location. The resultant surface is
    then smoothed with a gaussian filter.

    sd: standard deviation in each direction
    surface_blur_sd: standard deviation of smoothing applied to 2D surface."""


    ydim, xdim, zdim = stack.shape
    pad_val = 5
    padding = pad_val * 2
    padded_stack = np.zeros((ydim + padding, xdim + padding, zdim + padding),
                            dtype=stack.dtype)
    start = pad_val
    end_add = pad_val
    padded_stack[start:ydim+end_add, start:xdim+end_add, start:zdim+end_add] = stack
    smoothed_stack = nd.gaussian_filter(padded_stack, sd)
    cropped_stack = smoothed_stack[start:ydim+end_add, start:xdim+end_add, start:zdim+end_add]
    raw_surface = np.argmax(cropped_stack, 2)
    raw_surface[np.logical_and(raw_surface==0, cropped_stack[:,:,0] == 0)] = zdim-1
    smoothed_surface = nd.gaussian_filter(raw_surface, surface_blur_sd)

    return smoothed_surface
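A small usage sketch on synthetic data (assumes the function above is available together with its numpy-as-np and scipy.ndimage-as-nd imports): each (y, x) column holds a single bright voxel on a tilted plane, which the returned height map should roughly recover.

ydim, xdim, zdim = 40, 40, 20
stack = np.zeros((ydim, xdim, zdim))
yy, xx = np.mgrid[:ydim, :xdim]
true_z = (5 + 10 * xx / xdim).astype(int)          # height increases with x
stack[yy, xx, true_z] = 1.0

surface = generate_surface_from_stack(stack, sd=(2, 2, 2), surface_blur_sd=2)
print(surface.shape)                               # (40, 40)
print(np.abs(surface - true_z).mean())             # small: the tilted plane is recovered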
    def get_M(self, X, mode='reflect', cval=0.):
        '''Get the Harris-Laplace scale-adapted second moment matrix'''
        # Compute the gaussian smoothed image
        # N.B.: using ndimage for speed; could replace with gaussian filter
        # class above
        G = ndimage.gaussian_filter(X, self.sigma_d, mode=mode, cval=cval)

        # Compute derivatives of gaussian-smoothed image in x and y directions
        Lx, Ly = self.get_grads_xy(G, mode=mode, cval=cval)

        # Compute second derivatives from first derivatives in x and y
        # directions
        Lxx, Lxy = self.get_grads_xy(Lx, mode=mode, cval=cval)
        _, Lyy = self.get_grads_xy(Ly, mode=mode, cval=cval)

        # Convolve each second derivative matrix with the integration gaussian
        # N.B.: using ndimage for speed; could replace with gaussian filter
        # class above
        Lxx = ndimage.gaussian_filter(Lxx, self.sigma_i, mode=mode, cval=cval)
        Lxy = ndimage.gaussian_filter(Lxy, self.sigma_i, mode=mode, cval=cval)
        Lyy = ndimage.gaussian_filter(Lyy, self.sigma_i, mode=mode, cval=cval)

        # Get an empty matrix and store the second derivatives
        M = np.empty((*X.shape, 2, 2))
        M[:, :, 0, 0] = Lxx
        M[:, :, 1, 0] = Lxy
        M[:, :, 0, 1] = Lxy
        M[:, :, 1, 1] = Lyy

        # Apply scale correction
        M = (self.sigma_d ** 2.) * M
        return M
Example #29
def nan_gaussian_filter(X, sigma, keep_nans=False, gf_kwargs=dict()):
    """Equivalent to scipy.ndimage.gaussian_filter, but allows NaNs

    For inputs see original function

    if keep_nans is True, then output has NaNs everywhere input had nans

    http://stackoverflow.com/questions/18697532/
    gaussian-filtering-a-image-with-nan-in-python"""
    nanX = np.isnan(X)

    X1 = X.copy()
    X1[nanX] = 0
    X1_filtered = gaussian_filter(X1, sigma, **gf_kwargs)

    X2 = np.ones_like(X)
    X2[nanX] = 0
    X2_filtered = gaussian_filter(X2, sigma, **gf_kwargs)

    out = X1_filtered/X2_filtered

    if keep_nans:
        out[nanX] = np.nan

    return out
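A short usage sketch (assumes numpy as np and gaussian_filter are imported as in the function above): the normalized-convolution trick fills the hole left by the NaN, and keep_nans=True puts the NaN back afterwards.

X = np.arange(25, dtype=float).reshape(5, 5)
X[2, 2] = np.nan

filled = nan_gaussian_filter(X, sigma=1)
print(np.isnan(filled).any())                    # False: the hole has been filled in
masked = nan_gaussian_filter(X, sigma=1, keep_nans=True)
print(np.isnan(masked[2, 2]))                    # True: the NaN is restored at the hole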
Example #30
def compute_harris_response(image, eps=1e-6):
    """ compute the Harris corner detector response function
        for each pixel in the image"""

    # derivatives
    image = ndimage.gaussian_filter(image, 1)
    imx = ndimage.sobel(image, axis=0, mode='constant')
    imy = ndimage.sobel(image, axis=1, mode='constant')

    Wxx = ndimage.gaussian_filter(imx * imx, 1.5, mode='constant')
    Wxy = ndimage.gaussian_filter(imx * imy, 1.5, mode='constant')
    Wyy = ndimage.gaussian_filter(imy * imy, 1.5, mode='constant')

    # determinant and trace
    Wdet = Wxx * Wyy - Wxy ** 2
    Wtr = Wxx + Wyy
    harris = Wdet / (Wtr + eps)

    # Non maximum filter of size 3
    harris_max = ndimage.maximum_filter(harris, 3, mode='constant')
    harris *= harris == harris_max
    # Remove the image corners
    harris[:3] = 0
    harris[-3:] = 0
    harris[:, :3] = 0
    harris[:, -3:] = 0

    return harris
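A minimal usage sketch (the synthetic image is made up for illustration; numpy and scipy.ndimage are assumed to be imported as in the function above): the surviving local maxima of the response sit near the four corners of a bright square.

img = np.zeros((64, 64))
img[16:48, 16:48] = 1.0

response = compute_harris_response(img)
peaks = np.argwhere(response > 0.1 * response.max())
print(peaks)   # a handful of points clustered near (16, 16), (16, 47), (47, 16) and (47, 47)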
Example #31
def filter_(z, n):
    from scipy.ndimage import gaussian_filter
    return gaussian_filter(z, n)
 def augment_gaussian_filt_2d(iq_mat, label):
     iq_mat = gaussian_filter(iq_mat, sigma=1)
     return iq_mat, label
Example #33
def filter(original_images, transformation):
    """
    :param original_images: 4D array of images, shape (nb_images, img_rows, img_cols, nb_channels).
    :param transformation: the TRANSFORMATION.filter_* variant to apply.
    :return: 4D array of the transformed images.
    """
    nb_images, img_rows, img_cols, nb_channels = original_images.shape
    transformed_images = []
    if (transformation == TRANSFORMATION.filter_sobel):
        for img in original_images:
            if (nb_channels == 3):
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            img_trans = filters.sobel(img)
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_median):
        for img in original_images:
            img_trans = ndimage.median_filter(img, size=3)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_minimum):
        for img in original_images:
            img_trans = ndimage.minimum_filter(img, size=3)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_maximum):
        for img in original_images:
            img_trans = ndimage.maximum_filter(img, size=3)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_gaussian):
        for img in original_images:
            img_trans = ndimage.gaussian_filter(img, sigma=1)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_rank):
        for img in original_images:
            img_trans = ndimage.rank_filter(img, rank=15, size=3)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_entropy):
        for img in original_images:
            radius = 2
            if (nb_channels == 3):
                radius = 1
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            """
            requires values in range [-1., 1.]
            """
            img = (img - 0.5) * 2.
            """
            skimage-entropy function returns values in float64,
            however opencv only supports float32.
            """
            img_trans = np.float32(
                filters.rank.entropy(img, disk(radius=radius)))
            """
            rescale back into range [0., 1.]
            """
            img_trans = (img_trans / 2.) + 0.5
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_roberts):
        for img in original_images:
            if (nb_channels == 3):
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            img_trans = roberts(img)
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_scharr):
        for img in original_images:
            if (nb_channels == 3):
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            img_trans = scharr(img)
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_prewitt):
        for img in original_images:
            if (nb_channels == 3):
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img = img.reshape(img_rows, img_cols)
            img_trans = prewitt(img)
            if (nb_channels == 3):
                img_trans = cv2.cvtColor(img_trans, cv2.COLOR_GRAY2RGB)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_meijering):
        for img in original_images:
            if nb_channels == 1:
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
            img_trans = meijering(img, sigmas=[0.01])
            if nb_channels == 1:
                img_trans = img_trans[:, :, 1]
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_sato):
        for img in original_images:
            img_trans = sato(img)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_frangi):
        for img in original_images:
            img_trans = frangi(img)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_hessian):
        for img in original_images:
            img_trans = hessian(img)
            transformed_images.append(img_trans)
    elif (transformation == TRANSFORMATION.filter_skeletonize):
        for img in original_images:
            img = invert(img)
            img = img.reshape((img_rows, img_cols))
            img = skeletonize(img)
            transformed_images.append(img)
    elif (transformation == TRANSFORMATION.filter_thin):
        for img in original_images:
            img = img.reshape(img_rows, img_cols)
            img = thin(img, max_iter=100)
            transformed_images.append(img)
    else:
        raise ValueError('{} is not supported.'.format(transformation))

    transformed_images = np.stack(transformed_images, axis=0)
    if (nb_channels == 1):
        # reshape a 3d to a 4d
        transformed_images = transformed_images.reshape(
            (nb_images, img_rows, img_cols, nb_channels))
    return transformed_images
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,
         spacing=None, multichannel=True, convert2lab=None,
         enforce_connectivity=False, min_size_factor=0.5, max_size_factor=3,
         slic_zero=False):
    """Segments image using k-means clustering in Color-(x,y,z) space.

    Parameters
    ----------
    image : 2D, 3D or 4D ndarray
        Input image, which can be 2D or 3D, and grayscale or multichannel
        (see `multichannel` parameter).
    n_segments : int, optional
        The (approximate) number of labels in the segmented output image.
    compactness : float, optional
        Balances color-space proximity and image-space proximity. Higher
        values give more weight to image-space. As `compactness` tends to
        infinity, superpixel shapes become square/cubic. In SLICO mode, this
        is the initial compactness.
    max_iter : int, optional
        Maximum number of iterations of k-means.
    sigma : float or (3,) array-like of floats, optional
        Width of Gaussian smoothing kernel for pre-processing for each
        dimension of the image. The same sigma is applied to each dimension in
        case of a scalar value. Zero means no smoothing.
        Note, that `sigma` is automatically scaled if it is scalar and a
        manual voxel spacing is provided (see Notes section).
    spacing : (3,) array-like of floats, optional
        The voxel spacing along each image dimension. By default, `slic`
        assumes uniform spacing (same voxel resolution along z, y and x).
        This parameter controls the weights of the distances along z, y,
        and x during k-means clustering.
    multichannel : bool, optional
        Whether the last axis of the image is to be interpreted as multiple
        channels or another spatial dimension.
    convert2lab : bool, optional
        Whether the input should be converted to Lab colorspace prior to
        segmentation. The input image *must* be RGB. Highly recommended.
        This option defaults to ``True`` when ``multichannel=True`` *and*
        ``image.shape[-1] == 3``.
    enforce_connectivity: bool, optional (default False)
        Whether the generated segments are connected or not
    min_size_factor: float, optional
        Proportion of the minimum segment size to be removed with respect
        to the supposed segment size ```depth*width*height/n_segments```
    max_size_factor: float, optional
        Proportion of the maximum connected segment size. A value of 3 works
        in most of the cases.
    slic_zero: bool, optional
        Run SLIC-zero, the zero-parameter mode of SLIC. [2]_

    Returns
    -------
    labels : 2D or 3D array
        Integer mask indicating segment labels.

    Raises
    ------
    ValueError
        If ``convert2lab`` is set to ``True`` but the last array
        dimension is not of length 3.

    Notes
    -----
    * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to
      segmentation.

    * If `sigma` is scalar and `spacing` is provided, the kernel width is
      divided along each dimension by the spacing. For example, if ``sigma=1``
      and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This
      ensures sensible smoothing for anisotropic images.

    * The image is rescaled to be in [0, 1] prior to processing.

    * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To
      interpret them as 3D with the last dimension having length 3, use
      `multichannel=False`.

    References
    ----------
    .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,
        Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to
        State-of-the-art Superpixel Methods, TPAMI, May 2012.
    .. [2] http://ivrg.epfl.ch/research/superpixels#SLICO

    Examples
    --------
    >>> from skimage.segmentation import slic
    >>> from skimage.data import astronaut
    >>> img = astronaut()
    >>> segments = slic(img, n_segments=100, compactness=10)

    Increasing the compactness parameter yields more square regions:

    >>> segments = slic(img, n_segments=100, compactness=20)

    """
    if enforce_connectivity is None:
        warnings.warn('Deprecation: enforce_connectivity will default to'
                      ' True in future versions.')
        enforce_connectivity = False

    image = img_as_float(image)
    is_2d = False
    if image.ndim == 2:
        # 2D grayscale image
        image = image[np.newaxis, ..., np.newaxis]
        is_2d = True
    elif image.ndim == 3 and multichannel:
        # Make 2D multichannel image 3D with depth = 1
        image = image[np.newaxis, ...]
        is_2d = True
    elif image.ndim == 3 and not multichannel:
        # Add channel as single last dimension
        image = image[..., np.newaxis]

    if spacing is None:
        spacing = np.ones(3)
    elif isinstance(spacing, (list, tuple)):
        spacing = np.array(spacing, dtype=np.double)

    if not isinstance(sigma, coll.Iterable):
        sigma = np.array([sigma, sigma, sigma], dtype=np.double)
        sigma /= spacing.astype(np.double)
    elif isinstance(sigma, (list, tuple)):
        sigma = np.array(sigma, dtype=np.double)
    if (sigma > 0).any():
        # add zero smoothing for multichannel dimension
        sigma = list(sigma) + [0]
        image = ndi.gaussian_filter(image, sigma)

    if multichannel and (convert2lab or convert2lab is None):
        if image.shape[-1] != 3 and convert2lab:
            raise ValueError("Lab colorspace conversion requires a RGB image.")
        elif image.shape[-1] == 3:
            image = rgb2lab(image)

    depth, height, width = image.shape[:3]

    # initialize cluster centroids for desired number of segments
    grid_z, grid_y, grid_x = np.mgrid[:depth, :height, :width]
    slices = regular_grid(image.shape[:3], n_segments)
    step_z, step_y, step_x = [int(s.step) for s in slices]
    segments_z = grid_z[slices]
    segments_y = grid_y[slices]
    segments_x = grid_x[slices]

    segments_color = np.zeros(segments_z.shape + (image.shape[3],))
    segments = np.concatenate([segments_z[..., np.newaxis],
                               segments_y[..., np.newaxis],
                               segments_x[..., np.newaxis],
                               segments_color],
                              axis=-1).reshape(-1, 3 + image.shape[3])
    segments = np.ascontiguousarray(segments)

    # we do the scaling of ratio in the same way as in the SLIC paper
    # so the values have the same meaning
    step = float(max((step_z, step_y, step_x)))
    ratio = 1.0 / compactness

    image = np.ascontiguousarray(image * ratio)

    labels = _slic_cython(image, segments, step, max_iter, spacing, slic_zero)

    if enforce_connectivity:
        segment_size = depth * height * width / n_segments
        min_size = int(min_size_factor * segment_size)
        max_size = int(max_size_factor * segment_size)
        labels = _enforce_label_connectivity_cython(labels,
                                                    n_segments,
                                                    min_size,
                                                    max_size)

    if is_2d:
        labels = labels[0]

    return labels
Example #35
import numpy as np
import scipy.ndimage as ndi
import matplotlib.pyplot as plt

img = np.zeros((516, 516))

img[128:-128, 128:-128] = 1

img = ndi.gaussian_filter(img, 8)

rotated = ndi.rotate(img, -20)

noisy = rotated + 0.09 * np.random.random(rotated.shape)

sx = ndi.sobel(noisy, axis=0)
sy = ndi.sobel(noisy, axis=1)
sob = np.hypot(sx, sy)

titles = ['Original', 'Rotated', 'Noisy',
          'Sobel (X-axis)', 'Sobel (Y-axis)', 'Sobel']

output = [img, rotated, noisy, sx, sy, sob]

for i in range(6):
	plt.subplot(2, 3, i+1)
	plt.imshow(output[i])
	plt.title(titles[i])
	plt.axis('off')
plt.show()
Example #36
plt.subplot(1, 2, 2)
plt.imshow(flor_rotate_shape)

#%% Filtering Filtering

# min/max/median filters

flor_min = ndi.minimum_filter(flor_gray, size=(3, 3))
flor_max = ndi.maximum_filter(flor_gray, size=(3, 3))
flor_med = ndi.median_filter(flor_gray, size=(3, 3))

flor_max_rgb = ndi.maximum_filter(flor_rgb, size=(3, 3, 3))

# Gaussian blurring

flor_gauss = ndi.gaussian_filter(flor_gray, sigma=3)

# Convolution Filtering
#   Using a custom kernel to manipulate an image and enhance or blur an image

k = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])  # 3x3 sharpening
flor_sharp = ndi.convolve(flor_gray, k)

k = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])  # 3x3 vertical edges
flor_vert = ndi.convolve(flor_gray, k)
plt.imshow(flor_vert, cmap='seismic')

k = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])  # 3x3 horz edges
flor_horz = ndi.convolve(flor_gray, k)
plt.imshow(flor_horz, cmap='PuOr')
#   Surface-based Lifted Index
#
#   The axis of the 300-hPa, 500-hPa, and 850-hPa jets
#
#   Surface dewpoint
#
#   700-hPa dewpoint depression
#
#   12-hr surface pressure falls and 500-hPa height changes


# 500 hPa CVA
dx, dy = mpcalc.lat_lon_grid_spacing(lon, lat)
vort_adv_500 = mpcalc.advection(avor_500, [u_500.to('m/s'), v_500.to('m/s')],
                                (dx, dy), dim_order='yx') * 1e9
vort_adv_500_smooth = gaussian_filter(vort_adv_500, 4)

####################################
# For the jet axes, we will calculate the windspeed at each level, and plot the highest values
wspd_300 = gaussian_filter(mpcalc.get_wind_speed(u_300, v_300), 5)
wspd_500 = gaussian_filter(mpcalc.get_wind_speed(u_500, v_500), 5)
wspd_850 = gaussian_filter(mpcalc.get_wind_speed(u_850, v_850), 5)

#################################
# 850-hPa dewpoint will be calculated from RH and Temperature_isobaric
Td_850 = mpcalc.dewpoint_rh(tmp_850, rh_850 / 100.)

################################
# 700-hPa dewpoint depression will be calculated from Temperature_isobaric and RH
Td_dep_700 = tmp_700 - mpcalc.dewpoint_rh(tmp_700, rh_700 / 100.)
Example #38
def _smooth_flat_field(image):
    image = ndimage.gaussian_filter(image.astype(numpy.float32), 15, mode='nearest')
    image = image[::2, ::2]
    image = ndimage.median_filter(image, footprint=_m9)
    image = ndimage.zoom(image, 2)
    return ndimage.gaussian_filter(image, 5)
Example #39
  def visualise_orientational_distribution(self, axes_to_return=None,
                                           cbar=True):

    """ Creates a plot of the orientational distribution of the unit cells.

    :param axes_to_return: if None, print to screen, otherwise, requires 3 axes objects, and will return them.
    :param cbar: boolean to specify if a color bar should be used.
    """
    import matplotlib.pyplot as plt
    import matplotlib.patheffects as patheffects
    from mpl_toolkits.basemap import Basemap
    import scipy.ndimage as ndi

    def cart2sph(x, y, z):
      # cctbx (+z to source, y to ceiling) to
      # lab frame (+x to source, z to ceiling)
      z, x, y = x, y, z
      dxy = np.sqrt(x ** 2 + y ** 2)
      r = np.sqrt(dxy ** 2 + z ** 2)
      theta = np.arctan2(y, x)
      phi = np.arctan2(z, dxy)  # angle of the z axis relative to xy plane
      theta, phi = np.rad2deg([theta, phi])
      return theta % 360, phi, r

    def xy_lat_lon_from_orientation(orientation_array, axis_id):
      logger.debug("axis_id: {}".format(axis_id))
      dist = math.sqrt(orientation_array[axis_id][0] ** 2 +
                       orientation_array[axis_id][1] ** 2 +
                       orientation_array[axis_id][2] ** 2)
      flon, flat, bla = cart2sph(orientation_array[axis_id][0] / dist,
                                 orientation_array[axis_id][1] / dist,
                                 orientation_array[axis_id][2] / dist)
      x, y = euler_map(flon, flat)
      return x, y, flon, flat

    orientations = [flex.vec3_double(flex.double(
      image.orientation.direct_matrix()))
      for image in self.members]

    space_groups = [image.orientation.unit_cell().lattice_symmetry_group()
                    for image in self.members]

    # Now do all the plotting
    if axes_to_return is None:
      plt.figure(figsize=(10, 14))
      axes_to_return = [plt.subplot2grid((3, 1), (0, 0)),
                        plt.subplot2grid((3, 1), (1, 0)),
                        plt.subplot2grid((3, 1), (2, 0))]
      show_image = True
    else:
      assert len(axes_to_return) == 3, "If using axes option, must hand" \
                                       " 3 axes to function."
      show_image = False

    axis_ids = [0, 1, 2]
    labels = ["a",
              "b",
              "c"]

    for ax, axis_id, label in zip(axes_to_return, axis_ids, labels):

      # Lists of x,y,lat,long for the master orientation, and for all
      # symmetry mates.
      x_coords = []
      y_coords = []
      lon = []
      lat = []
      sym_x_coords = []
      sym_y_coords = []
      sym_lon = []
      sym_lat = []
      euler_map = Basemap(projection='eck4', lon_0=0, ax=ax)

      for orientation, point_group_type in zip(orientations, space_groups):

        # Get position of main spots.
        main_x, main_y, main_lon, main_lat \
          = xy_lat_lon_from_orientation(list(orientation), axis_id)
        x_coords.append(main_x)
        y_coords.append(main_y)
        lon.append(main_lon)
        lat.append(main_lat)

        # Get position of symmetry mates
        symmetry_operations = list(point_group_type.smx())[1:]
        for mx in symmetry_operations:
          rotated_orientation = list(mx.r().as_double() * orientation)  # <--
          # should make sense if orientation was a vector, not clear what is
          # going on since orientation is a matrix. Or, make some test cases
          # with 'orientation' and see if the behave as desired.
          sym_x, sym_y, sym_lo, sym_la \
            = xy_lat_lon_from_orientation(rotated_orientation, axis_id)
          #assert (sym_x, sym_y) != (main_x, main_y)
          sym_x_coords.append(sym_x)
          sym_y_coords.append(sym_y)
          sym_lon.append(sym_lo)
          sym_lat.append(sym_la)

      # Plot each image as a yellow sphere
      logger.debug(len(x_coords))
      euler_map.plot(x_coords, y_coords, 'oy',
                     markersize=4,
                     markeredgewidth=0.5)

      # Plot the symmetry mates as black crosses
      #euler_map.plot(sym_x_coords, sym_y_coords, 'kx')

      # Use a histogram to bin the data in latitude/longitude space, smooth it,
      # then plot this as a contour map. This is for all the symmetry-related
      # copies.
      #density_hist = np.histogram2d(lat + sym_lat, lon + sym_lon,
      #                                    bins=[range(-90, 91), range(0, 361)])
      # No symmetry mates until we can verify what the cctbx libs are doing
      density_hist = np.histogram2d(lat, lon,
                                    bins=[list(range(-90, 91)), list(range(0, 361))])
      smoothed = ndi.gaussian_filter(density_hist[0], (15, 15), mode='wrap')
      local_intensity = []
      x_for_plot = []
      y_for_plot = []
      for _lat in range(0, 180):
        for _lon in range(0, 360):
          _x, _y = euler_map(density_hist[2][_lon], density_hist[1][_lat])
          x_for_plot.append(_x)
          y_for_plot.append(_y)
          local_intensity.append(smoothed[_lat, _lon])
      cs = euler_map.contourf(np.array(x_for_plot),
                              np.array(y_for_plot),
                              np.array(local_intensity), tri=True)

      #  Pretty up graph
      if cbar:
        _cbar = plt.colorbar(cs, ax=ax)
        _cbar.ax.set_ylabel('spot density [AU]')
      middle = euler_map(0, 0)
      path_effect = [patheffects.withStroke(linewidth=3, foreground="w")]
      euler_map.plot(middle[0], middle[1], 'o', markersize=10, mfc='none')
      euler_map.plot(middle[0], middle[1], 'x', markersize=8)
      ax.annotate("beam", xy=(0.52, 0.52), xycoords='axes fraction',
                  size='medium', path_effects=path_effect)
      euler_map.drawmeridians(np.arange(0, 360, 60),
                              labels=[0, 0, 1, 0],
                              fontsize=10)
      euler_map.drawparallels(np.arange(-90, 90, 30),
                              labels=[1, 0, 0, 0],
                              fontsize=10)
      ax.annotate(label, xy=(-0.05, 0.9), xycoords='axes fraction',
                  size='x-large', weight='demi')

    if show_image:
      plt.show()

    return axes_to_return
Example #40
    # Only use argument 3 if it exists.
    try:
        plotPath = sys.argv[3]

        if (plotPath.split('.')[-1] != 'png'):
            raise ValueError('Output path for the plot must end with \".png\"')
    except ValueError:
        print('Something')
    except:
        pass

    # Read both images and smooth them with a Gaussian filter (the median-filter variant is left commented out below).
    img = ndimage.imread(inputPath1, flatten=True)
    img2 = ndimage.imread(inputPath2, flatten=True)
    # img = median(img, disk((60)))
    img = ndimage.gaussian_filter(img, sigma=3)
    img2 = ndimage.gaussian_filter(img2, sigma=3)

    # Get the profile for each axis.
    vProfile = sumProfile(img, 0)  # Horizontal profile
    hProfile = sumProfile(img, 1)  # Vertical profile

    vProfile2 = sumProfile(img2, 0)  # Horizontal profile
    hProfile2 = sumProfile(img2, 1)  # Vertical profile

    # TODO: switch the axes and superimpose image behind it.

    # Horizontal plot 1
    plt.figure(0)
    plt.plot(hProfile, label='original 1')
    plt.plot(hProfile2, label='original 2')
def slic(image, n_segments=100, compactness=10., max_iter=10, sigma=0,
         spacing=None, multichannel=True, convert2lab=None,
         enforce_connectivity=True, min_size_factor=0.5, max_size_factor=3,
         slic_zero=False, start_label=None, mask=None):
    """Segments image using k-means clustering in Color-(x,y,z) space.

    Parameters
    ----------
    image : 2D, 3D or 4D ndarray
        Input image, which can be 2D or 3D, and grayscale or multichannel
        (see `multichannel` parameter).
    n_segments : int, optional
        The (approximate) number of labels in the segmented output image.
    compactness : float, optional
        Balances color proximity and space proximity. Higher values give
        more weight to space proximity, making superpixel shapes more
        square/cubic. In SLICO mode, this is the initial compactness.
        This parameter depends strongly on image contrast and on the
        shapes of objects in the image. We recommend exploring possible
        values on a log scale, e.g., 0.01, 0.1, 1, 10, 100, before
        refining around a chosen value.
    max_iter : int, optional
        Maximum number of iterations of k-means.
    sigma : float or (3,) array-like of floats, optional
        Width of Gaussian smoothing kernel for pre-processing for each
        dimension of the image. The same sigma is applied to each dimension in
        case of a scalar value. Zero means no smoothing.
        Note, that `sigma` is automatically scaled if it is scalar and a
        manual voxel spacing is provided (see Notes section).
    spacing : (3,) array-like of floats, optional
        The voxel spacing along each image dimension. By default, `slic`
        assumes uniform spacing (same voxel resolution along z, y and x).
        This parameter controls the weights of the distances along z, y,
        and x during k-means clustering.
    multichannel : bool, optional
        Whether the last axis of the image is to be interpreted as multiple
        channels or another spatial dimension.
    convert2lab : bool, optional
        Whether the input should be converted to Lab colorspace prior to
        segmentation. The input image *must* be RGB. Highly recommended.
        This option defaults to ``True`` when ``multichannel=True`` *and*
        ``image.shape[-1] == 3``.
    enforce_connectivity : bool, optional
        Whether the generated segments are connected or not
    min_size_factor : float, optional
        Proportion of the minimum segment size to be removed with respect
        to the supposed segment size ```depth*width*height/n_segments```
    max_size_factor : float, optional
        Proportion of the maximum connected segment size. A value of 3 works
        in most of the cases.
    slic_zero : bool, optional
        Run SLIC-zero, the zero-parameter mode of SLIC. [2]_
    start_label: int, optional
        The labels' index start. Should be 0 or 1.
    mask : 2D ndarray, optional
        If provided, superpixels are computed only where mask is True,
        and seed points are homogeneously distributed over the mask
        using a K-means clustering strategy.

    Returns
    -------
    labels : 2D or 3D array
        Integer mask indicating segment labels.

    Raises
    ------
    ValueError
        If ``convert2lab`` is set to ``True`` but the last array
        dimension is not of length 3.
    ValueError
        If ``start_label`` is not 0 or 1.

    Notes
    -----
    * If `sigma > 0`, the image is smoothed using a Gaussian kernel prior to
      segmentation.

    * If `sigma` is scalar and `spacing` is provided, the kernel width is
      divided along each dimension by the spacing. For example, if ``sigma=1``
      and ``spacing=[5, 1, 1]``, the effective `sigma` is ``[0.2, 1, 1]``. This
      ensures sensible smoothing for anisotropic images.

    * The image is rescaled to be in [0, 1] prior to processing.

    * Images of shape (M, N, 3) are interpreted as 2D RGB images by default. To
      interpret them as 3D with the last dimension having length 3, use
      `multichannel=False`.

    * `start_label` is introduced to handle the issue [4]_. The labels
      indexing starting at 0 will be deprecated in future versions. If
      `mask` is not `None` labels indexing starts at 1 and masked area
      is set to 0.

    References
    ----------
    .. [1] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi,
        Pascal Fua, and Sabine Süsstrunk, SLIC Superpixels Compared to
        State-of-the-art Superpixel Methods, TPAMI, May 2012.
        :DOI:`10.1109/TPAMI.2012.120`
    .. [2] https://www.epfl.ch/labs/ivrl/research/slic-superpixels/#SLICO
    .. [3] Irving, Benjamin. "maskSLIC: regional superpixel generation with
           application to local pathology characterisation in medical images.",
           2016, :arXiv:`1606.09518`
    .. [4] https://github.com/scikit-image/scikit-image/issues/3722

    Examples
    --------
    >>> from skimage.segmentation import slic
    >>> from skimage.data import astronaut
    >>> img = astronaut()
    >>> segments = slic(img, n_segments=100, compactness=10)

    Increasing the compactness parameter yields more square regions:

    >>> segments = slic(img, n_segments=100, compactness=20)

    """

    image = img_as_float(image)
    use_mask = mask is not None
    dtype = image.dtype

    is_2d = False

    if image.ndim == 2:
        # 2D grayscale image
        image = image[np.newaxis, ..., np.newaxis]
        is_2d = True
    elif image.ndim == 3 and multichannel:
        # Make 2D multichannel image 3D with depth = 1
        image = image[np.newaxis, ...]
        is_2d = True
    elif image.ndim == 3 and not multichannel:
        # Add channel as single last dimension
        image = image[..., np.newaxis]

    if multichannel and (convert2lab or convert2lab is None):
        if image.shape[-1] != 3 and convert2lab:
            raise ValueError("Lab colorspace conversion requires a RGB image.")
        elif image.shape[-1] == 3:
            image = rgb2lab(image)

    if start_label is None:
        if use_mask:
            start_label = 1
        else:
            warnings.warn("skimage.measure.label's indexing starts from 0. " +
                          "In future version it will start from 1. " +
                          "To disable this warning, explicitely " +
                          "set the `start_label` parameter to 1.",
                          FutureWarning, stacklevel=2)
            start_label = 0

    if start_label not in [0, 1]:
        raise ValueError("start_label should be 0 or 1.")

    # initialize cluster centroids for desired number of segments
    update_centroids = False
    if use_mask:
        mask = np.ascontiguousarray(mask, dtype=bool).view('uint8')
        if mask.ndim == 2:
            mask = np.ascontiguousarray(mask[np.newaxis, ...])
        if mask.shape != image.shape[:3]:
            raise ValueError("image and mask should have the same shape.")
        centroids, steps = _get_mask_centroids(mask, n_segments)
        update_centroids = True
    else:
        centroids, steps = _get_grid_centroids(image, n_segments)

    if spacing is None:
        spacing = np.ones(3, dtype=dtype)
    elif isinstance(spacing, (list, tuple)):
        spacing = np.ascontiguousarray(spacing, dtype=dtype)

    if not isinstance(sigma, coll.Iterable):
        sigma = np.array([sigma, sigma, sigma], dtype=dtype)
        sigma /= spacing.astype(dtype)
    elif isinstance(sigma, (list, tuple)):
        sigma = np.array(sigma, dtype=dtype)
    if (sigma > 0).any():
        # add zero smoothing for multichannel dimension
        sigma = list(sigma) + [0]
        image = ndi.gaussian_filter(image, sigma)

    n_centroids = centroids.shape[0]
    segments = np.ascontiguousarray(np.concatenate(
        [centroids, np.zeros((n_centroids, image.shape[3]))],
        axis=-1))

    # Scaling of ratio in the same way as in the SLIC paper so the
    # values have the same meaning
    step = max(steps)
    ratio = 1.0 / compactness

    image = np.ascontiguousarray(image * ratio, dtype=np.double)

    if update_centroids:
        # Step 2 of the algorithm [3]_
        _slic_cython(image, mask, segments, step, max_iter, spacing,
                     slic_zero, ignore_color=True,
                     start_label=start_label)

    labels = _slic_cython(image, mask, segments, step, max_iter,
                          spacing, slic_zero, ignore_color=False,
                          start_label=start_label)

    if enforce_connectivity:
        if use_mask:
            segment_size = mask.sum() / n_centroids
        else:
            segment_size = np.prod(image.shape[:3]) / n_centroids
        min_size = int(min_size_factor * segment_size)
        max_size = int(max_size_factor * segment_size)
        labels = _enforce_label_connectivity_cython(
            labels, min_size, max_size, start_label=start_label)

    if is_2d:
        labels = labels[0]

    return labels
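The mask handling above implements the maskSLIC variant from reference [3]. A minimal calling sketch restricted to a circular region (an illustration assuming a recent scikit-image, not part of the original docstring) could look like:

import numpy as np
from skimage.segmentation import slic
from skimage.data import astronaut

img = astronaut()
rows, cols = img.shape[:2]
yy, xx = np.mgrid[:rows, :cols]
# circular region of interest; only pixels inside the mask are segmented
mask = (xx - cols / 2) ** 2 + (yy - rows / 2) ** 2 < (min(rows, cols) / 3) ** 2

# with a mask, start_label defaults to 1 and label 0 marks the excluded pixels
segments = slic(img, n_segments=100, compactness=10, mask=mask, start_label=1)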
Example #42
def get_low_pass(self, img):
    img = img.astype(np.float32)
    return ndimage.gaussian_filter(img, sigma=5)
Example #43
def generate_shepplogan_phantom(img_size: int,
                                label: int = 0,
                                smoothing: bool = True) -> np.ndarray:
    """
    Generate 2D Shepp-Logan phantom with random regions size. Phantoms also
    simulate different kind of AD by generating smaller ROIs.

    Args:
        img_size: Size of the generated image (img_size x img_size).
        label: Take 0 or 1 or 2. Label of the generated image.
            If 0, the ROIs simulate a CN subject.
            If 1, the ROIs simulate type 1 of AD.
            if 2, the ROIs simulate type 2 of AD.
        smoothing: Default True. Apply Gaussian smoothing to the image.
    Returns:
        img: 2D Sheep Logan phantom with specified label.
    """
    img = np.zeros((img_size, img_size))
    center = (img_size + 1.0) / 2.0
    a = center - 2
    b = center * 2 / 3 - 2

    color = random.uniform(0.4, 0.6)

    if label == 0:
        roi1, roi2 = "large", "large"
    elif label == 1:
        roi1, roi2 = "large", "small"
    elif label == 2:
        roi1, roi2 = "small", "large"
    else:
        raise NotImplementedError(f"Subtype {label} was not implemented.")

    # Skull
    rr, cc = ellipse(center, center, a, b, (img_size, img_size))
    img[rr, cc] = 1

    # Brain
    offset = random.uniform(1, img_size / 32)
    rr, cc = ellipse(center + offset / 2, center, a - offset, b - offset,
                     (img_size, img_size))
    img[rr, cc] = 0.2

    # Central
    offset1 = random.uniform(1, img_size / 32)
    offset2 = random.uniform(1, img_size / 32)
    scale1, scale2 = generate_scales("large")
    phi = random.uniform(-np.pi, np.pi)
    rr, cc = ellipse(
        center + offset1,
        center + offset2,
        b / 6 * scale1,
        b / 6 * scale2,
        (img_size, img_size),
        rotation=phi,
    )
    img[rr, cc] = color

    # ROI 1
    offset1 = random.uniform(1, img_size / 32)
    offset2 = random.uniform(1, img_size / 32)
    scale1, scale2 = generate_scales(roi1)
    phi = random.uniform(-np.pi, np.pi)
    rr, cc = ellipse(
        center * 0.6 + offset1,
        center + offset2,
        b / 3 * scale1,
        b / 4 * scale2,
        (img_size, img_size),
        rotation=phi,
    )
    img[rr, cc] = color

    # ROI 2
    offset1 = random.uniform(1, img_size / 32)
    offset2 = random.uniform(1, img_size / 32)
    scale1, scale2 = generate_scales(roi2)
    phi = random.uniform(-np.pi, np.pi)
    rr, cc = ellipse(
        center * 1.5 + offset1,
        center + offset2,
        b / 10 * scale1,
        b / 10 * scale2,
        (img_size, img_size),
        rotation=phi,
    )
    img[rr, cc] = color

    offset1 = random.uniform(1, img_size / 32)
    offset2 = random.uniform(1, img_size / 32)
    scale1, scale2 = generate_scales(roi2)
    phi = random.uniform(-np.pi, np.pi)
    rr, cc = ellipse(
        center * 1.5 + offset1,
        center * 1.1 + offset2,
        b / 10 * scale1,
        b / 10 * scale2,
        (img_size, img_size),
        rotation=phi,
    )
    img[rr, cc] = color

    offset1 = random.uniform(1, img_size / 32)
    offset2 = random.uniform(1, img_size / 32)
    scale1, scale2 = generate_scales(roi2)
    phi = random.uniform(-np.pi, np.pi)
    rr, cc = ellipse(
        center * 1.5 + offset1,
        center * 0.9 + offset2,
        b / 10 * scale1,
        b / 10 * scale2,
        (img_size, img_size),
        rotation=phi,
    )
    img[rr, cc] = color

    # Ventricle 1
    a_roi = a * random.uniform(0.8, 1.2)
    phi = np.random.uniform(-np.pi / 16, np.pi / 16)
    rr, cc = ellipse(
        center,
        center * 0.75,
        a_roi / 3,
        a_roi / 6,
        (img_size, img_size),
        rotation=np.pi / 8 + phi,
    )
    img[rr, cc] = 0.0

    # Ventricle 2
    a_roi = a * random.uniform(0.8, 1.2)
    phi = np.random.uniform(-np.pi / 16, np.pi / 16)
    rr, cc = ellipse(
        center,
        center * 1.25,
        a_roi / 3,
        a_roi / 6,
        (img_size, img_size),
        rotation=-np.pi / 8 + phi,
    )
    img[rr, cc] = 0.0

    # Random smoothing
    if smoothing:
        sigma = random.uniform(0, 1)
        img = gaussian_filter(img,
                              sigma * img_size / 100.0)  # smoothing of data

    img = img.clip(0, 1)  # clip() returns a copy, so assign it back

    return img
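A possible calling sketch for the phantom generator (assuming generate_scales and ellipse are importable from the same module as the function above):

import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, 3, figsize=(9, 3))
for label, ax in zip((0, 1, 2), axes):
    # label 0 = CN-like phantom, 1 and 2 = the two simulated AD subtypes
    phantom = generate_shepplogan_phantom(img_size=128, label=label)
    ax.imshow(phantom, cmap='gray', vmin=0, vmax=1)
    ax.set_title(f'label={label}')
    ax.axis('off')
plt.show()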
Example #44
from PIL import Image, ImageFilter
from scipy import misc, ndimage
from skimage import data, filters
import numpy as np

from IPython.display import display
import matplotlib.pyplot as plt
%matplotlib inline

try:
    my_image = Image.open('img\\monarch_testimage.png')
except Exception as err:
    print(f'Error: Could not load test image! ({err})')
else:
    # PIL and NumPy
    print(f'Loaded image (PIL):\n'
          f'Format: {my_image.format}\n'
          f'Size: {my_image.size}\n'
          f'Mode: {my_image.mode}')
    my_image_np = np.asarray(my_image)
    print(f'Image as NumPy array:\n'
          f'Shape: {my_image_np.shape}\n'
          f'Mean: {my_image_np.mean()}\n'
          f'StdDev: {my_image_np.std()}\n')
    display(my_image.filter(ImageFilter.FIND_EDGES))
    # scikit-image
    moon_image = data.moon()
    fig, ax = filters.try_all_threshold(moon_image, figsize=(12, 10), verbose=False)
    plt.show()
    # SciPy
    racoon_face = misc.face()
    blurred_racoon_face = ndimage.gaussian_filter(racoon_face, sigma=6)
    plt.imshow(blurred_racoon_face)
Example #45
from mplsoccer.pitch import Pitch
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt

# barcamoves (a DataFrame of Barcelona's open-play moves with xT values)
# is assumed to be defined earlier in the original script
pitch = Pitch(pitch_type='uefa',
              figsize=(6.8, 10.5),
              line_zorder=2,
              line_color='white',
              orientation='vertical')
# draw
fig, ax = pitch.draw()
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
cmaplist = ['#082630', '#0682fe', "#eff3ff"]
cmap = LinearSegmentedColormap.from_list("", cmaplist)
bin_statistic = pitch.bin_statistic(barcamoves.x,
                                    barcamoves.y,
                                    values=barcamoves.xT_value,
                                    statistic='sum',
                                    bins=(38, 25))
bin_statistic['statistic'] = gaussian_filter(bin_statistic['statistic'], 1)
vm = bin_statistic['statistic'].min()
vma = bin_statistic['statistic'].max()
pitch.heatmap(bin_statistic,
              ax=ax,
              cmap='inferno',
              edgecolors=None,
              vmin=vm,
              vmax=vma)
ax.set_title('Barcelona' + '\n' + 'Open-play Threat-generation hotspots',
             fontsize=25)
fig.set_facecolor('white')
plt.savefig('xt.png', dpi=600)


def binnings(df):
Example #46
def smooth(v, sigma):
    assert sigma > 0
    return SN.gaussian_filter(input=v, sigma=sigma)
Example #47
def _gaussian(edge, nn):
    return ndimage.gaussian_filter(edge, sigma=nn * 2 + 1)
Example #48
import os
import numpy as np
import matplotlib.pyplot as plt
from pymatgen.io.vasp.outputs import Locpot

os.chdir(
    '/home/jinho93/new/oxides/perobskite/lanthanum-aluminate/periodic_step/vasp/my015/from-gulp/lvhar'
)
# the second chdir overrides the first and selects the directory actually used
os.chdir(
    '/home/jinho93/new/oxides/perobskite/lanthanum-aluminate/periodic_step/vasp/stengel/015/4/lvhar'
)
loc = Locpot.from_file('LOCPOT')

#%%
dat = np.sum(loc.data['total'], axis=0) / loc.dim[0]
dat = np.roll(dat, int(loc.dim[1] * 0.8) // 2, axis=0)
dat = np.roll(dat, -10, axis=1)

# %%
import scipy.ndimage as nd
fft = np.fft.fft2(dat)

gaus = nd.gaussian_filter(fft.imag, sigma=10)
plt.imshow(gaus)

#%%
criteria = 1e-4
# zero out the Fourier components whose real/imaginary magnitude exceeds
# criteria * max, keeping only the weakest components before inverting
fft2 = np.where(np.abs(fft.real) > np.max(fft.real) * criteria, 0, fft)
fft2 = np.where(np.abs(fft.imag) > np.max(fft.imag) * criteria, 0, fft2)
# print(np.where(np.abs(fft.imag) > np.max(fft.imag) * 0.7))
ifft = np.fft.ifft2(fft2)

plt.imshow(ifft.real)

# %%
Example #49
def smooth_3d(
    X,
    quantity_sum=[False],
    quantity_average=[False],
    res=1.,
    upper_threshold=False,
    extent=False,
    lower_threshold=False,
    njobs=8,
    nsteps=250,
    k=5,
    n_resample=500,
    projection='xy',
    verbose=True,
    antialias=True,
):
    '''
    purpose:
        adaptively smooths sparsely sampled particles according to their local
        density, similar to the approach described in Merritt+2020, Section 3.1
        (https://ui.adsabs.harvard.edu/abs/2020MNRAS.495.4570M/abstract)
    inputs:
        X:                particle coordinates: array shape=(3, Nparticles)
        quantity_sum:     particle quantities to be summed: array
                          shape=(Nquantities, Nparticles); ignored if [False]
        quantity_average: particle quantities to be averaged: array
                          shape=(Nquantities, Nparticles); ignored if [False]
        res:              desired size of the resolution unit in the same units as X
        extent:           desired size of the image in the same units as X
        upper_threshold:  number density of particles above which they will no
                          longer be smoothed
        lower_threshold:  number density of particles below which they will no
                          longer be smoothed
        njobs:            number of workers to assign
        nsteps:           total number of density bins to smooth over; keep this
                          fairly large or the result will lose accuracy
        k:                number of neighbours used for the k-nearest-neighbour
                          density estimate
        n_resample:       number of sub-particles to split each particle into
                          for smoothing
        projection:       axis of projection used to produce the images
        verbose:          print progress messages
        antialias:        whether the final image is antialiased
    outputs:
        img:              smoothed image for each summed quantity:
                          shape = (Nquantities, Npixels, Npixels)
        average_img:      smoothed image for each averaged quantity:
                          shape = (Nquantities, Npixels, Npixels)
    '''

    assert quantity_sum[0] is not False or quantity_average[0] \
        is not False, \
        'both quantity_sum and quantity_average cannot be false'

    from multiprocessing import Pool, sharedctypes
    import tqdm
    import numpy as np
    from fast_histogram import histogram2d
    from scipy.ndimage import gaussian_filter

    (x, y) = project(X, projection=projection)
    if extent is False:
        extent = img_extent(x, y)

    hsize = int(extent * 2. / res)
    if verbose:
        print('      calculating local density of each particle...')
    (rho, dist) = nearest_neighbour_density(X.T, k=k, njobs=njobs)
    d_element = rho / res**3
    if (upper_threshold is not False) & (lower_threshold is not False):
        pick = (d_element < upper_threshold) & (d_element > lower_threshold)
    if (upper_threshold is not False) & (lower_threshold is False):
        pick = d_element < upper_threshold
    if (upper_threshold is False) & (lower_threshold is not False):
        pick = d_element > lower_threshold
    if (upper_threshold is False) & (lower_threshold is False):
        pick = d_element > 0
    sort = np.argsort(rho[pick])
    sample_points = np.asarray([
        np.random.normal(scale=np.median(s), size=n_resample * 10)
        for s in np.array_split(dist[pick][sort], nsteps)
    ])
    Ds = np.asarray(
        [np.median(s) for s in np.array_split(dist[pick][sort], nsteps)])
    Xs = np.array_split(np.vstack([x[pick], y[pick]])[:, sort],
                        nsteps,
                        axis=-1)
    if quantity_sum[0] is not False:
        Qs = np.array_split(quantity_sum[:, pick][:, sort], nsteps, axis=-1)
    else:
        Qs = [False] * nsteps
    if quantity_average[0] is not False:
        Qs2 = np.array_split(quantity_average[:, pick][:, sort],
                             nsteps,
                             axis=-1)
    else:
        Qs2 = [False] * nsteps
    args = []
    for (Xi, Qi, Qi2, Si) in zip(Xs, Qs, Qs2, Ds):
        args.append([
            Xi,
            Qi,
            Qi2,
            Si,
            extent,
            res,
            n_resample,
        ])

    if verbose:
        print('      smoothing particle distribution...')
    if quantity_sum[0] is not False:
        global shared_h
        result = \
            np.ctypeslib.as_ctypes(np.zeros((quantity_sum.shape[0],
                                   hsize - 1, hsize - 1)))
        shared_h = sharedctypes.RawArray(result._type_, result)

    if quantity_average[0] is not False:
        global shared_h_2
        result_2 = \
            np.ctypeslib.as_ctypes(np.zeros((quantity_average.shape[0],
                                   hsize - 1, hsize - 1)))
        shared_h_2 = sharedctypes.RawArray(result_2._type_, result_2)
        global shared_h_3
        result_3 = \
            np.ctypeslib.as_ctypes(np.zeros((quantity_average.shape[0],
                                   hsize - 1, hsize - 1)))
        shared_h_3 = sharedctypes.RawArray(result_3._type_, result_3)

    pool = Pool(processes=njobs)
    with pool as p:
        if verbose:
            job = \
                np.asarray(list(tqdm.tqdm(p.imap_unordered(resample_particles,
                           args), total=len(args))))
        else:
            job = np.asarray(list(p.imap_unordered(resample_particles, args)))

    # now put the images together

    sigma = (1. if antialias else 0.)
    if quantity_sum[0] is not False:
        h = np.ctypeslib.as_array(shared_h)
        img = []
        for i in range(Qs[0].shape[0]):
            h_ld = h[i]
            if np.count_nonzero(~pick) > 0:
                h_hd = histogram2d(x[~pick],
                                   y[~pick],
                                   weights=quantity_sum[i][~pick],
                                   bins=int(extent * 2. / res) - 1,
                                   range=[[-extent, extent], [-extent,
                                                              extent]])
                h_ld[~np.isfinite(h_ld)] = 0.
                h_hd[~np.isfinite(h_hd)] = 0.
                img.append(gaussian_filter(h_ld + h_hd, sigma))
            else:
                h_ld[~np.isfinite(h_ld)] = 0.
                img.append(gaussian_filter(h_ld, sigma))
    else:
        img = np.nan

    if quantity_average[0] is not False:
        h2 = np.ctypeslib.as_array(shared_h_2)
        h3 = np.ctypeslib.as_array(shared_h_3)

        average_img = []
        for i in range(Qs2[0].shape[0]):
            h_ld = h2[i]
            if np.count_nonzero(~pick) > 0:
                h_hd = histogram2d(x[~pick],
                                   y[~pick],
                                   weights=quantity_average[i][~pick],
                                   bins=int(extent * 2. / res) - 1,
                                   range=[[-extent, extent], [-extent,
                                                              extent]])
                n_hd = histogram2d(x[~pick],
                                   y[~pick],
                                   bins=int(extent * 2. / res) - 1,
                                   range=[[-extent, extent], [-extent,
                                                              extent]])

                h_ld[~np.isfinite(h_ld)] = 0.
                h_hd[~np.isfinite(h_hd)] = 0.

                average_img.append(
                    gaussian_filter((h_ld + h_hd) / (h3[i] + n_hd), sigma))
            else:
                h_ld[~np.isfinite(h_ld)] = 0.
                average_img.append(gaussian_filter(h_ld / h3[i], sigma))
    else:
        average_img = np.nan

    return (img, average_img)
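A hypothetical calling sketch for smooth_3d (the helpers project, nearest_neighbour_density and resample_particles are assumed to come from the same module, and the parameter values are illustrative only):

import numpy as np

X = np.random.normal(scale=5.0, size=(3, 10000))       # particle positions
mass = np.ones((1, 10000))                              # quantity to sum
temp = np.random.uniform(1e4, 1e6, size=(1, 10000))     # quantity to average

img, average_img = smooth_3d(X,
                             quantity_sum=mass,
                             quantity_average=temp,
                             res=0.5,
                             extent=20.,
                             projection='xy',
                             njobs=4)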
def fill_error_gaps():

    res = pd.read_csv('/work/GLEAM/errors/result.csv', index_col=0)

    gpis_valid = get_valid_gpis(latmin=24., latmax=51., lonmin=-128., lonmax=-64.)
    ind_valid = np.unravel_index(gpis_valid, (720,1440))

    res_gapfilled = pd.DataFrame(index=gpis_valid)

    r_min = 0.0

    ind = (res['R_GLEAM_ASCAT'] <= r_min) | \
          (res['R_GLEAM_AMSR2'] <= r_min) | \
          (res['R_ASCAT_AMSR2'] <= r_min)

    res.loc[ind, ['TC1_R2_GLEAM', 'TC1_R2_ASCAT', 'TC1_R2_AMSR2']] = np.nan
    res.loc[ind, ['TC1_RMSE_GLEAM', 'TC1_RMSE_ASCAT', 'TC1_RMSE_AMSR2']] = np.nan

    # ind = (res['p_GLEAM_ASCAT'] >= 0.05) | \
    #       (res['p_GLEAM_AMSR2'] >= 0.05) | \
    #       (res['p_ASCAT_AMSR2'] >= 0.05)
    #
    # res.loc[ind, ['TC1_R2_GLEAM', 'TC1_R2_ASCAT', 'TC1_R2_AMSR2']] = np.nan
    # res.loc[ind, ['TC1_RMSE_GLEAM', 'TC1_RMSE_ASCAT', 'TC1_RMSE_AMSR2']] = np.nan

    ind = (res['R_GLEAM_ASCAT'] <= r_min) | \
          (res['R_GLEAM_SMAP'] <= r_min) | \
          (res['R_ASCAT_SMAP'] <= r_min)

    res.loc[ind, ['TC2_R2_GLEAM', 'TC2_R2_ASCAT', 'TC2_R2_SMAP']] = np.nan
    res.loc[ind, ['TC2_RMSE_GLEAM', 'TC2_RMSE_ASCAT', 'TC2_RMSE_SMAP']] = np.nan

    # ind = (res['p_GLEAM_ASCAT'] >= 0.05) | \
    #       (res['p_GLEAM_SMAP'] >= 0.05) | \
    #       (res['p_ASCAT_SMAP'] >= 0.05)
    #
    # res.loc[ind, ['TC2_R2_GLEAM', 'TC2_R2_ASCAT', 'TC2_R2_SMAP']] = np.nan
    # res.loc[ind, ['TC2_RMSE_GLEAM', 'TC2_RMSE_ASCAT', 'TC2_RMSE_SMAP']] = np.nan

    # ---------------------

    # tags = ['TC1_R2_GLEAM',]
    tags = ['TC1_R2_GLEAM', 'TC1_R2_ASCAT', 'TC1_R2_AMSR2',
            'TC2_R2_GLEAM', 'TC2_R2_ASCAT', 'TC2_R2_SMAP',
            'TC1_RMSE_GLEAM', 'TC1_RMSE_ASCAT', 'TC1_RMSE_AMSR2',
            'TC2_RMSE_GLEAM', 'TC2_RMSE_ASCAT', 'TC2_RMSE_SMAP']

    imp = IterativeImputer(max_iter=10, random_state=0)
    ind = np.unravel_index(res.index.values, (720,1440))
    for tag in tags:
        img = np.full((720,1440), np.nan)
        img[ind] = res[tag]

        # find all non-NaN values
        idx = np.where(~np.isnan(img))
        vmin, vmax = np.percentile(img[idx], [2.5, 97.5])
        img[img<vmin] = vmin
        img[img>vmax] = vmax

        # calculate fitting parameters
        imp.set_params(min_value=vmin, max_value=vmax)
        imp.fit(img)

        # Define an anchor pixel to infer fitted image dimensions
        tmp_img = img.copy()
        tmp_img[idx[0][100],idx[1][100]] = 1000000

        # transform image with and without anchor pixel
        tmp_img_fitted = imp.transform(tmp_img)
        img_fitted = imp.transform(img)


        # Get the column index of the anchor pixel in the fitted image
        idx_anchor = np.where(tmp_img_fitted == 1000000)[1][0]
        start = idx[1][100] - idx_anchor
        end = start + img_fitted.shape[1]

        # write output
        img[:,start:end] = img_fitted
        img = gaussian_filter(img, sigma=0.7, truncate=1)

        res_gapfilled.loc[:, tag] = img[ind_valid]

        # np.save('/work/GLEAM/test', img)

        print(tag, 'finished.')

    res_gapfilled.to_csv('/work/GLEAM/errors/result_gapfilled_sig07.csv')
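The gap filling above leans on scikit-learn's IterativeImputer, treating each image column as a feature. A minimal sketch of the same idea on synthetic data (not the GLEAM pipeline itself) could be:

import numpy as np
from scipy.ndimage import gaussian_filter
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

rng = np.random.default_rng(0)
field = gaussian_filter(rng.normal(size=(60, 80)), sigma=5)    # smooth synthetic field
img = field.copy()
img[rng.random(img.shape) < 0.3] = np.nan                      # knock out 30% of the pixels

imp = IterativeImputer(max_iter=10, random_state=0)
filled = imp.fit_transform(img)                 # rows are samples, columns are features
filled = gaussian_filter(filled, sigma=0.7, truncate=1)        # light smoothing, as above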
Example #51
imgtl.DisplayImage(IMG)
plt.title('raw data::' + FilenameBeam, fontsize=FTsize)
plt.axis('off')
print('size raw:', np.shape(IMG))
# crop image
#    plt.figure()
plt.subplot(2, 2, 2)
IMGc = imgtl.RemoveEdge(IMG, 100)
print('size removed:', np.shape(IMGc))
imgtl.DisplayCalibratedProj(IMGc, cal, fudge)
plt.title('cropped' + FilenameBeam, fontsize=FTsize)
plt.axis('off')
# threshold image
#    plt.figure()
plt.subplot(2, 2, 3)
IMGt = ndimage.gaussian_filter(IMGc, 0)
# imgtl.Threshold(IMGc, threshold)
imgtl.DisplayCalibratedProj(IMGt, cal, fudge)
plt.title('cropped::' + FilenameBeam, fontsize=FTsize)
plt.axis('off')
# compute profiles
histx, histy, x, y = imgtl.GetImageProjection(IMGt, cal)
#    plt.figure()
plt.subplot(2, 2, 4)
x = x - x[np.argmax(histx)]
y = y - y[np.argmax(histy)]
plt.plot(x, histx, '-')
plt.plot(y, histy, '-')
plt.xlabel('distance')
plt.ylabel('population')
plt.title('profiles::' + FilenameBeam, fontsize=FTsize)
Example #52
def distortions(lensModel,
                kwargs_lens,
                num_pix=100,
                delta_pix=0.05,
                center_ra=0,
                center_dec=0,
                differential_scale=0.0001,
                smoothing_scale=None,
                **kwargs):
    """

    :param lensModel: LensModel instance
    :param kwargs_lens: lens model keyword argument list
    :param num_pix: number of pixels per axis
    :param delta_pix: pixel scale per axis
    :param center_ra: center of the grid
    :param center_dec: center of the grid
    :param differential_scale: scale of the finite derivative length in units of angles
    :param smoothing_scale: float or None, standard deviation (in the same angular units as delta_pix) of a Gaussian smoothing kernel applied to the maps before plotting
    :return: matplotlib figure and axes with the different panels
    """
    kwargs_grid = sim_util.data_configure_simple(num_pix,
                                                 delta_pix,
                                                 center_ra=center_ra,
                                                 center_dec=center_dec)
    _coords = ImageData(**kwargs_grid)
    _frame_size = num_pix * delta_pix
    ra_grid, dec_grid = _coords.pixel_coordinates

    extensions = LensModelExtensions(lensModel=lensModel)
    ra_grid1d = util.image2array(ra_grid)
    dec_grid1d = util.image2array(dec_grid)
    lambda_rad, lambda_tan, orientation_angle, dlambda_tan_dtan, dlambda_tan_drad, dlambda_rad_drad, dlambda_rad_dtan, dphi_tan_dtan, dphi_tan_drad, dphi_rad_drad, dphi_rad_dtan = extensions.radial_tangential_differentials(
        ra_grid1d,
        dec_grid1d,
        kwargs_lens=kwargs_lens,
        center_x=center_ra,
        center_y=center_dec,
        smoothing_3rd=differential_scale,
        smoothing_2nd=None)

    lambda_rad2d, lambda_tan2d, orientation_angle2d, dlambda_tan_dtan2d, dlambda_tan_drad2d, dlambda_rad_drad2d, dlambda_rad_dtan2d, dphi_tan_dtan2d, dphi_tan_drad2d, dphi_rad_drad2d, dphi_rad_dtan2d = util.array2image(lambda_rad), \
                                            util.array2image(lambda_tan), util.array2image(orientation_angle), util.array2image(dlambda_tan_dtan), util.array2image(dlambda_tan_drad), util.array2image(dlambda_rad_drad), util.array2image(dlambda_rad_dtan), \
                                            util.array2image(dphi_tan_dtan), util.array2image(dphi_tan_drad), util.array2image(dphi_rad_drad), util.array2image(dphi_rad_dtan)

    if smoothing_scale is not None:
        lambda_rad2d = ndimage.gaussian_filter(lambda_rad2d,
                                               sigma=smoothing_scale /
                                               delta_pix)
        dlambda_rad_drad2d = ndimage.gaussian_filter(dlambda_rad_drad2d,
                                                     sigma=smoothing_scale /
                                                     delta_pix)
        lambda_tan2d = np.abs(lambda_tan2d)
        # the magnification cut is made to make a stable integral/convolution
        lambda_tan2d[lambda_tan2d > 100] = 100
        lambda_tan2d = ndimage.gaussian_filter(lambda_tan2d,
                                               sigma=smoothing_scale /
                                               delta_pix)
        # the magnification cut is made to make a stable integral/convolution
        dlambda_tan_dtan2d[dlambda_tan_dtan2d > 100] = 100
        dlambda_tan_dtan2d[dlambda_tan_dtan2d < -100] = -100
        dlambda_tan_dtan2d = ndimage.gaussian_filter(dlambda_tan_dtan2d,
                                                     sigma=smoothing_scale /
                                                     delta_pix)
        orientation_angle2d = ndimage.gaussian_filter(orientation_angle2d,
                                                      sigma=smoothing_scale /
                                                      delta_pix)
        dphi_tan_dtan2d = ndimage.gaussian_filter(dphi_tan_dtan2d,
                                                  sigma=smoothing_scale /
                                                  delta_pix)

    def _plot_frame(ax, map, vmin, vmax, text_string):
        """

        :param ax: matplotlib.axis instance
        :param map: 2d array
        :param vmin: minimum plotting scale
        :param vmax: maximum plotting scale
        :param text_string: string to describe the label
        :return:
        """
        font_size = 10
        _arrow_size = 0.02
        im = ax.matshow(map,
                        extent=[0, _frame_size, 0, _frame_size],
                        vmin=vmin,
                        vmax=vmax)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.autoscale(False)
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cb = plt.colorbar(im, cax=cax, orientation='vertical')
        #cb.set_label(text_string, fontsize=10)
        #plot_util.scale_bar(ax, _frame_size, dist=1, text='1"', font_size=font_size)
        plot_util.text_description(ax,
                                   _frame_size,
                                   text=text_string,
                                   color="k",
                                   backgroundcolor='w',
                                   font_size=font_size)
        #if 'no_arrow' not in kwargs or not kwargs['no_arrow']:
        #    plot_util.coordinate_arrows(ax, _frame_size, _coords,
        #                                color='w', arrow_size=_arrow_size,
        #                                font_size=font_size)

    f, axes = plt.subplots(3, 4, figsize=(12, 8))
    _plot_frame(axes[0, 0],
                lambda_rad2d,
                vmin=0.6,
                vmax=1.4,
                text_string=r"$\lambda_{rad}$")
    _plot_frame(axes[0, 1],
                lambda_tan2d,
                vmin=-20,
                vmax=20,
                text_string=r"$\lambda_{tan}$")
    _plot_frame(axes[0, 2],
                orientation_angle2d,
                vmin=-np.pi / 10,
                vmax=np.pi / 10,
                text_string=r"$\phi$")
    _plot_frame(axes[0, 3],
                util.array2image(lambda_tan * lambda_rad),
                vmin=-20,
                vmax=20,
                text_string='magnification')
    _plot_frame(axes[1, 0],
                dlambda_rad_drad2d / lambda_rad2d,
                vmin=-.1,
                vmax=.1,
                text_string='dlambda_rad_drad')
    _plot_frame(axes[1, 1],
                dlambda_tan_dtan2d / lambda_tan2d,
                vmin=-20,
                vmax=20,
                text_string='dlambda_tan_dtan')
    _plot_frame(axes[1, 2],
                dlambda_tan_drad2d / lambda_tan2d,
                vmin=-20,
                vmax=20,
                text_string='dlambda_tan_drad')
    _plot_frame(axes[1, 3],
                dlambda_rad_dtan2d / lambda_rad2d,
                vmin=-.1,
                vmax=.1,
                text_string='dlambda_rad_dtan')

    _plot_frame(axes[2, 0],
                dphi_rad_drad2d,
                vmin=-.1,
                vmax=.1,
                text_string='dphi_rad_drad')
    _plot_frame(axes[2, 1],
                dphi_tan_dtan2d,
                vmin=0,
                vmax=20,
                text_string='dphi_tan_dtan: curvature radius')
    _plot_frame(axes[2, 2],
                dphi_tan_drad2d,
                vmin=-.1,
                vmax=.1,
                text_string='dphi_tan_drad')
    _plot_frame(axes[2, 3],
                dphi_rad_dtan2d,
                vmin=0,
                vmax=20,
                text_string='dphi_rad_dtan')

    return f, axes
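A possible calling sketch, assuming lenstronomy is installed and using a simple singular isothermal sphere as the lens model (illustrative values, not taken from the original source):

import matplotlib.pyplot as plt
from lenstronomy.LensModel.lens_model import LensModel

lens_model = LensModel(lens_model_list=['SIS'])
kwargs_lens = [{'theta_E': 1.0, 'center_x': 0.0, 'center_y': 0.0}]

fig, axes = distortions(lens_model, kwargs_lens,
                        num_pix=100, delta_pix=0.05,
                        smoothing_scale=0.1)
plt.show()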
Example #53
def augment_data_with_masks(data, masks, labels, augm_nb_samples, transforms):
    """
    DESCRIPTION:
    -----------
    This function takes all the data and increases the number of samples by
    randomly selecting a transformation to be done.

    Returns:
    -------
    data: TYPE np.ndarray
        Array with images
    labels: TYPE nd.ndarray
        Array with the labels
    """
    # Record the number of samples before augmentation.
    nb_samples_start = data.shape[0]

    # Make two lists for augmented samples and labels
    augmented_data = []
    augmented_masks = []
    augmented_labels = []

    # Determine the possible values for the different transformations
    reflect_options = [-1, 1]
    scale_options = np.arange(1, 2, 0.05)
    rotate_options = np.arange(-np.pi, np.pi, 0.05 * np.pi)
    shear_options = np.arange(-1, 1, 0.05)
    gaussian_options = np.arange(0.5, 5.5, 0.5)

    while len(augmented_data) <= augm_nb_samples:

        # Select image to be transformed:
        index = np.random.randint(nb_samples_start)
        im = data[index, :, :, 0]
        msk = masks[index, :, :, 0]  # take the mask, not the image
        im_label = labels[index]

        # Select random transformation:
        transformation = np.random.choice(transforms)
        if transformation == 'reflect':
            # Reflection:
            rx, ry = 1, 1
            while rx == 1 and ry == 1:
                rx = np.random.choice(reflect_options)
                ry = np.random.choice(reflect_options)
            T = reflect(rx, ry)

        if transformation == 'scale':
            # Scaling:
            sx = np.random.choice(scale_options)
            sy = np.random.choice(scale_options)
            T = scale(sx, sy)

        if transformation == 'rotate':
            # Rotation:
            angle = np.random.choice(rotate_options)
            Th = make_rotation(angle, im.shape)
            im_T = image_transform(im, Th)
            msk_T = image_transform(msk, Th)

        if transformation == 'shear':
            # Shearing:
            cx = np.random.choice(shear_options)
            cy = np.random.choice(shear_options)
            T = shear(cx, cy)

        if transformation == 'gaussblur':
            # Gaussian blur:
            sigma = np.random.choice(gaussian_options)
            im_T = ndimage.gaussian_filter(im, sigma=sigma)
            msk_T = msk  # the mask itself is left unblurred

        if transformation != 'gaussblur' and transformation != 'rotate':
            # Do some steps which are not required in Gaussian blurring
            # Check for singularity. When the matrix is singular, it cannot be
            # inverted or applied to the image and this step is reset.
            det = T[0, 0] * T[1, 1] - T[0, 1] * T[1, 0]
            if det == 0: continue

            # Converts the 2D-transformation matrix to the homogenous form:
            Th = t2h(T)

            # Transform the image using the homogenous transformation matrix.
            im_T = image_transform(im, Th)
            msk_T = image_transform(msk, Th)

        # Add a new axis to be able to append to the data array
        im_T = im_T[..., np.newaxis]
        msk_T = msk_T[..., np.newaxis]

        # Append data and labels to the augmented data lists
        augmented_data.append(im_T)
        augmented_masks.append(msk_T)
        augmented_labels.append(im_label)

    # Convert the lists to arrays
    print('{} samples augmented'.format(len(augmented_data)))
    augmented_data = np.array(augmented_data)
    augmented_masks = np.array(augmented_masks)
    augmented_labels = np.array(augmented_labels)

    # Remove duplicate augmented samples
    augmented_data_unique, i_unique = np.unique(augmented_data,
                                                return_index=True,
                                                axis=0)
    augmented_masks_unique = augmented_masks[i_unique]
    augmented_labels_unique = augmented_labels[i_unique]
    print('{} augmented samples left after removing duplicates'.format(
        augmented_data_unique.shape[0]))

    # Return augmented samples
    return augmented_data_unique, augmented_masks_unique, augmented_labels_unique
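A hypothetical calling sketch with random data (reflect, scale, shear, t2h, make_rotation and image_transform are assumed to come from the same module as the function above):

import numpy as np

data = np.random.rand(50, 64, 64, 1)            # 50 grayscale images
masks = (data > 0.5).astype(np.float32)         # matching binary masks
labels = np.random.randint(0, 2, size=50)

aug_data, aug_masks, aug_labels = augment_data_with_masks(
    data, masks, labels,
    augm_nb_samples=200,
    transforms=['rotate', 'gaussblur'])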
def main(argv):

    if len(argv) == 0:
        print('count_colonies_interactive.py usage:')
        print(
            '    count_colonies_interactive.py [filename] [optional arguments]'
        )
        print('         e.g. count_colonies_interactive.py petridish.tif')
        print('    optional arguments:')
        print('         plot')
        print(
            '             - shows the plot at the end of the analysis (saves regardless)'
        )
        print('         colorspace')
        print(
            '             - change the default colorspace (default is L channel from LAB'
        )
        print(
            '             - if you use colorspace, you must have two additional arguments'
        )
        print(
            '             - the first must be the colorspace, a choice of either RGB, LAB, HSV'
        )
        print(
            '             - the second is the channel to use in that colorspace, a choice of either 0, 1, 2'
        )
        print(
            '             - e.g. count_colonies_interactive.py petridish.tif colorspace RGB 1'
        )
        print(
            '                   - this will use the green channel from the RGB colorspace for the analysis'
        )
        print(
            '             - if you are uncertain about the best colorspace, use the best_color_chooser.py program'
        )
        print('         calc_dists')
        print(
            '             - calculates a number of distance metrics using R and the spatstat package'
        )
        print(
            '             - only do this if you have those installed, and Rscript is visible by your system'
        )

        return

    if not os.path.isfile(argv[0]):
        print('first argument must be path to image file')
        return

    #default arguments
    show_plot = False
    color_space = "LAB"
    color_channel = 0
    calc_dists = False

    if len(argv) > 1:
        for i in range(1, len(argv)):
            if argv[i] == "plot":
                show_plot = True
            if argv[i] == 'colorspace':
                color_space = argv[i + 1]
                color_channel = int(argv[i + 2])
            if argv[i] == 'calc_dists':
                calc_dists = True

    # read file
    #file = "20170601_low_buffer_1.tif"
    #file = '20170605_higlu_E_1.tif'
    file = argv[0]
    cells = io.imread(file)
    im_for_viewing = cells

    cells = skimage.img_as_float(cells)

    # change to chosen color channel
    if color_space == "LAB":
        img = skimage.color.rgb2lab(cells)
    elif color_space == "RGB":
        img = cells
    elif color_space == "HSV":
        img = skimage.color.rgb2hsv(cells)
    else:
        print("colorspace first argument must be LAB, RGB, or HSV, quitting")
        quit()
    #plt.imshow(lab[:,:,1])

    # filter
    img1 = ndi.gaussian_filter(img, 5)

    # normalize to 0-1
    img1 = img1[:, :, color_channel] - np.min(img1[:, :, color_channel])
    img1 /= np.max(img1)

    # click to see if colony is lighter than or darker than background
    pos = get_clicks(
        im_for_viewing,
        caption="click on one colony then on background then exit window",
        window_width=640)

    # average clicked area over 9 pixel square
    foreground = 0
    background = 0
    for i in [-1, 0, 1]:
        for j in [-1, 0, 1]:
            foreground += img1[pos[0][0] + i, pos[0][1] + j]
            background += img1[pos[1][0] + i, pos[1][1] + j]
    foreground /= 9
    background /= 9
    # invert if background is brighter
    if background > foreground:
        img1 = 1 - img1

    # find center and half-width
    center = img1.shape[0] / 2
    radius = img1.shape[0] / 2

    # mask dish
    circle = make_circle_selection(im_for_viewing)
    center = (int(circle[0]), int(circle[1]))
    radius = int(circle[2])

    masked = round_mask(img1, center, radius)

    # apply threshold
    thresh = get_nonzero_otsu(masked)
    threshed = masked > thresh

    #  opened to erase teeny things
    strel = skimage.morphology.disk(7)
    opened = skimage.morphology.binary_opening(threshed, selem=strel)

    #smooth smoothed, then mask with the opened image
    smoothed = ndi.gaussian_filter(img1, 4)
    smoothed[~opened] = 0

    # find the local peaks
    coordinates = skimage.feature.peak_local_max(smoothed, min_distance=5)
    coordinates = [(x[0], x[1]) for x in coordinates]
    # let the user add additional points
    coordinates = add_or_remove_locs_with_clicks(
        im_for_viewing,
        coordinates,
        caption="click to add colonies, left-shift+click to de-select")

    coordinates = np.asarray(coordinates)

    # separate connected colonies
    bw = smoothed > 0
    distance = ndi.distance_transform_edt(bw)
    coordinates_in_im = skimage.feature.peak_local_max(smoothed,
                                                       indices=False,
                                                       min_distance=5)
    markers = skimage.measure.label(coordinates_in_im)
    labels_ws = skimage.morphology.watershed(-distance, markers, mask=bw)

    rp = skimage.measure.regionprops(labels_ws)

    # find the objects that don't have clicks in them
    rp_not_clicked = []
    for curr_rp in rp:
        coords = curr_rp.coords
        any_coordinate_in_object = False
        for i in range(len(coordinates)):
            if coordinates[i][0] in coords[:, 0] and coordinates[i][
                    1] in coords[:, 1]:
                any_coordinate_in_object = True
                break
        rp_not_clicked.append(any_coordinate_in_object)

    # remove those objects from rp
    rp = [rp[i] for i in range(len(rp)) if rp_not_clicked[i]]

    centroids = [rp[i].centroid for i in range(len(rp))]
    eccentricity = [rp[i].eccentricity for i in range(len(rp))]
    x = [c[1] for c in centroids]
    y = [c[0] for c in centroids]
    areas = np.asarray([rp[i].area for i in range(len(rp))])
    colony = [str(c) for c in range(len(x))]
    #plt.scatter(x, y, np.sqrt(areas))

    df = pd.DataFrame({
        'x': x,
        'y': y,
        'area': areas,
        'eccentricity': eccentricity,
        'colony': range(len(x)),
        'petri_x': circle[0],
        'petri_y': circle[1],
        'petri_radius': circle[2]
    })

    # plot

    f, axarr = plt.subplots(1, 2)
    axarr[0].axis("off")
    axarr[0].imshow(im_for_viewing)
    axarr[0].plot(x, y, 'r.', markersize=1)
    for i, txt in enumerate(colony):
        axarr[0].annotate(txt, (x[i] + 9, y[i] - 9), color='#FFFFFF', size=2)
    axarr[1].axis("off")
    axarr[1].imshow(labels_ws)
    axarr[1].plot(x, y, 'r.', markersize=1)
    for i, txt in enumerate(colony):
        axarr[1].annotate(txt, (x[i] + 9, y[i] - 9), color='#FFFFFF', size=2)

    plt.savefig('{}_result_img.png'.format(file), dpi=300, bbox_inches='tight')

    df.to_csv('{}_results.csv'.format(file))
    # use R to calculate Voronoi in a circle (no python package does this!)
    if calc_dists:
        subprocess.call(["Rscript", "./distance_metrics_calc.R", file])

    if show_plot:
        plt.show()
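The colony separation above is the classic distance-transform watershed. A minimal, self-contained sketch of that step using the current scikit-image API (skimage.segmentation.watershed and the coordinate-returning peak_local_max, rather than the older skimage.morphology.watershed / indices=False calls used above) might look like:

import numpy as np
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

# two overlapping disks stand in for touching colonies
yy, xx = np.mgrid[0:100, 0:100]
mask = (((xx - 40) ** 2 + (yy - 50) ** 2) < 15 ** 2) | \
       (((xx - 60) ** 2 + (yy - 50) ** 2) < 15 ** 2)

distance = ndi.distance_transform_edt(mask)
peaks = peak_local_max(distance, min_distance=5)       # (row, col) peak coordinates
markers = np.zeros(mask.shape, dtype=int)
markers[tuple(peaks.T)] = np.arange(1, len(peaks) + 1)
labels_ws = watershed(-distance, markers, mask=mask)   # one label per colony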
Example #55
from skimage import io, util, color, feature
import sys
from scipy import ndimage as nd
import numpy as np

img = util.img_as_float(color.rgb2gray(io.imread(sys.argv[1])))

# Canny edge detector
gaussian_img = nd.gaussian_filter(img, int(sys.argv[2]))
canny = feature.canny(gaussian_img)
# io.imsave(sys.argv[3], util.img_as_uint(canny))

# Prewitt operators
prk_x = np.asarray([[-1, -1, -1], [0, 0, 0], [1, 1, 1]])
prk_y = np.asarray([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
"""
# Sobel
prk_x = np.asarray([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
prk_y = np.asarray([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
"""

grad_x = nd.convolve(gaussian_img, prk_x, mode='nearest')
grad_y = nd.convolve(gaussian_img, prk_y, mode='nearest')

grad_x_y = np.sqrt(grad_x**2 + grad_y**2)

io.imsave('prewitt.png', util.img_as_uint(grad_x_y))
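For comparison, scikit-image ships a Prewitt filter that computes essentially the same gradient magnitude (up to a constant scale factor). A short sketch reusing gaussian_img from above, normalised before saving so img_as_uint accepts it:

from skimage import filters

prewitt_mag = filters.prewitt(gaussian_img)
io.imsave('prewitt_builtin.png', util.img_as_uint(prewitt_mag / prewitt_mag.max()))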
Example #56
def process_image(self, image):
    # subtract a Gaussian-blurred background estimate, then clip negatives
    image = image - gaussian_filter(image, self.nuclear_blur)
    image[image < 0] = 0
    return image
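The method above is a simple background subtraction (subtract a heavily blurred copy of the image, then clip negatives). A standalone sketch of the same idea on synthetic data, with a made-up blur radius standing in for nuclear_blur:

import numpy as np
from scipy.ndimage import gaussian_filter

frame = np.random.poisson(5.0, size=(256, 256)).astype(float)
background = gaussian_filter(frame, 10)           # hypothetical blur radius of 10 px
foreground = np.clip(frame - background, 0, None)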
    TC_basin=da.read_nc('data/Allstorms.ibtracs_all.v03r10.nc')['basin']
    tc_sel=da.read_nc('data/Allstorms.ibtracs_all.v03r10.nc').ix[np.where((TC_season==int(identifier)) & (TC_basin[:,0]==0))[0]]

    plt.close('all')
    plate_carree = ccrs.PlateCarree()
    fig,ax=plt.subplots(nrows=1,ncols=1,figsize=(10,5))
    ax = plt.axes(projection=plate_carree)
    ax.set_global()
    ax.coastlines()
    ax.add_feature(cartopy.feature.LAND, facecolor='darkgreen')
    ax.add_feature(cartopy.feature.OCEAN,facecolor='darkblue')
    ax.set_xlim(np.min(lons),np.max(lons))
    ax.set_ylim(np.min(lats),np.max(lats))

    working_dir='detection/JRA55/'+str(identifier)+'_JRA55/'
    found_tcs=tc_detection.tc_tracks(Wind10=Wind10,MSLP=MSLP,MSLP_smoothed=ndimage.gaussian_filter(MSLP,sigma=(0,3,3)),land_mask=land_mask,SST=None,VO=VO,T=T_ana,T_diff=T_diff,lats=lats,lons=lons,time_=time_,dates=dates,identifier=identifier,working_dir=working_dir)
    found_tcs.init_map(ax=ax,transform=plate_carree)
    found_tcs.init_obs_tcs(tc_sel)
    elapsed = time.time() - start;  print('Done with preparations %.3f seconds.' % elapsed)

    # # hybrid method
    # found_tcs.detect_hybrid(overwrite=False,p_radius=27,dis_mslp_min=3,warm_core_size=4,dis_cores=1)
    # found_tcs.plot_detect_summary(thr_wind=10)
    # found_tcs.combine_tracks(overwrite=True,thr_wind=0,search_radius=6,strong_steps=8,total_steps=8,warm_steps=4,consecutive_warm_strong_steps=4,plot=False)
    # found_tcs.plot_season()
    # elapsed = time.time() - start;  print('Done with preparations %.3f seconds.' % elapsed)
    #

    # # contours method
    # found_tcs.detect_contours(overwrite=True,p_radius=27,dis_mslp_min=3,warm_core_size=3,dis_cores=1)
    # found_tcs.plot_detect_summary(thr_wind=10)
#             ax=ax, colorbar_label='Reflectivity (dB)', antialiased=True)

# get data
start = radar.get_start(sweep)
end = radar.get_end(sweep) + 1
data = radar.get_field(sweep, 'differential_reflectivity')
x, y, z = radar.get_gate_x_y_z(sweep, edges=False)

x /= 1000.0
y /= 1000.0
z /= 1000.0

# apply a gaussian blur to the data set for nice smooth lines:
# sigma adjusts the distance effect of blending each cell,
# 4 is arbitrarily set for visual impact.
data = ndimage.gaussian_filter(data, sigma=4)

# calculate (R)ange
R = np.sqrt(x ** 2 + y ** 2) * np.sign(y)
R = -R
display.set_limits(xlim=[0, 40], ylim=[0, 15])

# add contours
# contour levels from -3 to 4 in steps of 0.25
levels = np.arange(-3, 4, 0.25)
# levels_rain = np.arange(1, 4, 0.5)
levels_ice = np.arange(-2, -0, 0.5)
levels_rain = [0.75]

# adds contours to plot
contours = ax.contour(R, z, data, levels, linewidths=1, colors='k',
Example #59
    def estimate(heat_mat, paf_mat):
        if heat_mat.shape[2] == 19:
            heat_mat = np.rollaxis(heat_mat, 2, 0)
        if paf_mat.shape[2] == 38:
            paf_mat = np.rollaxis(paf_mat, 2, 0)

        if PoseEstimator.heatmap_supress:
            heat_mat = heat_mat - heat_mat.min(axis=1).min(axis=1).reshape(
                19, 1, 1)
            heat_mat = heat_mat - heat_mat.min(axis=2).reshape(
                19, heat_mat.shape[1], 1)

        if PoseEstimator.heatmap_gaussian:
            heat_mat = gaussian_filter(heat_mat, sigma=0.5)

        if PoseEstimator.adaptive_threshold:
            _NMS_Threshold = max(
                np.average(heat_mat) * 4.0, PoseEstimator.NMS_Threshold)
            _NMS_Threshold = min(_NMS_Threshold, 0.3)
        else:
            _NMS_Threshold = PoseEstimator.NMS_Threshold

        # extract interesting coordinates using NMS.
        coords = []  # [[coords in plane1], [....], ...]
        for plain in heat_mat[:-1]:
            nms = PoseEstimator.non_max_suppression(plain, 5, _NMS_Threshold)
            coords.append(np.where(nms >= _NMS_Threshold))

        # score pairs
        pairs_by_conn = list()
        for (part_idx1, part_idx2), (paf_x_idx,
                                     paf_y_idx) in zip(CocoPairs,
                                                       CocoPairsNetwork):
            pairs = PoseEstimator.score_pairs(
                part_idx1,
                part_idx2,
                coords[part_idx1],
                coords[part_idx2],
                paf_mat[paf_x_idx],
                paf_mat[paf_y_idx],
                heatmap=heat_mat,
                rescale=(1.0 / heat_mat.shape[2], 1.0 / heat_mat.shape[1]))

            pairs_by_conn.extend(pairs)

        # merge pairs to human
        # pairs_by_conn is sorted by CocoPairs(part importance) and Score between Parts.
        humans = [Human([pair]) for pair in pairs_by_conn]
        while True:
            merge_items = None
            for k1, k2 in itertools.combinations(humans, 2):
                if k1 == k2:
                    continue
                if k1.is_connected(k2):
                    merge_items = (k1, k2)
                    break

            if merge_items is not None:
                merge_items[0].merge(merge_items[1])
                humans.remove(merge_items[1])
            else:
                break

        # reject by subset count
        humans = [
            human for human in humans
            if human.part_count() >= PoseEstimator.PAF_Count_Threshold
        ]

        # reject by subset max score
        humans = [
            human for human in humans
            if human.get_max_score() >= PoseEstimator.Part_Score_Threshold
        ]

        return humans
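The coordinate extraction above smooths each heatmap and then applies non-maximum suppression. A minimal sketch of that kind of peak picking (an illustration, not the PoseEstimator.non_max_suppression implementation, which is not shown here):

import numpy as np
from scipy.ndimage import gaussian_filter, maximum_filter


def heatmap_peaks(heat, window=5, threshold=0.1, sigma=0.5):
    # keep only pixels that are the maximum of their local window and above threshold
    smoothed = gaussian_filter(heat, sigma=sigma)
    local_max = maximum_filter(smoothed, size=window) == smoothed
    return np.where(local_max & (smoothed >= threshold))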
    path_out+f_out,
    'w',
    driver='GTiff',
    height=mdt.shape[0],
    width=mdt.shape[1],
    count=1,
    dtype=mdt.dtype,
    crs=raster.crs,
    transform=raster.transform,
) as dst:
    dst.write(mdt_delta, 1)

#%% 6. Final blurring
from scipy.ndimage import gaussian_filter

gauss = gaussian_filter(mdt, 15) * delta_mask
if plotear:
    plt.imshow(gauss, vmin=17, vmax=22)

#%% 7. Save the blurred raster as GTiff

with rasterio.open(
    path_out+f_out2,
    'w',
    driver='GTiff',
    height=mdt.shape[0],
    width=mdt.shape[1],
    count=1,
    dtype=mdt.dtype,
    crs=raster.crs,
    transform=raster.transform,