Code example #1
def remove_outliers(echo, percentile, size):
    '''
    Masks all data values that fall above the given percentile value
    for the area around each pixel given by size.
    '''
    percentile_array = ndi.percentile_filter(echo.data, percentile, size=size)
    return np.ma.masked_where(echo.data > percentile_array, echo.data)
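A minimal usage sketch for remove_outliers. It assumes ndi is scipy.ndimage and np is numpy (as the snippet implies), and that echo can be any object exposing a 2-D .data array; the Echo wrapper below is a hypothetical stand-in.

import numpy as np
import scipy.ndimage as ndi

class Echo:
    # Hypothetical stand-in for whatever object the original project passes in.
    def __init__(self, data):
        self.data = data

echo = Echo(np.random.rand(64, 64))
# Mask every value that exceeds the local 95th percentile in a 5x5 neighbourhood.
masked = remove_outliers(echo, percentile=95, size=5)
print(masked.count(), "of", masked.size, "values kept")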
Code example #2
File: toolbox.py  Project: jadelord/TomoKTH
def sliding_percentile(img=None, percentile=10., weight=1., window_size=30,
                       boundary_condition='reflect'):
    '''
    Flexible version of median filter. Low percentile values work well
    for dense images.

    Parameters
    ----------
    img : array_like
        Series of images as a 3D numpy array, or a list or a set
    percentile : scalar
        Percentile to filter. Setting `percentile = 50` is equivalent
        to a `sliding_median` filter.
    weight : scalar
        Fraction of median to be subtracted from each pixel.
        Value of `weight` should be in the interval (0.0,1.0).
    window_size : int or tuple of ints
        Size of the sliding window used by the percentile filter.
    boundary_condition : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}
        Mode of handling array borders.

    '''
    img_out = img - weight * nd.percentile_filter(img,
                                                  percentile,
                                                  size=window_size,
                                                  mode=boundary_condition)
    return img_out
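A short usage sketch, assuming nd is the project's alias for scipy.ndimage; the image stack here is synthetic.

import numpy as np
import scipy.ndimage as nd

stack = np.random.rand(20, 64, 64)  # (time, y, x)
# Subtract the local 10th percentile computed over a 5-sample window along every axis.
filtered = sliding_percentile(stack, percentile=10., weight=1., window_size=5)
print(filtered.shape)  # (20, 64, 64)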
Code example #3
File: segment.py  Project: wj2/2p
def watershed_segment(M,xM=None,yM=None):
    """Use watershed segmentation on an array. 
    Return an array where regions are given different integer labels"""

    if xM is not None and yM is not None:
        sel = np.ones((int(ceil(23.9*xM)),int(ceil(23.9*yM)))) # for opening
        sel2 = np.ones((int(ceil(127.2*xM)),int(ceil(127.2*yM)))) # for local thresholding
        sel3 = np.ones((int(ceil(11.9*xM)),int(ceil(11.9*yM)))) # for erosion
        ma,mi =(44245.21*xM*yM),(316.037*xM*yM) 
    else:
        selD = np.array([int(M.shape[0]*.012),int(M.shape[1]*.012)])
        selD = np.where(selD!=0,selD,1)
    
        sel2D = np.array([int(M.shape[0]*.12),int(M.shape[1]*.12)])
        sel2D = np.where(sel2D!=0,sel2D,1)

        sel3D = np.array([int(M.shape[0]*.01),int(M.shape[1]*.01)])
        sel3D = np.where(sel3D!=0,sel3D,1)


        sel = np.ones(selD) # for opening
        sel2 = np.ones(sel2D) # for local thresholding
        sel3 = np.ones(sel3D) # for erosion
        ma,mi = (M.shape[0]*M.shape[1]*.0075),(M.shape[0]*M.shape[1]*.0003)

    # get a few points in the center of each blob
    
    # threshold
    bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)))
    #& (M>=stats.scoreatpercentile(M.flatten(),80)))

    # open and erode
    blobs = snm.binary_opening(bw,structure=sel)
    blobs = snm.binary_erosion(blobs,structure=sel3,iterations=2)
    
    # label
    labels,_ = ndi.label(blobs)
    labels[labels > 0] += 1
    labels[0,0] = 1

    # rescale and cast to int16, then use watershed
    #M2 = rescaled(M,0,65000).astype(np.uint16)
    #newlabels = ndi.watershed_ift(M2,labels)
    newlabels = labels
    
    # get rid of groups unless they have the right number of pixels

    counts = np.bincount(newlabels.flatten())
    old2new = np.arange(len(counts)) 
    old2new[(counts < int(mi)) | (counts > int(ma))] = 0
    newlabels = old2new[newlabels]

    return newlabels
Code example #4
def percentile_filter(x, z):
    from scipy.ndimage import percentile_filter
    from breze.learn.data import one_hot
    percentile = np.random.randint(0, 10)

    nx = np.transpose(x, (0, 2, 1, 3, 4))
    nx[0] = [percentile_filter(modality, percentile, (2, 2, 2)) for modality in nx[0]]
    nx = np.transpose(nx, (0, 2, 1, 3, 4))

    n_classes = z.shape[-1]
    nz = np.reshape(z, (x.shape[3], x.shape[4], x.shape[1], n_classes))
    nz = np.transpose(nz, (3, 0, 1, 2))
    nz = np.array([percentile_filter(class_map, percentile, (2, 2, 2)) for class_map in nz])
    nz = nz.argmax(axis=0)
    nz = np.reshape(nz, (-1,))
    nz = np.reshape(one_hot(nz, n_classes), z.shape)

    nx = np.asarray(nx, dtype=x.dtype)
    nz = np.asarray(nz, dtype=z.dtype)

    return (nx, nz)
Code example #5
def rolling_minimum_background(img, size=(31, 51), kernel=None,
                               geometry='rectangular', topography='flat',
                               percentile=0):
    """
    Instead of calculating the resulting image, just calculate the background and apply with
        img -= rolling_minimum_background(img)

    That way you can apply any amount of pre-filters without complex logic:
        img -= rolling_minimum_background(gaussian_filter(img, sigma=2))

    Notes:
        This doesn't work well with images with sharp boundaries, e.g. the edge of a gel.
        For best result, apply AFTER opening() and col+row leveling.

    Args:
        img:
        size: int or 2-tuple with (height, width),
                     should be taller than the thickest band and wider than the widest smear. Square is usually OK.
        kernel: specify kernel / footprint / structuring element manually.
        geometry:
        topography:
        percentile: if non-zero, use percentile_filter with this percentile instead of minimum_filter (minimum_filter corresponds to percentile=0).

    Returns:
        background image

    """
    if kernel is None:
        if isinstance(size, int):
            size = (size, size)
        if geometry in ('round', 'disk', 'ellipse'):
            # use a binary kernel with ones in a round/elliptical shape.
            kernel = ellipse_binary(size)
        elif geometry == 'rectangular' or geometry is None:
            kernel = np.ones(size)
        if topography == 'ball':
            # topography generally doesn't work because ndimage filters take boolean footprints.
            # I could probably do it with a generic filter, or by some other means.
            pass

    if percentile:
        # percentile_filter; footprint must be boolean array; size=(n,m) is equivalent to footprint=np.ones((n,m))
        background = percentile_filter(img, percentile=percentile, footprint=kernel)
    else:
        background = minimum_filter(img, footprint=kernel)

    return background
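A usage sketch following the docstring's own recipe, assuming minimum_filter, percentile_filter and gaussian_filter come from scipy.ndimage; the rectangular geometry is used so the project's ellipse_binary helper is not needed.

import numpy as np
from scipy.ndimage import gaussian_filter, minimum_filter, percentile_filter

gel = np.random.rand(200, 300) * 100.0
# Estimate the background on a lightly smoothed copy, then subtract it from the original.
gel -= rolling_minimum_background(gaussian_filter(gel, sigma=2), size=(31, 51))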
Code example #6
ファイル: toolbox.py プロジェクト: jadelord/TomoKTH
def temporal_percentile(img=None, percentile=10., weight=1.,
                        window_shape=None):
    '''
    Flexible version of median filter. Low percentile values work well
    for dense images.

    Parameters
    ----------
    img : array_like
        Series of images as a 3D numpy array, or a list or a set
    percentile : scalar
        Percentile to filter. Setting `percentile = 50` is equivalent
        to a `temporal_median` filter.
    weight : scalar
        Fraction of median to be subtracted from each pixel.
        Value of `weight` should be in the interval (0.0,1.0).
    window_shape : tuple of integers
        Specifies the shape of the window as follows (dt, dy, dx)

    '''
    time_axis = 0
    nb_imgs = img.shape[time_axis]
    if img.ndim <= 2 or nb_imgs <= 1:
        raise ValueError(
            'Need more than one image to apply temporal filtering.')

    if window_shape is None:
        window_shape = (nb_imgs, 1, 1)
    elif not isinstance(window_shape, tuple):
        raise ValueError('window_shape must be a tuple.')
    elif window_shape[0] <= 1:
        raise ValueError(
            'Cannot perform temporal filtering, try spatial filtering.')

    img_out = img - weight * nd.percentile_filter(img,
                                                  percentile,
                                                  size=window_shape)
    return img_out
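A minimal sketch, again assuming nd is scipy.ndimage: subtract the per-pixel 10th percentile over time from a synthetic stack of frames.

import numpy as np
import scipy.ndimage as nd

frames = np.random.rand(50, 32, 32)  # (t, y, x)
cleaned = temporal_percentile(frames, percentile=10., weight=1.)  # default window spans all frames
print(cleaned.shape)  # same shape as the input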
Code example #7
def show_transform():
    for im in gen_images(n=-1, crop=True):
        t_im = im['T1c']
        gt = im['gt']
        #t_im_trans, trans_gt = rotate_transform(t_im, gt)
        #t_im_trans = t_im
        #t_im_trans = re_rescale(t_im)
        #t_im_trans = flip(t_im)
        #t_im_trans = noise(t_im, intensity=1, n=10)
        t_im_trans, trans_gt = ndi.percentile_filter(t_im, np.random.randint(0, 10), (2, 2, 2)), gt
        #t_im_trans = ndi.morphological_gradient(t_im, size=(2, 2, 2))
        #t_im_trans = ndi.grey_dilation(t_im, size=(3, 3, 3))
        #t_im_trans = ndi.grey_erosion(t_im_trans, size=(3, 3, 3))
        
        print(t_im_trans.dtype)

        for _slice in np.arange(0, t_im.shape[0], t_im.shape[0] // 20):
            im_slice = t_im[_slice]
            im_slice_trans = t_im_trans[_slice]
            gt_slice = gt[_slice]
            trans_gt_slice = trans_gt[_slice]
            
            vis_ims(im0=im_slice, gt0=gt_slice, im1=im_slice_trans, gt1=trans_gt_slice)
Code example #8
File: skysub.py  Project: cdfassnacht/keckcode
def skysub(x, y, z, scale):
    """
	skysub(x,y,z,scale)

	Routine to determine the 2d background from data. (x,y) are the
	  coordinates of the data, usually in the *corrected* frame.

	Inputs:
	  x     - 1d array describing x-coordinate, usually wavelength
	  y     - 1d array describing y-coordinate, usually corrected spatial
                    position
	  z     - data at each position (x,y)
	  scale - approximate output scale (for knot placement). It is not, in
	            general, possible to calculate this from x because the
	            input coordinates are not on a regular grid.

	Outputs:
	  2d spline model of the background
	"""

    height = int(y.max() - y.min())
    width = int(x.max() - x.min())
    npoints = x.size

    midpt = y.mean()
    """
	Very wide slits need special attention. Here we fit a first order
	  correction to the slit and subtract it away before doing the high
	  pixel rejection (the problem is if there is a small gradient across
	  a wide slit, the top and bottom pixels may differ significantly,
	  but these pixels may be close in *wavelength* and so locally (on
	  the CCD) low pixels will be rejected in the smoothing
	"""
    if height > WIDE:
        zbak = z.copy()
        args = y.argsort()
        revargs = args.argsort()
        ymodel = ndimage.percentile_filter(z[args], 30., size=height)[revargs]
        fit = special_functions.lsqfit(ymodel, 'polynomial', 1)

        if fit['coeff'][1] * float(ymodel.size) / fit['coeff'][0] < 0.05:
            pass
        else:
            ymodel = special_functions.genfunc(scipy.arange(ymodel.size), 0,
                                               fit)
            ymodel -= ymodel.mean()
            z -= ymodel

    # Filter locally (in wavelength space) high points
    args = x.argsort()
    revargs = args.argsort()
    smooth = ndimage.percentile_filter(z[args], 35., size=height)[revargs]
    diff = z - smooth
    # We assume poisson statistics....
    var = scipy.sqrt(scipy.fabs(z))
    sigma = diff / var

    args = y.argsort()
    revargs = args.argsort()

    t = ndimage.median_filter(sigma[args], 9)
    t = ndimage.gaussian_filter(t, width)[revargs]
    # Source detection/rejection
    # Reject yvalues > 1. sigma, and weight remaining pixels
    w = (1.0 - t) / abs(z)

    skycond = ((w > 0.) & (z > 0))
    x = x[skycond]
    y = y[skycond]
    z = z[skycond]

    # Reject residual high pixels (and very low pixels too!)
    args = x.argsort()
    revargs = args.argsort()
    smooth = ndimage.median_filter(z[args], height / 4.)[revargs]
    diff = z - smooth
    var = scipy.sqrt(smooth)

    cond = abs(diff) < 4. * var
    x = x[cond]
    y = y[cond]
    z = z[cond]

    kx = 3
    ky = 1

    # If the slit is long, return to original data and increase the order
    #   of the y-fit.
    if height > WIDE:
        z = zbak[skycond]
        z = z[cond].astype(scipy.float64)

        if height > WIDE * 1.5:
            ky = 3

        cond = z > 0.
        x = x[cond]
        y = y[cond]
        z = z[cond]

    w = 1. / z

    if x.size < 5. * width:
        kx = 1
        ky = 1

    # Create knots...
    innertx = scipy.arange(x.min() + scale / 2., x.max() - scale / 2., scale)
    tx = scipy.zeros(innertx.size + kx * 2 + 2)
    tx[0:kx + 1] = x.min()
    tx[kx + 1:innertx.size + kx + 1] = innertx.copy()
    tx[innertx.size + kx + 1:] = x.max()
    ty = scipy.zeros(ky * 2 + 2)
    ty[0:ky + 1] = y.min()
    ty[ky + 1:] = y.max()

    # ...and fit.
    bgfit = interpolate.bisplrep(x,
                                 y,
                                 z,
                                 w,
                                 tx=tx,
                                 ty=ty,
                                 kx=kx,
                                 ky=ky,
                                 task=-1,
                                 nxest=tx.size,
                                 nyest=ty.size,
                                 s=0)

    return bgfit
Code example #9
File: skysub.py  Project: albertfxwang/mypytools
def skysub(x, y, z, scale):
    """
    skysub(x,y,z,scale)

    Routine to determine the 2d background from data. (x,y) are the
      coordinates of the data, usually in the *corrected* frame.

    Inputs:
      x     - 1d array describing x-coordinate, usually wavelength
      y     - 1d array describing y-coordinate, usually corrected spatial
            position
      z     - data at each position (x,y)
      scale - approximate output scale (for knot placement). It is not, in
            general, possible to calculate this from x because the
            input coordinates are not on a regular grid.

    Outputs:
      2d spline model of the background
    """
    cond = (scipy.isfinite(z)) & (z > 0.)
    x = x[cond]
    y = y[cond]
    z = z[cond]

    x0 = x.copy()
    y0 = y.copy()
    z0 = z.copy()

    height = int(y.max() - y.min())
    width = int(x.max() - x.min())
    npoints = x.size

    midpt = y.mean()
    """
    Very wide slits need special attention. Here we fit a first order
      correction to the slit and subtract it away before doing the high
      pixel rejection (the problem is if there is a small gradient across
      a wide slit, the top and bottom pixels may differ significantly,
      but these pixels may be close in *wavelength* and so locally (on
      the CCD) low pixels will be rejected in the smoothing
    """
    if height > WIDE:
        zbak = z.copy()
        args = y.argsort()
        revargs = args.argsort()
        ymodel = ndimage.percentile_filter(z[args], 30., size=height)[revargs]
        fit = special_functions.lsqfit(ymodel, 'polynomial', 1)

        if fit['coeff'][1] * float(ymodel.size) / fit['coeff'][0] < 0.05:
            pass
        else:
            ymodel = special_functions.genfunc(scipy.arange(ymodel.size), 0,
                                               fit)
            ymodel -= ymodel.mean()
            z -= ymodel

    # Filter locally (in wavelength space) high points
    args = x.argsort()
    revargs = args.argsort()

    smooth = ndimage.percentile_filter(z[args], 35., size=height)[revargs]

    diff = z - smooth
    # We assume poisson statistics....
    var = scipy.sqrt(scipy.fabs(z))
    sigma = diff / var

    args = y.argsort()
    revargs = args.argsort()

    t = ndimage.median_filter(sigma[args], 9)
    t = ndimage.gaussian_filter(t, width)  #[revargs]
    # Source detection/rejection
    # Reject yvalues > 1. sigma, and weight remaining pixels
    w = (1.0 - t) / abs(z[args])

    if AGGRESSIVE:
        g = scipy.where(w <= 0, 0, 1)
        g = ndimage.maximum_filter(g, width * 3)
        g = ndimage.minimum_filter(g, width * 7)

        s = sigma[args].copy()
        b = ndimage.minimum_filter(g, width * 5)
        xi = scipy.arange(t.size)
        fitdata = scipy.empty((xi[g == 1].size, 2))
        fitdata[:, 0] = xi[g == 1].copy()
        fitdata[:, 1] = t[g == 1].copy()
        fit = special_functions.lsqfit(fitdata, 'polynomial', 3)
        fit = special_functions.genfunc(xi, 0., fit)

        diff = (t - fit)[b == 1]
        s = diff.std()
        while (abs(t - fit)[(g == 1) & (b == 0)] > 2.5 * s).any():
            g = b.copy()
            b = ndimage.minimum_filter(g, width * 5)
            fitdata = scipy.empty((xi[g == 1].size, 2))
            fitdata[:, 0] = xi[g == 1].copy()
            fitdata[:, 1] = t[g == 1].copy()
            fit = special_functions.lsqfit(fitdata, 'polynomial', 3)
            fit = special_functions.genfunc(xi, 0., fit)

            diff = (t - fit)[b == 1]
            s = diff.std()

        w *= g

    skycond = ((w > 0.) & (z > 0))
    x = x[skycond]
    y = y[skycond]
    z = z[skycond]

    # Reject residual high pixels (and very low pixels too!)
    args = x.argsort()
    revargs = args.argsort()
    smooth = ndimage.median_filter(z[args], height / 4.)[revargs]

    diff = z - smooth
    var = scipy.sqrt(smooth)

    cond = abs(diff) < 4. * var
    x = x[cond]
    y = y[cond]
    z = z[cond]

    kx = 3
    ky = 1

    # If the slit is long, return to original data and increase the order
    #   of the y-fit.
    if height > WIDE:
        z = zbak[skycond]
        z = z[cond].astype(scipy.float64)

        #    if height>WIDE*1.5:
        #        ky = 3

        cond = z > 0.
        x = x[cond]
        y = y[cond]
        z = z[cond]

    w = 1. / z

    if x.size < 5. * width:
        kx = 1
        ky = 1

    # Create knots...

    innertx = scipy.arange(x.min() + scale / 2.,
                           x.max() - scale / 2., 3. * scale / 4.)
    """
    tx = scipy.zeros(innertx.size+kx*2+2)
    tx[0:kx+1] = x.min()
    tx[kx+1:innertx.size+kx+1] = innertx.copy()
    tx[innertx.size+kx+1:] = x.max()
    """
    tx = scipy.linspace(x.min(), x.max(), innertx.size)
    xsort = scipy.sort(x)
    tmp = [x.min()]
    num = []
    cnt = 0
    j = 1
    for i in range(xsort.size):
        while xsort[i] > tx[j]:
            if cnt > 0:
                if len(num) == 0 or cnt > 1 or num[-1] > 1:
                    tmp.append(tx[j])
                    num.append(cnt)
                    cnt = 0
            j += 1
        cnt += 1
    tmp.append(x.max())
    tx = scipy.asarray(tmp)
    ty = scipy.zeros(ky * 2 + 2)
    ty[0:ky + 1] = y.min()
    ty[ky + 1:] = y.max()

    #del innertx
    # ...and fit.
    bgfit = interpolate.bisplrep(x,
                                 y,
                                 z,
                                 w,
                                 tx=tx,
                                 ty=ty,
                                 kx=kx,
                                 ky=ky,
                                 task=-1,
                                 nxest=tx.size,
                                 nyest=ty.size)
    del x, y, z, w, tx, ty
    return bgfit
Code example #10
def percentile_25_transform(im, window_size):
    if im.ndim == 3:
        size = (window_size, window_size, 1)
    else:
        size = (window_size, window_size)
    return nd.percentile_filter(im, percentile=25, size=size, mode='reflect')
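A usage sketch, assuming nd is scipy.ndimage: the same window size works for 2-D grayscale and 3-D (height, width, channels) images because the channel axis gets a window of 1.

import numpy as np
import scipy.ndimage as nd

gray = np.random.rand(128, 128)
rgb = np.random.rand(128, 128, 3)
print(percentile_25_transform(gray, window_size=5).shape)  # (128, 128)
print(percentile_25_transform(rgb, window_size=5).shape)   # (128, 128, 3)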
Code example #11
def apply_filter(input_image, filter_type, show_result=False, *args, **kwargs):
    """
    Apply a selected filter to an image.
    
    Parameters
    ----------
    input_image : ndarray
        Represents the image to be filtered
    filter_type: str
        Must be one of:
                'gaussian',
                'uniform',
                'median',
                'maximum',
                'minimum',
                'sharpening',
                'percentile',
                'wiener',
                'sobel'
    show_result: Boolean
            If True, the result is plotted using matplotlib, default is False.
    *args: Arguments of the selected filter, see details for more information.
    **kwargs: The keyword arguments of the selected filter, see details for more information.
    
    Returns
    -------
    ndarray
        The filtered image in the same format as the input.
        
    Details
    -------
    Arguments for the filters and the default values:
    =============  ===========================
    Filter         Kwargs       
    =============  ===========================
    'gaussian'     sigma: 3   
    'uniform'      size: 3
    'median'       size: 3
    'maximum'      size: 3
    'minimum'      size: 3
    'sharpening'   alpha: 30, filter_sigma: 1
    'percentile'   percentile: 75, size: 3
    'wiener'       Not implemented
    'sobel'        None
    =============  ===========================
    These details are also defined in this module as an argument.
    """
    if filter_type not in filter_names:
        raise NotImplementedError
    if filter_type == 'gaussian':
        output_image = ndimage.gaussian_filter(input_image, *args, **kwargs)
    elif filter_type == 'uniform':
        output_image = ndimage.uniform_filter(input_image, *args, **kwargs)
    elif filter_type == 'median':
        output_image = ndimage.median_filter(input_image, *args, **kwargs)
    elif filter_type == 'maximum':
        output_image = ndimage.maximum_filter(input_image, *args, **kwargs)
    elif filter_type == 'minimum':
        output_image = ndimage.minimum_filter(input_image, *args, **kwargs)
    elif filter_type == 'sharpening':
        output_image = sharpenning_filter(input_image, *args, **kwargs)
    elif filter_type == 'percentile':
        output_image = ndimage.percentile_filter(input_image, *args, **kwargs)
    elif filter_type == 'wiener':  # TODO: finish the wiener filter
        raise NotImplementedError
    elif filter_type == 'sobel':
        output_image = sobel_filter(input_image)

    if show_result:
        show_images_and_hists(
            [input_image, output_image],
            titles=[
                'Input',
                'Output Image - %s%s\n%s - %s' %
                ("Filter: ", filter_type, str(args), str(kwargs))
            ],
            colorbar=True)

    output_image = output_image.astype(input_image.dtype)  # input format
    return output_image
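A usage sketch, assuming the module-level filter_names list includes 'percentile'; extra keyword arguments are forwarded untouched to scipy.ndimage.percentile_filter.

import numpy as np

noisy = (np.random.rand(100, 100) * 255).astype(np.uint8)
smoothed = apply_filter(noisy, 'percentile', percentile=75, size=3)
print(smoothed.dtype)  # cast back to the input dtype (uint8)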
Code example #12
ax[1,0].set_xlabel('time [s]')
ax[1,1].set_xlabel('time [s]')
ax[1,2].set_xlabel('time [s]')


big_string = ''
for substring in my_list:
    big_string = big_string + ' ' + substring

A = pcf.cnmf.estimates.A
F =  pcf.cnmf.estimates.C +  pcf.cnmf.estimates.YrA
b = pcf.cnmf.estimates.b
f= pcf.cnmf.estimates.f
B = A.T.dot(b).dot(f)
import scipy.ndimage as nd
Df = nd.percentile_filter(B, 10, (1000,1))
plt.figure(); plt.plot(B[49]+pcf.cnmf.estimates.C[49]+pcf.cnmf.estimates.R[49])

#%% Flag auto as False, how does dFF look
import caiman.source_extraction.cnmf.utilities as ut
flag_dff = ut.detrend_df_f(A, b, pcf.cnmf.estimates.C, f, YrA=pcf.cnmf.estimates.YrA, quantileMin=8,
                           frames_window=500, flag_auto=False, use_fast=False, detrend_only=False)
slow_dff = ut.extract_DF_F(Yr, A, C, bl, quantileMin=8, frames_window=200, block_size=400, dview=None)

fig, ax = plt.subplots(2)
ax[0].plot(pcf.cnmf.estimates.F_dff[49])
ax[1].plot(flag_dff[49])

plt.figure()
plt.plot(flag_dff[49], label='auto=False')
plt.plot(pcf.cnmf.estimates.F_dff[49], label='auto=True')
Code example #13
def ndi_med(image, n):
    return percentile_filter(image, 50, size=n * 2 - 1)
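A quick sanity check, assuming percentile_filter is imported from scipy.ndimage at module level: a 50th-percentile filter over an odd 2*n - 1 window should match scipy's median_filter of the same size.

import numpy as np
from scipy.ndimage import median_filter, percentile_filter

img = np.random.rand(64, 64)
print(np.allclose(ndi_med(img, 3), median_filter(img, size=5)))  # expected: True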
Code example #14
File: wavesolve.py  Project: PatrickRWells/keckcode
def jointSolve(d,orders):
    import pylab
    path = __path__[0]

    lines = {}
    lines['cuar'] = numpy.loadtxt(path+"/data/cuar.lines")
    lines['hgne'] = numpy.loadtxt(path+"/data/hgne.lines")
    lines['xe'] = numpy.loadtxt(path+"/data/xe.lines")

    startsoln = numpy.load(path+"/data/esi_wavesolution.dat")
    startsoln = numpy.load(path+'/data/shao_wave.dat')
    startsoln = [i for i,j in startsoln]
    alldata = d['arc']
    arclist = list(d.keys())
    arclist.remove('arc')
    soln = []
    if alldata.shape[1]>3000:
        xvals = numpy.arange(4096.)
        resolve = False
        cuslice = slice(3860,3940)
        fw1 = 75.
        fw2 = 9
        WIDTH = 4
    else:
        xvals = numpy.arange(2048.)
        resolve = True
        cuslice = slice(1930,1970)
        fw1 = 37.
        fw2 = 7
        WIDTH = 3
    for i in range(10):
      solution = startsoln[i]
      start,end = orders[i]
      if resolve==True:
        tmp = numpy.arange(0.5,4096.,2.)
        w = sf.genfunc(tmp,0.,solution)
        solution = sf.lsqfit(numpy.array([xvals,w]).T,'chebyshev',3)

      data = numpy.nanmedian(alldata[start:end],axis=0)
      data[numpy.isnan(data)] = 0.
      if i==0:
        data[cuslice] = numpy.median(data)
      bak = ndimage.percentile_filter(data,50.,int(fw1))
      data -= bak

      peaks = []
      p = ndimage.maximum_filter(data,fw2)
      std = clip(numpy.trim_zeros(data),3.)[1]
      peak = numpy.where((p>30.*std)&(p==data))[0]
      for p in peak:
          if p-WIDTH<0 or p+WIDTH+1>xvals.size:
              continue
          x = xvals[p-WIDTH:p+WIDTH+1].copy()
          f = data[p-WIDTH:p+WIDTH+1].copy()
          fitdata = numpy.array([x,f]).T
          fit = numpy.array([0.,f.max(),xvals[p],1.])
          fit,chi = sf.ngaussfit(fitdata,fit,weight=1)
          peaks.append(fit[2])

      for converge in range(10):
        wave = 10**sf.genfunc(xvals,0.,solution)

        refit = []
        corr = []
        err = wave[wave.size//2]-wave[wave.size//2-1]
        p = 10.**sf.genfunc(peaks,0.,solution)
        for arc in arclist:
            for k in range(p.size):
                if i==0 and p[k]>4344.:
                    continue
                cent = p[k]
                diff = cent-lines[arc]
                corr.append(diff[abs(diff).argmin()])
        corr = numpy.array(corr)
        corr = corr[abs(corr)<5*err]
        m,s = clip(corr)
        corr = numpy.median(corr[abs(corr-m)<5.*s])
        for arc in arclist:
            for k in range(p.size):
                if i==0 and p[k]>4344.:
                    continue
                pos = peaks[k]
                cent = p[k]
                diff = abs(cent-lines[arc]-corr)
                if diff.min()<2.*err:
                    refit.append([pos,lines[arc][diff.argmin()]])
        refit = numpy.asarray(refit)
        solution = sf.lsqfit(refit,'polynomial',3)
        refit = []
        err = solution['coeff'][1]
        p = sf.genfunc(peaks,0.,solution)
        for k in range(p.size):
            delta = 1e9
            match = None
            for arc in arclist:
                for j in lines[arc]:
                    if i==0 and j>4344.:
                        continue
                    diff = abs(p[k]-j)
                    if diff<delta and diff<1.*err:
                        delta = diff
                        match = j
            if match is not None:
                refit.append([peaks[k],match])
        refit = numpy.asarray(refit)
        refit[:,1] = numpy.log10(refit[:,1])
        solution = sf.lsqfit(refit,'chebyshev',3)

        solution2 = sf.lsqfit(refit[:,::-1],'chebyshev',3)

        g = 10**sf.genfunc(peaks,0.,solution)
        g2 = 10**sf.genfunc(peak,0.,solution)
        w = 10**sf.genfunc(xvals,0.,solution)
        if (w==wave).all():
            print("Order %d converged in %d iterations"%(i,converge))
            soln.append([solution,solution2])
            break
            pylab.plot(w,data)
            for arc in arclist:
                for j in lines[arc]:
                    if j>w[0] and j<w[-1]:
                        pylab.axvline(j,c='b')
            for j in 10**refit[:,1]:
                if j>w[0] and j<w[-1]:
                    pylab.axvline(j,c='r')
            for j in g:
                pylab.axvline(j,c='g')
            for j in g2:
                pylab.axvline(j,c='c')
            pylab.show()
            break

    return soln
Code example #15
File: peaks.py  Project: MingleiYang/hictools
def calculate_lambda(
        observed: np.ndarray,
        expected: np.ndarray,
        valid_mat: np.ndarray,
        row_factors: np.ndarray,
        col_factors: np.ndarray,
        kernels: Tuple[np.ndarray],
        band_width: int,
        inner_radius: int,
        outer_radius: int,
        ignore_diags: int = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Calculate lambda values(background) for each pixel in regions sepcified in kernels.

    :param valid_mat:
    :param inner_radius:
    :param expected: np.ndarray. 2-d ndarray representing the expected (normed) matrix.
    :param observed: np.ndarray. 2-d ndarray representing the observed (normed) matrix.
    :param row_factors: np.ndarray. 1-d ndarray represents ICE factors of each row in expected.
    :param col_factors: np.ndarray. 1-d ndarray represents ICE factors of each column in expected.
    :param kernels: Tuple[np.ndarray]. Each array(mask) represents a certain region that is used for computing
    lambda(background) by summing all values within this region for each pixel.
    :param band_width: int. Width of the band region.
    :param outer_radius: int. The maximum radius among all kernels.
    :param ignore_diags: int. Number of diagonals to ignore. Pixels within this region will not be counted in available contacts.
    :return: Tuple[np.ndarray, np.ndarray]. The first ndarray contains indices of all available pixels, and the second ndarray
    contains the corresponding lambdas in all regions specified in kernels.
    """
    if ignore_diags is None:
        ignore_diags = 2 * outer_radius
    x, y = observed.nonzero()
    dis = y - x
    mask = ((dis <= (band_width - 2 * outer_radius))
            & (x < (observed.shape[0] - outer_radius))
            & (dis >= ignore_diags)
            & (x >= outer_radius))
    x, y = x[mask], y[mask]
    num_kernels = len(kernels)

    if x.size == 0:
        return np.empty((2, 0)), np.empty((num_kernels, 0)), np.empty(0)
    else:

        ratio_array = np.full((num_kernels, x.size), 0, dtype=float)
        oe_matrix = observed / expected
        for index, kernel in enumerate(kernels):
            # ob_sum = ndimage.convolve(observed, kernel)
            # ex_sum = ndimage.convolve(expected, kernel)
            # ratio_array[index] = (ob_sum / ex_sum)[(x, y)]

            # Another option
            # counts = ndimage.convolve(valid_mat, kernel)
            ratio = ndimage.convolve(oe_matrix, kernel) / kernel.sum()
            ratio_array[index] = ratio[x, y]

        lambda_array = (ratio_array * expected[x, y] * row_factors[x] *
                        col_factors[y])

        inner_len = 2 * inner_radius + 1
        outer_len = 2 * outer_radius + 1
        inner_num = inner_len**2
        percentage = (inner_num / outer_len**2)
        plateau_ma = oe_matrix - ndimage.percentile_filter(
            oe_matrix, int((1 - percentage) * 100), (outer_len, outer_len))
        plateau_region = (plateau_ma > 0).astype(np.int16)
        enrich_ratio = ndimage.convolve(
            plateau_region, np.ones((inner_len, inner_len)))[x, y] / inner_num

        return np.vstack((x, y)), lambda_array, enrich_ratio
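A synthetic usage sketch, assuming np, ndimage and typing.Tuple are imported at module level as the function expects; uniform matrices and a single donut-shaped kernel are used only to illustrate the shapes of the returned arrays.

from typing import Tuple
import numpy as np
from scipy import ndimage

n = 50
observed = np.random.poisson(5, size=(n, n)).astype(float)
expected = np.ones((n, n))
valid = np.ones((n, n))
factors = np.ones(n)

outer, inner = 2, 1
donut = np.ones((2 * outer + 1, 2 * outer + 1))
donut[outer - inner:outer + inner + 1, outer - inner:outer + inner + 1] = 0  # hollow centre

coords, lambdas, enrich = calculate_lambda(
    observed, expected, valid, factors, factors,
    kernels=(donut,), band_width=30, inner_radius=inner, outer_radius=outer)
print(coords.shape, lambdas.shape, enrich.shape)  # (2, k), (1, k), (k,)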
Code example #16
#da5 = xr.open_dataset(save1)

df5 = da5.resample(time='D', closed='right',
                   label='left').sum('time').to_dataframe().reset_index()

save_geotiff(df5, 4326, 'precipitationCal', 'lon', 'lat', export_path=save2)

a = np.arange(50, step=2).reshape((5, 5))
a[2, 2] = 2

ndimage.gaussian_filter(a, sigma=1, order=0)
ndimage.gaussian_gradient_magnitude(a, sigma=1)

ndimage.median_filter(a, size=3)
ndimage.percentile_filter(a, percentile=100, size=2)
ndimage.uniform_filter(a, size=3)

date0 = '2018-01-17'

date1 = pd.Timestamp(date0)
date2 = date1 + pd.DateOffset(days=1)

da4 = da1.loc[date1:date2]
da5 = da4.resample(time='D', closed='right', label='left').sum('time')
da6 = da5.copy()
da6['time'] = da6['time'].to_series() + pd.DateOffset(hours=1)

#da6.data = ndimage.percentile_filter(da5, percentile=100, size=2)
da6.data = ndimage.median_filter(da5, size=2)
Code example #17
from scipy import ndimage, misc
import matplotlib.pyplot as plt
fig = plt.figure()
plt.gray()  # show the filtered result in grayscale
ax1 = fig.add_subplot(121)  # left side
ax2 = fig.add_subplot(122)  # right side
ascent = misc.ascent()
result = ndimage.percentile_filter(ascent, percentile=20, size=20)
ax1.imshow(ascent)
ax2.imshow(result)
plt.show()
Code example #18
def cxd_to_h5(filename_cxd, bg, ff, roi, good_pixels, filename_cxi,
              do_percent_filter, filt_percent, filt_frames, cropping, minx,
              maxx, miny, maxy):
    print("*************************************")
    print("*   Particle conversion section     *")
    print("*************************************")
    # Initialise reader(s)
    # Data
    print("Opening %s" % filename_cxd)
    R = CXDReader(filename_cxd)

    frame = R.get_frame(0)  # dtype: uint16

    if (cropping):
        roi = (slice(miny, maxy, None), slice(minx, maxx, None))
    if (good_pixels is None):
        print(
            "Warning: Good pixels informaton is missing. Using all the pixels."
        )
        good_pixels = np.ones_like(frame)

    N = R.get_number_of_frames()
    shape = (N, frame[roi].shape[0], frame[roi].shape[1])

    if (do_percent_filter):
        four_gigabytes = 4 * (1 << 30)
        if np.prod(shape) * frame.dtype.itemsize > four_gigabytes:
            gigs = np.prod(shape) * np.dtype(np.float16).itemsize / (1 << 30)
            print(
                "Warning: reading data for percentile filter will require more than %.1fG of RAM!"
                % gigs)

        print("Calculating percentile filter...", end='')
        data_stack = np.zeros(shape, dtype=frame.dtype)  # percent_filter stack
        for i in range(N):
            frame = R.get_frame(i)
            data_stack[i] = frame[roi] * good_pixels[roi]
        filtered_stack = percentile_filter(data_stack,
                                           filt_percent,
                                           size=(filt_frames, 1, 1))
        print('done.')

    # Initialise integration variables
    integrated_raw = None
    integrated_image = None
    integratedsq_raw = None
    integratedsq_image = None

    # Write frames
    for i in range(N):

        frame = R.get_frame(i)

        bg_corr = None
        if (do_percent_filter):
            # Replace background with percentile filter
            # Applying both a constant background correction after a percentile filter is redundant
            bg_corr = filtered_stack[i]
        elif (bg is not None):
            bg_corr = bg[roi]

        print('(%d/%d) Writing frames...' % (i + 1, N), end='\r')

        frame = R.get_frame(i)
        image_raw = frame[roi] * good_pixels[roi]

        out = {}
        out["entry_1"] = {}

        # Raw data
        out["entry_1"]["data_1"] = {"data": image_raw}

        # Background-subtracted image
        if (bg_corr is not None):
            image_bgcor = (image_raw.astype(np.float16) -
                           bg_corr) * good_pixels[roi]
            out["entry_1"]["image_1"] = {"data": image_bgcor}

        # Write to disc
        W.write_slice(out)

        if integrated_raw is None:
            integrated_raw = np.zeros(shape=image_raw.shape, dtype='float32')
        if integratedsq_raw is None:
            integratedsq_raw = np.zeros(shape=image_raw.shape, dtype='float32')
        integrated_raw += np.asarray(image_raw, dtype='float32')
        integratedsq_raw += np.asarray(image_raw, dtype='float32')**2

        if (bg_corr is not None):
            if integrated_image is None:
                integrated_image = np.zeros(shape=image_bgcor.shape,
                                            dtype='float32')
            if integratedsq_image is None:
                integratedsq_image = np.zeros(shape=image_bgcor.shape,
                                              dtype='float32')
            integrated_image += image_bgcor
            integratedsq_image += np.asarray(image_bgcor, dtype='f')**2
    # Print newline
    print('(%d/%d) Writing frames...done.' % (N, N))
    # Write integrated images
    print('Writing integrated images...', end='')
    out = {"entry_1": {"data_1": {}, "image_1": {}}}
    if integrated_raw is not None:
        out["entry_1"]["data_1"]["data_mean"] = integrated_raw / float(N)
    if integrated_image is not None:
        out["entry_1"]["image_1"]["data_mean"] = integrated_image / float(N)
    if integratedsq_raw is not None:
        out["entry_1"]["data_1"]["datasq_mean"] = integratedsq_raw / float(N)
    if integratedsq_image is not None:
        out["entry_1"]["image_1"]["datasq_mean"] = integratedsq_image / float(
            N)

    if bg is not None:
        out["entry_1"]["image_1"]["bg_fullframe"] = bg
        out["entry_1"]["image_1"]["bg"] = bg[roi]
    if ff is not None:
        out["entry_1"]["image_1"]["ff_fullframe"] = ff
        out["entry_1"]["image_1"]["ff"] = ff[roi]

    out["entry_1"]["image_1"]["good_pixels_fullframe"] = good_pixels
    out["entry_1"]["image_1"]["good_pixels"] = good_pixels[roi]
    out["entry_1"]["image_1"]["roi"] = [
        roi[0].start, roi[0].stop, roi[1].start, roi[1].stop
    ]
    W.write_solo(out)
    print('done.')
    # Close readers
    R.close()

    # Make a small report
    report_fname = filename_cxd[:-4] + "_report.pdf"
    # Check if file already exists
    report_suffix = 0
    while (os.path.isfile(report_fname)):
        report_fname = filename_cxd[:-4] + "_report_" + str(
            report_suffix) + ".pdf"
        report_suffix += 1

    print("Writing report to %s..." % (report_fname), end='')

    fig, ax = plt.subplots(2, 2, figsize=(20, 14))
    if bg is not None:
        pos = ax[0][0].imshow(bg[roi])
        ax[0][0].set_title('Background')
        fig.colorbar(pos, ax=ax[0][0])
    if ff is not None:
        pos = ax[1][0].imshow(ff[roi])
        ax[1][0].set_title('Flatfield')
        fig.colorbar(pos, ax=ax[1][0])
    if integrated_raw is not None:
        pos = ax[0][1].imshow(integrated_raw)
        ax[0][1].set_title('Integrated Raw')
        fig.colorbar(pos, ax=ax[0][1])
    if integrated_image is not None:
        pos = ax[1][1].imshow(integrated_image)
        ax[1][1].set_title('Integrated Image')
        fig.colorbar(pos, ax=ax[1][1])

    plt.savefig(report_fname)
    try:
        plt.show()
    except:
        pass

    print("done.")
Code example #19
def find_peaks(img, band_shape=(3, 25), show_images=False, save_images=False):
    """

    :param img:
    :param band_shape:  (height, width) aka (y, x)
    :param show_images:
    :param save_images:
    :return:
    """

    # Fixed: peaks are shifted, probably because opening() makes a shift from the structuring element.
    # Edit, no it is the convolution that does it...
    #
    img_org = img
    img = img.astype('f')  # cast to float, otherwise all calculations become inaccurate

    images = []
    band_selem = np.ones((3, 29))

    ploti = 0  # start at zero, and show_image will deal with it.
    if show_images:
        ploti = show_image(img, title="original", plotidx=ploti)

    title = "original"
    descr = "img"  # aka "history"
    images.append((img, title, descr))

    # #
    # # opening - small:
    # title, descr = "opening-3x21", "opening(%s)" % descr
    # print(title)
    # opened = opening(img, selem=np.ones((3, 21)))
    # # images.append((img, title, descr))
    # if show_images:
    #     ploti = show_image(opened, title=title, plotidx=ploti)
    #
    # #
    # # opening - medium:
    # title, descr = "opening-3x25", "opening(%s)" % descr
    # print(title)
    # opened = opening(img, selem=np.ones((3, 25)))
    # # images.append((img, title, descr))
    # if show_images:
    #     ploti = show_image(opened, title=title, plotidx=ploti)
    #
    #
    # opening - larger:
    title, descr = "opening-3x23", "opening(%s)" % descr
    print(title)
    img = opened1 = opening(img, selem=np.ones((3, 23)))
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # subtract global percentile:
    title = "subtract_global_percentile"
    descr = "%s(%s)" % (title, descr)
    print(title)
    img = minus_global_pct_bg = subtract_global_percentile(img, percentile=30)
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)



    #
    # rolling-minimum background subtraction with large ellipse:
    title = "rolling_5percentile_bg_el"
    descr = "%s(%s)" % (title, descr)
    print(title)
    rol_min_el = rolling_minimum_background(gaussian_filter(img, sigma=2), percentile=5,
                                             size=(71, 71),  # height, width (should be odd integers)
                                             geometry='ellipse')
    if show_images:
        ploti = show_image(rol_min_el, title=title, plotidx=ploti, clim_percentile=99.9)
    # subtract the background:
    title = "minus-rol_min_el"
    descr = "%s(%s)" % (title, descr)
    print(title)
    img = np.clip(img - rol_min_el, 0, None)  # remember to clip at zero
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # rolling-minimum background subtraction
    title = "rolling_5percentile_bg"
    descr = "%s(%s)" % (title, descr)
    print(title)
    rol_min_bg = rolling_minimum_background(gaussian_filter(img, sigma=2), percentile=5)
    if show_images:
        ploti = show_image(rol_min_bg, title=title, plotidx=ploti, clim_percentile=99.9)
    # subtract the background:
    title = "minus-rol_min_bg"
    descr = "%s(%s)" % (title, descr)
    print(title)
    img = np.clip(img - rol_min_bg, 0, None)  # remember to clip at zero
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)
    print("np.all(rol_min_bg == rol_min_el):", np.all(rol_min_bg == rol_min_el))

    # #
    # # subtract_row_col_percentile background subtraction:
    # title = "subtract_row_col_percentile"
    # descr = "%s(%s)" % (title, descr)
    # print(title)
    # img = background_subtracted_rcp = subtract_row_col_percentile(
    #     img, percentile=30, filters=savgol_filter, window_length=11, polyorder=1
    # )
    # images.append((img, title, descr))
    # if show_images:
    #     ploti = show_image(img, title=title, plotidx=ploti)

    #
    # opening, again:
    title, descr = "opening-%sx%s" % band_shape, "opening(%s)" % descr
    print(title)
    img = opened2 = opening(img, selem=np.ones(band_shape))
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # convolve:
    # default mode='full' will shift output, use mode='same' to prevent shifting
    title, descr = "convolved", "convolved(%s)" % descr
    print(title)
    img = convolved = convolve2d(img, band_selem/band_selem.sum(), mode='same')
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # low-percentile filter to narrow the bands:
    # (don't apply further openings or band-shape specific convolutions after narrowing the bands!)
    size = (3, 21)
    title = "pct_filtered-%sx%s" % size
    descr = "%s(%s)" % (title, descr)
    print(title)
    img = pct_filtered = percentile_filter(img, percentile=10, size=size)
    images.append((img, title, descr))
    if show_images:
        ploti = show_image(img, title=title, plotidx=ploti)

    #
    # gaussian:
    title, descr = "gaussian_filter", "gaussian_filter(%s)" % descr
    print(title)
    img = gaussianed = gaussian_filter(img, sigma=1)
    images.append((img, title, descr))
    # if show_images:
    #     ploti = show_image(img, title="gaussianed", plotidx=ploti)

    # Peaks!
    print("Finding peaks...")
    peak_pos = peak_local_max(img,
                              min_distance=10,
                              # threshold_abs=3,
                              # threshold_rel=0.01  # values must be 0.01 * maximum_value
                             )
    print("peak_pos.shape", peak_pos.shape)

    #
    # Draw peaks on a copy of the image:
    img = img.copy()  # otherwise we will write on convolved
    for pos in peak_pos:
        draw_rectangle(img, pos, width=2, val=255, border=0, center_val=None)
    ploti = show_image(img, title="peaks", plotidx=ploti)

    #
    # Other visualizations:
    ggm_filtered = gaussian_gradient_magnitude(convolved, sigma=1.0)
    ploti = show_image(ggm_filtered, title="gaussian_gradient_magnitude",
                       plotidx=ploti, clim=(0, np.percentile(ggm_filtered, 99.9)))

    title, descr = "glaplace of convolved", "laplace of convolved"
    # laplaced = laplace(convolved)
    laplaced = gaussian_laplace(convolved, sigma=2)
    ploti = show_image(laplaced, title=title, plotidx=ploti,
                       cmap="gray_r",
                       clim_percentile=(1, 99))

    lggm = laplaced*(ggm_filtered-3)
    ploti = show_image(lggm, title="laplaced*(ggm_filtered-3)", plotidx=ploti,
                       cmap="gray_r",
                       clim_percentile=(1, 99))

    title, descr = "glaplace of opened1", "laplace of opened1"
    # laplaced = laplace(convolved)
    laplaced = gaussian_laplace(opened1, sigma=2)
    ploti = show_image(laplaced, title=title, plotidx=ploti,
                       cmap="gray_r",
                       clim_percentile=(10, 90))

    if save_images:
        return peak_pos, images
    else:
        return peak_pos
Code example #20
File: skysub.py  Project: cdfassnacht/keckcode
def skysub(x, y, z, scale):
    # Find sources by determining which pixels are slightly high
    height = int(y.max() - y.min())
    width = int(x.max() - x.min())

    midpt = y.mean()

    # Very wide slits need special attention. Here we fit a first order
    #   correction to the slit and subtract it away before doing the high
    #   pixel rejection (the problem is if there is a small gradient across
    #   a wide slit, the top and bottom pixels may differ significantly,
    #   but these pixels may be close in *wavelength* and so locally (on
    #   the CCD) low pixels will be rejected in the smoothing
    if height > WIDE:
        zbak = z.copy()
        args = y.argsort()
        revargs = args.argsort()
        ymodel = ndimage.percentile_filter(z[args], 30., size=height)[revargs]
        fit = special_functions.lsqfit(ymodel, 'polynomial', 1)

        if fit['coeff'][1] * float(ymodel.size) / fit['coeff'][0] < 0.05:
            pass
        else:
            ymodel = special_functions.genfunc(scipy.arange(ymodel.size), 0,
                                               fit)
            ymodel -= ymodel.mean()
            z -= ymodel

    # Filter locally (in wavelength space) high points
    args = x.argsort()
    revargs = args.argsort()
    smooth = ndimage.percentile_filter(z[args], 30., size=height)[revargs]
    diff = z - smooth
    # We assume poisson statistics....
    var = scipy.sqrt(scipy.fabs(smooth))
    sigma = diff / var

    args = y.argsort()
    revargs = args.argsort()

    t = ndimage.median_filter(sigma[args], 9)
    t = ndimage.gaussian_filter(t, width)[revargs]
    # Source detection/rejection
    # Reject yvalues > 1. sigma, and weight remaining pixels
    w = (1.0 - t) / abs(z)

    skycond = ((w > 0.) & (z > 0))  #|((y<y.min()+2.)|(y>y.max()-2.))
    x = x[skycond]
    y = y[skycond]
    z = z[skycond]

    # Reject residual high pixels (and very low pixels too!)
    args = x.argsort()
    revargs = args.argsort()
    smooth = ndimage.median_filter(z[args], height / 4.)[revargs]
    diff = z - smooth
    var = scipy.sqrt(smooth)

    cond = abs(diff) < 4. * var
    x = x[cond]
    y = y[cond]
    z = z[cond]

    kx = 3
    ky = 1

    # If the slit is long, return to original data and increase the order
    #   of the y-fit.
    if height > WIDE:
        z = zbak[skycond]
        z = z[cond].astype(scipy.float64)

        if height > WIDE * 1.5:
            ky = 3

        cond = z > 0.
        x = x[cond]
        y = y[cond]
        z = z[cond]

    w = 1. / z

    # Create knots...
    innertx = scipy.arange(x.min() + scale / 2., x.max() - scale / 2., scale)
    tx = scipy.zeros(innertx.size + kx * 2 + 2)
    tx[0:kx + 1] = x.min()
    tx[kx + 1:innertx.size + kx + 1] = innertx.copy()
    tx[innertx.size + kx + 1:] = x.max()
    ty = scipy.zeros(ky * 2 + 2)
    ty[0:ky + 1] = y.min()
    ty[ky + 1:] = y.max()

    # ...and fit.
    bgfit = interpolate.bisplrep(x,
                                 y,
                                 z,
                                 w,
                                 tx=tx,
                                 ty=ty,
                                 kx=kx,
                                 ky=ky,
                                 task=-1,
                                 nxest=tx.size,
                                 nyest=ty.size,
                                 s=0)

    return bgfit
Code example #21
 def run(self, ips, snap, img, para=None):
     nimg.percentile_filter(snap, para['per'], para['size'], output=img)
Code example #22
def evaluate(model,
             ds_test,
             ds_info,
             model_name,
             run_paths,
             num_categories=13):
    """evaluate performance of the model

    Parameters:
        model (keras.Model): keras model object to be evaluated
        ds_test (tf.data.Dataset): test set
        ds_info (dictionary): information and structure of dataset
        model_name (string): name of the model (name list: 'Sequence_LSTM', 'Sequence_BiLSTM', 'Sequence_GRU', 'Sequence_BiGRU', 'Sequence_Conv1D',
                         'Sequence_BiConv1D', 'Sequence_Ensemble', 'Seq2Seq', 'Sequence_RNN_Fourier')
        run_paths (dictionary): storage path of model information
        num_categories (int): number of label categories (must be 12 or 13; when 12 is used, data with label 0 are removed)
    """

    # set up the model and load the checkpoint
    checkpoint = tf.train.Checkpoint(step=tf.Variable(1),
                                     optimizer=tf.keras.optimizers.Adam(),
                                     net=model)
    checkpoint_manager = tf.train.CheckpointManager(
        checkpoint, run_paths["path_ckpts_train"], max_to_keep=10)
    checkpoint.restore(checkpoint_manager.latest_checkpoint)
    step = int(checkpoint.step.numpy())

    if model_name == 'Seq2Seq':
        encoder = model[0]
        decoder = model[1]
        test_loss = []
        test_accuracy = []
        loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True)

        # evaluate the model
        for idx, (test_windows, test_labels) in enumerate(ds_test):
            loss = 0
            enc_output, enc_hidden = encoder(test_windows)
            dec_hidden = enc_hidden
            dec_input = tf.zeros([test_labels.shape[0], 1], dtype=tf.int64)
            for t in range(test_labels.shape[1]):
                prediction, dec_state, _ = decoder(dec_input, dec_hidden,
                                                   enc_output)
                loss += loss_object(tf.expand_dims(test_labels[:, t], 1),
                                    prediction)
                dec_input = tf.expand_dims(tf.argmax(prediction, axis=1), 1)
                if t == 0:
                    predictions = tf.expand_dims(prediction, 1)
                else:
                    predictions = tf.concat(
                        [predictions,
                         tf.expand_dims(prediction, 1)], axis=1)
            test_loss.append(
                tf.math.reduce_mean(loss / int(test_labels.shape[1])).numpy())
            test_accuracy.append(
                metrics.accuracy_score(
                    tf.reshape(test_labels, [-1]).numpy(),
                    tf.reshape(tf.argmax(predictions, axis=2), [-1]).numpy()))
            if idx == 0:
                test_confusion_matrix = metrics.confusion_matrix(
                    tf.reshape(test_labels, [-1]).numpy(),
                    tf.reshape(tf.argmax(predictions, axis=2), [-1]).numpy())
        test_loss = np.mean(test_loss)
        test_accuracy = np.mean(test_accuracy)

        # log the evaluation information
        logging.info(f"Evaluating at step: {step}...")
        logging.info('loss:\n{}'.format(test_loss))
        logging.info('accuracy:\n{}'.format(test_accuracy))
        logging.info('confusion_matrix:\n{}'.format(test_confusion_matrix))
    else:
        # compile the model
        model.compile(
            optimizer=tf.keras.optimizers.Adam(),
            loss=tf.keras.losses.SparseCategoricalCrossentropy(),
            metrics=[[Accuracy()],
                     [ConfusionMatrix(num_categories=num_categories)]])

        # evaluate the model
        result = model.evaluate(ds_test, return_dict=True)

        # log the evaluation information
        logging.info(f"Evaluating at step: {step}...")
        for key, value in result.items():
            logging.info('{}:\n{}'.format(key, value))

        for idx, (test_windows, test_labels) in enumerate(ds_test):
            predictions = model(test_windows)
            predictions = tf.argmax(predictions, axis=2)
            predictions = np.concatenate(predictions.numpy()).flatten()
            test_labels = np.concatenate(test_labels.numpy()).flatten()

            # postprocess the predictions by using the median filter or percentile filter
            # (also compare the results of filter and choose the best)
            plt.figure(dpi=800)
            plt.title('POSTPROCESSING METHODS COMPARISON')
            plt.xlabel('FILTER SIZE')
            plt.ylabel('ACCURACY(%)')
            plt.grid(b=True, axis='y')
            for percentile in range(45, 60, 5):
                size_list = range(0, 255, 10)
                acc_list = []
                for size in size_list:
                    if size != 0:
                        test_predictions = percentile_filter(
                            predictions, percentile=percentile, size=size)
                    else:
                        test_predictions = predictions
                    test_accuracy = metrics.accuracy_score(
                        test_labels, test_predictions) * 100
                    logging.info(
                        'accuracy(percentile filter {} with size {}):\n{}'.
                        format(percentile, size, test_accuracy))
                    acc_list.append(test_accuracy)
                if percentile == 50:
                    plt.plot(size_list,
                             acc_list,
                             marker="s",
                             markersize=3,
                             label=(str(percentile) + '%' +
                                    ' Percentile Filter(Median Filter)'))
                else:
                    plt.plot(size_list,
                             acc_list,
                             marker="s",
                             markersize=3,
                             label=(str(percentile) + '%' +
                                    ' Percentile Filter'))
            plt.legend(loc='lower left')
            plt.savefig(run_paths['path_model_id'] +
                        '/logs/eval/postprocessing_plot.png',
                        dpi=800)
            plt.show()

            # plot the confusion matrix
            cm = result['confusion_matrix']
            fig, ax = plt.subplots()
            im = ax.imshow(cm + 1,
                           norm=colors.LogNorm(vmin=100, vmax=cm.max()),
                           cmap='Wistia')
            cbar = ax.figure.colorbar(im, ax=ax)
            cbar.ax.set_ylabel('NUMBER OF SAMPLING POINTS',
                               rotation=-90,
                               va="bottom")
            ax.set_xticks(np.arange(num_categories))
            ax.set_yticks(np.arange(num_categories))
            ax.set_xticklabels([
                'W', 'WU', 'WD', 'SI', 'ST', 'L', 'ST2SI', 'SI2ST', 'SI2L',
                'L2SI', 'ST2L', 'L2ST'
            ])
            ax.set_yticklabels([
                'W', 'WU', 'WD', 'SI', 'ST', 'L', 'ST2SI', 'SI2ST', 'SI2L',
                'L2SI', 'ST2L', 'L2ST'
            ])
            plt.setp(ax.get_xticklabels(),
                     rotation=45,
                     ha="right",
                     rotation_mode="anchor")
            plt.setp(ax.get_yticklabels(),
                     rotation=45,
                     ha="right",
                     rotation_mode="anchor")
            for i in range(num_categories):
                for j in range(num_categories):
                    text = ax.text(j,
                                   i,
                                   cm[i, j],
                                   fontsize='x-small',
                                   ha="center",
                                   va="center",
                                   color="b")
            ax.set_title("SEQUENCE TO SEQUENCE CONFUSION MATRIX")
            fig.tight_layout()
            plt.show()
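
The sweep above uses percentile_filter purely as a 1-D smoother over the predicted label sequence. A minimal, self-contained sketch of that post-processing idea (synthetic labels; the class count and window sizes are assumptions, not the project's pipeline) might look like:

import numpy as np
from scipy.ndimage import percentile_filter
from sklearn import metrics

rng = np.random.default_rng(0)
labels = np.repeat([0, 1, 2, 1, 0], 60)               # hypothetical activity segments
predictions = labels.copy()
flips = rng.random(labels.size) < 0.1                 # ~10% isolated misclassifications
predictions[flips] = rng.integers(0, 3, flips.sum())

for size in (1, 11, 31):
    smoothed = predictions if size == 1 else percentile_filter(
        predictions, percentile=50, size=size)
    print(size, metrics.accuracy_score(labels, smoothed) * 100)
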
Code example #23
0
File: mocorr.py Project: jingxlim/CircuitSeeker
def motionCorrect(
    folder,
    prefix,
    suffix,
    fixed,
    fixed_vox,
    moving_vox,
    write_path,
    dataset_path=None,
    distributed_state=None,
    sigma=7,
    transforms_dir=None,
    **kwargs,
):
    """
    """

    # set up the distributed environment
    ds = distributed_state
    if distributed_state is None:
        ds = csd.distributedState()
        # writing large compressed chunks locks GIL for a long time
        ds.modifyConfig({
            'distributed.comm.timeouts.connect': '60s',
            'distributed.comm.timeouts.tcp': '180s',
        })
        ds.initializeLSFCluster(job_extra=["-P scicompsoft"])
        ds.initializeClient()

    # create (lazy) dask bag from all frames
    frames = csio.daskBagOfFilePaths(folder, prefix, suffix)
    nframes = frames.npartitions

    # scale cluster carefully
    if 'max_workers' in kwargs.keys():
        max_workers = kwargs['max_workers']
    else:
        max_workers = 1250
    ds.scaleCluster(njobs=min(nframes, max_workers))

    # align all
    dfixed = delayed(fixed)
    dfixed_vox = delayed(fixed_vox)
    dmoving_vox = delayed(moving_vox)
    ddataset_path = delayed(dataset_path)
    params = frames.map(
        lambda b, w, x, y, z: rigidAlign(w, b, x, y, dataset_path=z),
        w=dfixed,
        x=dfixed_vox,
        y=dmoving_vox,
        z=ddataset_path,
    ).compute()
    params = np.array(list(params))

    # (weak) outlier removal and smoothing
    params = percentile_filter(params, 50, footprint=np.ones((3, 1)))
    params = gaussian_filter1d(params, sigma, axis=0)

    # write transforms as matrices
    if transforms_dir is not None:
        paths = list(frames)
        for ind, p in enumerate(params):
            transform = _parametersToRigidMatrix(p)
            basename = os.path.splitext(os.path.basename(paths[ind]))[0]
            path = os.path.join(transforms_dir, basename) + '_rigid.mat'
            np.savetxt(path, transform)

    # apply transforms to all images
    params = db.from_sequence(params, npartitions=nframes)
    transformed = frames.map(
        lambda b, x, y, z: applyTransform(b, x, y, dataset_path=z),
        x=dmoving_vox,
        y=params,
        z=ddataset_path,
    ).to_delayed()

    # convert to a (lazy) 4D dask array
    sh = transformed[0][0].shape.compute()
    dd = transformed[0][0].dtype.compute()
    arrays = [da.from_delayed(t[0], sh, dtype=dd) for t in transformed]
    transformed = da.stack(arrays, axis=0)

    # write in parallel as 4D array to zarr file
    compressor = Blosc(cname='zstd', clevel=9, shuffle=Blosc.BITSHUFFLE)
    transformed_disk = zarr.open(write_path,
                                 'w',
                                 shape=transformed.shape,
                                 chunks=(256, 10, 256, 256),
                                 dtype=transformed.dtype,
                                 compressor=compressor)
    da.to_zarr(transformed, transformed_disk)

    # release resources
    if distributed_state is None:
        ds.closeClient()

    # return reference to data on disk
    return transformed_disk
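
The outlier-removal step above amounts to a temporal median over each rigid parameter followed by Gaussian smoothing along the frame axis. A stand-alone sketch of just that step (the parameter array shape and values are made up) could be:

import numpy as np
from scipy.ndimage import percentile_filter, gaussian_filter1d

nframes, nparams = 100, 6                                        # hypothetical sizes
params = np.cumsum(np.random.randn(nframes, nparams), axis=0)    # drifting parameters
params[40] += 25.0                                               # one badly registered frame

# 3-frame median per parameter suppresses the outlier; Gaussian smooths over time
params = percentile_filter(params, 50, footprint=np.ones((3, 1)))
params = gaussian_filter1d(params, sigma=7, axis=0)
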
Code example #24
0
File: wavesolve.py Project: PatrickRWells/keckcode
def solve(d,orders):
    path = os.path.split(__file__)[0]

    lines = {}
    lines['cuar'] = numpy.loadtxt(path+"/data/cuar.lines")
    lines['hgne'] = numpy.loadtxt(path+"/data/hgne.lines")
    lines['xe'] = numpy.loadtxt(path+"/data/xe.lines")

    #startsoln = numpy.load(path+"/data/test_wavesol.dat",
    #                       allow_pickle=True)
    startsoln = numpy.load(path+"/data/esi_wavesolution.dat",
                           allow_pickle=True)

    #arclist = d.keys() # Under python 3 this does not produce a list
    #                        and so arclist[0] below fails
    arclist = list(d)
    arclist.sort()
    soln = []
    if d[arclist[0]].shape[1]>3000:
        xvals = numpy.arange(4096.)
        cuslice = slice(3860,3940)
        fw1 = 75.
        fw2 = 9
    else:
        xvals = numpy.arange(1.,4096.,2.)
        cuslice = slice(1930,1970)
        fw1 = 37.
        fw2 = 5

    """
    Do a temporary kludge.  In some cases, the finding of the orders
    fails, and only 9 orders are found.  In this case, we need to skip
    the first of the orders
    """
    if len(orders) == 9:
        ordstart = 1
        dord = 1
    else:
        ordstart = 0
        dord = 0
    for i in range(ordstart, 10):
      solution = startsoln[i]
      start,end = orders[(i-dord)]

      peaks = {}
      trace = {}
      fitD = {}
      WIDTH = 4
      import pylab
      for arc in arclist:
          data = numpy.nanmedian(d[arc][start:end],axis=0)
          data[numpy.isnan(data)] = 0.
          if i==0 and arc=='cuar':
              data[cuslice] = numpy.median(data)
          trace[arc] = data.copy()
          bak = ndimage.percentile_filter(data,50.,int(fw1))
          bak = getContinuum(bak,40.)
          data -= bak
          fitD[arc] = data/d[arc][start:end].std(0)
          p = ndimage.maximum_filter(data,fw2)
          std = clip(numpy.trim_zeros(data),3.)[1]
          nsig = ndimage.uniform_filter((data>7.*std)*1.,3)
          peak = numpy.where((nsig==1)&(p>10.*std)&(p==data))[0]
          peaks[arc] = []
          for p in peak:
              if p-WIDTH<0 or p+WIDTH+1>xvals.size:
                  continue
              x = xvals[p-WIDTH:p+WIDTH+1].copy()#-xvals[p]
              f = data[p-WIDTH:p+WIDTH+1].copy()
              fitdata = numpy.array([x,f]).T
              fit = numpy.array([0.,f.max(),xvals[p],1.])
              fit,chi = sf.ngaussfit(fitdata,fit,weight=1)
              peaks[arc].append(fit[2])#+xvals[p])
      for converge in range(15):
        wave = 10**sf.genfunc(xvals,0.,solution)

        refit = []
        corrA = {}
        err = wave[int(wave.size/2)]-wave[int(wave.size/2-1)]
        for arc in arclist:
            corr = []
            p = 10.**sf.genfunc(peaks[arc],0.,solution)
            for k in range(p.size):
                cent = p[k]
                diff = cent-lines[arc]
                corr.append(diff[abs(diff).argmin()])
            corr = numpy.array(corr)
            if corr.size<4:
                continue
            m,s = clip(corr)
            corr = numpy.median(corr[abs(corr-m)<5.*s])
            print(corr)
            corrA[arc] = corr
#        corr = m

        #for arc in arclist:
            p = 10.**sf.genfunc(peaks[arc],0.,solution)
            for k in range(p.size):
                pos = peaks[arc][k]
                cent = p[k]
                diff = abs(cent-lines[arc]-corr)
                if diff.min()<2.*err:
                    refit.append([pos,lines[arc][diff.argmin()]])
        refit = numpy.asarray(refit)
        solution = sf.lsqfit(refit,'polynomial',3)
        refit = []
        err = solution['coeff'][1]
        for arc in arclist:
            data = trace[arc]
            for pos in peaks[arc]:
                cent = sf.genfunc(pos,0.,solution)
                delta = 1e9
                match = None
                for j in lines[arc]:
                    diff = abs(cent-j)
                    if diff<delta and diff<1.*err:
                        delta = diff
                        match = j
                if match is not None:
                    refit.append([pos,match])
        refit = numpy.asarray(refit)
        refit[:,1] = numpy.log10(refit[:,1])
        solution = sf.lsqfit(refit,'chebyshev',3)

        #refit[:,0],refit[:,1] = refit[:,1].copy(),refit[:,0].copy()
        #refit = numpy.array([refit[:,1],refit[:,0]]).T
        #refit = refit[:,::-1]
        solution2 = sf.lsqfit(refit[:,::-1],'chebyshev',3)
        #soln.append([solution,solution2])

        w = 10**sf.genfunc(xvals,0.,solution)
        if (w==wave).all() or converge>8:
            print("Order %d converged in %d iterations"%(i,converge))
            soln.append([solution,solution2])
            break
            # NOTE: the diagnostic plotting below is unreachable; the break
            # above keeps it here only as disabled debugging code.
            for arc in arclist:
                pylab.plot(w,trace[arc])
                pylab.plot(w,fitD[arc])
                pp = 10**sf.genfunc(peaks[arc],0.,solution)
                for p in pp:
                    pylab.axvline(p,c='k')
            for j in 10**refit[:,1]:
                if j>w[0] and j<w[-1]:
                    pylab.axvline(j)
            pylab.show()
            break
    return soln
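
In solve, percentile_filter acts as a running median that estimates the slowly varying continuum under the 1-D arc spectrum before line peaks are located. A reduced sketch of that background subtraction (synthetic spectrum; window sizes and thresholds are assumptions) is:

import numpy as np
from scipy import ndimage

x = np.arange(4096.)
spectrum = 50. + 0.01 * x                                    # slowly varying continuum
for center in (500., 1500., 3000.):                          # synthetic arc lines
    spectrum += 200. * np.exp(-0.5 * ((x - center) / 3.) ** 2)

background = ndimage.percentile_filter(spectrum, 50., size=75)   # running median
residual = spectrum - background
local_max = ndimage.maximum_filter(residual, size=9)
peaks = np.where((residual == local_max) & (residual > 10. * residual.std()))[0]
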
Code example #25
0
File: volTools.py Project: d-v-b/zebra
def my_dff(y, perc, window):
    baseFunc = lambda x: percentile_filter(x.astype(float_dtype), perc, window, mode='reflect')
    b = baseFunc(y)
    return ((y - b) / (b + .1))
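
my_dff treats a low running percentile of the raw trace as its baseline and expresses the trace relative to it. A self-contained version of the same idea (float_dtype replaced by an explicit dtype; the percentile and window values are assumptions) is:

import numpy as np
from scipy.ndimage import percentile_filter

def dff(trace, perc=10, window=301):
    # baseline = sliding low percentile; the small offset avoids division by zero
    baseline = percentile_filter(trace.astype(np.float64), perc, size=window,
                                 mode='reflect')
    return (trace - baseline) / (baseline + .1)

t = np.linspace(0., 100., 5000)
raw = 100. + 5. * np.sin(0.05 * t)          # slow baseline drift
raw[2000:2050] += 40.                       # one transient event
dff_trace = dff(raw)
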
Code example #26
0
def ndi_med(image, n):
    return percentile_filter(image, 50, size=n * 2 - 1)
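
Since the 50th percentile of a window is its median, the call above should agree exactly with ndimage.median_filter for the same odd-sized window; a quick check of that equivalence:

import numpy as np
from scipy.ndimage import median_filter, percentile_filter

image = np.random.rand(64, 64)
n = 3
assert np.array_equal(percentile_filter(image, 50, size=n * 2 - 1),
                      median_filter(image, size=n * 2 - 1))
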
Code example #27
0
    def apply_modifier(self):
        """
        Apply a selected filter to an image.

        Parameters
        ----------
        input_image : nparray
            Represents the image to be filtered
        filter_type: str
            Must be one of:
                    'gaussian',
                    'uniform',
                    'median',
                    'maximum',
                    'minimum',
                    'sharpening',
                    'percentile',
                    'wiener',
                    'sobel'
        *args: Arguments of the selected filter,
            see details for more information.
        **kwargs: The keyword arguments of the selected filter,
            see details for more information.

        Returns
        -------
        nparray
            The filtered image in the same format as the input.

        Details
        -------
        Arguments for the filters and the default values:
        =============  ===========================
        Filter         Kwargs
        =============  ===========================
        'gaussian'     sigma: 3
        'uniform'      size: 3
        'median'       size: 3
        'maximum'      size: 3
        'minimum'      size: 3
        'sharpening'   alpha: 30, filter_sigma: 1
        'percentile'   percentile: 75, size: 3
        'wiener'       noise power and size
        'sobel'        None
        =============  ===========================
        These details are also defined in this module as an argument.
        """
        complete_file_name = self.imagem.edited_image.path
        # Open the image file
        input_image = imageio.imread(complete_file_name)
        # Apply the Modifier
        if self.filter_type == self.GAUSSIAN:
            output_image = ndimage.gaussian_filter(
                input_image,
                sigma=float(self.filter_argument_value),
            )
        elif self.filter_type == self.UNIFORM:
            output_image = ndimage.uniform_filter(
                input_image,
                size=int(self.size_value),
            )
        elif self.filter_type == self.MEDIAN:
            output_image = ndimage.median_filter(
                input_image,
                size=int(self.size_value),
            )
        elif self.filter_type == self.MAXIMUM:
            output_image = ndimage.maximum_filter(
                input_image,
                size=int(self.size_value),
            )
        elif self.filter_type == self.MINIMUM:
            output_image = ndimage.minimum_filter(
                input_image,
                size=int(self.size_value),
            )
        elif self.filter_type == self.SHARPENING:
            output_image = self.sharpenning_filter(
                input_image,
                alpha=float(self.filter_argument_value),
                filter_sigma=float(self.size_value)
            )
        elif self.filter_type == self.PERCENTILE:
            output_image = ndimage.percentile_filter(
                input_image,
                percentile=int(self.filter_argument_value),
                size=int(self.size_value),
            )
        elif self.filter_type == self.WIENER:
            noise = None
            if self.filter_argument_value >= 0:
                noise = float(self.filter_argument_value)
            output_image = signal.wiener(
                input_image,
                mysize=int(self.size_value),  # TODO: Should be odd, add clean
                noise=noise,
            )
        elif self.filter_type == self.SOBEL:
            output_image = self.sobel_filter(
                input_image
            )

        # Save the result
        output_image = output_image.astype(input_image.dtype)
        imageio.imwrite(complete_file_name, output_image)
        return output_image
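
For the 'percentile' branch this method reduces to a single ndimage call with the defaults listed in the table above; a stand-alone equivalent (the file name is hypothetical, defaults taken from the docstring) would be:

import imageio
from scipy import ndimage

input_image = imageio.imread('example.png')       # hypothetical input file
output_image = ndimage.percentile_filter(input_image, percentile=75, size=3)
imageio.imwrite('example_filtered.png', output_image.astype(input_image.dtype))
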