Example No. 1
def multi_median(input, median_radius, numpass):
    """ Applies median filter multiple times on input data.

    Parameters
    ----------
    input : ndarray
        The input volume to apply filter on.
    median_radius : int
        Radius (in voxels) of the applied median filter.
    numpass : int
        Number of passes of the median filter.

    Returns
    -------
    input : ndarray
        Filtered input volume.
    """
    outvol = np.zeros_like(input)

    # Array representing the size of the median window in each dimension.
    medarr = np.ones_like(input.shape) * ((median_radius * 2) + 1)

    # Multi pass
    for i in range(0, numpass):
        median_filter(input, medarr, output=input)
    return input
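A minimal usage sketch (the array name and sizes below are illustrative, not from the original project); note that the function filters the input array in place and returns that same array:

import numpy as np
from scipy.ndimage import median_filter

vol = np.random.rand(40, 40, 40).astype(np.float32)
# two passes of a radius-1 (3x3x3) median filter; vol itself is modified in place
denoised = multi_median(vol, median_radius=1, numpass=2)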
Example No. 2
def create_background_map(data, bsx, bsy):
    '''Create a background map with a given mesh size'''
    sx, sy = data.shape
    # number of mesh cells in each direction (integer division, so the reshape below works)
    mx = sx // bsx
    my = sy // bsy

    comp = []
    rms = []
    # Rows
    sp = numpy.split(data, numpy.arange(bsx, sx, bsx), axis=0)
    for s in sp:
        # Columns
        rp = numpy.split(s, numpy.arange(bsy, sy, bsy), axis=1)
        for r in rp:
            b, r = background_estimator(r)
            comp.append(b)
            rms.append(r)

    # Reconstructed image
    z = numpy.array(comp)
    z.shape = (mx, my)
    # median filter
    ndfilter.median_filter(z, size=(3, 3), output=z)

    # Interpolate to the original size
    new = _interpolation(z, sx, sy, mx, my)

    # Interpolate the rms
    z = numpy.array(rms)
    z.shape = (mx, my)
    nrms = _interpolation(z, sx, sy, mx, my)

    return new, nrms
Example No. 3
def compute_snr(power_2d, fractional_window_size=0.05):
    """
    Compute a 1D SNR spectrum from the 2D Fourier transform of the data

    Parameters
    ----------
    power_2d : numpy array
        The 2D Fourier transform of the data
    fractional_window_size : float
        Median filter window size as a fraction of the 1D power array

    Returns
    -------
    snr : numpy array
        The 1D SNR
    """
    power = np.median(power_2d, axis=0)
    p2p_scatter = abs(power[1:] - power[:-1])
    power = power[1:]  # Throw away DC term

    # Median filter
    window_size = get_odd_integer(fractional_window_size * len(power))
    continuum = median_filter(power, size=window_size)
    pixel_to_pixel_scatter = median_filter(p2p_scatter, size=window_size)
    snr = (power - continuum) / pixel_to_pixel_scatter

    # Also divide out the global scatter for any residual structure that was not removed with the median filter
    global_scatter = robust_standard_deviation(snr)
    snr /= global_scatter

    return snr
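The snippet relies on two helpers, get_odd_integer and robust_standard_deviation, that are not shown here. Plausible stand-in implementations (assumptions, not the project's actual code) could look like:

import numpy as np

def get_odd_integer(value):
    # nearest odd integer, so the median window has a well-defined central sample
    return int(2 * round((value + 1) / 2) - 1)

def robust_standard_deviation(values):
    # median absolute deviation scaled to approximate a Gaussian standard deviation
    return 1.4826 * np.median(np.abs(values - np.median(values)))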
Example No. 4
def preprocess_image(image):
    # Copy the depth part of the image
    depth_pixels = image.pixels[..., 2].copy()
    depth_pixels = rescale_to_opencv_image(depth_pixels)
    filtered_depth_pixels = median_filter(depth_pixels, 5)

    # Build mask for floodfilling, this lets me ignore all the pixels
    # from the background and around the ears
    mask = np.zeros((depth_pixels.shape[0] + 2, depth_pixels.shape[1] + 2),
                    dtype=np.uint8)
    # Flood fill from top left
    cv2.floodFill(filtered_depth_pixels, mask, (0, 0),
                  (255, 255, 255), flags=cv2.FLOODFILL_MASK_ONLY)
    # Flood fill from top right
    cv2.floodFill(filtered_depth_pixels, mask, (depth_pixels.shape[1] - 1, 0),
                  (255, 255, 255), flags=cv2.FLOODFILL_MASK_ONLY)
    # Truncate and negate the flood filled areas to find the facial region
    floodfill_mask = (~mask.astype(np.bool))[1:-1, 1:-1]

    # Build a mask of the areas inside the face that need inpainting
    inpaint_mask = ~image.mask.mask & floodfill_mask
    # Inpaint the image and filter to smooth
    inpainted_pixels = cv2.inpaint(depth_pixels,
                                   inpaint_mask.astype(np.uint8),
                                   5, cv2.INPAINT_NS)
    inpainted_pixels = median_filter(inpainted_pixels, 5)

    # Back to depth pixels
    image.pixels[..., 2] = rescale_to_depth_image(image, inpainted_pixels)
    # Reset the mask!
    image.mask.pixels[..., 0] = ~np.isnan(image.pixels[..., 2])
Example No. 5
def lacosmic(img, cat, oper, sci, imgrn):
    '''Maximum pixel on LA Cosmic "fine structure" image.'''
    lhs = median_filter(img['diff'], size=3)
    rhs = median_filter(median_filter(img['diff'], size=3), size=7)
    fsimg = lhs - rhs
    fsmax = fsimg.max()
    diffmax = img['diff'].max()
    return diffmax / fsmax
Example No. 6
def _apply_median_filter(nr, footprint, three_d):
    if three_d:
        pancake = np.swapaxes(np.tile(footprint, (3, 1, 1)), 0, -1)
        nr = 1.0*median_filter(nr, footprint=pancake)
    else:
        nt = nr.shape[2]
        for i in range(0, nt):
            nr[:, :, i] = 1.0*median_filter(nr[:, :, i], footprint=footprint)
    return nr
Example No. 7
def despike_full(data, window_length, rejection_threshold=7, preprocess_function=lpf256):
    preproc = np.abs(preprocess_function(data))  # use the supplied preprocessing function (defaults to lpf256)
    medians = filters.median_filter(preproc, window_length)
    abs_deviations = np.abs(preproc - medians)
    mads = filters.median_filter(abs_deviations, window_length)
    spike_flags = abs_deviations > (mads*rejection_threshold)
    nspikes = spike_flags.sum()
    data[spike_flags] = np.array(random.sample(data[~spike_flags],nspikes))
    return data
Example No. 8
def pearson(img, margin=3, dur=(0,25), SpaMed=False):
    ''' img     : numpy array
        margin  : shift range setting
        dur     : frame index (int) or (start, end) range of frames used to define the reference F
        SpaMed  : spatial median filtering; a single bool or a list of two bools [Filt_ref, Filt_sub]
    '''
    height, width, nframes = img.shape
    
    offsetxy = np.zeros((nframes,4), dtype=np.float)
    c = np.zeros((margin*2+1, margin*2+1), dtype=np.float)
    
    # clip out subimage at the center of img.
    x1, y1 = margin, margin
    x2, y2 = width-margin, height-margin
    if type(dur)==int:
        ref = img[y1:y2, x1:x2, dur]
        ref[np.isnan(ref)] = 0
    else:
        ref = np.mean(img[y1:y2, x1:x2, dur[0]:dur[1]], 2)
    
    if type(SpaMed) is bool:
        Filt_ref, Filt_sub = SpaMed, False
    elif type(SpaMed) is list:
        Filt_ref, Filt_sub = SpaMed
    
    if Filt_ref:
        ref = median_filter(ref, (3,3), mode='nearest')
    
    ref = ref.ravel()
    
    for z in range(nframes):
        if Filt_sub:
            target = median_filter(img[:,:,z], (3,3), mode='nearest')
        else:
            target = img[:,:,z]
            
        for xoff in range(-margin, margin+1):
            for yoff in range(-margin, margin+1):
                
                sub = target[y1+yoff:y2+yoff, x1+xoff:x2+xoff].ravel() # fastest
                #sub = img[y1+yoff:y2+yoff, x1+xoff:x2+xoff,z].flat
                #sub = img[y1+yoff:y2+yoff, x1+xoff:x2+xoff,z].reshape(-1)
                #sub = img[y1+yoff:y2+yoff, x1+xoff:x2+xoff,z].flatten()
                c[margin+yoff, margin+xoff] = pearsonr(ref,sub)[0]
        
        offset = np.nonzero(c.max() == c)
        #print offset, c.max(), c
        
        offsetxy[z,:] = [offset[0][0]-margin,\
                         offset[1][0]-margin,\
                         c.max(),
                         z]
    
    return offsetxy
Example No. 9
def preprocess_eyegaze(eyegaze, blink_margin=200, filter_width=40):
    from scipy.ndimage.morphology import binary_dilation
    from scipy.ndimage.filters import median_filter

    mask = binary_dilation(np.isnan(eyegaze['x']), iterations=blink_margin)
    # filter x and y coordinate separately
    eyegaze['x'] = median_filter(eyegaze['x'], size=filter_width)
    eyegaze['y'] = median_filter(eyegaze['y'], size=filter_width)
    # blank blink margin
    eyegaze['x'][mask] = np.nan
    eyegaze['y'][mask] = np.nan
    return eyegaze
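A minimal usage sketch; eyegaze is assumed here to be a NumPy structured array with 'x' and 'y' fields and NaNs during blinks (the exact field layout is an assumption based on how the function indexes it):

import numpy as np

n = 1000
eyegaze = np.zeros(n, dtype=[('x', float), ('y', float)])
eyegaze['x'] = np.random.randn(n)
eyegaze['y'] = np.random.randn(n)
eyegaze['x'][400:420] = np.nan   # a simulated blink
eyegaze['y'][400:420] = np.nan
clean = preprocess_eyegaze(eyegaze, blink_margin=50, filter_width=40)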
Example No. 10
def dtm_kraus_median(data,tr,lo,hi,ml=(4,4),mh=(40,40)):
    med4 = filters.median_filter(data,ml)
    med40 = filters.median_filter(data,mh)
    diff = med4 - med40
    med4 = None
    med40 = None
    mask = diff > tr
    mask = ndimage.morphology.binary_erosion(mask,iterations=1)
    out = np.copy(data)
    out[mask] = out[mask] - diff[mask]
    out = denoise(out,lo,hi,(3,3))
    return out
Example No. 11
def dtm_my_median(data,size,tr,lo,hi):
    med4 = filters.median_filter(data,(4,4))
    med40 = filters.median_filter(data,(size,size))
    diff = med4 - med40
    med4 = None
    med40 = None
    mask = diff > tr
    mask = ndimage.morphology.binary_erosion(mask,iterations=1)
    out = np.copy(data)
    out[mask] = out[mask] - diff[mask]
    out = denoise(out,lo,hi,(3,3))
    return out
Example No. 12
    def run(self, rinput):
        from scipy.signal import savgol_filter

        self.logger.info('starting slit flat reduction')

        flow = self.init_filters(rinput, rinput.obresult.configuration)
        reduced = basic_processing_with_combination(rinput, flow, method=combine.median)
        hdr = reduced[0].header
        self.set_base_headers(hdr)

        self.save_intermediate_img(reduced, 'reduced_image.fits')

        # Using median filtering... In each channel
        l0 = reduced[0].data.shape[0]
        l1 = l0 // 2
        channel1 = (slice(None, l1), slice(None, None, None))
        channel2 = (slice(l1, None, None), slice(None, None, None))

        m_window1 = (11, 11)
        self.logger.debug('median filtering by channel %s', m_window1)
        median1 = numpy.zeros_like(reduced[0].data)
        median1[channel1] = median_filter(reduced[0].data[channel1], m_window1)
        median1[channel2] = median_filter(reduced[0].data[channel2], m_window1)

        self.save_intermediate_array(median1, 'median_image1.fits')

        qe1 = reduced[0].data / median1

        self.save_intermediate_array(qe1, 'qe_filter1.fits')

        m_window2 = (3, 21)
        self.logger.debug('median filtering %s', m_window2)

        median2 = median_filter(qe1, m_window2)
        self.save_intermediate_array(median2, 'median_image2.fits')

        qe2 = qe1 / median2
        self.save_intermediate_array(qe2, 'qe_filter2.fits')

        self.logger.debug('filtering Inf/NaN in result')
        qe2[numpy.isinf(qe2)] = 1.0
        qe2[numpy.isnan(qe2)] = 1.0

        hdu = fits.PrimaryHDU(qe2.astype('float32'), header=reduced[0].header)
        hdu.header['UUID'] = str(uuid.uuid1())

        master_slitflat = fits.HDUList([hdu])
        self.set_base_headers(master_slitflat[0].header)

        self.logger.info('end slit flat recipe')
        return self.create_result(master_slitflat=master_slitflat,
                                  reduced_image=reduced)
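As a standalone illustration of the two-stage filtering above (synthetic data and a small array; only the (11, 11) and (3, 21) window sizes are taken from the recipe, everything else is made up):

import numpy as np
from scipy.ndimage import median_filter

data = np.random.normal(10000.0, 50.0, size=(256, 256))
half = data.shape[0] // 2
# first pass: per-channel (11, 11) median as the smooth illumination estimate
smooth = np.zeros_like(data)
smooth[:half] = median_filter(data[:half], (11, 11))
smooth[half:] = median_filter(data[half:], (11, 11))
qe1 = data / smooth
# second pass: asymmetric (3, 21) median of the ratio image
qe2 = qe1 / median_filter(qe1, (3, 21))
qe2[~np.isfinite(qe2)] = 1.0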
Example No. 13
    def __init__(self, bare_dir, dem_dir, outdir):
        
        #loads the GDAL derived DEMs to produce the crop height model
        
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        
        os.path.join(dem_dir,bare_dir)
        
        bare = None
        crop_model = None
        
        for bare_dem in os.listdir(bare_dir):
            os.chdir(bare_dir)
            bare = LoadImage(bare_dem)
            
        bare_filtered = filters.median_filter(bare.stacked,size=(9,9))
        bare_spatial = bare.spatial
        
        for survey in os.listdir(dem_dir):
            im_path = os.path.join(dem_dir, survey)
            os.chdir(im_path)
            for image in os.listdir(im_path):
                crop = LoadImage(image)
                crop_filtered = filters.median_filter(crop.stacked,size=(3,3))
                
                crop_model = crop_filtered-bare_filtered

                
                
                bare_mask = ma.masked_where(bare_filtered==-999, bare_filtered)
                crop_mask = ma.masked_where(crop_filtered==-999, crop_filtered)
                #combine these into a single mask so that anything masked in either dataset
                #will be masked in the combined output
                combined_mask = ma.mask_or(bare_mask.mask, crop_mask.mask)
                #use this mask to mask the crop model
                crop_model = ma.array(crop_model, mask=combined_mask)
                
                #convert back to a bog-standard numpy array and fill the masked values
                crop_model = crop_model.filled(-999)
                
                
                name = survey+'_CropHeightModel.tif'
                
                self.writeimage(outdir,
                                name,
                                crop_model.shape[0],
                                crop_model.shape[1],
                                crop_model,
                                bare_spatial)
Example No. 14
def median_filter(tomo, size=3, axis=0, ind=None):
    """
    Apply median filter to a 3D array along a specified axis.

    Parameters
    ----------
    tomo : ndarray
        Arbitrary 3D array.
    size : int, optional
        The size of the filter.
    axis : int, optional
        Axis along which median filtering is performed.
    ind : array of int, optional
        Indices at which the filtering is applied.

    Returns
    -------
    ndarray
        Median filtered 3D array.
    """
    if type(tomo) == str and tomo == 'SHARED':
        tomo = mp.shared_data
    else:
        arr = mp.distribute_jobs(
            tomo, func=median_filter, axis=axis,
            args=(size, axis))
        return arr

    dx, dy, dz = tomo.shape
    if ind is None:
        if axis == 0:
            ind = np.arange(0, dx)
        elif axis == 1:
            ind = np.arange(0, dy)
        elif axis == 2:
            ind = np.arange(0, dz)

    if axis == 0:
        for m in ind:
            tomo[m, :, :] = filters.median_filter(
                tomo[m, :, :], (size, size))
    elif axis == 1:
        for m in ind:
            tomo[:, m, :] = filters.median_filter(
                tomo[:, m, :], (size, size))
    elif axis == 2:
        for m in ind:
            tomo[:, :, m] = filters.median_filter(
                tomo[:, :, m], (size, size))
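For reference, the single-process equivalent of the axis=0 branch is just a per-slice scipy call (a sketch that leaves out the shared-memory dispatch through mp.distribute_jobs):

import numpy as np
from scipy.ndimage import median_filter

vol = np.random.rand(16, 64, 64)          # dx, dy, dz
for m in range(vol.shape[0]):
    # 3x3 median applied independently to each slice along axis 0
    vol[m, :, :] = median_filter(vol[m, :, :], (3, 3))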
Example No. 15
def detrend_using_mf(lc, flavor='SAP', size=14, mode='constant'):

    #  either use a gap (size is int):
    #
    #              size
    # <.......--------------.........................................>
    # _/▔﹀\_︿╱﹀╲/╲︿_/︺╲▁︹_/﹀\_︿╱▔︺\/\︹▁╱﹀▔╲︿_/︺▔╲▁︹_/﹀▔\⁄﹀\╱
    #
    #
    #
    #  or a gap with a hole (size is 2-element array):
    #
    #         size[0] size[1] size[0]
    # <.......--------_______--------................................>
    # _/▔﹀\_︿╱﹀╲/╲︿_/︺╲▁︹_/﹀\_︿╱▔︺\/\︹▁╱﹀▔╲︿_/︺▔╲▁︹_/﹀▔\⁄﹀\╱

    pidx = {'SAP': 1, 'PDC': 3}[flavor]

    lc_chunks = slice_da_bitch_up(lc, pidx)
    dtlc = numpy.array(lc)  # [:,pidx])

    for i in range(0, len(lc_chunks), 2):

        try:
            len(size)

            sidesize = size[0]
            gapsize = size[1]

            footprint = [True]*sidesize + [False]*gapsize + [True]*sidesize

            dt = median_filter(# lc[:,pidx][lc_chunks[i]:lc_chunks[i+1]],
                               # footprint=footprint,
                               lc[lc_chunks[i]:lc_chunks[i+1]],
                               footprint=footprint,
                               mode=mode,
                               cval=1.0)

        except TypeError:
            dt = median_filter(# lc[:,pidx][lc_chunks[i]:lc_chunks[i+1]],size=size,
                               lc[lc_chunks[i]:lc_chunks[i+1]],
                               size=size,
                               mode=mode,
                               cval=1.0)

        # dtlc[lc_chunks[i]:lc_chunks[i+1]] = lc[:,pidx][lc_chunks[i]:lc_chunks[i+1]] / dt
        dtlc[lc_chunks[i]:lc_chunks[i+1]] = lc[lc_chunks[i]:lc_chunks[i+1]] / dt
    return dtlc
Example No. 16
def ehist_equalize_melhist(d, sr, refMelHist, edges):
    """ Modify a signal in the Mel domain 
        by equalizing the Mel-subband histograms to match
        the passed-in ones """
    # Calculate the (Mel) spectrograms, and histogram, and axes
    melHist, edges, D, DmeldB, melmx, freqs = mel_hist(d, sr, edges=edges)
    # Build mapping & modify mel spectrogram
    histmaps = make_hist_maps(melHist, refMelHist, edges)
    # for some reason, extrapolating madly below bottom edge - clip it
    DmeldBmapped = np.maximum(edges[0], 
                              np.minimum(edges[-1], 
                                         apply_hist_maps(DmeldB, histmaps)))
    # Reconstruct audio based on mapped envelope
    # We map both original and modified Mel envelopes to FFT domain
    # then scale original STFT magnitudes by their ratio
    DmelInFFT = np.dot(melmx.T, idB(DmeldB))
    DmappedInFFT = np.dot(melmx.T, idB(DmeldBmapped))
    # Zero values in denominator will match to zeros in numerator, 
    # so it's OK to drop them
    Dmask = DmappedInFFT / (DmelInFFT + (DmelInFFT==0))
    # Median filter to remove short blips in gain
    medfiltwin = 7
    DmaskF = median_filter(Dmask, size=(1, medfiltwin))
    # Now scale each FFT val by their ratio
    Dmod = D * DmaskF
    # and resynthesize
    nfft = 2*(np.size(D, axis=0)-1)
    win = nfft
    hop = win/4
    dout = istft(Dmod.T, win, hop)
    return dout
Example No. 17
def threshold_components(A, d1, d2, medw = (3,3), thr = 0.9999, se = np.ones((3,3),dtype=np.int), ss = np.ones((3,3),dtype=np.int)):
        
    from scipy.ndimage.filters import median_filter
    from scipy.ndimage.morphology import binary_closing    
    from scipy.ndimage.measurements import label    
    
    d, nr = np.shape(A)
    Ath = np.zeros((d,nr))
    
    for i in range(nr):
        A_temp = np.reshape(A[:,i],(d2,d1))
        A_temp = median_filter(A_temp,medw)
        Asor = np.sort(np.squeeze(np.reshape(A_temp,(d,1))))[::-1]
        temp = np.cumsum(Asor**2)
        ff = np.squeeze(np.where(temp<(1-thr)*temp[-1]))
        if ff.size > 0:
            ind = ff[-1]
            A_temp[A_temp<Asor[ind]] = 0
            BW = (A_temp>=Asor[ind])
        else:
            BW = (A_temp>=0)
            
        Ath[:,i] = np.squeeze(np.reshape(A_temp,(d,1)))
        BW = binary_closing(BW.astype(np.int),structure = se)
        labeled_array, num_features = label(BW, structure=ss)
        BW = np.reshape(BW,(d,1))
        labeled_array = np.squeeze(np.reshape(labeled_array,(d,1)))
        nrg = np.zeros((num_features,1))
        for j in range(num_features):
            nrg[j] = np.sum(Ath[labeled_array==j+1,i]**2)
            
        indm = np.argmax(nrg)
        Ath[labeled_array==indm+1,i] = A[labeled_array==indm+1,i]
        
    return Ath
Example No. 18
    def applyMorphologicalCleaning(self, image):
        """
        Applies a variety of morphological operations to improve the detection
        of worms in the image.
        Takes 0.030 s on MUSSORGSKY for a typical frame region
        Takes 0.030 s in MATLAB too
        """
        # start with worm == 1
        image = image.copy()
        segmentation.clear_border(image)  # remove objects at edge (worm == 1)
        # fix defects in the thresholding by closing with a worm-width disk
        # worm == 1
        wormSE = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                           (self.wormDiskRadius+1,
                                            self.wormDiskRadius+1))
        imcl = cv2.morphologyEx(np.uint8(image), cv2.MORPH_CLOSE, wormSE)
        imcl = np.equal(imcl, 1)
        # fix defects by filling holes
        imholes = ndimage.binary_fill_holes(imcl)
        imcl = np.logical_or(imholes, imcl)
        # fix barely touching regions
        # majority with worm pixels == 1 (median filter same?)
        imcl = nf.median_filter(imcl, footprint=[[1, 1, 1],
                                                 [1, 0, 1],
                                                 [1, 1, 1]])
        # diag with worm pixels == 0
        imcl = np.logical_not(bwdiagfill(np.logical_not(imcl)))
        # open with worm pixels == 1
        openSE = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
        imcl = cv2.morphologyEx(np.uint8(imcl), cv2.MORPH_OPEN, openSE)
        return np.equal(imcl, 1)
Example No. 19
File: Bias.py  Project: scizen9/kpy
def full_frame(dat):

    bias = np.median(dat[:,2045:], axis=1)
    bias = bias.astype(np.float)
    smooth = FI.median_filter(bias, size=50)

    return np.tile(smooth, (2048,1))
Example No. 20
    def filter_outliers(self, pattern, filter_size, std_cutoff):
        pattern[nonzero(pattern > 1e9)] = 0
        pattern_filter = median_filter(pattern, size=filter_size)
        pattern_diff = pattern - pattern_filter
        pattern_index = nonzero(
            logical_or(
                -std(pattern_diff) * std_cutoff + pattern_diff.mean() > pattern_diff,
                pattern_diff > std(pattern_diff) * std_cutoff + pattern_diff.mean(),
            )
        )
        print(pattern_index)
        if pattern[pattern_index].any():
            pattern_final = np.copy(pattern)
            print(pattern_final[pattern_index])
            num_filter = len(pattern_final[pattern_index])
            print(num_filter)
            pattern_final[pattern_index] = pattern_filter[pattern_index]
            pattern_diff_final = pattern - pattern_final
            print(nonzero(pattern > pattern.max() * 0.8), pattern.max(), pattern_filter.max(),
                  median(pattern_diff_final), pattern_diff_final.max())
        else:
            pattern_final = copy(pattern)
            num_filter = 0
        return pattern_final, num_filter, pattern_diff
Example No. 21
def check_alignment(image, r1, r2):
    """
    Take a particular line through the image and check
    if the spheres were properly aligned in the z direction.
    It happens to be off by a pixel or two sometimes
    """
                
    distance = dist_between_spheres(r1, r2, image.shape[0] / 2. + 10, image.shape[0] / 2.)
    gap_signal = []
    denoised = median_filter(image.copy(), 3)
    
    for j in np.arange(0., image.shape[1]):        
        # Take the region around the gap, which later on will be used
        # to define the intensity at the gap between the spheres.
        # The width of the gap is not exact
        if image.shape[1] / 2. + distance + 5 > j > image.shape[1] / 2. - distance - 5:
            gap_signal.append(denoised[int(image.shape[0] / 2. + 10), int(j)])

    centre = np.mean(np.argwhere(np.min(gap_signal) == gap_signal))
    print(centre)
    print(len(gap_signal) / 2.)
    print()
    
    if abs(centre - len(gap_signal) / 2.) <= 1.5:
        return True
    else:
        return False
Example No. 22
def flatField(closeDist_img=None, inPlane_img=None,
              closeDist_bg=None, inPlane_bg=None,
              vignetting_model='different_objects',
              interpolation_method='kangWeiss',
              inPlane_scale_factor=None):

    # 1. Pixel sensitivity:
    if closeDist_img is not None:
        # TODO: find better name
        ff1 = flatFieldFromCalibration(closeDist_img, closeDist_bg)
    else:
        ff1 = 0

    # 2. Vignetting from in-plane measurements:
    if inPlane_img is not None:
        bg = gaussian_filter(median_filter(ff1, 3), 9)
        ff1 -= bg

        ff2, mask = VIGNETTING_MODELS[vignetting_model](inPlane_img,
                                                        inPlane_bg, inPlane_scale_factor)
#         import pylab as plt
#         plt.imshow(mask)
#         plt.show()
        ff2smooth = INTERPOLATION_METHODS[interpolation_method](ff2, mask)
        if isinstance(ff1, np.ndarray) and ff1.shape != ff2smooth.shape:
            ff2smooth = resize(ff2smooth, ff1.shape, mode='reflect')
    else:
        ff2 = 0
        ff2smooth = 0

    return ff1 + ff2smooth, ff2
Example No. 23
    def activate(self):
        image = np.array(self.display.widget.image)

        self._setupMenu()

        scale = mn = 0
        # TRANSFORM TO UINT8:
        orig_dtype = image.dtype
        if orig_dtype != np.uint8:
            if self.pConvMethod.value() == 'clip':
                image = [np.uint8(np.clip(i, 0, 255)) for i in image]
            else:  # scale
                med = median_filter(image[0], 3)
                mn = np.min(med)
                image -= mn  # set min to 0
                scale = np.max(med) / 255
                image /= scale
                image = np.clip(image, 0, 255)
                image = image.astype(np.uint8)

        # if len(image)==1:
        #    image = image[0]

        self.startThread(
            lambda image=image, scale=scale, mn=mn, orig_dtype=orig_dtype:
                self._process(image, scale, mn, orig_dtype), self._processDone)
Example No. 24
    def __init__(self, stack, psfwidth=1.68, **kwargs):
        super().__init__(stack)
        self.psfwidth = psfwidth
        # median filter to remove spikes
        self.peakfinder = PeakFinder(median_filter(self.stack.max(0), 3),
                                     self.psfwidth, **kwargs)
        self.peakfinder.find_blobs()
Example No. 25
    def medianFilter(self, size=2):
        """
        Spatially smooth images using a median filter.

        Filtering will be applied to every image in the collection and can be applied
        to either images or volumes. For volumes, if a single scalar neighborhood is passed,
        it will be interpreted as the filter size in x and y, with no filtering in z.

        Parameters
        ----------
        size : int, optional, default=2
            Size of the filter neighborhood in pixels. A sequence is interpreted
            as the neighborhood size for each axis. For three-dimensional data, a single
            scalar is interpreted as the neighborhood in x and y, with no filtering in z.
        """

        from scipy.ndimage.filters import median_filter
        from numpy import arange, isscalar

        dims = self.dims
        ndims = len(dims)

        if ndims == 3 and isscalar(size) == 1:
            # improved performance applying separately to each plane
            def filter_(im):
                im.setflags(write=True)
                for z in arange(0, dims[2]):
                    im[:, :, z] = median_filter(im[:, :, z], size)
                return im
        else:
            filter_ = lambda im: median_filter(im, size)

        return self._constructor(
            self.rdd.mapValues(lambda v: filter_(v))).__finalize__(self)
Example No. 26
def watershed_3d(sphere):
    """
    Markers should be int8
    Image should be uint8
    """
   
    sphere = median_filter(sphere, 3)
    thresh = threshold_otsu(sphere)
    sphere = (sphere >= thresh) * 1
    sphere = sobel(sphere)
    
    size = (sphere.shape[0], sphere.shape[1], sphere.shape[2])
    
    marker = np.zeros(size, dtype=np.int16)
    pl.imshow(sphere[:,:,50])
    pl.show()
    # mark everything outside as background
    marker[5, :, :] = -1
    marker[size[0] - 5, :, :] = -1
    marker[:, :, 5] = -1
    marker[:, :, size[2] - 5] = -1
    marker[:, 5, :] = -1
    marker[:, size[1] - 5, :] = -1
    marker[:,0,0] = -1
    # mark everything inside as a sphere
    marker[size[0] // 2, size[1] // 2, size[2] // 2] = 5

    result = measurements.watershed_ift(sphere.astype(dtype=np.uint16), marker)
    pl.imshow(result[:,:,50])
    pl.show()
    
    return result
Example No. 27
def get_init_centroid(image,mode,half_size = 100,median_window = 30): 
    """
    Initial guess for the centroid. The idea is pretty simple:
    get rid of deviating pixels with a median filter, i.e., create
    a smooth version of the image with no outliers. Then, take the
    position of the maximum count value (i.e., the brightest star
    in the image) as the first guess of the centroid.

    The idea is then to cut a sub-image around this star, and fit a gaussian 
    to that.
    """
    if mode == 'subimg':
       mf = median_filter(image,size=median_window)
    else:
       mf = image
    x0,y0 = np.where(mf == np.max(mf))
    x0,y0 = x0[0],y0[0]
    x_init = np.max([0,int(x0)-half_size])
    x_end = np.min([mf.shape[0],int(x0)+half_size])
    y_init = np.max([0,int(y0)-half_size])
    y_end = np.min([mf.shape[1],int(y0)+half_size])

    if mode == 'subimg':
        # Get subimg:
        sub_img = image[x_init:x_end,\
                       y_init:y_end]

        return x0-x_init,y0-y_init,x_init,y_init,sub_img
    else:
        return x0,y0
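A minimal usage sketch with synthetic data (all values below are illustrative; the bright patch is made wider than the median window so it survives the smoothing):

import numpy as np
from scipy.ndimage import median_filter

img = np.random.poisson(100, size=(200, 200)).astype(float)
img[116:125, 76:85] += 5000.0    # a 9x9 pixel "star"
x0, y0, x_init, y_init, sub = get_init_centroid(img, mode='subimg',
                                                half_size=30, median_window=5)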
Example No. 28
    def fitTrace(self,kwidth=10,porder=3,cwidth=30,pad=False):
        sh = self.sh
        xr1 = (0,sh[1])
        xrs = [xr1]

        polys = []
        for xr in xrs:
            xindex = np.arange(xr[0],xr[1])
            kernel = np.median(self.image[int(sh[0]/2-kwidth):int(sh[0]/2+kwidth),xindex],0)
                
            centroids = []
            totals = []
            for i in np.arange(sh[0]):
                row = self.image[i,xindex]
                row_med = np.median(row)
                    
                total = np.abs((row-row_med).sum())
                cc = fp.ifft(fp.fft(kernel)*np.conj(fp.fft(row-row_med)))
                cc_sh = fp.fftshift(cc)
                centroid = helpers.calc_centroid(cc_sh,cwidth=cwidth).real - xindex.shape[0]/2.
                centroids.append(centroid)
                totals.append(total)

            centroids = np.array(centroids)
        
            yindex = np.arange(sh[0])
            gsubs = np.where((np.isnan(centroids)==False))

            centroids[gsubs] = median_filter(centroids[gsubs],size=20)
            coeffs = np.polyfit(yindex[gsubs],centroids[gsubs],porder)

            poly = np.poly1d(coeffs)
            polys.append(poly)
        return xrs,polys
Example No. 29
def rescaleData():
  # Rescale data so that they have roughly same size over channels.
  # Rescale to match the 90th percentile of each channel to a value of 100.

  resamp4D = nib.load(nifti4Dresamp)

  resampData = resamp4D.get_data().astype(np.float)

  val = 100.0

  prctile = 90

  for i in range(resampData.shape[3]):
    p = np.percentile(resampData[:,:,:,i], prctile)
    resampData[:,:,:,i] = resampData[:,:,:,i] * (val / p)

  # Clamp to a high percentile.
  maxVal = np.percentile(resampData, 99.9)
  resampData[resampData > maxVal] = maxVal

  # Make suitable for one-byte integers.
  resampData = np.round(resampData * 255.0 / maxVal, decimals=0).astype(np.uint8)

  #
  maxData = np.max(resampData, axis=3)
  tempIm = nib.Nifti1Image(maxData, resamp4D.get_affine())
  nib.save(tempIm, nifti4DChanMax)

  # Median filtering
  medFiltSize=3
  temp = median_filter(maxData, size=medFiltSize)

  # Save.
  tempIm = nib.Nifti1Image(temp, resamp4D.get_affine())
  nib.save(tempIm, nifti4DChanMaxCleaned)
Example No. 30
def watershed_slicing(image):
    """
    Does the watershed algorithm slice by slice.
    Then use the labeled image to calculate the centres of
    mass for each slice.
    """
    image = median_filter(image, 3)
    thresh = threshold_otsu(image)
    image = (image > thresh) * 1
    
    
    N = len(image)
    slice_centroids = []
    slice_radius = []
    
    for i in range(N):
        
        slice = image[:, :, i]
        
        labels_slice = watershed_segmentation(slice)
        centroids, areas, bords, radius = centres_of_mass_2D(labels_slice)
        
        slice_centroids.append(centroids)
        slice_radius.append(radius)
#         if i > 49:
#             print centroids
#             pl.imshow(labels_slice)
#             pl.show()
        
    return slice_centroids, slice_radius
Example No. 31
def compute_dff_windowed_median(traces,
                                median_kernel_long=5401,
                                median_kernel_short=101,
                                noise_stds=None,
                                n_small_baseline_frames=None,
                                **kwargs):
    """Compute dF/F of a set of traces with median filter detrending.

    The operation is basically:

        T_long = windowed_median(T) # long timescale kernel

        T_dff1 = (T - T_long) / elementwise_max(T_long, noise_std(T))

        T_short = windowed_median(T_dff1) # short timescale kernel

        T_dff = T_dff1 - elementwise_min(T_short, 2.5*noise_std(T_dff1))

    Parameters
    ----------
    traces : np.ndarray
       2D array of traces to be analyzed.
    median_kernel_long : int
        Window size to use for long timescale median detrending.
    median_kernel_short : int
        Window size to use for short timescale median detrending.
    noise_stds : list
        List that will contain noise_std(T_dff1) for each trace. The
        value for each trace will be appended to the list if provided.
    n_small_baseline_frames : list
        List that will contain the number of frames for each trace where
        the long-timescale median window is less than noise_std(T). The
        value for each trace will be appended to the list if provided.
    kwargs:
        Additional keyword arguments are passed to :func:`noise_std` .

    Returns
    -------
    dff : np.ndarray
        2D array of dF/F traces.
    """
    _check_kernel(median_kernel_long, traces.shape[1])
    _check_kernel(median_kernel_short, traces.shape[1])

    dff_traces = np.copy(traces)

    for dff in dff_traces:
        sigma_f = noise_std(dff, **kwargs)

        # long timescale median filter for baseline subtraction
        tf = median_filter(dff, median_kernel_long, mode='constant')
        dff -= tf
        dff /= np.maximum(tf, sigma_f)

        if n_small_baseline_frames is not None:
            n_small_baseline_frames.append(np.sum(tf <= sigma_f))

        sigma_dff = noise_std(dff, **kwargs)
        if noise_stds is not None:
            noise_stds.append(sigma_dff)

        # short timescale detrending
        tf = median_filter(dff, median_kernel_short, mode='constant')
        tf = np.minimum(tf, 2.5 * sigma_dff)
        dff -= tf

    return dff_traces
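A minimal usage sketch (this assumes the noise_std and _check_kernel helpers defined alongside this function are importable; the trace length must exceed the long kernel of 5401 samples):

import numpy as np

traces = 1.0 + 0.05 * np.random.randn(3, 20000)   # 3 ROIs, baseline near 1.0
noise_stds, n_small = [], []
dff = compute_dff_windowed_median(traces,
                                  noise_stds=noise_stds,
                                  n_small_baseline_frames=n_small)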
Example No. 32
def threshold_components_parallel(A_i, neuron, dims, medw, thr_method, maxthr,
                                  nrgthr, extract_cc, **kwargs):
    """
       Post-processing of spatial components which includes the following steps

       (i) Median filtering
       (ii) Thresholding
       (iii) Morphological closing of spatial support
       (iv) Extraction of the largest connected component (to remove small unconnected pixels)
       """
    if 'se' in kwargs.keys():
        se = kwargs['se']
    else:
        se = np.ones((3, ) * len(dims), dtype='uint8')

    ss = np.ones((3, ) * len(dims), dtype='uint8')

    # we reshape this one-dimensional column of the 2D components back into the original 2D shape
    A_temp = np.reshape(A_i, dims)
    # we apply a median filter of size medw
    A_temp = median_filter(A_temp, medw)

    if thr_method == 'max':
        BW = (A_temp > maxthr * np.max(A_temp))
    elif thr_method == 'nrg':
        Asor = np.sort(A_temp.flatten())[::-1]
        temp = np.cumsum(Asor**2)
        ff = np.squeeze(np.where(temp < nrgthr * temp[-1]))
        if ff.size > 0:
            ind = ff if ff.ndim == 0 else ff[-1]
            A_temp[A_temp < Asor[ind]] = 0
            BW = (A_temp >= Asor[ind])
        else:
            BW = np.zeros_like(A_temp)
    # we want to remove the components that are valued 0 in this now 1d matrix
    Ath = A_temp.flatten()
    Ath2 = np.zeros_like(Ath)
    # we do that to have a fully closed structure even if the values have been thresholded
    BW = binary_closing(BW.astype(np.int), structure=se)

    # if we have deleted the element
    if BW.max() == 0:
        return Ath2, neuron
    #
    # we want to extract the largest connected component (to remove small unconnected pixels)
    if extract_cc:
        # we extract each feature independently with the cross structuring element
        labeled_array, num_features = label(BW, structure=ss)
        labeled_array = labeled_array.flatten()
        nrg = np.zeros((num_features, 1))
        # we extract the energy for each component
        for j in range(num_features):
            nrg[j] = np.sum(Ath[labeled_array == j + 1]**2)
        indm = np.argmax(nrg)
        Ath2[labeled_array == indm + 1] = Ath[labeled_array == indm + 1]

    else:
        BW = BW.flatten()
        Ath2[BW] = Ath[BW]

    return Ath2, neuron
Example No. 33
def ffttc_traction_finite_thickness(u,
                                    v,
                                    pixelsize1,
                                    pixelsize2,
                                    h,
                                    young,
                                    sigma=0.49,
                                    filter="gaussian",
                                    fs=None):
    '''
    FTTC with correction for finite substrate thickness according to
    Xavier Trepat, Physical forces during collective cell migration, 2009.

    :param u: deformation field in x direction, in pixels of the deformation image
    :param v: deformation field in y direction, in pixels of the deformation image
    :param young: Young's modulus in Pa
    :param pixelsize1: pixel size of the original image, needed because u and v are given as displacements of these pixels
    :param pixelsize2: pixel size of the deformation image
    :param h: height of the membrane the cells lie on, in µm
    :param sigma: Poisson ratio of the gel
    :param bf_image: the brightfield image as an array, before cells were removed
    :param filter: str, one of "mean", "gaussian", "median"; smoothing method for the traction field
    :param fs: float, size of the filter (std of the Gaussian or size of the filter window) in µm;
        if fs is None, a default based on the image size is used
    :return: tx_filter, ty_filter: traction forces in x and y direction in Pa
    '''

    # 0) subtracting the mean (better name for this step?)
    u_shift = (u - np.mean(u)) * pixelsize1
    v_shift = (v - np.mean(v)) * pixelsize1

    ## Ben's algorithm:
    # 1) zero padding to get a squared array with an even number of indices
    ax1_length = np.shape(u_shift)[0]  # u and v must have same dimensions
    ax2_length = np.shape(u_shift)[1]
    max_ind = int(np.max((ax1_length, ax2_length)))
    if max_ind % 2 != 0:
        max_ind += 1
    u_expand = np.zeros((max_ind, max_ind))
    v_expand = np.zeros((max_ind, max_ind))
    u_expand[max_ind - ax1_length:max_ind,
             max_ind - ax2_length:max_ind] = u_shift
    v_expand[
        max_ind - ax1_length:max_ind, max_ind - ax2_length:
        max_ind] = v_shift  # note: seems to be numerically slightly different than in the normal function (don't know why)

    # 2) producing wave vectors (FT-space)
    # from 1:max_ind/2 then -(max_ind/2:1)
    kx1 = np.array([
        list(range(0, int(max_ind / 2), 1)),
    ] * int(max_ind),
                   dtype=np.float64)
    kx2 = np.array([
        list(range(-int(max_ind / 2), 0, 1)),
    ] * int(max_ind),
                   dtype=np.float64)
    # spatial frequencies: 1/wavelength,in 1/µm in fractions of total length

    kx = np.append(kx1, kx2, axis=1) * 2 * np.pi / (pixelsize2 * max_ind)
    ky = np.transpose(kx)
    k = np.sqrt(kx**2 + ky**2)  # matrix with "relative" distances??#
    # np.save("/home/user/Desktop/k_test.npy",k)

    r = k * h
    c = np.cosh(r)
    s = np.sinh(r)
    s_c = np.tanh(r)

    #gamma = ((3 - 4 * sigma) * (c ** 2) + (1 - 2 * sigma) ** 2 + (k * h) ** 2) / (
    #         (3 - 4 * sigma) * s * c + k * h)  ## inf values here because k goes to zero
    gamma = ((3 - 4 * sigma) + (((1 - 2 * sigma)**2) / (c**2)) +
             ((r**2) / (c**2))) / ((3 - 4 * sigma) * s_c + r / (c**2))
    # 4) calculate fourier transform of displacements

    u_ft = scipy.fft.fft2(u_expand)
    v_ft = scipy.fft.fft2(v_expand)
    '''
    #4.0*) approximation for large h according to this paper
    factor3=young/(2*(1-sigma**2)*k)
    factor3[0,0]=factor3[0,1]
    tx_ft=factor3*(u_ft*((k**2)*(1-sigma)+sigma*(kx**2)) + v_ft*kx*ky*sigma)
    ty_ft=factor3*(v_ft*((k**2)*(1-sigma)+sigma*(ky**2)) + u_ft*kx*ky*sigma)   ## confirmed by comparisson with not corrected programm
    '''

    # 4.1) calculate tractions in Fourier space
    factor1 = (v_ft * kx - u_ft * ky)
    factor2 = (u_ft * kx + v_ft * ky)
    tx_ft = ((-young * ky * c) /
             (2 * k * s *
              (1 + sigma))) * factor1 + ((young * kx) /
                                         (2 * k *
                                          (1 - sigma**2))) * gamma * factor2
    tx_ft[0, 0] = 0  ## zero frequency would represent a constant force everywhere, so this is legit??
    ty_ft = ((young * kx * c) /
             (2 * k * s *
              (1 + sigma))) * factor1 + ((young * ky) /
                                         (2 * k *
                                          (1 - sigma**2))) * gamma * factor2
    ty_ft[0, 0] = 0

    # 4.2) go back to real space
    tx = scipy.fft.ifft2(tx_ft.astype(
        np.complex128)).real  ## maybe divide by 2pi here???
    ty = scipy.fft.ifft2(ty_ft.astype(np.complex128)).real

    # 5.2) cut like in script from ben
    tx_cut = tx[max_ind - ax1_length:max_ind, max_ind - ax2_length:max_ind]
    ty_cut = ty[max_ind - ax1_length:max_ind, max_ind - ax2_length:max_ind]

    # 5.3) using filter
    if filter == "mean":
        fs = fs if isinstance(fs, (float, int)) else int(
            int(np.max((ax1_length, ax2_length))) / 16)
        tx_filter = uniform_filter(tx_cut, size=fs)
        ty_filter = uniform_filter(ty_cut, size=fs)
    if filter == "gaussian":
        fs = fs if isinstance(fs,
                              (float,
                               int)) else int(np.max(
                                   (ax1_length, ax2_length))) / 50
        tx_filter = gaussian_filter(tx_cut, sigma=fs)
        ty_filter = gaussian_filter(ty_cut, sigma=fs)
    if filter == "median":
        fs = fs if isinstance(fs, (float, int)) else int(
            int(np.max((ax1_length, ax2_length))) / 16)
        tx_filter = median_filter(tx_cut, size=fs)
        ty_filter = median_filter(ty_cut, size=fs)
    if not isinstance(filter, str):
        tx_filter = tx_cut
        ty_filter = ty_cut
    # show_quiver(tx_filter,ty_filter)
    return (tx_filter, ty_filter)
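A minimal usage sketch with a synthetic deformation field (all numbers are illustrative; it assumes scipy.fft and the scipy.ndimage filters used in the body are imported at module level):

import numpy as np

u = 0.3 * np.random.randn(60, 60)    # deformation in x, in pixels of the deformation image
v = 0.3 * np.random.randn(60, 60)    # deformation in y
tx, ty = ffttc_traction_finite_thickness(u, v,
                                         pixelsize1=0.65,   # µm per pixel, original image
                                         pixelsize2=6.5,    # µm per pixel, deformation image
                                         h=300.0,           # gel height in µm
                                         young=25000.0,     # Young's modulus in Pa
                                         sigma=0.49,
                                         filter="gaussian")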
Example No. 34
def median_filt(img, Q):  # Q = window size
    return median_filter(img, Q)
Example No. 35
    def get_ext_coeffs_imglist(self,
                               lst,
                               roi_ambient=None,
                               apply_median=5,
                               **kwargs):
        """Apply dilution fit to all images in an :class:`ImgList`.

        Parameters
        ----------
        lst : ImgList
            image list for which the coefficients are supposed to be retrieved
        roi_ambient : list
            region of interest used to estimate ambient intensity; if None
            (default), uses :attr:`scale_rect` of :class:`PlumeBackgroundModel`
            of the input list
        apply_median : int
            if > 0, then a median filter of provided width is applied to
            the result time series (ext. coeffs and initial intensities)
        **kwargs :
            additional keyword args passed to dilution fit method
            :func:`apply_dilution_fit`.

        Returns
        -------
        DataFrame
            pandas data frame containing time series of retrieved extinction
            coefficients and initial intensities as well as the ambient
            intensities used, access keys are:

                - ``coeffs``: retrieved extinction coefficients
                - ``i0``: retrieved initial intensities
                - ``ia``: retrieved ambient intensities

        """
        if not isinstance(lst, ImgList):
            raise ValueError("Invalid input type for param lst, need ImgList")
        lst.vigncorr_mode = True

        if not check_roi(roi_ambient):
            try:
                roi_ambient = lst.bg_model.scale_rect
            except BaseException:
                pass
            if not check_roi(roi_ambient):
                raise ValueError("Input parameter roi_ambient is not a valied"
                                 "ROI and neither is scale_rect in background "
                                 "model of input image list...")
        cfn = lst.cfn
        lst.goto_img(0)
        nof = lst.nof
        times = lst.acq_times
        coeffs = []
        i0s = []
        ias = []
        for k in range(nof):
            img = lst.current_img()
            try:
                ia = img.crop(roi_ambient, True).mean()

                ext, i0, _, _ = self.apply_dilution_fit(img=img,
                                                        rad_ambient=ia,
                                                        plot=False,
                                                        **kwargs)
                coeffs.append(ext)
                i0s.append(i0)
                ias.append(ia)
            except BaseException:
                coeffs.append(nan)
                i0s.append(nan)
                ias.append(nan)
            lst.goto_next()
        lst.goto_img(cfn)
        if apply_median > 0:
            coeffs = median_filter(coeffs, apply_median)
            i0s = median_filter(i0s, apply_median)
            ias = median_filter(ias, apply_median)
        return DataFrame(dict(coeffs=coeffs, i0=i0s, ia=ias), index=times)
Example No. 36
def middle(
    f,
    param,
    x=None,
    iterations=40,
    eps=0.001,
    poly=False,
    weight=1,
    lambda2=-1,
    mn=None,
    mx=None,
):
    """
    middle tries to fit a smooth curve that is located
    along the "middle" of 1D data array f. Filter size "filter"
    together with the total number of iterations determine
    the smoothness and the quality of the fit. The total
    number of iterations can be controlled by limiting the
    maximum number of iterations (iter) and/or by setting
    the convergence criterion for the fit (eps)
    04-Nov-2000 N.Piskunov wrote.
    09-Nov-2011 NP added weights and 2nd derivative constraint as LAM2

    Parameters
    ----------
    f : array
        1D data array to fit
    param : int
        Smoothing parameter of the optimal filter (or polynomial degree if poly is True)
    iterations : int
        maximum number of iterations [def: 40]
    eps : float
        convergence level [def: 0.001]
    mn : float
        minimum function values to be considered [def: min(f)]
    mx : float
        maximum function values to be considered [def: max(f)]
    lambda2 : float
        constraint on 2nd derivative
    weight : array(float)
        vector of weights.
    """
    mn = mn if mn is not None else np.min(f)
    mx = mx if mx is not None else np.max(f)

    f = np.asarray(f)

    if x is None:
        xx = np.linspace(-1, 1, num=f.size)
    else:
        xx = np.asarray(x)

    if poly:
        j = (f >= mn) & (f <= mx)
        n = np.count_nonzero(j)
        if n <= round(param):
            return f

        fmin = np.min(f[j]) - 1
        fmax = np.max(f[j]) + 1
        ff = (f[j] - fmin) / (fmax - fmin)
        ff_old = ff
    else:
        fmin = np.min(f) - 1
        fmax = np.max(f) + 1
        ff = (f - fmin) / (fmax - fmin)
        ff_old = ff
        n = len(f)

    for _ in range(iterations):
        if poly:
            param = round(param)
            if param > 0:
                t = median_filter(np.polyval(np.polyfit(xx, ff, param), xx), 3)
                tmp = np.polyval(np.polyfit(xx, (t - ff)**2, param), xx)
            else:
                t = np.tile(np.polyfit(xx, ff, param), len(f))
                tmp = np.tile(np.polyfit(xx, (t - ff)**2, param), len(f))
        else:
            t = median_filter(
                opt_filter(ff, param, weight=weight, lambda2=lambda2), 3)
            tmp = opt_filter(weight * (t - ff)**2,
                             param,
                             weight=weight,
                             lambda2=lambda2)

        dev = np.sqrt(np.clip(tmp, 0, None))
        ff = np.clip(t - dev, ff, t + dev)
        dev2 = np.max(weight * np.abs(ff - ff_old))
        ff_old = ff

        # print(dev2)
        if dev2 <= eps:
            break

    if poly:
        xx = np.linspace(-1, 1, len(f))
        if param > 0:
            t = median_filter(np.polyval(np.polyfit(xx, ff, param), xx), 3)
        else:
            t = np.tile(np.polyfit(xx, ff, param), len(f))

    return t * (fmax - fmin) + fmin
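A minimal usage sketch of the poly branch, which only needs NumPy and scipy's median_filter (the opt_filter helper is required only when poly=False):

import numpy as np
from scipy.ndimage import median_filter

x = np.linspace(0, 4 * np.pi, 500)
spectrum = 5.0 + np.sin(x) + np.random.normal(0.0, 0.1, x.size)
mid = middle(spectrum, 6, poly=True)   # 6th-degree polynomial "middle" curve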
Example No. 37
def top(
    f,
    order=1,
    iterations=40,
    eps=0.001,
    poly=False,
    weight=1,
    lambda2=-1,
    mn=None,
    mx=None,
):
    """
    top tries to fit a smooth curve to the upper envelope
    of 1D data array f. Filter size "filter"
    together with the total number of iterations determine
    the smoothness and the quality of the fit. The total
    number of iterations can be controlled by limiting the
    maximum number of iterations (iter) and/or by setting
    the convergence criterion for the fit (eps)
    04-Nov-2000 N.Piskunov wrote.
    09-Nov-2011 NP added weights and 2nd derivative constraint as LAM2

    Parameters
    ----------
    f : array
        1D data array to fit
    order : int
        Smoothing parameter of the optimal filter (or polynomial degree if poly is True)
    iterations : int
        maximum number of iterations [def: 40]
    eps : float
        convergence level [def: 0.001]
    mn : float
        minimum function values to be considered [def: min(f)]
    mx : float
        maximum function values to be considered [def: max(f)]
    lambda2 : float
        constraint on 2nd derivative
    weight : array(float)
        vector of weights.
    """
    mn = mn if mn is not None else np.min(f)
    mx = mx if mx is not None else np.max(f)

    f = np.asarray(f)
    xx = np.linspace(-1, 1, num=f.size)

    if poly:
        j = (f >= mn) & (f <= mx)
        if np.count_nonzero(j) <= round(order):
            raise ValueError("Not enough points")
        fmin = np.min(f[j]) - 1
        fmax = np.max(f[j]) + 1
        ff = (f - fmin) / (fmax - fmin)
        ff_old = ff
    else:
        fff = middle(
            f,
            order,
            iterations=iterations,
            eps=eps,
            weight=weight,
            lambda2=lambda2,
        )
        fmin = np.min(f) - 1
        fmax = np.max(f) + 1
        fff = (fff - fmin) / (fmax - fmin)
        ff = (f - fmin) / (fmax - fmin) / fff
        ff_old = ff

    for _ in range(iterations):
        order = round(order)
        if poly:
            t = median_filter(np.polyval(np.polyfit(xx, ff, order), xx), 3)
            tmp = np.polyval(
                np.polyfit(xx,
                           np.clip(ff - t, 0, None)**2, order), xx)
            dev = np.sqrt(np.clip(tmp, 0, None))
        else:
            t = median_filter(
                opt_filter(ff, order, weight=weight, lambda2=lambda2), 3)
            tmp = opt_filter(
                np.clip(weight * (ff - t), 0, None),
                order,
                weight=weight,
                lambda2=lambda2,
            )
            dev = np.sqrt(np.clip(tmp, 0, None))

        ff = np.clip(t - eps, ff, t + dev * 3)
        dev2 = np.max(weight * np.abs(ff - ff_old))
        ff_old = ff
        if dev2 <= eps:
            break

    if poly:
        t = median_filter(np.polyval(np.polyfit(xx, ff, order), xx), 3)
        return t * (fmax - fmin) + fmin
    else:
        return t * fff * (fmax - fmin) + fmin
Example No. 38
def counting(c_std, a_std, imageFile):
    image_list = [imageFile]
    c_std = float(c_std)
    if image_list[0][-3:] == 'tif':
        for f, filename in enumerate(image_list):
            subprocess.call(["convert", filename, filename + '.png'])
            image_list[f] = filename + '.png'
    output_filename_hash = (datetime.datetime.now().minute * 60 +
                            datetime.datetime.now().second)

    images = [
        scipy.misc.imread(filename).astype(np.uint16)
        for filename in image_list
    ]
    #correlation_matrix = np.array([[-1, -1, -1, -1, -1],
    #                               [-1, -1, -1, -1, -1],
    #                               [-1, -1, 24, -1, -1],
    #                               [-1, -1, -1, -1, -1],
    #                               [-1, -1, -1, -1, -1]])
    correlation_matrix = np.array([[-5935, -5935, -5935, -5935, -5935],
                                   [-5935, 8027, 8027, 8027, -5935],
                                   [-5935, 8027, 30742, 8027, -5935],
                                   [-5935, 8027, 8027, 8027, -5935],
                                   [-5935, -5935, -5935, -5935, -5935]])
    median_diameter = 5
    local_max = 3
    #c_std = 1
    processed_images = [np.copy(image).astype(np.int64) for image in images]
    processed_images = [
        np.subtract(image, np.minimum(spf.median_filter(image, median_diameter),
                                      image))
        for image in images]
    processed_images = [
        np.maximum(scipy.signal.correlate(image, correlation_matrix, mode='same'),
                   np.zeros_like(image)).astype(np.int64)
        for image in processed_images]
    thresholded_images = [
        image > np.mean(image) + c_std * np.std(image)
        #images[i] > np.mean(images[i]) + a_std * np.std(images[i])
        #images[i] > a_std
        for i, image in enumerate(processed_images)
    ]
    for i, mask in enumerate(thresholded_images):
        for (h, w), valid in np.ndenumerate(mask):
            if valid:
                local_slice = \
                    np.copy(processed_images[i][h - local_max:h + local_max + 1,
                    w - local_max:w + local_max + 1])
                if (h + local_max >= mask.shape[0]
                        or w + local_max >= mask.shape[1] or h - local_max < 0
                        or w - local_max < 0):
                    mask[h, w] = False
                    continue
                local_slice[local_max, local_max] = 0
                if np.amax(local_slice) >= processed_images[i][h, w]:
                    mask[h, w] = False
    size = 4
    for i, image in enumerate(images):
        i_std = np.std(image)
        i_median = np.median(image) - i_std
        i_max = np.amax(image)
        for (h, w), value in np.ndenumerate(image):
            if image[h, w] > i_median:
                image[h, w] = int(
                    np.around(
                        math.sqrt(image[h, w] - i_median) /
                        math.sqrt(i_max - i_median) * float(2**8 - 1)))
            else:
                image[h, w] = 0
    output_images = [
        ImageOps.colorize(
            Image.fromstring('L', image.shape, image.astype(np.uint8)),
            (0, 0, 0), (255, 255, 255)) for image in images
    ]
    for i, image in enumerate(images):
        for (h, w), value in np.ndenumerate(image):
            if thresholded_images[i][h, w]:
                box = ((w - size, h - size), (w + size, h + size))
                draw = ImageDraw.Draw(output_images[i])
                draw.rectangle(box, fill=None, outline='blue')
    # Editing the output file name; Will have the same as the filename + png
    [
        image.save(
            str(i).zfill(int(np.ceil(math.log(len(output_images), 10)))) +
            ' SIMPLE ' + str(output_filename_hash) + '.png')
        for i, image in enumerate(output_images)
    ]
    for i, image in enumerate(thresholded_images):
        print(
            str(i).zfill(int(np.ceil(math.log(len(thresholded_images), 10)))) +
            ' ' + str(np.sum(np.sum(image))))
Example No. 39
                    K=nb_median)

                print "Getting the harmonic part"
                estimated_spectrum_harmo, neighbors = regression.ann(
                    learn_feats[:, -48:-36].T,
                    learn_magspecs.T,
                    test_feats[:, -48:-36].T,
                    test_magspecs.T,
                    K=nb_median)

                win_size = params['wintime'] * params['sr']
                step_size = params['steptime'] * params['sr']
                # sliding median filtering ?
                if l_medfilt > 1:
                    estimated_spectrum = median_filter(
                        estimated_spectrum_full + estimated_spectrum_harmo,
                        (1, l_medfilt))

                print "reconstruction"

                #init_vec = np.random.randn(step_size*Y_hat.shape[1])
                init_vec = np.random.randn(step_size *
                                           estimated_spectrum.shape[1])
                x_recon = transforms.gl_recons(estimated_spectrum,
                                               init_vec,
                                               nb_iter_gl,
                                               win_size,
                                               step_size,
                                               display=False)

                # Get the rythmic part by using all coefficients
Example No. 40
def correctHeadTailIntWorm(trajectories_worm, skeletons_file, intensities_file, smooth_W = 5,
    gap_size = 0, min_block_size = 10, local_avg_win = 25, min_frac_in = 0.85):
    
    #get data with valid intensity maps (worm int profile)
    good = trajectories_worm['int_map_id'] != -1;
    int_map_id = trajectories_worm.loc[good, 'int_map_id'].values
    int_skeleton_id = trajectories_worm.loc[good, 'skeleton_id'].values
    int_frame_number = trajectories_worm.loc[good, 'frame_number'].values
    
    #only analyze data that contains at least  min_block_size intensity profiles     
    if int_map_id.size < min_block_size:
        return []
    
        
    #read the worm intensity profiles
    with tables.File(intensities_file, 'r') as fid:
        worm_int_profile = fid.get_node('/straighten_worm_intensity_median')[int_map_id,:]

    #normalize intensities of each individual profile   
    worm_int_profile -= np.median(worm_int_profile, axis=1)[:, np.newaxis] 
    
    
    #reduce the importance of the head and tail. These parts are typically noisier
    damp_factor = getDampFactor(worm_int_profile.shape[1])
    worm_int_profile *= damp_factor        

    #worm median intensity
    med_int = np.median(worm_int_profile, axis=0).astype(float)
        
    #%%
    #let's check for head tail errors by comparing the
    #total absolute difference between profiles using the original orientation ...
    diff_ori = np.sum(np.abs(med_int-worm_int_profile), axis = 1)
    #... and inverting the orientation
    diff_inv = np.sum(np.abs(med_int[::-1]-worm_int_profile), axis = 1)
    
    #%% DEPRECATED
    #check if signal noise will allow us to distinguish between the two signals
    #I am assuming that most of the images will have a correct head tail orientation 
    #and the robust estimates will give us a good representation of the noise levels     
    #if np.median(diff_inv) - medabsdev(diff_inv)/2 < np.median(diff_ori) + medabsdev(diff_ori)/2:
    #    bad_worms.append(worm_index)
    #    continue
    
    #%%
    #smooth data, it is easier for identification
    diff_ori_med = median_filter(diff_ori,smooth_W)
    diff_inv_med = median_filter(diff_inv,smooth_W)
        
    #this will increase the distance between the original and the inversion. 
    #Therefore it will become more stringent on detection
    diff_orim = minimum_filter(diff_ori_med, smooth_W)    
    diff_invM = maximum_filter(diff_inv_med, smooth_W)   
            
    #a segment with a bad head-tail identification should have a lower 
    #difference with the median when the profile is inverted.
    bad_orientationM = diff_orim>diff_invM
    if np.all(bad_orientationM): 
        return []
        
    #let's create blocks of skeletons with a bad orientation
    blocks2correct = createBlocks(bad_orientationM, min_block_size)
    #print(blocks2correct)

    #let's refine blocks limits using the original unsmoothed differences
    bad_orientation = diff_ori>diff_inv
    blocks2correct = correctBlock(blocks2correct, bad_orientation, gap_size=0)
    
    
    #let's correct the block inversion boundaries by checking that they do not
    #traverse a group of contiguous skeletons. I am assuming that head-tail errors
    #can only occur when we miss a skeleton.
    blocks2correct = removeBadSkelBlocks(blocks2correct, int_skeleton_id, trajectories_worm, min_frac_in, gap_size=gap_size)
    
    #Check in the boundaries between blocks if there is really a better local match if the block is inverted 
    blocks2correct = checkLocalVariation(worm_int_profile, blocks2correct, local_avg_win)
    if not blocks2correct: 
        return []
    
    #redefine the limits in the skeleton_file and intensity_file rows using the final blocks boundaries
    skel_group = [(int_skeleton_id[ini], int_skeleton_id[fin]) for ini, fin in blocks2correct]
    int_group = [(int_map_id[ini], int_map_id[fin]) for ini, fin in blocks2correct]
    
    #finally switch all the data to correct for the wrong orientation in each group
    switchBlocks(skel_group, skeletons_file, int_group, intensities_file)
        
    #store data from the groups that were switched
    switched_blocks = []
    for ini, fin in blocks2correct:
        switched_blocks.append((int_frame_number[ini], int_frame_number[fin]))

    return switched_blocks
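The core of the orientation test above can be summarized in a short, self-contained sketch (hypothetical helper name; assumes a 2-D array of per-frame intensity profiles along the body):

import numpy as np
from scipy.ndimage import median_filter, minimum_filter, maximum_filter

def find_flipped_frames(profiles, smooth_w=5):
    # profiles: (n_frames, n_points) intensity profiles along the worm body.
    med = np.median(profiles, axis=0)
    diff_ori = np.sum(np.abs(med - profiles), axis=1)        # original orientation
    diff_inv = np.sum(np.abs(med[::-1] - profiles), axis=1)  # head-tail swapped
    # Smooth both difference curves, then make the comparison more stringent by
    # taking the minimum of the original and the maximum of the inverted curve.
    diff_ori_m = minimum_filter(median_filter(diff_ori, smooth_w), smooth_w)
    diff_inv_m = maximum_filter(median_filter(diff_inv, smooth_w), smooth_w)
    return diff_ori_m > diff_inv_m  # True where the head-tail orientation looks wrong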
Ejemplo n.º 41
0
def convert_2D_to_1D(array2Dlist, average_vspan=None, search_window=None):
    '''Convert a 2D GISAXS reading to a 1D vertical average centered on the specular peak.

    The function first applies hot-pixel removal to deal with slightly faulty detectors, and then
    finds the brightest pixel in the image and calls it the specular peak. If you know the specular
    peak will be the brightest pixel, you can just let the function find it for you (it does so on
    an image-by-image basis, so it automatically handles slight jitter between readings).

    However, if the specular peak is **not** the brightest pixel in the image, you can specify a
    smaller sub-window in which to look for it by passing four numbers as search_window (e.g. read
    from a file such as "specular-peak-window.txt").
    '''

    # get dimensions of (t, qx, qz) array
    datadims = np.shape(array2Dlist)
    tn = datadims[0]
    xn = datadims[1]
    zn = datadims[2]

    # obtain a median-filtered version of array (eliminates broken pixels -- borrowed from StackExchange)
    mf_data = np.array(array2Dlist)
    for kk in range(tn):
        arr = array2Dlist[kk, :, :]
        blurred = filters.median_filter(arr, size=2)
        difference = arr - blurred
        threshold = 3 * np.std(
            difference
        )  # too large and you don't remove pixels -- too small and you smooth
        hot_pixels = np.nonzero(
            (np.abs(difference[1:-1, 1:-1]) >
             threshold))  # ignore edges for simplicity; we'll discard anyway
        hot_pixels = np.array(
            hot_pixels
        ) + 1  # because we ignored the first row and first column
        for x, y in zip(hot_pixels[0], hot_pixels[1]):
            mf_data[kk, x, y] = blurred[x, y]

    # build the search window
    if search_window is None:
        print("Looking over the whole image for the specular peak.")
        cxmin = 0
        cxmax = xn
        cymin = 0
        cymax = zn
    else:
        print("Looking for specular peak in provided window ", search_window)
        cxmin = search_window[0]
        cxmax = search_window[1]
        cymin = search_window[2]
        cymax = search_window[3]

    # find the specular peak
    totalI = np.sum(mf_data, axis=0)
    cwindow = totalI[cxmin:cxmax, cymin:cymax]
    temp_coords = np.unravel_index(np.argmax(cwindow), cwindow.shape)
    max_coords = np.array([cxmin, cymin]) + temp_coords
    x0 = max_coords[0]
    z0 = max_coords[1]

    # report, with optional warning
    print("Peak found at ", max_coords)
    if x0 == cxmin or x0 == cxmax or z0 == cymin or z0 == cymax:
        print("WARNING: specular peak found on edge of search window.")
        print("Cannot confirm that value is a local maximum.")
        print("Identified coordinates may be inaccurate.")

    # identify the vertical analysis window (specular peak +- half the width/height specified in window_shape)
    zmin = z0 - (average_vspan - 1) / 2
    zmax = z0 + (average_vspan - 1) / 2
    iofqt = np.sum(mf_data[:, :, zmin:zmax + 1], axis=2)
    return iofqt
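The hot-pixel removal step above (median-filter the frame, flag pixels whose difference from the blurred copy exceeds a few standard deviations, and replace them) can stand alone. A minimal sketch, applied to the whole frame rather than skipping a one-pixel border:

import numpy as np
from scipy.ndimage import median_filter

def remove_hot_pixels(frame, size=2, nsigma=3):
    # Replace pixels that deviate strongly from a median-filtered copy.
    blurred = median_filter(frame, size=size)
    difference = frame - blurred
    threshold = nsigma * np.std(difference)
    hot = np.abs(difference) > threshold
    cleaned = frame.copy()
    cleaned[hot] = blurred[hot]
    return cleaned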
Ejemplo n.º 42
0
    def run(self, rinput):
        self.logger.info('starting processing for bars detection')

        flow = self.init_filters(rinput)

        hdulist = basic_processing_with_combination(rinput, flow=flow)

        hdr = hdulist[0].header
        self.set_base_headers(hdr)

        try:
            rotang = hdr['ROTANG']
            tsutc1 = hdr['TSUTC1']
            dtub, dtur = datamodel.get_dtur_from_header(hdr)
            csupos = datamodel.get_csup_from_header(hdr)
            csusens = datamodel.get_cs_from_header(hdr)

        except KeyError as error:
            self.logger.error(error)
            raise numina.exceptions.RecipeError(error)

        self.logger.debug('finding bars')
        # Processed array
        arr = hdulist[0].data

        # Median filter of processed array (two times)
        mfilter_size = rinput.median_filter_size

        self.logger.debug('median filtering X, %d columns', mfilter_size)
        arr_median = median_filter(arr, size=(1, mfilter_size))
        self.logger.debug('median filtering X, %d rows', mfilter_size)
        arr_median = median_filter(arr_median, size=(mfilter_size, 1))

        # Median filter of processed array (two times) in the other direction
        # for Y coordinates
        self.logger.debug('median filtering Y, %d rows', mfilter_size)
        arr_median_alt = median_filter(arr, size=(mfilter_size, 1))
        self.logger.debug('median filtering Y, %d columns', mfilter_size)
        arr_median_alt = median_filter(arr_median_alt, size=(1, mfilter_size))

        xfac = dtur[0] / EMIR_PIXSCALE
        yfac = -dtur[1] / EMIR_PIXSCALE

        vec = [yfac, xfac]
        self.logger.debug('DTU shift is %s', vec)

        # and the table of approx positions of the slits
        barstab = rinput.bars_nominal_positions
        # Currently, we only use fields 0 and 2
        # of the nominal positions file

        # Number of rows used
        # These other parameters can also be tuned
        bstart = 1
        bend = 2047
        self.logger.debug('ignoring columns outside %d - %d', bstart, bend - 1)

        # extract a region to average
        wy = (rinput.average_box_row_size // 2)
        wx = (rinput.average_box_col_size // 2)
        self.logger.debug('extraction window is %d rows, %d cols', 2 * wy + 1,
                          2 * wx + 1)
        # Fit the peak with these points
        wfit = 2 * (rinput.fit_peak_npoints // 2) + 1
        self.logger.debug('fit with %d points', wfit)

        # Minimum threshold
        threshold = 5 * EMIR_RON
        # Savitsky and Golay (1964) filter to compute the X derivative
        # scipy >= xx has a savgol_filter function
        # for compatibility we do it manually

        allpos = {}
        ypos3_kernel = None
        slits = numpy.zeros((EMIR_NBARS, 8), dtype='float')

        self.logger.info('start finding bars')
        for ks in [3, 5, 7, 9]:
            self.logger.debug('kernel size is %d', ks)
            # S and G kernel for derivative
            kw = ks * (ks * ks - 1) / 12.0
            coeffs_are = -numpy.arange((1 - ks) // 2, (ks - 1) // 2 + 1) / kw
            if ks == 3:
                ypos3_kernel = coeffs_are
            self.logger.debug('kernel weights are %s', coeffs_are)

            self.logger.debug('derive image in X direction')
            arr_deriv = convolve1d(arr_median, coeffs_are, axis=-1)
            # Axis 0 is the Y direction
            self.logger.debug('derive image in Y direction (with kernel=3)')
            arr_deriv_alt = convolve1d(arr_median_alt, ypos3_kernel, axis=0)

            positions = []
            for coords in barstab:
                lbarid = int(coords[0])
                rbarid = lbarid + EMIR_NBARS
                ref_y_coor = coords[1] + vec[1]
                poly_coeffs = coords[2:]
                prow = coor_to_pix_1d(ref_y_coor) - 1
                fits_row = prow + 1  # FITS pixel index

                # A function that returns the center of the bar
                # given its X position
                def center_of_bar(x):
                    # Pixel values are 0-based
                    return polyval(x + 1 - vec[0], poly_coeffs) + vec[1] - 1

                self.logger.debug('looking for bars with ids %d - %d', lbarid,
                                  rbarid)
                self.logger.debug('reference y position is Y %7.2f',
                                  ref_y_coor)

                # if ref_y_coor is outlimits, skip this bar
                # ref_y_coor is in FITS format
                if (ref_y_coor >= 2047) or (ref_y_coor <= 1):
                    self.logger.debug(
                        'reference y position is outlimits, skipping')
                    positions.append([lbarid, fits_row, fits_row, 1, 0, 3])
                    positions.append([rbarid, fits_row, fits_row, 1, 0, 3])
                    continue

                # Left bar
                self.logger.debug('measure left border (%d)', lbarid)

                centery, xpos, fwhm, st = char_bar_peak_l(arr_deriv,
                                                          prow,
                                                          bstart,
                                                          bend,
                                                          threshold,
                                                          center_of_bar,
                                                          wx=wx,
                                                          wy=wy,
                                                          wfit=wfit)
                xpos1 = xpos
                positions.append(
                    [lbarid, centery + 1, fits_row, xpos + 1, fwhm, st])

                # Right bar
                self.logger.debug('measure right border (%d)', rbarid)
                centery, xpos, fwhm, st = char_bar_peak_r(arr_deriv,
                                                          prow,
                                                          bstart,
                                                          bend,
                                                          threshold,
                                                          center_of_bar,
                                                          wx=wx,
                                                          wy=wy,
                                                          wfit=wfit)
                positions.append(
                    [rbarid, centery + 1, fits_row, xpos + 1, fwhm, st])
                xpos2 = xpos
                #
                if st == 0:
                    self.logger.debug('measure top-bottom borders')
                    try:
                        y1, y2, statusy = char_bar_height(arr_deriv_alt,
                                                          xpos1,
                                                          xpos2,
                                                          centery,
                                                          threshold,
                                                          wh=35,
                                                          wfit=wfit)
                    except Exception as error:
                        self.logger.warning('Error computing height: %s',
                                            error)
                        statusy = 44

                    if statusy in [0, 40]:
                        # Main border is detected
                        positions[-1][1] = y2 + 1
                        positions[-2][1] = y2 + 1
                    else:
                        # Update status
                        positions[-1][-1] = 4
                        positions[-2][-1] = 4
                else:
                    self.logger.debug('slit is not complete')
                    y1, y2 = 0, 0

                # Update positions

                self.logger.debug(
                    'bar %d centroid-y %9.4f, row %d x-pos %9.4f, FWHM %6.3f, status %d',
                    *positions[-2])
                self.logger.debug(
                    'bar %d centroid-y %9.4f, row %d x-pos %9.4f, FWHM %6.3f, status %d',
                    *positions[-1])

                if ks == 5:
                    slits[lbarid -
                          1] = [xpos1, y2, xpos2, y2, xpos2, y1, xpos1, y1]
                    # FITS coordinates
                    slits[lbarid - 1] += 1.0
                    self.logger.debug('inserting bars %d-%d into "slits"',
                                      lbarid, rbarid)

            allpos[ks] = numpy.asarray(
                positions, dtype='float')  # GCS doesn't like lists of lists

        self.logger.debug('end finding bars')
        result = self.create_result(
            frame=hdulist,
            slits=slits,
            positions9=allpos[9],
            positions7=allpos[7],
            positions5=allpos[5],
            positions3=allpos[3],
            DTU=dtub,
            ROTANG=rotang,
            TSUTC1=tsutc1,
            csupos=csupos,
            csusens=csusens,
        )
        return result
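The recipe smooths the frame with two 1-D median filters (along X, then along Y) instead of one 2-D window before computing derivatives. A minimal sketch of that separable pattern (hypothetical helper name):

from scipy.ndimage import median_filter

def separable_median(arr, size):
    # Median filter along X then along Y; cheaper than a single (size, size)
    # window and usually sufficient to suppress pixel-scale noise before
    # convolving with a derivative kernel.
    out = median_filter(arr, size=(1, size))
    return median_filter(out, size=(size, 1))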
Ejemplo n.º 43
0
def visitcomb(allvisit,
              starver,
              load=None,
              apred='r13',
              telescope='apo25m',
              nres=[5, 4.25, 3.5],
              bconly=False,
              plot=False,
              write=True,
              dorvfit=True,
              apstar_vers='stars',
              logger=None):
    """ Combine multiple visits with individual RVs to rest frame sum
    """

    if logger is None:
        logger = dln.basiclogger()

    if load is None: load = apload.ApLoad(apred=apred, telescope=telescope)
    cspeed = 2.99792458e5  # speed of light in km/s

    logger.info('Doing visitcomb for {:s} '.format(allvisit['apogee_id'][0]))

    wnew = norm.apStarWave()
    nwave = len(wnew)
    nvisit = len(allvisit)

    # initialize array for stack of interpolated spectra
    zeros = np.zeros([nvisit, nwave])
    izeros = np.zeros([nvisit, nwave], dtype=int)
    stack = apload.ApSpec(zeros,
                          err=zeros.copy(),
                          bitmask=izeros,
                          cont=zeros.copy(),
                          sky=zeros.copy(),
                          skyerr=zeros.copy(),
                          telluric=zeros.copy(),
                          telerr=zeros.copy())

    apogee_target1, apogee_target2, apogee_target3 = 0, 0, 0
    apogee2_target1, apogee2_target2, apogee2_target3, apogee2_target4 = 0, 0, 0, 0
    starflag, andflag = np.uint64(0), np.uint64(0)
    starmask = bitmask.StarBitMask()

    # Loop over each visit and interpolate to final wavelength grid
    if plot: fig, ax = plots.multi(1, 2, hspace=0.001)
    for i, visit in enumerate(allvisit):

        if bconly: vrel = -visit['bc']
        else: vrel = visit['vrel']

        # Skip if we don't have an RV
        if not np.isfinite(vrel): continue

        # Load the visit
        if load.telescope == 'apo1m':
            apvisit = load.apVisit1m(visit['plate'],
                                     visit['mjd'],
                                     visit['apogee_id'],
                                     load=True)
        else:
            apvisit = load.apVisit(int(visit['plate']),
                                   visit['mjd'],
                                   visit['fiberid'],
                                   load=True)
        pixelmask = bitmask.PixelBitMask()

        # Rest-frame wavelengths transformed to this visit spectra
        w = norm.apStarWave() * (1.0 + vrel / cspeed)

        # Loop over the chips
        for chip in range(3):

            # Get the pixel values to interpolate to
            pix = wave.wave2pix(w, apvisit.wave[chip, :])
            gd, = np.where(np.isfinite(pix))

            # Get a smoothed, filtered spectrum to use as replacement for bad values
            cont = gaussian_filter(
                median_filter(apvisit.flux[chip, :], [501], mode='reflect'),
                100)
            errcont = gaussian_filter(
                median_filter(apvisit.flux[chip, :], [501], mode='reflect'),
                100)
            bd, = np.where(apvisit.bitmask[chip, :] & pixelmask.badval())
            if len(bd) > 0:
                apvisit.flux[chip, bd] = cont[bd]
                apvisit.err[chip, bd] = errcont[bd]

            # Load up quantity/error pairs for interpolation
            raw = [[apvisit.flux[chip, :], apvisit.err[chip, :]**2],
                   [apvisit.sky[chip, :], apvisit.skyerr[chip, :]**2],
                   [apvisit.telluric[chip, :], apvisit.telerr[chip, :]**2]]

            # Load up individual mask bits
            for ibit, name in enumerate(pixelmask.name):
                if name != '' and len(
                        np.where(apvisit.bitmask[chip, :] & 2**ibit)[0]) > 0:
                    raw.append([
                        np.clip(apvisit.bitmask[chip, :] & 2**ibit, None, 1),
                        None
                    ])

            # Do the sinc interpolation
            out = sincint.sincint(pix[gd], nres[chip], raw)

            # From output flux, get continuum to remove, so that all spectra are
            #   on same scale. We'll later multiply in the median continuum
            flux = out[0][0]
            stack.cont[i, gd] = gaussian_filter(
                median_filter(flux, [501], mode='reflect'), 100)

            # Load interpolated spectra into output stack
            stack.flux[i, gd] = out[0][0] / stack.cont[i, gd]
            stack.err[i, gd] = out[0][1] / stack.cont[i, gd]
            stack.sky[i, gd] = out[1][0]
            stack.skyerr[i, gd] = out[1][1]
            stack.telluric[i, gd] = out[2][0]
            stack.telerr[i, gd] = out[2][1]
            # For mask, set bits where interpolated value is above some threshold
            #   defined for each mask bit
            iout = 3
            for ibit, name in enumerate(pixelmask.name):
                if name != '' and len(
                        np.where(apvisit.bitmask[chip, :] & 2**ibit)[0]) > 0:
                    j = np.where(
                        np.abs(out[iout][0]) > pixelmask.maskcontrib[ibit])[0]
                    stack.bitmask[i, gd[j]] |= 2**ibit
                    iout += 1

        # Increase uncertainties for persistence pixels
        bd, = np.where(
            (stack.bitmask[i, :] & pixelmask.getval('PERSIST_HIGH')) > 0)
        if len(bd) > 0: stack.err[i, bd] *= np.sqrt(5)
        bd, = np.where((
            (stack.bitmask[i, :] & pixelmask.getval('PERSIST_HIGH')) == 0) & (
                (stack.bitmask[i, :] & pixelmask.getval('PERSIST_MED')) > 0))
        if len(bd) > 0: stack.err[i, bd] *= np.sqrt(4)
        bd, = np.where(
            ((stack.bitmask[i, :] & pixelmask.getval('PERSIST_HIGH')) == 0)
            & ((stack.bitmask[i, :] & pixelmask.getval('PERSIST_MED')) == 0)
            & ((stack.bitmask[i, :] & pixelmask.getval('PERSIST_LOW')) > 0))
        if len(bd) > 0: stack.err[i, bd] *= np.sqrt(3)
        bd, = np.where(
            (stack.bitmask[i, :] & pixelmask.getval('SIG_SKYLINE')) > 0)
        if len(bd) > 0: stack.err[i, bd] *= np.sqrt(100)

        if plot:
            ax[0].plot(norm.apStarWave(), stack.flux[i, :])
            ax[1].plot(norm.apStarWave(), stack.flux[i, :] / stack.err[i, :])
            plt.draw()
            pdb.set_trace()

        # Accumulate for header of combined frame. Turn off visit specific RV flags first
        visitflag = visit['starflag'] & ~starmask.getval(
            'RV_REJECT') & ~starmask.getval('RV_SUSPECT')
        starflag |= visitflag
        andflag &= visitflag
        if visit['survey'] == 'apogee':
            apogee_target1 |= visit['apogee_target1']
            apogee_target2 |= visit['apogee_target2']
            apogee_target3 |= visit['apogee_target3']
        elif visit['survey'].find('apogee2') >= 0:
            apogee2_target1 |= visit['apogee_target1']
            apogee2_target2 |= visit['apogee_target2']
            apogee2_target3 |= visit['apogee_target3']
            try:
                apogee2_target4 |= visit['apogee_target4']
            except:
                pass
        # MWM target flags?

    # Create final spectrum
    zeros = np.zeros([nvisit + 2, nwave])
    izeros = np.zeros([nvisit + 2, nwave], dtype=int)
    apstar = apload.ApSpec(zeros,
                           err=zeros.copy(),
                           bitmask=izeros,
                           wave=norm.apStarWave(),
                           sky=zeros.copy(),
                           skyerr=zeros.copy(),
                           telluric=zeros.copy(),
                           telerr=zeros.copy(),
                           cont=zeros.copy(),
                           template=zeros.copy())
    apstar.header['CRVAL1'] = norm.logw0
    apstar.header['CDELT1'] = norm.dlogw
    apstar.header['CRPIX1'] = 1
    apstar.header['CTYPE1'] = (
        'LOG-LINEAR', 'Logarithmic wavelength scale in subsequent HDU')
    apstar.header['DC-FLAG'] = 1

    # Pixel-by-pixel weighted average
    cont = np.median(stack.cont, axis=0)
    apstar.flux[0, :] = np.sum(stack.flux / stack.err**2, axis=0) / np.sum(
        1. / stack.err**2, axis=0) * cont
    apstar.err[0, :] = np.sqrt(1. / np.sum(1. / stack.err**2, axis=0)) * cont
    apstar.bitmask[0, :] = np.bitwise_and.reduce(stack.bitmask, 0)
    apstar.cont[0, :] = cont

    # Individual visits
    apstar.flux[2:, :] = stack.flux * stack.cont
    apstar.err[2:, :] = stack.err * stack.cont
    apstar.bitmask[2:, :] = stack.bitmask
    apstar.sky[2:, :] = stack.sky
    apstar.skyerr[2:, :] = stack.skyerr
    apstar.telluric[2:, :] = stack.telluric
    apstar.telerr[2:, :] = stack.telerr

    # Populate header
    apstar.header['OBJID'] = (allvisit['apogee_id'][0], 'APOGEE object name')
    apstar.header['APRED'] = (apred, 'APOGEE reduction version')
    apstar.header['STARVER'] = (starver, 'apStar version')
    apstar.header['HEALPIX'] = (apload.obj2healpix(allvisit['apogee_id'][0]),
                                'HEALPix location')
    try:
        apstar.header['SNR'] = (np.nanmedian(apstar.flux / apstar.err),
                                'Median S/N per apStar pixel')
    except:
        apstar.header['SNR'] = (0., 'Median S/N per apStar pixel')
    apstar.header['RA'] = (allvisit['ra'].max(), 'right ascension, deg, J2000')
    apstar.header['DEC'] = (allvisit['dec'].max(), 'declination, deg, J2000')
    apstar.header['GLON'] = (allvisit['glon'].max(), 'Galactic longitude')
    apstar.header['GLAT'] = (allvisit['glat'].max(), 'Galactic latitude')
    apstar.header['J'] = (allvisit['j'].max(), '2MASS J magnitude')
    apstar.header['J_ERR'] = (allvisit['j_err'].max(),
                              '2MASS J magnitude uncertainty')
    apstar.header['H'] = (allvisit['h'].max(), '2MASS H magnitude')
    apstar.header['H_ERR'] = (allvisit['h_err'].max(),
                              '2MASS H magnitude uncertainty')
    apstar.header['K'] = (allvisit['k'].max(), '2MASS K magnitude')
    apstar.header['K_ERR'] = (allvisit['k_err'].max(),
                              '2MASS K magnitude uncertainty')
    try:
        apstar.header['SRC_H'] = (allvisit['src_h'][0],
                                  'source of H magnitude')
    except KeyError:
        pass
    keys = [
        'wash_m', 'wash_t2', 'ddo51', 'irac_3_6', 'irac_4_5', 'irac_5_8',
        'wise_4_5', 'targ_4_5'
    ]
    for key in keys:
        try:
            apstar.header[key] = allvisit[key].max()
        except KeyError:
            pass

    apstar.header['AKTARG'] = (allvisit['ak_targ'].max(),
                               'Extinction used for targeting')
    apstar.header['AKMETHOD'] = (allvisit['ak_targ_method'][0],
                                 'Extinction method using for targeting')
    apstar.header['AKWISE'] = (allvisit['ak_wise'].max(),
                               'WISE all-sky extinction')
    apstar.header['SFD_EBV'] = (allvisit['sfd_ebv'].max(), 'SFD E(B-V)')
    apstar.header['APTARG1'] = (apogee_target1,
                                'APOGEE_TARGET1 targeting flag')
    apstar.header['APTARG2'] = (apogee_target2,
                                'APOGEE_TARGET2 targeting flag')
    apstar.header['APTARG3'] = (apogee_target3,
                                'APOGEE_TARGET3 targeting flag')
    apstar.header['AP2TARG1'] = (apogee2_target1,
                                 'APOGEE2_TARGET1 targeting flag')
    apstar.header['AP2TARG2'] = (apogee2_target2,
                                 'APOGEE2_TARGET2 targeting flag')
    apstar.header['AP2TARG3'] = (apogee2_target3,
                                 'APOGEE2_TARGET3 targeting flag')
    apstar.header['AP2TARG4'] = (apogee2_target4,
                                 'APOGEE2_TARGET4 targeting flag')
    apstar.header['NVISITS'] = (len(allvisit),
                                'Number of visit spectra combined flag')
    apstar.header['STARFLAG'] = (starflag,
                                 'bitwise OR of individual visit starflags')
    apstar.header['ANDFLAG'] = (andflag,
                                'bitwise AND of individual visit starflags')

    try:
        apstar.header['N_COMP'] = (allvisit['n_components'].max(),
                                   'Maximum number of components in RV CCFs')
    except:
        pass
    apstar.header['VHBARY'] = (
        (allvisit['vheliobary'] * allvisit['snr']).sum() /
        allvisit['snr'].sum(), 'S/N weighted mean barycentric RV')
    if len(allvisit) > 1:
        apstar.header['vscatter'] = (allvisit['vheliobary'].std(ddof=1),
                                     'standard deviation of visit RVs')
    else:
        apstar.header['VSCATTER'] = (0., 'standard deviation of visit RVs')
    apstar.header['VERR'] = (0., 'unused')
    apstar.header['RV_TEFF'] = (allvisit['rv_teff'].max(),
                                'Effective temperature from RV fit')
    apstar.header['RV_LOGG'] = (allvisit['rv_logg'].max(),
                                'Surface gravity from RV fit')
    apstar.header['RV_FEH'] = (allvisit['rv_feh'].max(),
                               'Metallicity from RV fit')

    if len(allvisit) > 0:
        meanfib = (allvisit['fiberid'] *
                   allvisit['snr']).sum() / allvisit['snr'].sum()
    else:
        meanfib = 999999.
    if len(allvisit) > 1: sigfib = allvisit['fiberid'].std(ddof=1)
    else: sigfib = 0.
    apstar.header['MEANFIB'] = (meanfib, 'S/N weighted mean fiber number')
    apstar.header['SIGFIB'] = (
        sigfib, 'standard deviation (unweighted) of fiber number')
    apstar.header['NRES'] = ('{:5.2f}{:5.2f}{:5.2f}'.format(*nres),
                             'number of pixels/resolution used for sinc')

    # individual visit information in header
    for i0, visit in enumerate(allvisit):
        i = i0 + 1
        apstar.header['SFILE{:d}'.format(i)] = (
            visit['file'], ' Visit #{:d} spectrum file'.format(i))
        apstar.header['DATE{:d}'.format(i)] = (
            visit['dateobs'], 'DATE-OBS of visit {:d}'.format(i))
        apstar.header['JD{:d}'.format(i)] = (
            visit['jd'], 'Julian date of visit {:d}'.format(i))
        # hjd = helio_jd(visitstr[i].jd-2400000.0,visitstr[i].ra,visitstr[i].dec)
        #apstar.header['HJD{:d}'.format(i)] =
        apstar.header['FIBER{:d}'.format(i)] = (visit['fiberid'],
                                                ' Fiber, visit {:d}'.format(i))
        apstar.header['BC{:d}'.format(i)] = (
            visit['bc'],
            ' Barycentric correction (km/s), visit {:d}'.format(i))
        apstar.header['VRAD{:d}'.format(i)] = (
            visit['vrel'], ' Doppler shift (km/s) of visit {:d}'.format(i))
        #apstar.header['VERR%d'.format(i)] =
        apstar.header['VHBARY{:d}'.format(i)] = (
            visit['vheliobary'],
            ' Barycentric velocity (km/s), visit {:d}'.format(i))
        apstar.header['SNRVIS{:d}'.format(i)] = (
            visit['snr'], ' Signal/Noise ratio, visit {:d}'.format(i))
        apstar.header['FLAG{:d}'.format(i)] = (
            visit['starflag'], ' STARFLAG for visit {:d}'.format(i))
        apstar.header.insert('SFILE{:d}'.format(i),
                             ('COMMENT', 'VISIT {:d} INFORMATION'.format(i)))

    # Do a RV fit just to get a template and normalized spectrum, for plotting
    if dorvfit:
        try:
            apstar.setmask(pixelmask.badval())
            spec = doppler.Spec1D(apstar.flux[0, :],
                                  err=apstar.err[0, :],
                                  bitmask=apstar.bitmask[0, :],
                                  mask=apstar.mask[0, :],
                                  wave=apstar.wave,
                                  lsfpars=np.array([0]),
                                  lsfsigma=apstar.wave / 22500 / 2.354,
                                  instrument='APOGEE',
                                  filename=apstar.filename)
            out = doppler.rv.jointfit([spec],
                                      verbose=False,
                                      plot=False,
                                      tweak=False,
                                      maxvel=[-50, 50])
            apstar.cont = out[3][0].flux
            apstar.template = out[2][0].flux
        except ValueError as err:
            logger.error('Exception raised in visitcomb RV for: %s %s',
                         apstar.header['FIELD'], apstar.header['OBJID'])
            logger.error("ValueError: {0}".format(err))
        except RuntimeError as err:
            logger.error('Exception raised in visitcomb RV for: %s %s',
                         apstar.header['FIELD'], apstar.header['OBJID'])
            logger.error("Runtime error: {0}".format(err))
        except:
            logger.error('Exception raised in visitcomb RV fit for: %s %s',
                         apstar.header['FIELD'], apstar.header['OBJID'])

    # Write the spectrum to file
    if write:
        outfilenover = load.filename('Star', obj=apstar.header['OBJID'])
        outdir = os.path.dirname(outfilenover)
        outbase = os.path.splitext(os.path.basename(outfilenover))[0]
        outbase += '-' + starver  # add star version
        outfile = outdir + '/' + outbase + '.fits'
        if apstar_vers != 'stars':
            outfile = outfile.replace('/stars/', '/' + apstar_vers + '/')
        outdir = os.path.dirname(outfile)
        try:
            os.makedirs(os.path.dirname(outfile))
        except:
            pass
        logger.info('Writing apStar file to ' + outfile)
        apstar.write(outfile)
        apstar.filename = outfile
        mwm_root = os.environ['MWM_ROOT']
        apstar.uri = outfile[len(mwm_root) + 1:]
        # Create a symlink to the file with no version suffix
        if os.path.exists(outfilenover) or os.path.islink(outfilenover):
            os.remove(outfilenover)
        os.symlink(os.path.basename(outfile), outfilenover)  # relative path

        # Plot
        gd, = np.where(
            (apstar.bitmask[0, :]
             & (pixelmask.badval() | pixelmask.getval('SIG_SKYLINE'))) == 0)
        fig, ax = plots.multi(1, 3, hspace=0.001, figsize=(48, 6))
        med = np.nanmedian(apstar.flux[0, :])
        plots.plotl(ax[0],
                    norm.apStarWave(),
                    apstar.flux[0, :],
                    color='k',
                    yr=[0, 2 * med])
        ax[0].plot(norm.apStarWave()[gd], apstar.flux[0, gd], color='g')
        ax[0].set_ylabel('Flux')
        try:
            ax[1].plot(norm.apStarWave()[gd], apstar.cont[gd], color='g')
            ax[1].set_ylabel('Normalized')
            ax[1].plot(norm.apStarWave(), apstar.template, color='r')
        except:
            pass
        plots.plotl(ax[2],
                    norm.apStarWave(),
                    apstar.flux[0, :] / apstar.err[0, :],
                    yt='S/N')
        for i in range(3):
            ax[i].set_xlim(15100, 17000)
        ax[0].set_xlabel('Wavelength')
        fig.savefig(outdir + '/plots/' + outbase + '.png')

    # Plot
    if plot:
        ax[0].plot(norm.apStarWave(), apstar.flux, color='k')
        ax[1].plot(norm.apStarWave(), apstar.flux / apstar.err, color='k')
        plt.draw()
        pdb.set_trace()

    return apstar
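The visit combination above repeatedly builds a pseudo-continuum by median-filtering the spectrum over a wide window and then Gaussian-smoothing the result; it is used both to patch bad pixels and to put all visits on a common scale. A minimal sketch with the same window sizes (the helper name is hypothetical):

from scipy.ndimage import median_filter, gaussian_filter

def pseudo_continuum(flux, med_width=501, gauss_sigma=100):
    # Heavily smoothed version of a 1-D spectrum: wide median filter to reject
    # outliers, then a Gaussian to remove the remaining small-scale structure.
    return gaussian_filter(median_filter(flux, med_width, mode='reflect'),
                           gauss_sigma)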
Ejemplo n.º 44
0
    mFilterSize = 3
    mFilterEvery = 30
    mIter = 1001
    # step 5, mFilterEvery 8, mFilterSize 3

    # run gradient ascent for mIter steps
    for i in range(mIter):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step

        input_img_data = np.clip(input_img_data, 0., 255.)

        if mFilterSize != 0 and i % mFilterEvery == 0:
            input_img_data = median_filter(input_img_data,
                                           size=(1, 1, mFilterSize,
                                                 mFilterSize))

        if i % 50 == 0:
            print('\t%d, Current loss value:%f' % (i, loss_value))

    # decode the resulting input image
    img = deprocess_image(input_img_data[0])
    kept_filters.append((img, loss_value, filter_index))
    end_time = time.time()
    print('%d, Filter %d processed in %ds' %
          (ix, filter_index, end_time - start_time))

print("=" * 80)
print("Finish!")
Ejemplo n.º 45
0
 def _medianf(f):
     for i in range(niter):
         f = median_filter(f, size)
     return f
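A short usage sketch of _medianf above: iterating a small median window several times flattens small-scale structure while preserving edges better than a single large window (values are illustrative):

import numpy as np
from scipy.ndimage import median_filter

niter, size = 3, 5
f = np.random.rand(256, 256)
for _ in range(niter):
    f = median_filter(f, size)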
Ejemplo n.º 46
0
def ffttc_traction_pure_shear(u,
                              v,
                              pixelsize1,
                              pixelsize2,
                              h,
                              young,
                              sigma=0.49,
                              spatial_filter="mean",
                              fs=None):
    """
    limiting case for h*k==0
    Xavier Trepat, Physical forces during collective cell migration, 2009

    :param u:deformation field in x direction in pixel of the deformation image
    :param v:deformation field in y direction in pixel of the deformation image
    :param young: Young's modulus in Pa
    :param pixelsize1: pixelsize of the original image, needed because u and v is given as displacement of these pixels
    :param pixelsize2: pixelsize of the deformation image
    :param h: height of the membrane the cells lie on, in µm
    :param sigma: Poisson's ratio of the gel
    :param spatial_filter: str, values: "mean","gaussian","median". Different smoothing methods for the traction field.
    :return: tx_filter,ty_filter: traction forces in x and y direction in Pa
    """

    # 0) subtract the mean displacement
    u_shift = (u - np.mean(u)) * pixelsize1
    v_shift = (v - np.mean(v)) * pixelsize1

    # Ben's algorithm:
    # 1) Zero-padding to get a square array with an even number of indices
    ax1_length = np.shape(u_shift)[0]  # u and v must have same dimensions
    ax2_length = np.shape(u_shift)[1]
    max_ind = int(np.max((ax1_length, ax2_length)))
    if max_ind % 2 != 0:
        max_ind += 1
    u_expand = np.zeros((max_ind, max_ind))
    v_expand = np.zeros((max_ind, max_ind))
    u_expand[max_ind - ax1_length:max_ind,
             max_ind - ax2_length:max_ind] = u_shift
    v_expand[max_ind - ax1_length:max_ind,
             max_ind - ax2_length:max_ind] = v_shift

    u_ft = scipy.fft.fft2(u_expand)
    v_ft = scipy.fft.fft2(v_expand)

    # 4.1) calculate tractions in Fourier space
    mu = young / (2 * (1 + sigma))
    tx_ft = mu * u_ft / h
    tx_ft[0,
          0] = 0  # zero frequency would represent force everywhere (constant)
    ty_ft = mu * v_ft / h
    ty_ft[0, 0] = 0

    # 4.2) go back to real space
    tx = scipy.fft.ifft2(tx_ft).real
    ty = scipy.fft.ifft2(ty_ft).real

    # 5.2) cut back to the original array size, as in Ben's script

    tx_cut = tx[max_ind - ax1_length:max_ind, max_ind - ax2_length:max_ind]
    ty_cut = ty[max_ind - ax1_length:max_ind, max_ind - ax2_length:max_ind]

    # 5.3) using filter
    tx_filter = tx_cut
    ty_filter = ty_cut
    if spatial_filter == "mean":
        fs = fs if isinstance(fs, (float, int)) else int(
            int(np.max((ax1_length, ax2_length))) / 16)
        tx_filter = uniform_filter(tx_cut, size=fs)
        ty_filter = uniform_filter(ty_cut, size=fs)
    if spatial_filter == "gaussian":
        fs = fs if isinstance(fs,
                              (float,
                               int)) else int(np.max(
                                   (ax1_length, ax2_length))) / 50
        tx_filter = gaussian_filter(tx_cut, sigma=fs)
        ty_filter = gaussian_filter(ty_cut, sigma=fs)
    if spatial_filter == "median":
        fs = fs if isinstance(fs, (float, int)) else int(
            int(np.max((ax1_length, ax2_length))) / 16)
        tx_filter = median_filter(tx_cut, size=fs)
        ty_filter = median_filter(ty_cut, size=fs)

    return tx_filter, ty_filter
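A hypothetical usage sketch of the function above on a synthetic displacement field; all parameter values are illustrative only, and the function is assumed to be in scope:

import numpy as np

u = np.random.normal(0.0, 0.5, (50, 50))  # deformation in pixels, x direction
v = np.random.normal(0.0, 0.5, (50, 50))  # deformation in pixels, y direction
tx, ty = ffttc_traction_pure_shear(u, v,
                                   pixelsize1=0.10,   # µm/pixel of the original image
                                   pixelsize2=0.65,   # µm/pixel of the deformation image
                                   h=300,             # membrane height in µm
                                   young=20000,       # Young's modulus in Pa
                                   sigma=0.49,
                                   spatial_filter="gaussian")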
Ejemplo n.º 47
0
def AGD(vel, data, errors, alpha1=None, alpha2=None,
        plot=False, mode='c', verbose=False,
        SNR_thresh=5.0, BLFrac=0.1, SNR2_thresh=5.0, deblend=True,
        perform_final_fit=True, phase='one'):
    """ Autonomous Gaussian Decomposition
    """

    if not isinstance(SNR2_thresh, list): SNR2_thresh = [SNR2_thresh, SNR2_thresh]
    if not isinstance(SNR_thresh, list): SNR_thresh = [SNR_thresh, SNR_thresh]

    say('\n  --> AGD() \n',verbose)

    if (not alpha2) and (phase == 'two'):
        print('alpha2 value required')
        return

    dv = np.abs(vel[1] - vel[0])
    v_to_i = interp1d(vel, np.arange(len(vel)))


    #--------------------------------------#
    # Find phase-one guesses               #
    #--------------------------------------#
    agd1 = initialGuess(vel, data, errors = None, alpha = alpha1,
                                              plot = plot, mode = mode, verbose = verbose,
                                              SNR_thresh = SNR_thresh[0],
                                              BLFrac = BLFrac,
                                              SNR2_thresh = SNR2_thresh[0],
                                              deblend = deblend)

    amps_g1, widths_g1, offsets_g1, u2 = agd1['amps'], agd1['FWHMs'], agd1['means'], agd1['u2']
    params_g1 = np.append(np.append(amps_g1, widths_g1), offsets_g1)
    ncomps_g1 = len(params_g1) // 3
    ncomps_g2 = 0 # Default
    ncomps_f1 = 0 # Default



    # ----------------------------#
    # Find phase-two guesses #
    # ----------------------------#
    if phase == 'two':
        say('Beginning phase-two AGD... ', verbose)
        ncomps_g2 = 0.


        # ----------------------------------------------------------#
        # Produce the residual signal                               #
        #  -- Either the original data, or intermediate subtraction #
        # ----------------------------------------------------------#
        if ncomps_g1 == 0:
            say('Phase 2 with no narrow comps -> No intermediate subtraction... ', verbose)
            residuals = data
        else:
            # "Else" Narrow components were found, and Phase == 2, so perform intermediate subtraction...

            # The "fitmask" is a collection of windows around the a list of phase-one components
            fitmask, fitmaskw = create_fitmask(len(vel), v_to_i(offsets_g1), widths_g1 / dv / 2.355 * 0.9)
            notfitmask  = 1 - fitmask
            notfitmaskw  = np.logical_not(fitmaskw)


            # Error function for intermediate optimization
            def objectiveD2_leastsq(paramslm):
                params = vals_vec_from_lmfit(paramslm)
                model0 = func(vel, *params)
                model2 = np.diff(np.diff(model0.ravel()))/dv/dv
                resids1 = fitmask[1:-1] * (model2 - u2[1:-1]) / errors[1:-1]
                resids2 = notfitmask * (model0 - data)  / errors /10.
                return np.append(resids1, resids2)


            # Perform the intermediate fit using LMFIT
            t0 = time.time()
            say('Running LMFIT on initial narrow components...', verbose)
            lmfit_params = paramvec_to_lmfit(params_g1)
            result = lmfit_minimize(objectiveD2_leastsq, lmfit_params, method='leastsq')
            params_f1 = vals_vec_from_lmfit(result.params)
            ncomps_f1 = len(params_f1) // 3

            # Make "FWHMS" positive
            params_f1[0:ncomps_f1][params_f1[0:ncomps_f1] < 0.0] = -1 * params_f1[0:ncomps_f1][params_f1[0:ncomps_f1] < 0.0]

            del lmfit_params
            say('LMFIT fit took {0} seconds.'.format(time.time()-t0))


            if result.success:
                # Compute intermediate residuals
                # Median filter on 2x effective scale to remove poor subtractions of strong components
                intermediate_model = func(vel, *params_f1).ravel() # Explicit final (narrow) model
                median_window = 2. * 10**((np.log10(alpha1) + 2.187) / 3.859)
                residuals = median_filter(data - intermediate_model, int(median_window))
            else:
                residuals = data
            # Finished producing residual signal # ---------------------------



        # Search for phase-two guesses
        agd2 = initialGuess(vel, residuals, errors = None,
                                                 alpha = alpha2, mode = mode, verbose = verbose,
                                                 SNR_thresh = SNR_thresh[1],
                                                 BLFrac = BLFrac,
                                                 SNR2_thresh = SNR2_thresh[1], # June 9 2014, change
                                                 deblend=deblend, plot=plot)
        ncomps_g2  = agd2['N_components']
        if  ncomps_g2 > 0:
            params_g2 = np.concatenate([agd2['amps'],agd2['FWHMs'], agd2['means']])
        else:
            params_g2 = []
        u22 = agd2['u2']

        # END PHASE 2 <<<



    # Check for phase two components, make final guess list
    # ------------------------------------------------------
    if phase=='two' and (ncomps_g2 > 0):
        amps_gf = np.append(params_g1[0:ncomps_g1], params_g2[0:ncomps_g2])
        widths_gf = np.append(params_g1[ncomps_g1:2*ncomps_g1], params_g2[ncomps_g2:2*ncomps_g2])
        offsets_gf = np.append(params_g1[2*ncomps_g1:3*ncomps_g1], params_g2[2*ncomps_g2:3*ncomps_g2])
        params_gf = np.concatenate([amps_gf, widths_gf, offsets_gf])
        ncomps_gf = len(params_gf) // 3
    else:
        params_gf = params_g1
        ncomps_gf = len(params_gf) // 3




    # Sort final guess list by amplitude
    # ----------------------------------
    say('N final parameter guesses: ' + str(ncomps_gf))
    amps_temp = params_gf[0:ncomps_gf]
    widths_temp = params_gf[ncomps_gf:2*ncomps_gf]
    offsets_temp = params_gf[2*ncomps_gf:3*ncomps_gf]
    w_sort_amp = np.argsort(amps_temp)[::-1]
    params_gf = np.concatenate([amps_temp[w_sort_amp], widths_temp[w_sort_amp], offsets_temp[w_sort_amp]])


    if (perform_final_fit == True) and (ncomps_gf > 0):
        say('\n\n  --> Final Fitting... \n',verbose)


        # Objective functions for final fit
        def objective_leastsq(paramslm):
            params = vals_vec_from_lmfit(paramslm)
            resids = (func(vel, *params).ravel() - data.ravel()) / errors
            return resids

        # Final fit using unconstrained parameters
        t0 = time.time()
        lmfit_params = paramvec_to_lmfit(params_gf)
        result2 = lmfit_minimize(objective_leastsq, lmfit_params, method='leastsq')
        params_fit = vals_vec_from_lmfit(result2.params)
        params_errs = errs_vec_from_lmfit(result2.params)
        ncomps_fit = len(params_fit) // 3

        del lmfit_params
        say('Final fit took {0} seconds.'.format(time.time()-t0), verbose)

        # Make "FWHMS" positive
        params_fit[0:ncomps_fit][params_fit[0:ncomps_fit] < 0.0] = -1 * params_fit[0:ncomps_fit][params_fit[0:ncomps_fit] < 0.0]


        best_fit_final = func(vel, *params_fit).ravel()
        rchi2 = np.sum( (data - best_fit_final)**2 / errors**2) / len(data)


        # Check if any amplitudes are identically zero, if so, remove them.
        if np.any(params_fit[0:ncomps_gf] == 0.0):
            amps_fit = params_fit[0:ncomps_gf]
            fwhms_fit = params_fit[ncomps_gf:2*ncomps_gf]
            offsets_fit = params_fit[2*ncomps_gf:3*ncomps_gf]
            w_keep = amps_fit > 0.
            params_fit = np.concatenate([amps_fit[w_keep], fwhms_fit[w_keep], offsets_fit[w_keep]])
            ncomps_fit = len(params_fit) // 3

    if plot:
        datamax = np.max(data)

        # Set up figure
        fig = plt.figure('AGD results', [12,12])
        ax1 = fig.add_axes([0.1,0.5,0.4,0.4]) # Initial guesses (alpha1)
        ax2 = fig.add_axes([0.5,0.5,0.4,0.4]) # D2 fit to peaks(alpha2)
        ax3 = fig.add_axes([0.1,0.1,0.4,0.4]) # Initial guesses (alpha2)
        ax4 = fig.add_axes([0.5,0.1,0.4,0.4]) # Final fit

        # Decorations
        plt.figtext(0.52,0.47,'Final fit')
        if perform_final_fit:
            try:
                plt.figtext(0.52,0.45,'Reduced Chi2: {0:3.1f}'.format(rchi2))
                plt.figtext(0.52,0.43,'N components: {0}'.format(ncomps_fit))
            except UnboundLocalError:
                pass

        plt.figtext(0.12,0.47,'Phase-two initial guess')
        plt.figtext(0.12,0.45,'N components: {0}'.format(ncomps_g2))

        plt.figtext(0.12,0.87,'Phase-one initial guess')
        plt.figtext(0.12,0.85,'N components: {0}'.format(ncomps_g1))

        plt.figtext(0.52,0.87,'Intermediate fit')


        # Initial Guesses (Panel 1)
        # -------------------------
        ax1.xaxis.tick_top()
        u2_scale = 1. / np.max(np.abs(u2)) * datamax * 0.5
        ax1.plot(vel, data, '-k')
        ax1.plot(vel, u2 * u2_scale, '-r')
        ax1.plot(vel, vel / vel * agd1['thresh'], '-k')
        ax1.plot(vel, vel / vel * agd1['thresh2'] * u2_scale, '--r')

        for i in range(ncomps_g1):
            one_component = gaussian(params_g1[i], params_g1[i+ncomps_g1], params_g1[i+2*ncomps_g1])(vel)
            ax1.plot(vel, one_component,'-g')



        # Plot intermediate fit components (Panel 2)
        # ------------------------------------------
        ax2.xaxis.tick_top()
        ax2.plot(vel, data, '-k')
        ax2.yaxis.tick_right()
        for i in range(ncomps_f1):
            one_component = gaussian(params_f1[i], params_f1[i+ncomps_f1], params_f1[i+2*ncomps_f1])(vel)
            ax2.plot(vel, one_component,'-',color='blue')


        # Residual spectrum (Panel 3)
        # -----------------------------
        if phase == 'two':
            u22_scale = 1. / np.abs(u22).max() * np.max(residuals) * 0.5
            ax3.plot(vel, residuals, '-k')
            ax3.plot(vel, vel / vel * agd2['thresh'], '--k')
            ax3.plot(vel, vel / vel * agd2['thresh2'] * u22_scale, '--r')
            ax3.plot(vel, u22 * u22_scale, '-r')
            for i in range(ncomps_g2):
                one_component = gaussian(params_g2[i], params_g2[i+ncomps_g2], params_g2[i+2*ncomps_g2])(vel)
                ax3.plot(vel, one_component,'-g')


        # Plot best-fit model (Panel 4)
        # -----------------------------
        if perform_final_fit:
            ax4.yaxis.tick_right()
            try:
                ax4.plot(vel, best_fit_final, label='final model',color='purple')
                ax4.plot(vel, data, label='data',color='black')
                for i in range(ncomps_fit):
                    one_component = gaussian(params_fit[i], params_fit[i+ncomps_fit], params_fit[i+2*ncomps_fit])(vel)
                    ax4.plot(vel, one_component,'-',color='purple')
                ax4.plot(vel ,best_fit_final, '-', color='purple')
            except UnboundLocalError:
                pass

        plt.show()




    # Construct output dictionary (odict)
    # -----------------------------------
    odict = {}
    odict['initial_parameters'] = params_gf
    odict['N_components'] = ncomps_gf

    if (perform_final_fit == True) and (ncomps_gf > 0):
        odict['best_fit_parameters'] = params_fit
        odict['best_fit_errors'] = params_errs
        odict['rchi2'] = rchi2

    return (1, odict)
Ejemplo n.º 48
0
def ffttc_traction_pure_shear(u,
                              v,
                              pixelsize1,
                              pixelsize2,
                              h,
                              young,
                              sigma=0.49,
                              filter="mean",
                              fs=None):
    '''
    Limiting case for h*k == 0.
    Xavier Trepat, Physical forces during collective cell migration, 2009

    :param u: deformation field in x direction, in pixels of the deformation image
    :param v: deformation field in y direction, in pixels of the deformation image
    :param young: Young's modulus in Pa
    :param pixelsize1: pixel size of the original image, needed because u and v are given as displacements of these pixels
    :param pixelsize2: pixel size of the deformation image
    :param h: height of the membrane the cells lie on, in µm
    :param sigma: Poisson's ratio of the gel
    :param bf_image: the brightfield image as an array, before cells were removed
    :param filter: str, values: "mean", "gaussian", "median". Different smoothing methods for the traction field.
    :return: tx_filter, ty_filter: traction forces in x and y direction in Pa
    '''

    # 0) subtract the mean displacement
    u_shift = (u - np.mean(u)) * pixelsize1
    v_shift = (v - np.mean(v)) * pixelsize1

    ## Ben's algorithm:

    # 1) Zero-padding to get a square array with an even number of indices
    ax1_length = np.shape(u_shift)[0]  # u and v must have same dimensions
    ax2_length = np.shape(u_shift)[1]
    max_ind = int(np.max((ax1_length, ax2_length)))
    if max_ind % 2 != 0:
        max_ind += 1
    u_expand = np.zeros((max_ind, max_ind))
    v_expand = np.zeros((max_ind, max_ind))
    u_expand[max_ind - ax1_length:max_ind,
             max_ind - ax2_length:max_ind] = u_shift
    v_expand[max_ind - ax1_length:max_ind,
             max_ind - ax2_length:max_ind] = v_shift
    # note: seems to be numerically slightly different than in the normal function (unclear why)

    # 2) producing wave vectors (FT-space)
    # from 1:max_ind/2 then -(max_ind/2:1)
    kx1 = np.array([
        list(range(0, int(max_ind / 2), 1)),
    ] * int(max_ind))
    kx2 = np.array([
        list(range(-int(max_ind / 2), 0, 1)),
    ] * int(max_ind))

    kx = np.append(kx1, kx2, axis=1) / (
        pixelsize2 * max_ind
    ) * 2 * np.pi  # spatial frequencies: 1/wavelength,in 1/µm in fractions of total length

    ky = np.transpose(kx)
    k = np.sqrt(kx**2 + ky**2)  # matrix of spatial-frequency magnitudes

    u_ft = scipy.fft.fft2(u_expand)
    v_ft = scipy.fft.fft2(v_expand)

    # 4.1) calculate tractions in Fourier space
    mu = young / (2 * (1 + sigma))
    tx_ft = mu * u_ft / h
    tx_ft[0, 0] = 0  # zero frequency would represent a constant force everywhere
    ty_ft = mu * v_ft / h
    ty_ft[0, 0] = 0

    # 4.2) go back to real space
    tx = scipy.fft.ifft2(tx_ft).real  # maybe divide by 2*pi here?
    ty = scipy.fft.ifft2(ty_ft).real

    # 5.2) cut back to the original array size, as in Ben's script

    tx_cut = tx[max_ind - ax1_length:max_ind, max_ind - ax2_length:max_ind]
    ty_cut = ty[max_ind - ax1_length:max_ind, max_ind - ax2_length:max_ind]

    # 5.3) apply the requested spatial filter; default to the unfiltered field
    tx_filter = tx_cut
    ty_filter = ty_cut
    if filter == "mean":
        fs = fs if isinstance(fs, (float, int)) else int(
            int(np.max((ax1_length, ax2_length))) / 16)
        tx_filter = uniform_filter(tx_cut, size=fs)
        ty_filter = uniform_filter(ty_cut, size=fs)
    if filter == "gaussian":
        fs = fs if isinstance(fs,
                              (float,
                               int)) else int(np.max(
                                   (ax1_length, ax2_length))) / 50
        tx_filter = gaussian_filter(tx_cut, sigma=fs)
        ty_filter = gaussian_filter(ty_cut, sigma=fs)
    if filter == "median":
        fs = fs if isinstance(fs, (float, int)) else int(
            int(np.max((ax1_length, ax2_length))) / 16)
        tx_filter = median_filter(tx_cut, size=fs)
        ty_filter = median_filter(ty_cut, size=fs)
    if not isinstance(filter, str):
        tx_filter = tx_cut
        ty_filter = ty_cut
    # show_quiver(tx_filter,ty_filter)
    return (tx_filter, ty_filter)
Ejemplo n.º 49
0
def slice_wise_fft(in_file, ftmask=None, spike_thres=3., out_prefix=None):
    """Search for spikes in slices using the 2D FFT"""
    import os.path as op
    import numpy as np
    import nibabel as nb
    from mriqc.workflows.utils import spectrum_mask
    from scipy.ndimage.filters import median_filter
    from scipy.ndimage import generate_binary_structure, binary_erosion
    from statsmodels.robust.scale import mad


    if out_prefix is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, _ = op.splitext(fname)
        out_prefix = op.abspath(fname)

    func_data = nb.load(in_file).get_fdata()

    if ftmask is None:
        ftmask = spectrum_mask(tuple(func_data.shape[:2]))

    fft_data = []
    for t in range(func_data.shape[-1]):
        func_frame = func_data[..., t]
        fft_slices = []
        for z in range(func_frame.shape[2]):
            sl = func_frame[..., z]
            fftsl = median_filter(np.real(np.fft.fft2(sl)).astype(np.float32),
                                  size=(5, 5), mode='constant') * ftmask
            fft_slices.append(fftsl)
        fft_data.append(np.stack(fft_slices, axis=-1))

    # Recompose the 4D FFT timeseries
    fft_data = np.stack(fft_data, -1)

    # Z-score across t, using robust statistics
    mu = np.median(fft_data, axis=3)
    sigma = np.stack([mad(fft_data, axis=3)] * fft_data.shape[-1], -1)
    idxs = np.where(np.abs(sigma) > 1e-4)
    fft_zscored = fft_data - mu[..., np.newaxis]
    fft_zscored[idxs] /= sigma[idxs]

    # save fft z-scored
    out_fft = op.abspath(out_prefix + '_zsfft.nii.gz')
    nii = nb.Nifti1Image(fft_zscored.astype(np.float32), np.eye(4), None)
    nii.to_filename(out_fft)

    # Find peaks
    spikes_list = []
    for t in range(fft_zscored.shape[-1]):
        fft_frame = fft_zscored[..., t]

        for z in range(fft_frame.shape[-1]):
            sl = fft_frame[..., z]
            if np.all(sl < spike_thres):
                continue

            # Any zscore over spike_thres will be called a spike
            sl[sl <= spike_thres] = 0
            sl[sl > 0] = 1

            # Erode peaks and see how many survive
            struc = generate_binary_structure(2, 2)
            sl = binary_erosion(sl.astype(np.uint8), structure=struc).astype(np.uint8)

            if sl.sum() > 10:
                spikes_list.append((t, z))

    out_spikes = op.abspath(out_prefix + '_spikes.tsv')
    np.savetxt(out_spikes, spikes_list, fmt='%d', delimiter='\t', header='TR\tZ')


    return len(spikes_list), out_spikes, out_fft
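
# A standalone, hypothetical sketch of the robust z-scoring step used in
# slice_wise_fft above: center each voxel's FFT time series on its median and
# scale by the MAD, skipping voxels whose MAD is effectively zero. The array
# below is synthetic; only numpy and statsmodels are assumed.
import numpy as np
from statsmodels.robust.scale import mad

demo_fft = np.random.default_rng(0).normal(size=(16, 16, 8, 40))  # x, y, z, t
demo_mu = np.median(demo_fft, axis=3)
demo_sigma = np.stack([mad(demo_fft, axis=3)] * demo_fft.shape[-1], -1)
demo_zscored = demo_fft - demo_mu[..., np.newaxis]
demo_idxs = np.where(np.abs(demo_sigma) > 1e-4)
demo_zscored[demo_idxs] /= demo_sigma[demo_idxs]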
Ejemplo n.º 50
0
def ffttc_traction(u,
                   v,
                   pixelsize1,
                   pixelsize2,
                   young,
                   sigma=0.49,
                   filter="gaussian",
                   fs=None):
    '''
    Fourier-transform based calculation of the traction force. u and v must be given as deformations in pixels.
    The size of these pixels must be the pixelsize (size of a pixel in the deformation field u or v). Note that the
    PIV deformation returns deformations in pixels of the size used in the bead images before and after deformation.
    If bf_image is provided, this script returns a traction field that is zoomed to the size of the brightfield
    image by interpolation. It is not recommended to use this for any calculations.
    The function can use different filters. The recommended filter is Gaussian; the mean filter should yield
    similar results.

    :param u: deformation field in x direction, in pixels of the deformation image
    :param v: deformation field in y direction, in pixels of the deformation image
    :param young: Young's modulus in Pa
    :param pixelsize1: pixel size in m/pixel of the original image, needed because u and v are given as displacements of these pixels
    :param pixelsize2: pixel size in m/pixel of the deformation image
    :param sigma: Poisson ratio of the gel
    :param bf_image: the brightfield image as an array, taken before the cells were removed
    :param filter: str, one of "mean", "gaussian", "median"; different smoothing methods for the traction field
    :return: tx_filter, ty_filter: traction forces in x and y direction in Pa
    '''

    # 0) subtracting the mean displacement
    u_shift = (
        u - np.mean(u)
    )  # shifting to zero mean (conversion to the pixel size of the u-image is done later)
    v_shift = (v - np.mean(v))
    ## Ben's algorithm:

    # 1) Zero-padding to get a square array with an even number of indices
    ax1_length = np.shape(u_shift)[0]  # u and v must have same dimensions
    ax2_length = np.shape(u_shift)[1]
    max_ind = int(np.max((ax1_length, ax2_length)))
    if max_ind % 2 != 0:
        max_ind += 1

    u_expand = np.zeros((max_ind, max_ind))
    v_expand = np.zeros((max_ind, max_ind))
    u_expand[:ax1_length, :ax2_length] = u_shift
    v_expand[:ax1_length, :ax2_length] = v_shift

    # 2) producing wave vectors (FT-space)
    # from 1:max_ind/2 then -(max_ind/2:1)
    kx1 = np.array([
        list(range(0, int(max_ind / 2), 1)),
    ] * int(max_ind))
    kx2 = np.array([
        list(range(-int(max_ind / 2), 0, 1)),
    ] * int(max_ind))
    kx = np.append(
        kx1, kx2,
        axis=1) * 2 * np.pi  # the Fourier transform here is defined as
    # F(kx) = 1/(2*pi) * integral(exp(i*kx*x) dx), therefore kx must be expressed as a spatial frequency times 2*pi

    ky = np.transpose(kx)
    k = np.sqrt(kx**2 + ky**2) / (pixelsize2 * max_ind)
    # np.save("/home/user/Desktop/k_test.npy",k)

    # 2.1) calculating the angle between k and kx with the atan2 function, which resolves
    # the quadrant to give the angle from x1 to x2 in a fixed direction
    alpha = np.arctan2(ky, kx)
    alpha[0, 0] = np.pi / 2
    # np.save("/home/user/Desktop/alpha_test.npy",alpha)
    # 3) calculation of K --> tensor that maps tractions to displacements. We calculate the inverse of K
    # (verify the inversion yourself)
    # K⁻¹ = [[kix, kid],
    #        [kid, kiy]]   i.e. it is symmetric; kid appears twice as the off-diagonal element

    kix = ((k * young) /
           (2 * (1 - sigma**2))) * ((1 - sigma + sigma * np.cos(alpha)**2))
    kiy = ((k * young) /
           (2 * (1 - sigma**2))) * ((1 - sigma + sigma * np.sin(alpha)**2))
    kid = ((k * young) /
           (2 * (1 - sigma**2))) * (sigma * np.sin(alpha) * np.cos(alpha))

    # np.save("/home/user/Desktop/kid_seg2_test.npy",(sigma * np.sin(alpha) * np.cos(alpha)))
    # zero out the central (Nyquist-frequency) row and column of kid
    kid[:, int(max_ind / 2)] = np.zeros(max_ind)
    kid[int(max_ind / 2), :] = np.zeros(max_ind)

    # 4) calculate the Fourier transform of the displacements
    # u_ft=np.fft.fft2(u_expand*pixelsize1*2*np.pi)
    # v_ft=np.fft.fft2(v_expand*pixelsize1*2*np.pi)   #
    u_ft = scipy.fft.fft2(u_expand * pixelsize1)
    v_ft = scipy.fft.fft2(v_expand * pixelsize1)

    # 4.1) calculate tractions in Fourier space: T = K⁻¹ * U with U = [u, v], written out with the individual matrix elements
    tx_ft = kix * u_ft + kid * v_ft
    ty_ft = kid * u_ft + kiy * v_ft

    # 4.2) go back to real space
    tx = scipy.fft.ifft2(tx_ft).real
    ty = scipy.fft.ifft2(ty_ft).real

    # 5.2) cut back to original shape
    tx_cut = tx[0:ax1_length, 0:ax2_length]
    ty_cut = ty[0:ax1_length, 0:ax2_length]

    # 5.3) apply the chosen smoothing filter
    if filter == "mean":
        fs = fs if isinstance(fs, (float, int)) else int(
            int(np.max((ax1_length, ax2_length))) / 16)
        tx_filter = uniform_filter(tx_cut, size=fs)
        ty_filter = uniform_filter(ty_cut, size=fs)
    elif filter == "gaussian":
        fs = fs if isinstance(fs, (float, int)) else int(
            np.max((ax1_length, ax2_length))) / 50
        tx_filter = gaussian_filter(tx_cut, sigma=fs)
        ty_filter = gaussian_filter(ty_cut, sigma=fs)
    elif filter == "median":
        fs = fs if isinstance(fs, (float, int)) else int(
            int(np.max((ax1_length, ax2_length))) / 16)
        tx_filter = median_filter(tx_cut, size=fs)
        ty_filter = median_filter(ty_cut, size=fs)
    else:
        # no recognized filter: return the unfiltered traction field
        tx_filter = tx_cut
        ty_filter = ty_cut

    return (tx_filter, ty_filter)
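
# A hypothetical usage sketch for ffttc_traction above, assuming the
# module-level imports it relies on (numpy as np, scipy.fft, and the
# uniform/gaussian/median filters from scipy.ndimage) are in scope. The
# deformation fields and parameter values are synthetic placeholders, not
# values from the original source.
import numpy as np

demo_u = np.random.default_rng(0).normal(scale=0.5, size=(60, 80))  # deformation in pixels
demo_v = np.random.default_rng(1).normal(scale=0.5, size=(60, 80))
demo_tx, demo_ty = ffttc_traction(demo_u, demo_v,
                                  pixelsize1=0.10e-6,  # m/pixel of the bead images (assumed)
                                  pixelsize2=0.80e-6,  # m/pixel of the deformation field (assumed)
                                  young=25000,         # Young's modulus in Pa (assumed)
                                  sigma=0.49,
                                  filter="gaussian")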
Ejemplo n.º 51
0
        x = width - 1

    if y < 0:
        y = 0
    elif y >= height:
        y = height - 1

    return x, y


print "CREATING..."
depths = create_depths_from_pix(pix)
print "CREATED"
print "SHIFTING..."
depths = shift_depths(depths)
print "SHIFTED"
# print "DILATING..."
# depths = dilate(depths)
# print "DILATED"
# print "ERODING..."
# depths = erode(depths)
# print "ERODED"
print "MEDIANING..."
depths = filters.median_filter(depths, size=(13, 5))
print "MEDIANED"
print "ADDING NOISE..."
depths = add_noise(depths)
print "DONE"
new_img = depths_to_image(depths)
new_img.show()
Ejemplo n.º 52
0
    def Blink_Tracker(EAR, IF_Closed_Eyes, Counter4blinks, TOTAL_BLINKS, skip):
        BLINK_READY = False

        if int(IF_Closed_Eyes) == 1:  # If the eyes are closed
            Current_Blink.values.append(EAR)
            Current_Blink.EAR_of_FOI = EAR  # Save to use later
            if Counter4blinks > 0:
                skip = False
            if Counter4blinks == 0:
                Current_Blink.startEAR = EAR  # EAR_series[6] is the EAR for the frame of interest(the middle one)
                Current_Blink.start = reference_frame - 6  # reference-6 points to the frame of interest which will be the 'start' of the blink
            Counter4blinks += 1
            if Current_Blink.peakEAR >= EAR:  # deciding the min point of the EAR signal
                Current_Blink.peakEAR = EAR
                Current_Blink.peak = reference_frame - 6
        else:  # otherwise, the eyes are open in this frame
            if Counter4blinks < 2 and skip == False:  # Wait to approve or reject the last blink
                if Last_Blink.duration > 15:
                    FRAME_MARGIN_BTW_2BLINKS = 8
                else:
                    FRAME_MARGIN_BTW_2BLINKS = 1
                if ((reference_frame - 6) -
                        Last_Blink.end) > FRAME_MARGIN_BTW_2BLINKS:
                    # Check so the prev blink signal is not monotonic or too small (noise)
                    if Last_Blink.peakEAR < Last_Blink.startEAR and Last_Blink.peakEAR < Last_Blink.endEAR and Last_Blink.amplitude > MIN_AMPLITUDE and Last_Blink.start < Last_Blink.peak:
                        if ((Last_Blink.startEAR - Last_Blink.peakEAR) >
                            (Last_Blink.endEAR - Last_Blink.peakEAR) * 0.25 and
                            (Last_Blink.startEAR - Last_Blink.peakEAR) * 0.25 <
                            (Last_Blink.endEAR -
                             Last_Blink.peakEAR)):  # the amplitude is balanced
                            BLINK_READY = True

                            #THE ULTIMATE BLINK Check
                            Last_Blink.values = signal.convolve1d(
                                Last_Blink.values, [1 / 3.0, 1 / 3.0, 1 / 3.0],
                                mode='nearest')

                            Last_Blink.values = signal.median_filter(
                                Last_Blink.values, 3, mode='reflect')
                            # #smoothing the signal
                            [MISSED_BLINKS,
                             retrieved_blinks] = Ultimate_Blink_Check()
                            TOTAL_BLINKS = TOTAL_BLINKS + len(
                                retrieved_blinks
                            )  # Finally, approving/counting the previous blink candidate

                            ### Now you can use the info of the last separate, valid blink and analyze it
                            Counter4blinks = 0
                            print("RETRIEVED BLINKS= {}".format(
                                len(retrieved_blinks)))
                            return retrieved_blinks, int(
                                TOTAL_BLINKS
                            ), Counter4blinks, BLINK_READY, skip
                        else:
                            skip = True
                            print('rejected due to imbalance')
                    else:
                        skip = True
                        print('rejected due to noise, magnitude is {}'.format(
                            Last_Blink.amplitude))
                        print(Last_Blink.start < Last_Blink.peak)

            # if the eyes were closed for a sufficient number of frames (2 or more)
            # then this is a valid CANDIDATE for a blink
            if Counter4blinks > 1:
                Current_Blink.end = reference_frame - 7  # reference-7 points to the last frame that eyes were closed
                Current_Blink.endEAR = Current_Blink.EAR_of_FOI
                Current_Blink.amplitude = (Current_Blink.startEAR +
                                           Current_Blink.endEAR -
                                           2 * Current_Blink.peakEAR) / 2
                Current_Blink.duration = Current_Blink.end - Current_Blink.start + 1

                if Last_Blink.duration > 15:
                    FRAME_MARGIN_BTW_2BLINKS = 8
                else:
                    FRAME_MARGIN_BTW_2BLINKS = 1
                if (
                        Current_Blink.start - Last_Blink.end
                ) <= FRAME_MARGIN_BTW_2BLINKS + 1:  # Merging two close blinks
                    print('Merging...')
                    frames_in_between = Current_Blink.start - Last_Blink.end - 1
                    print(Current_Blink.start, Last_Blink.end,
                          frames_in_between)
                    valuesBTW = Linear_Interpolate(Last_Blink.endEAR,
                                                   Current_Blink.startEAR,
                                                   frames_in_between)
                    Last_Blink.values = Last_Blink.values + valuesBTW + Current_Blink.values
                    Last_Blink.end = Current_Blink.end  # update the end
                    Last_Blink.endEAR = Current_Blink.endEAR
                    if Last_Blink.peakEAR > Current_Blink.peakEAR:  # update the peak
                        Last_Blink.peakEAR = Current_Blink.peakEAR
                        Last_Blink.peak = Current_Blink.peak
                        # update duration and amplitude
                    Last_Blink.amplitude = (Last_Blink.startEAR +
                                            Last_Blink.endEAR -
                                            2 * Last_Blink.peakEAR) / 2
                    Last_Blink.duration = Last_Blink.end - Last_Blink.start + 1
                else:  # Should not Merge (a Separate blink)
                    Last_Blink.values = Current_Blink.values  # update the EAR list
                    Last_Blink.end = Current_Blink.end  # update the end
                    Last_Blink.endEAR = Current_Blink.endEAR

                    Last_Blink.start = Current_Blink.start  # update the start
                    Last_Blink.startEAR = Current_Blink.startEAR

                    Last_Blink.peakEAR = Current_Blink.peakEAR  # update the peak
                    Last_Blink.peak = Current_Blink.peak

                    Last_Blink.amplitude = Current_Blink.amplitude
                    Last_Blink.duration = Current_Blink.duration
            # reset the eye frame counter
            Counter4blinks = 0
        retrieved_blinks = 0
        return retrieved_blinks, int(
            TOTAL_BLINKS), Counter4blinks, BLINK_READY, skip
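
# A standalone, hypothetical sketch of the smoothing applied to the blink's EAR
# trace above: a 3-tap moving average followed by a 3-point median filter.
# The EAR values are made up, and scipy.ndimage is used explicitly here.
import numpy as np
from scipy import ndimage

demo_ear = np.array([0.30, 0.28, 0.21, 0.12, 0.10, 0.14, 0.24, 0.29, 0.31])
demo_ear = ndimage.convolve1d(demo_ear, [1 / 3.0, 1 / 3.0, 1 / 3.0], mode='nearest')
demo_ear = ndimage.median_filter(demo_ear, 3, mode='reflect')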
Ejemplo n.º 53
0
    coords = []
    img = images_hu[k]
    root = tk.Tk()
    im = Image.fromarray(img)
    photo = ImageTk.PhotoImage(im)
    canvas = tk.Canvas(root, width=512, height=512)
    canvas.pack()
    canvas.create_image(256, 256, image=photo)
    canvas.bind("<Button-1>", onclick)
    root.mainloop()
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            if img[i][j] > 0:
                img[i][j] *= 12
    blurred_f = ndimage.gaussian_filter(img, sigma=3)
    filter_blurred_f = filters.median_filter(img, 3)
    filter_blurred_f2 = ndimage.gaussian_filter(filter_blurred_f, sigma=1)

    alpha = 10
    sharpened = filter_blurred_f + alpha * (filter_blurred_f -
                                            filter_blurred_f2)
    seed = np.zeros(img.shape)
    for jj in range(len(coords)):
        seed[int(coords[jj][1]), int(coords[jj][0])] = 1
    seg, phi, its = lvlset(sharpened,
                           seed,
                           max_its=1800,
                           display=True,
                           alpha=0.2,
                           thresh=0.1)
    segment = seg * 1.0
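
# A standalone, hypothetical sketch of the unsharp-mask style sharpening used
# above: denoise with a median filter, blur the denoised copy, then add back a
# scaled difference to emphasize edges before segmentation. The image and
# alpha value are illustrative only.
import numpy as np
from scipy import ndimage

demo_img = np.random.default_rng(0).normal(size=(128, 128))
demo_denoised = ndimage.median_filter(demo_img, 3)
demo_blurred = ndimage.gaussian_filter(demo_denoised, sigma=1)
demo_alpha = 10
demo_sharpened = demo_denoised + demo_alpha * (demo_denoised - demo_blurred)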
Ejemplo n.º 54
0
def smooth_data(data, mbox=25):
    mdata = median_filter(data, size=(mbox, mbox))
    return data - mdata
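
# A hypothetical usage sketch for smooth_data above, assuming median_filter
# (scipy.ndimage) is imported at module level as the function requires. The
# large median window estimates a smooth background; subtracting it keeps only
# compact features. The image below is synthetic.
import numpy as np

demo_img = np.random.default_rng(0).normal(loc=100.0, scale=1.0, size=(200, 200))
demo_img[90:110, 90:110] += 5.0         # a compact feature on a smooth background
demo_residual = smooth_data(demo_img)   # background removed, the feature remains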
Ejemplo n.º 55
0
def threshold_components_parallel(pars):
    """
       Post-processing of spatial components which includes the following steps

       (i) Median filtering
       (ii) Thresholding
       (iii) Morphological closing of spatial support
       (iv) Extraction of largest connected component ( to remove small unconnected pixel )
       /!\ need to be called through the function threshold components

       Parameters:
        ---------
        [parsed]
       A:      np.ndarray
           2d matrix with spatial components

       dims:   tuple
           dimensions of spatial components

       medw: [optional] tuple
           window of median filter

       thr_method: [optional] string
           Method of thresholding:
               'max' sets to zero pixels that have value less than a fraction of the max value
               'nrg' keeps the pixels that contribute up to a specified fraction of the energy

       maxthr: [optional] scalar
           Threshold of max value

       nrgthr: [optional] scalar
           Threshold of energy

       extract_cc: [optional] bool
           Flag to extract connected components (might want to turn to False for dendritic imaging)

       se: [optional] np.intarray
           Morphological closing structuring element

       ss: [optinoal] np.intarray
           Binary element for determining connectivity

       Returns:
        -------
           Ath: np.ndarray
               2d matrix with spatial components thresholded
       """

    A_i, i, dims, medw, d, thr_method, se, ss, maxthr, nrgthr, extract_cc = pars
    # we reshape this one-dimensional column of the components matrix back into its 2D shape
    A_temp = np.reshape(A_i, dims[::-1])
    # we apply a median filter of size medw
    A_temp = median_filter(A_temp, medw)
    if thr_method == 'max':
        BW = (A_temp > maxthr * np.max(A_temp))
    elif thr_method == 'nrg':
        Asor = np.sort(np.squeeze(np.reshape(A_temp, (d, 1))))[::-1]
        temp = np.cumsum(Asor**2)
        ff = np.squeeze(np.where(temp < nrgthr * temp[-1]))
        if ff.size > 0:
            ind = ff if ff.ndim == 0 else ff[-1]
            A_temp[A_temp < Asor[ind]] = 0
            BW = (A_temp >= Asor[ind])
        else:
            BW = np.zeros_like(A_temp)
    # we want to remove the components that are valued 0 in this now 1d matrix
    Ath = np.squeeze(np.reshape(A_temp, (d, 1)))
    Ath2 = np.zeros((d))
    # we do that to have a fully closed structure even if the values have been thresholded
    BW = binary_closing(BW.astype(int), structure=se)

    # if we have deleted the element
    if BW.max() == 0:
        return Ath2, i
    #
    if extract_cc:  # we want to extract the largest connected component ( to remove small unconnected pixel )
        # we extract each feature independently using the connectivity structuring element
        labeled_array, num_features = label(BW, structure=ss)
        labeled_array = np.squeeze(np.reshape(labeled_array, (d, 1)))
        nrg = np.zeros((num_features, 1))
        # we extract the energy for each component
        for j in range(num_features):
            nrg[j] = np.sum(Ath[labeled_array == j + 1]**2)
        indm = np.argmax(nrg)
        Ath2[labeled_array == indm + 1] = Ath[labeled_array == indm + 1]

    else:
        BW = BW.flatten()
        Ath2[BW] = Ath[BW]

    return Ath2, i
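
# A hypothetical usage sketch for threshold_components_parallel above, assuming
# the scipy.ndimage functions it uses (median_filter, binary_closing, label) are
# imported at module level. All parameter values are illustrative defaults, not
# taken from the original source.
import numpy as np

demo_dims = (32, 32)
demo_d = int(np.prod(demo_dims))
demo_comp = np.zeros(demo_dims, dtype=np.float32)
demo_comp[10:18, 12:20] = 1.0              # one compact component
demo_comp[25:27, 3:5] = 0.2                # a small spurious speckle
demo_pars = (demo_comp.ravel(), 0, demo_dims,
             (3, 3),                        # medw: median-filter window (assumed)
             demo_d, 'max',                 # d, thr_method
             np.ones((3, 3), dtype=int),    # se: closing structuring element (assumed)
             np.ones((3, 3), dtype=int),    # ss: connectivity element (assumed)
             0.1,                           # maxthr (assumed)
             0.9999,                        # nrgthr (unused with 'max')
             True)                          # extract_cc
demo_Ath, demo_idx = threshold_components_parallel(demo_pars)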
Ejemplo n.º 56
0
def reduce_singlefiber_phase3(config, logtable):
    """Reduce the single fiber data of Xinglong 2.16m HRS.

    Args:
        config (:class:`configparser.ConfigParser`): Config object.
        logtable (:class:`astropy.table.Table`): Table of observing log.

    """
    # extract keywords from config file
    section = config['data']
    rawpath = section.get('rawpath')
    statime_key = section.get('statime_key')
    exptime_key = section.get('exptime_key')
    direction = section.get('direction')

    section = config['reduce']
    midpath = section.get('midpath')
    odspath = section.get('odspath')
    figpath = section.get('figpath')
    mode = section.get('mode')
    fig_format = section.get('fig_format')
    oned_suffix = section.get('oned_suffix')
    ncores = section.get('ncores')

    # create folders if not exist
    if not os.path.exists(figpath): os.mkdir(figpath)
    if not os.path.exists(odspath): os.mkdir(odspath)
    if not os.path.exists(midpath): os.mkdir(midpath)

    # determine number of cores to be used
    if ncores == 'max':
        ncores = os.cpu_count()
    else:
        ncores = min(os.cpu_count(), int(ncores))

    # initialize general card list
    general_card_lst = {}

    ### Parse bias

    bias, bias_card_lst = get_bias(config, logtable)

    ### define dtype of 1-d spectra
    if bias is None:
        ndisp = 4096
    else:
        ncros, ndisp = bias.shape

    types = [
        ('aperture', np.int16),
        ('order', np.int16),
        ('points', np.int16),
        ('wavelength', (np.float64, ndisp)),
        ('flux', (np.float32, ndisp)),
        ('error', (np.float32, ndisp)),
        ('background', (np.float32, ndisp)),
        ('mask', (np.int16, ndisp)),
    ]
    names, formats = list(zip(*types))
    spectype = np.dtype({'names': names, 'formats': formats})

    ### Combine the flats and trace the orders

    # filter flat frames
    filterfunc = lambda item: item['object'].lower() == 'flat'
    logitem_lst = list(filter(filterfunc, logtable))

    nflat = len(logitem_lst)

    flat_filename = os.path.join(midpath, 'flat.fits')
    aperset_filename = os.path.join(midpath, 'trace.trc')
    aperset_regname = os.path.join(midpath, 'trace.reg')
    trace_figname = os.path.join(figpath, 'trace.{}'.format(fig_format))
    profile_filename = os.path.join(midpath, 'profile.fits')


    if mode=='debug' and os.path.exists(flat_filename) \
        and os.path.exists(aperset_filename) \
        and os.path.exists(profile_filename):
        # read flat data and mask array
        hdu_lst = fits.open(flat_filename)
        flat_data = hdu_lst[0].data
        flat_mask = hdu_lst[1].data
        flat_norm = hdu_lst[2].data
        flat_sens = hdu_lst[3].data
        flat_spec = hdu_lst[4].data
        exptime = hdu_lst[0].header[exptime_key]
        hdu_lst.close()
        aperset = load_aperture_set(aperset_filename)
        disp_x_lst, profile_x, profile_lst = read_crossprofile(
            profile_filename)
    else:
        data_lst = []
        head_lst = []
        exptime_lst = []

        print('* Combine {} Flat Images: {}'.format(nflat, flat_filename))
        fmt_str = '  - {:>7s} {:^11} {:^8s} {:^7} {:^23s} {:^8} {:^6}'
        head_str = fmt_str.format('frameid', 'FileID', 'Object', 'exptime',
                                  'obsdate', 'N(sat)', 'Q95')

        for iframe, logitem in enumerate(logitem_lst):
            # read each individual flat frame
            fname = '{}.fits'.format(logitem['fileid'])
            filename = os.path.join(rawpath, fname)
            data, head = fits.getdata(filename, header=True)
            exptime_lst.append(head[exptime_key])
            mask = get_mask(data, head)
            sat_mask = (mask & 4 > 0)
            bad_mask = (mask & 2 > 0)
            if iframe == 0:
                allmask = np.zeros_like(mask, dtype=np.int16)
            allmask += sat_mask

            # correct overscan for flat
            data, card_lst = correct_overscan(data, head, logitem['amp'])
            for key, value in card_lst:
                head.append((key, value))

            # correct bias for flat, if has bias
            if bias is None:
                message = 'No bias. skipped bias correction'
            else:
                data = data - bias
                message = 'Bias corrected'
            logger.info(message)

            # print info
            if iframe == 0:
                print(head_str)
            message = fmt_str.format('[{:d}]'.format(logitem['frameid']),
                                     logitem['fileid'], logitem['object'],
                                     logitem['exptime'], logitem['obsdate'],
                                     logitem['nsat'], logitem['q95'])
            print(message)

            data_lst.append(data)

        if nflat == 1:
            flat_data = data_lst[0]
        else:
            data_lst = np.array(data_lst)
            flat_data = combine_images(
                data_lst,
                mode='mean',
                upper_clip=10,
                maxiter=5,
                maskmode=(None, 'max')[nflat > 3],
                ncores=ncores,
            )
        # get mean exposure time and write it to header
        head = fits.Header()
        exptime = np.array(exptime_lst).mean()
        head[exptime_key] = exptime

        # find saturation mask
        sat_mask = allmask > nflat / 2.
        flat_mask = np.int16(sat_mask) * 4 + np.int16(bad_mask) * 2

        # get exposure time normalized flats
        flat_norm = flat_data / exptime

        # create the trace figure
        tracefig = TraceFigure()

        section = config['reduce.trace']
        aperset = find_apertures(
            flat_data,
            flat_mask,
            scan_step=section.getint('scan_step'),
            minimum=section.getfloat('minimum'),
            separation=section.get('separation'),
            align_deg=section.getint('align_deg'),
            filling=section.getfloat('filling'),
            degree=section.getint('degree'),
            display=section.getboolean('display'),
            fig=tracefig,
        )

        aperset.fill(tol=10)

        # save the trace figure
        tracefig.adjust_positions()
        title = 'Trace for {}'.format(flat_filename)
        tracefig.suptitle(title, fontsize=15)
        tracefig.savefig(trace_figname)

        aperset.save_txt(aperset_filename)
        aperset.save_reg(aperset_regname)

        # do the flat fielding
        # prepare the output mid-process figures in debug mode
        if mode == 'debug':
            figname = 'flat_aperpar_{}_%03d.{}'.format(flatname, fig_format)
            fig_aperpar = os.path.join(figpath, figname)
        else:
            fig_aperpar = None

        # prepare the name for slit figure
        figname = 'slit.{}'.format(fig_format)
        fig_slit = os.path.join(figpath, figname)

        # prepare the name for slit file
        fname = 'slit.dat'
        slit_file = os.path.join(midpath, fname)

        section = config['reduce.flat']

        p1, p2, pstep = -8, 8, 0.1
        profile_x = np.arange(p1, p2 + 1e-4, pstep)
        disp_x_lst = np.arange(48, ndisp, 500)

        fig_spatial = SpatialProfileFigure()
        flat_sens, flatspec_lst, profile_lst = get_flat(
            data=flat_data,
            mask=flat_mask,
            apertureset=aperset,
            nflat=nflat,
            q_threshold=section.getfloat('q_threshold'),
            smooth_A_func=smooth_aperpar_A,
            smooth_c_func=smooth_aperpar_c,
            smooth_bkg_func=smooth_aperpar_bkg,
            mode='debug',
            fig_spatial=fig_spatial,
            flatname='flat_normal',
            profile_x=profile_x,
            disp_x_lst=disp_x_lst,
        )
        figname = os.path.join(figpath, 'spatial_profile.png')
        title = 'Spatial Profile of Flat'
        fig_spatial.suptitle(title)
        fig_spatial.savefig(figname)
        fig_spatial.close()

        # pack 1-d spectra of flat
        flat_spec = []
        for aper, flatspec in sorted(flatspec_lst.items()):
            n = flatspec.size
            row = (
                aper,
                0,
                n,
                np.zeros(n, dtype=np.float64),  # wavelength
                flatspec,  # flux
                np.zeros(n, dtype=np.float32),  # error
                np.zeros(n, dtype=np.float32),  # background
                np.zeros(n, dtype=np.int16),  # mask
            )
            flat_spec.append(row)
        flat_spec = np.array(flat_spec, dtype=spectype)

        # save cross-profiles
        save_crossprofile(profile_filename, disp_x_lst, p1, p2, pstep,
                          profile_lst)

        # pack results and save to fits
        hdu_lst = fits.HDUList([
            fits.PrimaryHDU(flat_data, head),
            fits.ImageHDU(flat_mask),
            fits.ImageHDU(flat_norm),
            fits.ImageHDU(flat_sens),
            fits.BinTableHDU(flat_spec),
        ])
        hdu_lst.writeto(flat_filename, overwrite=True)

    ############################## Extract ThAr ################################

    # get the data shape
    ny, nx = flat_sens.shape

    calib_lst = {}

    # filter ThAr frames
    filter_thar = lambda item: item['object'].lower() == 'thar'

    thar_items = list(filter(filter_thar, logtable))

    for ithar, logitem in enumerate(thar_items):
        # logitem alias
        frameid = logitem['frameid']
        fileid = logitem['fileid']
        objname = logitem['object']
        imgtype = logitem['imgtype']
        exptime = logitem['exptime']
        amp = logitem['amp']

        # prepare message prefix
        logger_prefix = 'FileID: {} - '.format(fileid)
        screen_prefix = '    - '

        fmt_str = 'FileID: {} ({}) OBJECT: {} - wavelength identification'
        message = fmt_str.format(fileid, imgtype, objname)
        logger.info(message)
        print(message)

        fname = '{}.fits'.format(fileid)
        filename = os.path.join(rawpath, fname)
        data, head = fits.getdata(filename, header=True)
        mask = get_mask(data, head)

        # correct overscan for ThAr
        data, card_lst = correct_overscan(data, head, amp)
        for key, value in card_lst:
            head.append((key, value))
        message = 'Overscan corrected.'
        logger.info(logger_prefix + message)
        print(screen_prefix + message)

        # correct bias for ThAr, if has bias
        if bias is None:
            message = 'No bias'
        else:
            data = data - bias
            message = 'Bias corrected. Mean = {:.2f}'.format(bias.mean())
        logger.info(logger_prefix + message)
        print(screen_prefix + message)

        head.append(('HIERARCH GAMSE BACKGROUND CORRECTED', False))

        # extract ThAr spectra
        lower_limit = 7
        upper_limit = 7
        spectra1d = extract_aperset(
            data,
            mask,
            apertureset=aperset,
            lower_limit=lower_limit,
            upper_limit=upper_limit,
        )
        head = aperset.to_fitsheader(head)
        message = '1D spectra extracted for {:d} orders'.format(len(spectra1d))
        logger.info(logger_prefix + message)
        print(screen_prefix + message)

        # pack to a structured array
        spec = []
        for aper, item in sorted(spectra1d.items()):
            flux_sum = item['flux_sum']
            n = flux_sum.size

            # pack to table
            row = (
                aper,
                0,
                n,
                np.zeros(n, dtype=np.float64),  # wavelength
                flux_sum,  # flux
                np.zeros(n, dtype=np.float32),  # error
                np.zeros(n, dtype=np.float32),  # background
                np.zeros(n, dtype=np.int16),  # mask
            )
            spec.append(row)
        spec = np.array(spec, dtype=spectype)

        figname = 'wlcalib_{}.{}'.format(fileid, fig_format)
        wlcalib_fig = os.path.join(figpath, figname)

        section = config['reduce.wlcalib']

        title = '{}.fits'.format(fileid)

        if ithar == 0:
            # this is the first ThAr frame in this observing run
            if section.getboolean('search_database'):
                # find previouse calibration results
                index_file = os.path.join(
                    os.path.dirname(__file__),
                    '../../data/calib/wlcalib_xinglong216hrs.dat')

                message = ('Searching for archive wavelength calibration '
                           'file in "{}"'.format(os.path.basename(index_file)))
                logger.info(logger_prefix + message)
                print(screen_prefix + message)

                ref_spec, ref_calib = select_calib_from_database(
                    index_file, head[statime_key])

                if ref_spec is None or ref_calib is None:

                    message = ('Did not find any archive wavelength '
                               'calibration file')
                    logger.info(logger_prefix + message)
                    print(screen_prefix + message)

                    # if failed, pop up a calibration window and identify
                    # the wavelengths manually
                    calib = wlcalib(
                        spec,
                        figfilename=wlcalib_fig,
                        title=title,
                        linelist=section.get('linelist'),
                        window_size=section.getint('window_size'),
                        xorder=section.getint('xorder'),
                        yorder=section.getint('yorder'),
                        maxiter=section.getint('maxiter'),
                        clipping=section.getfloat('clipping'),
                        q_threshold=section.getfloat('q_threshold'),
                    )
                else:
                    # if success, run recalib
                    # determine the direction
                    message = 'Found archive wavelength calibration file'
                    logger.info(message)
                    print(screen_prefix + message)

                    ref_direction = ref_calib['direction']

                    if direction[1] == '?':
                        aperture_k = None
                    elif direction[1] == ref_direction[1]:
                        aperture_k = 1
                    else:
                        aperture_k = -1

                    if direction[2] == '?':
                        pixel_k = None
                    elif direction[2] == ref_direction[2]:
                        pixel_k = 1
                    else:
                        pixel_k = -1

                    result = find_caliblamp_offset(
                        ref_spec,
                        spec,
                        aperture_k=aperture_k,
                        pixel_k=pixel_k,
                        pixel_range=(-50, 50),
                        mode=mode,
                    )
                    aperture_koffset = (result[0], result[1])
                    pixel_koffset = (result[2], result[3])

                    message = 'Aperture offset = {}; Pixel offset = {}'
                    message = message.format(aperture_koffset, pixel_koffset)
                    logger.info(logger_prefix + message)
                    print(screen_prefix + message)

                    use = section.getboolean('use_prev_fitpar')
                    xorder = (section.getint('xorder'), None)[use]
                    yorder = (section.getint('yorder'), None)[use]
                    maxiter = (section.getint('maxiter'), None)[use]
                    clipping = (section.getfloat('clipping'), None)[use]
                    window_size = (section.getint('window_size'), None)[use]
                    q_threshold = (section.getfloat('q_threshold'), None)[use]

                    calib = recalib(
                        spec,
                        figfilename=wlcalib_fig,
                        title=title,
                        ref_spec=ref_spec,
                        linelist=section.get('linelist'),
                        aperture_koffset=aperture_koffset,
                        pixel_koffset=pixel_koffset,
                        ref_calib=ref_calib,
                        xorder=xorder,
                        yorder=yorder,
                        maxiter=maxiter,
                        clipping=clipping,
                        window_size=window_size,
                        q_threshold=q_threshold,
                        direction=direction,
                    )
            else:
                message = 'No database searching. Identify lines manually'
                logger.info(logger_prefix + message)
                print(screen_prefix + message)

                # do not search the database
                calib = wlcalib(
                    spec,
                    figfilename=wlcalib_fig,
                    title=title,
                    identfilename=section.get('ident_file', None),
                    linelist=section.get('linelist'),
                    window_size=section.getint('window_size'),
                    xorder=section.getint('xorder'),
                    yorder=section.getint('yorder'),
                    maxiter=section.getint('maxiter'),
                    clipping=section.getfloat('clipping'),
                    q_threshold=section.getfloat('q_threshold'),
                )
                message = ('Wavelength calibration finished. '
                           '(k, offset) = ({}, {})'.format(
                               calib['k'], calib['offset']))
                logger.info(logger_prefix + message)

            # then use this thar as reference
            ref_calib = calib
            ref_spec = spec
            message = 'Reference calib and spec are selected'
            logger.info(logger_prefix + message)
        else:
            message = 'Use reference calib and spec'
            logger.info(logger_prefix + message)
            # for other ThArs, no aperture offset
            calib = recalib(
                spec,
                figfilename=wlcalib_fig,
                title=title,
                ref_spec=ref_spec,
                linelist=section.get('linelist'),
                ref_calib=ref_calib,
                aperture_koffset=(1, 0),
                pixel_koffset=(1, 0),
                xorder=ref_calib['xorder'],
                yorder=ref_calib['yorder'],
                maxiter=ref_calib['maxiter'],
                clipping=ref_calib['clipping'],
                window_size=ref_calib['window_size'],
                q_threshold=ref_calib['q_threshold'],
                direction=direction,
            )

        # add more info to calib
        calib['fileid'] = fileid
        calib['date-obs'] = head[statime_key]
        calib['exptime'] = head[exptime_key]
        message = 'Add more info in calib of {}'.format(fileid)
        logger.info(logger_prefix + message)

        # reference the ThAr spectra
        spec, card_lst, identlist = reference_self_wavelength(spec, calib)
        message = 'Wavelength solution added'
        logger.info(logger_prefix + message)

        prefix = 'HIERARCH GAMSE WLCALIB '
        for key, value in card_lst:
            head.append((prefix + key, value))

        hdu_lst = fits.HDUList([
            fits.PrimaryHDU(header=head),
            fits.BinTableHDU(spec),
            fits.BinTableHDU(identlist),
        ])

        # save in midproc path as a wlcalib reference file
        fname = 'wlcalib_{}.fits'.format(fileid)
        filename = os.path.join(midpath, fname)
        hdu_lst.writeto(filename, overwrite=True)
        message = 'Wavelength calibrated spectra written to {}'.format(
            filename)
        logger.info(logger_prefix + message)

        # save in onedspec path
        fname = '{}_{}.fits'.format(fileid, oned_suffix)
        filename = os.path.join(odspath, fname)
        hdu_lst.writeto(filename, overwrite=True)
        message = 'Wavelength calibrated spectra written to {}'.format(
            filename)
        logger.info(logger_prefix + message)

        # pack to calib_lst
        calib_lst[frameid] = calib

    # print fitting summary
    fmt_string = ' [{:3d}] {} - ({:4g} sec) - {:4d}/{:4d} RMS = {:7.5f}'
    section = config['reduce.wlcalib']
    auto_selection = section.getboolean('auto_selection')

    if auto_selection:
        rms_threshold = section.getfloat('rms_threshold', 0.005)
        group_contiguous = section.getboolean('group_contiguous', True)
        time_diff = section.getfloat('time_diff', 120)

        ref_calib_lst = select_calib_auto(
            calib_lst,
            rms_threshold=rms_threshold,
            group_contiguous=group_contiguous,
            time_diff=time_diff,
        )
        ref_fileid_lst = [calib['fileid'] for calib in ref_calib_lst]

        # print ThAr summary and selected calib
        for frameid, calib in sorted(calib_lst.items()):
            string = fmt_string.format(frameid, calib['fileid'],
                                       calib['exptime'], calib['nuse'],
                                       calib['ntot'], calib['std'])
            if calib['fileid'] in ref_fileid_lst:
                string = '\033[91m{} [selected]\033[0m'.format(string)
            print(string)
    else:
        # print the fitting summary
        for frameid, calib in sorted(calib_lst.items()):
            string = fmt_string.format(frameid, calib['fileid'],
                                       calib['exptime'], calib['nuse'],
                                       calib['ntot'], calib['std'])
            print(string)

        promotion = 'Select References: '
        ref_calib_lst = select_calib_manu(calib_lst, promotion=promotion)

    ########### Extract Science Spectrum ##########
    # filter science items in logtable
    #extr_filter = config['reduce.extract'].get('extract',
    #                    'lambda row: row["imgtype"]=="sci"')
    #extr_filter = eval(extr_filter)
    extr_filter = lambda row: row['imgtype'] == 'sci'
    extr_items = list(filter(extr_filter, logtable))

    for logitem in extr_items:

        # logitem alias
        frameid = logitem['frameid']
        fileid = logitem['fileid']
        imgtype = logitem['imgtype']
        objname = logitem['object']
        exptime = logitem['exptime']
        amp = logitem['amp']

        # prepare message prefix
        logger_prefix = 'FileID: {} - '.format(fileid)
        screen_prefix = '    - '

        filename = os.path.join(rawpath, '{}.fits'.format(fileid))

        message = 'FileID: {} ({}) OBJECT: {}'.format(fileid, imgtype, objname)
        logger.info(message)
        print(message)

        # read raw data
        data, head = fits.getdata(filename, header=True)
        mask = get_mask(data, head)

        # correct overscan
        data, card_lst = correct_overscan(data, head, amp)
        for key, value in card_lst:
            head.append((key, value))
        message = 'Overscan corrected.'
        logger.info(logger_prefix + message)
        print(screen_prefix + message)

        # correct bias
        if bias is None:
            message = 'No bias'
        else:
            data = data - bias
            message = 'Bias corrected. Mean = {:.2f}'.format(bias.mean())
        logger.info(logger_prefix + message)
        print(screen_prefix + message)

        # correct flat
        data = data / flat_sens
        message = 'Flat field corrected.'
        logger.info(logger_prefix + message)
        print(screen_prefix + message)

        ny, nx = data.shape
        allx = np.arange(nx)
        # get background lights
        background = get_interorder_background(data, mask, aperset)
        for y in np.arange(ny):
            m = mask[y, :] == 0
            f = intp.InterpolatedUnivariateSpline(allx[m],
                                                  background[y, :][m],
                                                  k=3)
            background[y, :][~m] = f(allx[~m])
        background = median_filter(background, size=(9, 5), mode='nearest')
        background = savitzky_golay_2d(background,
                                       window_length=(21, 101),
                                       order=3,
                                       mode='nearest')

        # plot stray light
        figname = 'bkg2d_{}.{}'.format(fileid, fig_format)
        figfilename = os.path.join(figpath, figname)
        fig_bkg = BackgroundFigure(
            data,
            background,
            title='Background Correction for {}'.format(fileid),
            figname=figfilename,
        )
        fig_bkg.close()

        data = data - background
        message = 'Background corrected. Max = {:.2f}; Mean = {:.2f}'.format(
            background.max(), background.mean())
        logger.info(logger_prefix + message)
        print(screen_prefix + message)

        # extract 1d spectrum
        section = config['reduce.extract']
        method = section.get('method')
        if method == 'optimal':
            result = extract_aperset_optimal(
                data,
                mask,
                background=background,
                apertureset=aperset,
                gain=1.02,
                ron=3.29,
                profilex=profile_x,
                disp_x_lst=disp_x_lst,
                main_disp='x',
                profile_lst=profile_lst,
            )
            flux_opt_lst = result[0]
            flux_err_lst = result[1]
            back_opt_lst = result[2]
            flux_sum_lst = result[3]
            back_sum_lst = result[4]

            # pack spectrum
            spec = []
            for aper in sorted(flux_opt_lst.keys()):
                n = flux_opt_lst[aper].size

                row = (
                    aper,
                    0,
                    n,
                    np.zeros(n, dtype=np.float64),  # wavelength
                    flux_opt_lst[aper],  # flux
                    flux_err_lst[aper],  # error
                    back_opt_lst[aper],  # background
                    np.zeros(n, dtype=np.int16),  # mask
                )
                spec.append(row)
            spec = np.array(spec, dtype=spectype)

        elif method == 'sum':
            lower_limit = section.getfloat('lower_limit')
            upper_limit = section.getfloat('upper_limit')

            # extract 1d spectra of the object
            spectra1d = extract_aperset(
                data,
                mask,
                apertureset=aperset,
                lower_limit=lower_limit,
                upper_limit=upper_limit,
            )
            norder = len(spectra1d)
            message = '1D spectra of {} orders extracted'.format(norder)
            logger.info(logger_prefix + message)
            print(screen_prefix + message)

            # extract 1d spectra for straylight/background light
            background1d = extract_aperset(
                background,
                mask,
                apertureset=aperset,
                lower_limit=lower_limit,
                upper_limit=upper_limit,
            )
            message = '1D straylight of {} orders extracted'.format(
                len(background1d))
            logger.info(logger_prefix + message)
            print(screen_prefix + message)

            prefix = 'HIERARCH GAMSE EXTRACTION '
            head.append((prefix + 'LOWER LIMIT', lower_limit))
            head.append((prefix + 'UPPER LIMIT', upper_limit))

            # pack spectrum
            spec = []
            for aper, item in sorted(spectra1d.items()):
                flux_sum = item['flux_sum']
                n = flux_sum.size
                # background 1d flux
                back_flux = background1d[aper]['flux_sum']

                row = (
                    aper,
                    0,
                    n,
                    np.zeros(n, dtype=np.float64),  # wavelength
                    flux_sum,  # flux
                    np.zeros(n, dtype=np.float32),  # error
                    back_flux,  # background
                    np.zeros(n, dtype=np.int16),  # mask
                )
                spec.append(row)
            spec = np.array(spec, dtype=spectype)

        # wavelength calibration
        weight_lst = get_calib_weight_lst(
            ref_calib_lst,
            obsdate=head[statime_key],
            exptime=head[exptime_key],
        )

        message_lst = ['Wavelength calibration:']
        for i, calib in enumerate(ref_calib_lst):
            string = ' ' * len(screen_prefix)
            string = string + '{} ({:4g} sec) {} weight = {:5.3f}'.format(
                calib['fileid'], calib['exptime'], calib['date-obs'],
                weight_lst[i])
            message_lst.append(string)
        message = os.linesep.join(message_lst)
        logger.info(logger_prefix + message)
        print(screen_prefix + message)

        spec, card_lst = reference_spec_wavelength(spec, ref_calib_lst,
                                                   weight_lst)
        prefix = 'HIERARCH GAMSE WLCALIB '
        for key, value in card_lst:
            head.append((prefix + key, value))

        # pack and save wavelength referenced spectra
        hdu_lst = fits.HDUList([
            fits.PrimaryHDU(header=head),
            fits.BinTableHDU(spec),
        ])
        fname = '{}_{}.fits'.format(fileid, oned_suffix)
        filename = os.path.join(odspath, fname)
        hdu_lst.writeto(filename, overwrite=True)

        message = '1D spectra written to "{}"'.format(filename)
        logger.info(logger_prefix + message)
        print(screen_prefix + message)
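
# A standalone, hypothetical sketch of the inter-order background smoothing used
# above: bad pixels are replaced row by row with a cubic spline, then the map is
# median filtered; the project-specific Savitzky-Golay step is omitted. The data
# below are synthetic.
import numpy as np
from scipy import interpolate as intp
from scipy.ndimage import median_filter

demo_rng = np.random.default_rng(0)
demo_bkg = demo_rng.normal(loc=100.0, scale=2.0, size=(64, 128))
demo_bad = demo_rng.random(demo_bkg.shape) < 0.05      # simulated bad-pixel mask
demo_allx = np.arange(demo_bkg.shape[1])
for demo_y in range(demo_bkg.shape[0]):
    demo_good = ~demo_bad[demo_y, :]
    demo_f = intp.InterpolatedUnivariateSpline(
        demo_allx[demo_good], demo_bkg[demo_y, demo_good], k=3)
    demo_bkg[demo_y, ~demo_good] = demo_f(demo_allx[~demo_good])
demo_bkg = median_filter(demo_bkg, size=(9, 5), mode='nearest')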
Ejemplo n.º 57
0
def vectorizeRaster(infile, outfile, classes, classfile, weight, nodata,
                    smoothing, band, cartoCSS, axonometrize, nosimple,
                    setNoData, nibbleMask, outvar):
    band = int(band)
    src = gdal.Open(infile)
    bandData = src.GetRasterBand(band)
    inarr = bandData.ReadAsArray()

    if (inarr is None) or (len(inarr) == 0):
        gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', 'NO')
        src = gdal.Open(infile)
        bandData = src.GetRasterBand(band)
        inarr = bandData.ReadAsArray()

    oshape = np.shape(inarr)

    if len(src.GetProjectionRef()) > 0:
        new_cs = osr.SpatialReference()
        new_cs.ImportFromEPSG(4326)
        old_cs = osr.SpatialReference()
        old_cs.ImportFromWkt(src.GetProjectionRef())
        transform = osr.CoordinateTransformation(old_cs, new_cs)

    oaff = Affine.from_gdal(*src.GetGeoTransform())

    bbox = src.GetGeoTransform()
    nodata = None

    if type(bandData.GetNoDataValue()) == float:
        nodata = bandData.GetNoDataValue()

    if (type(setNoData) == int or type(setNoData) == float) and hasattr(
            inarr, 'mask'):
        inarr[np.where(inarr.mask == True)] = setNoData
        nodata = True

    nlat, nlon = np.shape(inarr)
    dataY = np.arange(nlat) * bbox[5] + bbox[3]
    dataX = np.arange(nlon) * bbox[1] + bbox[0]

    if len(src.GetProjectionRef()) > 0:
        ul = transform.TransformPoint(min(dataX), max(dataY))
        ll = transform.TransformPoint(min(dataX), min(dataY))
        ur = transform.TransformPoint(max(dataX), max(dataY))
        lr = transform.TransformPoint(max(dataX), min(dataY))
        simplestY1 = (abs(ul[1] - ll[1]) / float(oshape[0]))
        simplestY2 = (abs(ur[1] - lr[1]) / float(oshape[0]))
        simplestX1 = (abs(ur[0] - ul[0]) / float(oshape[1]))
        simplestX2 = (abs(lr[0] - ll[0]) / float(oshape[1]))
        simplest = 2 * max(simplestX1, simplestY1, simplestX2, simplestY2)
    else:
        simplestY = ((max(dataY) - min(dataY)) / float(oshape[0]))
        simplestX = ((max(dataX) - min(dataX)) / float(oshape[1]))
        simplest = 2 * max(simplestX, simplestY)

    if nodata == 'min':
        maskArr = np.zeros(inarr.shape, dtype=bool)
        maskArr[np.where(inarr == inarr.min())] = True
        inarr = np.ma.array(inarr, mask=maskArr)
        del maskArr
    elif type(nodata) == int or type(nodata) == float:
        maskArr = np.zeros(inarr.shape, dtype=bool)
        maskArr[np.where(inarr == nodata)] = True
        inarr[np.where(inarr == nodata)] = np.nan
        inarr = np.ma.array(inarr, mask=maskArr)
        del maskArr
    elif nodata == None or np.isnan(nodata) or nodata:
        maskArr = np.zeros(inarr.shape, dtype=bool)
        inarr = np.ma.array(inarr, mask=maskArr)
        del maskArr
    elif (type(nodata) == int or type(nodata) == float) and hasattr(
            inarr, 'mask'):
        nodata = True

    if nibbleMask:
        inarr.mask = maximum_filter(inarr.mask, size=3)

    if smoothing and smoothing > 1:
        inarr, oaff = zoomSmooth(inarr, smoothing, oaff)
    else:
        smoothing = 1

    with open(classfile, 'r') as ofile:
        classifiers = ofile.read().split(',')
        classRas, breaks = classifyManual(
            inarr,
            np.array(classifiers).astype(inarr.dtype))

    # filtering for speckling
    classRas = median_filter(classRas, size=2)

    # print out cartocss for classes
    if cartoCSS:
        for i in breaks:
            click.echo('[value = ' + str(breaks[i]) +
                       '] { polygon-fill: @class' + str(i) + '}')

    if outfile:
        outputHandler = tools.dataOutput(True)
    else:
        outputHandler = tools.dataOutput()
    #polys = []
    #vals = []
    for i, br in enumerate(breaks):
        if i == 0:
            continue
        tRas = (classRas == i).astype(np.uint8)
        if nodata:
            tRas[np.where(classRas == 0)] = 0

        for feature, shapes in features.shapes(np.asarray(tRas, order='C'),
                                               transform=oaff):
            if shapes == 1:
                featurelist = []
                for c, f in enumerate(feature['coordinates']):
                    if len(src.GetProjectionRef()) > 0:
                        for ix in range(len(f)):
                            px = transform.TransformPoint(f[ix][0], f[ix][1])
                            lst = list()
                            lst.append(px[0])
                            lst.append(px[1])
                            f[ix] = tuple(lst)
                    if len(f) > 3 or c == 0:
                        if axonometrize:
                            f = np.array(f)
                            f[:, 1] += (axonometrize * br)
                        if nosimple:
                            poly = Polygon(f)
                        else:
                            poly = Polygon(f).simplify(simplest /
                                                       float(smoothing),
                                                       preserve_topology=True)
                            if c == 0:
                                poly = polygon.orient(poly, sign=-1.0)
                            else:
                                poly = polygon.orient(poly, sign=1.0)
                            featurelist.append(poly)
                if len(featurelist) != 0:
                    #polys.append(MultiPolygon(featurelist))
                    #vals.append(breaks[br])
                    outputHandler.out({
                        'type':
                        'Feature',
                        'geometry':
                        mapping(MultiPolygon(featurelist)),
                        'properties': {
                            outvar: breaks[br]
                        }
                    })

    #for pa in range(0,len(polys)):
    #    for pb in range(0,len(polys)):
    #        if pa==pb:
    #            continue
    #        if polys[pa].contains(polys[pb]) & (polys[pa].area>polys[pb].area):
    #            try:
    #                polys[pa] = polys[pa].difference(polys[pb])
    #                print polys[pa].area
    #                print '---'
    #                break
    #            except:
    #                a = 1
    #
    #for pc in range(0,len(polys)):
    #    outputHandler.out({
    #        'type': 'Feature',
    #        'geometry': mapping(polys[pc]),
    #        'properties': {
    #            outvar: vals[pc]
    #        }
    #    })
    if outfile:
        with open(outfile, 'w') as ofile:
            ofile.write(
                json.dumps({
                    "type": "FeatureCollection",
                    "features": outputHandler.data
                }))
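
# A standalone, hypothetical sketch of the despeckling step above: a small
# median filter removes isolated misclassified pixels from the class raster
# before it is turned into polygons. The raster below is synthetic.
import numpy as np
from scipy.ndimage import median_filter

demo_classes = np.zeros((20, 20), dtype=np.uint8)
demo_classes[5:15, 5:15] = 2               # a coherent class region
demo_classes[3, 3] = 2                     # an isolated speckle
demo_clean = median_filter(demo_classes, size=2)  # the speckle is removed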
Ejemplo n.º 58
0
def main(argv):
    generate_output = True
    selected_channel = 1  # 0 or 1: 2017-09-25 sample has flipped channels
    input_h5_file = '/groups/mousebrainmicro/mousebrainmicro/users/base/AnnotationData/h5repo/2017-09-25_G-003_Consensus/2017-09-25_G-003_Consensus-carved.h5'
    output_folder = '/groups/mousebrainmicro/mousebrainmicro/users/base/AnnotationData/h5repo/2017-09-25_G-003_Consensus/'

    try:
        opts, args = getopt.getopt(argv, "hi:o:",
                                   ["input_h5_file=", "output_folder="])
    except getopt.GetoptError:
        print('segmentAxon.py -i <input_h5_file> -o <output_folder>')
        sys.exit(2)
    for opt, arg in opts:
        print('opt:', opt, 'arg:', arg)
        if opt == '-h':
            print('segmentAxon.py -i <input_h5_file> -o <output_folder>')
            sys.exit()
        elif opt in ("-i", "--input_h5_file"):
            input_h5_file = arg
            print('SWCFILE   :', input_h5_file)
        elif opt in ("-o", "--output_folder"):
            output_folder = arg
            print('OUTPUT    :', output_folder)

    # swc_name = '2017-11-17_G-017_Seg-1'
    # data_folder = os.path.join('/groups/mousebrainmicro/home/base/CODE/MOUSELIGHT/navigator/data/',swc_name)

    inputfolder, h5_name_w_ext = os.path.split(input_h5_file)
    file_name, _ = os.path.splitext(h5_name_w_ext)

    segmentation_output_h5_file = os.path.join(output_folder,
                                               file_name + '_segmented.h5')
    swc_output_file = os.path.join(output_folder, file_name + '_segmented.swc')

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    # AC_output_tif_file =  os.path.join(data_folder,swc_name+'_AC_cropped_segmented.tif')
    # Frangi_output_tif_file =  os.path.join(data_folder,swc_name+'_Frangi_cropped_segmented.tif')

    # TODO export scale/filt if needed
    # dset_filter_Frangi_magnitude = f_out.create_dataset("/filter/Frangi/magnitude", volume.shape[:3], dtype='f', chunks=f["volume"].chunks[:3], compression="gzip", compression_opts=9)
    # dset_filter_Frangi_scale = f_out.create_dataset("/filter/Frangi/scale", volume.shape[:3], dtype='f', chunks=f["volume"].chunks[:3], compression="gzip", compression_opts=9)
    # for each branch, crop a box, run segmentation based on:
    # 1) frangi vesselness filter
    # 2) active countours
    # 3) stat thresholding: TODO: diffusion is buggy, might be better to switch to a regularized version

    # figure out signal channel
    # pattern_strings = ['\xc2d', '\xa0', '\xe7', '\xc3\ufffdd', '\xc2\xa0', '\xc3\xa7', '\xa0\xa0', '\xc2', '\xe9']
    if selected_channel is None:
        pattern_strings = ['_G-', '_G_']
        pattern_string = '|'.join(pattern_strings)
        pattern = re.compile(pattern_string)
        if re.search(pattern, os.path.split(inputfolder)[1]):
            # green channel
            ch = 0
        else:
            ch = 1
    else:
        ch = selected_channel

    with h5py.File(input_h5_file, "r") as f:
        recon = f["reconstruction"]
        volume = f["volume"]
        output_dims = volume.shape
        if generate_output:
            f_out = h5py.File(segmentation_output_h5_file, "w")
            dset_segmentation_AC = f_out.create_dataset(
                "/segmentation/AC",
                volume.shape[:3],
                dtype='uint8',
                chunks=f["volume"].chunks[:3],
                compression="gzip",
                compression_opts=9)
            dset_segmentation_Frangi = f_out.create_dataset(
                "/segmentation/Frangi",
                volume.shape[:3],
                dtype='uint8',
                chunks=f["volume"].chunks[:3],
                compression="gzip",
                compression_opts=9)
            dset_swc_Frangi = f_out.create_dataset("/reconstruction/sparse",
                                                   data=recon[:],
                                                   dtype='f')

        lookup_data = {}  # keeps track of indices, subscripts and radius
        # sigmas = np.array([0.75, 1.0, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5])
        sigmas = np.array([0.75, 1.0, 1.5, 2, 2.5])
        linkdata = []
        window_size = 3
        radius_list = func.getRadiusIndicies(radius=sigmas)

        for ix, txt in enumerate(recon[:100, :]):
            # ix = np.argmin(np.sum(np.abs(np.array([757.3, 2327.5, 575.0]) - recon[:, 2:5]), axis=1))
            print('{} out of {}'.format(ix, recon.shape[0]))
            start_node = ix  #  recon[ix,0]
            end_node = recon[ix, 6] - 1  # 0 based indexing
            if end_node < 0:
                continue

            start = np.asarray(recon[start_node, 2:5], int)
            end = np.asarray(recon[end_node, 2:5], int)

            # vesselness convolution sigmas
            # padding needs to be at least window_size/2*sigma
            padding = int(np.ceil(np.max(window_size * sigmas / 2)))

            bbox_min_wo = np.min((start, end), axis=0)
            bbox_max_wo = np.max((start, end), axis=0)
            bbox_min = np.min((start, end), axis=0) - padding
            bbox_max = np.max(
                (start, end),
                axis=0) + padding + 1  # add one to make python inclusive

            bbox_size = bbox_max - bbox_min
            start_ = start - bbox_min
            end_ = end - bbox_min
            roi = ((padding), ())

            # crop
            crop = volume[bbox_min[0]:bbox_max[0], bbox_min[1]:bbox_max[1],
                          bbox_min[2]:bbox_max[2]]
            crop[crop == 0] = np.min(crop[
                crop > 0])  # overwrites any missing voxel with patch minima

            ## fix line shifts
            # st = -9;en = 10;shift, shift_float = lnf.findShift(inputim[:,:inputim.shape[1]//2*2,:], st, en, False)
            ##
            # zeros were replaced with the patch minimum above, so log() is safe
            inputim = np.log(np.asarray(crop[:, :, :, ch], float))
            inputim = (inputim - inputim.min()) / (inputim.max() -
                                                   inputim.min())

            ## FRANGI
            filtresponse, scaleresponse = frangi.frangi(
                inputim,
                sigmas,
                window_size=window_size,
                alpha=0.01,
                beta=1.5,
                frangi_c=2 * np.std(inputim),
                black_vessels=False)

            # minimum filter for local noise suppression, tuned to the local center
            scaleresponse = minimum_filter(scaleresponse, 3)

            # sitk.Show(sitk.GetImageFromArray(np.swapaxes(inputim,2,0)))
            # sitk.Show(sitk.GetImageFromArray(np.swapaxes(filtresponse/np.max(filtresponse),2,0)))
            # sitk.Show(sitk.GetImageFromArray(np.swapaxes(scaleresponse,2,0)))

            # cost: high intensity low scale
            # filter out scale response for local radius variations:
            cost_array = scaleresponse / (0.001 + filtresponse)
            cost_array[cost_array > 100] = 100

            # TODO: smooth/prune path: if s[i-1] has +/-1 access to s[i+1], delete s[i], this is to prevent triangular extensions, i.e. |\ or |/
            # shortest path based on cost_array
            path, cost = skig.route_through_array(cost_array,
                                                  start=start_,
                                                  end=end_,
                                                  fully_connected=True)
            path_array = np.asarray(path)
            # sample along path
            path_array_indicies = np.ravel_multi_index(path_array.T,
                                                       cost_array.shape)
            xyz_trace_locations = path_array + bbox_min
            # index ids for each location
            inds = np.ravel_multi_index(xyz_trace_locations.T, output_dims[:3])
            # plt.figure()
            # plt.imshow(np.max(filtresponse**.05,axis=2).T)
            # plt.plot(path_array[:,0],path_array[:,1])

            # branch data based on Frangi
            # frangi radius estimate around tracing
            radius_estimate_around_trace = scaleresponse.flat[
                path_array_indicies]
            # filter radius to smooth
            radius_estimate_around_trace = median_filter(
                radius_estimate_around_trace, 3)
            # radius as 4th column
            branch_data = np.concatenate(
                (xyz_trace_locations, radius_estimate_around_trace[:, None],
                 inds[:, None]),
                axis=1)

            # store recon info
            linkdata.append(branch_data)

            if generate_output:  # paint functions
                # paint hdf5: for each trace location, generate a ball with the given radius and paint into segmentation output
                for xyzr in branch_data:
                    xyz = xyzr[:3]
                    r = xyzr[3]
                    # search the nearest key
                    mask = radius_list[sigmas[np.argmin((sigmas - r)**2)]]
                    paintlocs = np.where(mask) - np.floor(r) + xyz[:, None]
                    for locs in paintlocs.transpose():
                        dset_segmentation_Frangi[tuple(locs)] = 1

                ## segmentation based on active contours
                if 0:
                    segment = seg.volumeSeg(filtresponse,
                                            path_array)  # working
                else:  # use cost function to initialize segmentation
                    segment = seg.volumeSeg(
                        inputim,
                        path_array,
                        cost_array=np.max(cost_array) -
                        cost_array)  # invert cost for positive active contour

                segment.runSeg()
                # TODO: ability to export swc for AC
                radius_estimate_around_trace_AC = segment.estimateRad()

                # sitk.Show(sitk.GetImageFromArray(np.swapaxes(segment.mask_ActiveContour,2,0)))
                # sitk.Show(sitk.GetImageFromArray(np.swapaxes(inputim,2,0)))
                # sitk.Show(sitk.GetImageFromArray(np.swapaxes(filtresponse/np.max(filtresponse),2,0)))

                # patch wise write is buggy, so location wise painting

                paintlocs = bbox_min[:, None] + np.where(
                    segment.mask_ActiveContour)
                for locs in paintlocs.transpose():
                    dset_segmentation_AC[tuple(locs)] = 1

            for ii, ind in enumerate(inds):
                lookup_data[ind] = branch_data[ii, :4]

        swc_data = np.array(func.link2pred(linkdata, lookup_data))
        func.array2swc(swcfile=swc_output_file, swcdata=swc_data)

        if generate_output:
            # dump dense reconstruction
            f_out.create_dataset("/reconstruction/dense",
                                 data=swc_data,
                                 dtype='f')

            # close output data
            f_out.close()
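The core of the per-branch loop above is: crop a padded box around two linked nodes, derive a cost volume from the Frangi vesselness and scale responses, trace the cheapest voxel path between the nodes, and smooth the per-voxel radius estimate with a median filter. The sketch below reproduces only that core, with random placeholder volumes instead of the cropped microscopy data; the shapes, seed and start/end coordinates are arbitrary assumptions.

import numpy as np
from scipy.ndimage import median_filter
from skimage.graph import route_through_array

rng = np.random.default_rng(0)
filtresponse = rng.random((20, 20, 20))                # stand-in Frangi vesselness
scaleresponse = rng.uniform(0.75, 2.5, (20, 20, 20))   # stand-in scale (radius) map

# cost: prefer high vesselness and low scale, clipped as in the example
cost_array = scaleresponse / (0.001 + filtresponse)
cost_array = np.clip(cost_array, None, 100)

# cheapest 26-connected path between two node positions
start_, end_ = (1, 1, 1), (18, 18, 18)
path, cost = route_through_array(cost_array, start=start_, end=end_,
                                 fully_connected=True)
path_array = np.asarray(path)

# radius estimate sampled along the path, smoothed with a small median filter
path_indices = np.ravel_multi_index(path_array.T, cost_array.shape)
radius_along_path = median_filter(scaleresponse.flat[path_indices], 3)
print(path_array.shape, radius_along_path.shape)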
Ejemplo n.º 59
0
def median_filter(X, M=8):
    """Median filter along the first axis of the feature matrix X."""
    # `filters` is assumed to be scipy.ndimage.filters in the original module
    for i in range(X.shape[1]):
        X[:, i] = filters.median_filter(X[:, i], size=M)
    return X
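A self-contained usage sketch of the same column-wise smoothing, calling scipy.ndimage.median_filter directly (the helper above relies on a module-level `filters`, presumably scipy.ndimage.filters); the matrix shape and window size are arbitrary.

import numpy as np
from scipy.ndimage import median_filter as nd_median_filter

X = np.random.rand(100, 12)   # e.g. 100 frames x 12 features
for i in range(X.shape[1]):
    X[:, i] = nd_median_filter(X[:, i], size=8)   # smooth each feature over time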
Ejemplo n.º 60
0
print "Took ", time.time() - t , " secs"

t = time.time()
print(Xdev.shape)
print(Ydev.shape)
print(X.shape)
print(Y.shape)

Y_hat = regression.ann(Xdev, Ydev, X, Y, K=5)
print "Took ", time.time() - t , " secs"
#plt.figure();plt.imshow(Ktest_dev); 
#plt.colorbar()
#plt.show()

# optional step: median filtering to smooth the estimate (the (1, 10) kernel filters along the second axis only)
Y_hat = median_filter(Y_hat, (1, 10))

#plt.figure()
#plt.subplot(211)
#plt.imshow(np.log(Y),
#           origin='lower')
#plt.colorbar()
#plt.title('Original')
#plt.subplot(212)
#plt.imshow(np.log(Y_hat),
#           origin='lower')
#plt.colorbar()
#plt.title('Estimation from Nadaraya-Watson')
#plt.show()

sig_orig = Signal(test_audiofilepath,  normalize=True, mono=True)