Example #1
def create_images_for_labeling(pars):
    import scipy.stats as st
    import os
    import numpy as np
    import calblitz as cb
    from past.utils import old_div  # old_div is used for the frame-rate division below
    from glob import glob

    try:
        f_name = pars
        cdir = os.path.dirname(f_name)

        print('loading')
        m = cb.load(f_name)

        print('corr image')
        img = m.local_correlations(eight_neighbours=True)
        im = cb.movie(img, fr=1)
        im.save(os.path.join(cdir, 'correlation_image.tif'))

        print('std image')
        img = np.std(m, 0)
        im = cb.movie(np.array(img), fr=1)
        im.save(os.path.join(cdir, 'std_projection.tif'))

        m1 = m.resize(1, 1, old_div(1., m.fr))

        print('median image')
        img = np.median(m1, 0)
        im = cb.movie(np.array(img), fr=1)
        im.save(os.path.join(cdir, 'median_projection.tif'))

        print('save BL')
        m1 = m1 - img
        m1.save(os.path.join(cdir, 'MOV_BL.tif'))
        m1 = m1.bilateral_blur_2D()
        m1.save(os.path.join(cdir, 'MOV_BL_BIL.tif'))
        m = np.array(m1)

        print('max image')
        img = np.max(m, 0)
        im = cb.movie(np.array(img), fr=1)
        im.save(os.path.join(cdir, 'max_projection.tif'))

        print('skew image')
        img = st.skew(m, 0)
        im = cb.movie(img, fr=1)
        im.save(os.path.join(cdir, 'skew_projection.tif'))
        del m
        del m1
    except Exception as e:
        return e

    return f_name
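
A minimal usage sketch (not from the original source): create_images_for_labeling takes a single movie path, writes the projection images next to it, and returns either the file name or the raised exception, so it can simply be mapped over a list of files. The paths below are placeholders.

# Hypothetical usage: run the function over a list of movie files (placeholder paths).
movie_files = ['/data/session1/movie.tif', '/data/session2/movie.tif']
results = list(map(create_images_for_labeling, movie_files))
for res in results:
    # the function returns the file name on success and the exception object on failure
    print(res)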
Example #3
def main():
    #%%
    # NOTE: this fragment assumes that loadmat, cb (calblitz), np (numpy), pl (pylab),
    # d1, d2 and the helpers select_roi, compute_optical_flow, extract_components,
    # normalize_components and plot_components are defined in the surrounding module.
    mmat = loadmat('mov_AG051514-01-060914 C.mat')['mov']
    m = cb.movie(mmat.transpose((2, 0, 1)), fr=120)
    mask = select_roi(m[0])
    use_polar_coordinates = True
    if use_polar_coordinates:
        mov_tot = compute_optical_flow(m[:3000], mask)
    else:
        mov_tot = compute_optical_flow(m[:3000], mask, polar_coord=False)

    sp_filt, t_trace, norm_fact = extract_components(mov_tot)

    new_t_trace, coor_1, coor_2 = normalize_components(t_trace, sp_filt)
    plot_components(sp_filt, t_trace)

    #%%
    # compare the mean signal inside one spatial component with its extracted temporal trace
    id_comp = 1
    pl.plot(np.sum(np.reshape(sp_filt[id_comp] > 1, [d1, d2]) * mov_tot[1], axis=(1, 2))
            / np.sum(sp_filt[id_comp] > 1))
    pl.plot(t_trace[id_comp][:, 1])
Example #4
    def pre_process_handle(args):
        import calblitz as cb

        from scipy.ndimage import filters as ft
        import logging

        fil, resize_factors, diameter_bilateral_blur, median_filter_size = args

        name_log = fil[:-4] + '_LOG'
        logger = logging.getLogger(name_log)
        hdlr = logging.FileHandler(name_log)
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
        logger.setLevel(logging.INFO)

        logger.info('START')
        logger.info(fil)
        mov = cb.load(fil, fr=30)
        logger.info('Read file')

        mov = mov.resize(1, 1, resize_factors[0])
        logger.info('Resize')
        mov = mov.bilateral_blur_2D(diameter=diameter_bilateral_blur)
        logger.info('Bilateral')
        mov1 = cb.movie(ft.median_filter(mov, median_filter_size), fr=30)
        logger.info('Median filter')
        #mov1=mov1-np.median(mov1,0)
        mov1 = mov1.resize(1, 1, resize_factors[1])
        logger.info('Resize 2')
        mov1 = mov1 - cb.utils.mode_robust(mov1, 0)
        logger.info('Mode')
        mov = mov.resize(1, 1, resize_factors[1])
        logger.info('Resize')
        #        mov=mov-np.percentile(mov,1)

        mov.save(fil[:-4] + '_compress_.tif')
        logger.info('Save 1')
        mov1.save(fil[:-4] + '_BL_compress_.tif')
        logger.info('Save 2')
        return 1
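
A hedged usage sketch: pre_process_handle expects a single tuple of (file name, resize factors, bilateral-blur diameter, median-filter size) and writes two compressed tif files next to the input. All parameter values below are illustrative, not taken from the original script.

# Illustrative parameters only; tune the temporal resize factors and filter sizes to your data.
resize_factors = (0.2, 0.1)                 # two-stage temporal downsampling
diameter_bilateral_blur = 4
median_filter_size = (2, 1, 1)
args = ('example_movie.tif', resize_factors, diameter_bilateral_blur, median_filter_size)
pre_process_handle(args)                    # writes 'example_movie_compress_.tif' and 'example_movie_BL_compress_.tif'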
Пример #5
0
   def pre_process_handle(args):
#        import calblitz as cb 
        
        from scipy.ndimage import filters as ft
        import logging
        
        fil, resize_factors, diameter_bilateral_blur,median_filter_size=args
        
        name_log=fil[:-4]+ '_LOG'
        logger = logging.getLogger(name_log)
        hdlr = logging.FileHandler(name_log)
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr) 
        logger.setLevel(logging.INFO)

        logger.info('START')
        logger.info(fil)
        mov=cb.load(fil,fr=30)
        logger.info('Read file')

        mov=mov.resize(1,1,resize_factors[0])
        logger.info('Resize')
        mov=mov.bilateral_blur_2D(diameter=diameter_bilateral_blur)
        logger.info('Bilateral')
        mov1=cb.movie(ft.median_filter(mov,median_filter_size),fr=30)
        logger.info('Median filter')
        #mov1=mov1-np.median(mov1,0)
        mov1=mov1.resize(1,1,resize_factors[1])
        logger.info('Resize 2')
        mov1=mov1-cb.utils.mode_robust(mov1,0)
        logger.info('Mode')
        mov=mov.resize(1,1,resize_factors[1])
        logger.info('Resize')
#        mov=mov-np.percentile(mov,1)
        
        mov.save(fil[:-4] + '_compress_.tif')
        logger.info('Save 1')
        mov1.save(fil[:-4] + '_BL_compress_.tif')
        logger.info('Save 2')
        return 1
Example #6
"""
Created on Wed Mar 16 16:31:55 2016
OPTICAL FLOW
@author: agiovann
"""
import os
# move to the data directory (this line was an IPython 'cd' magic in the original)
os.chdir('/home/agiovann/Dropbox (Simons Foundation)/Eyeblink/Datasets/AG051514-01/060914 C/-34 555 183_COND_C_001/-34 555 183_COND_C_X25/tr_X25')
#%%
import ca_source_extraction as cse
import calblitz as cb
import numpy as np
import pylab as pl
from time import time  # used for timing below
#%%
from scipy.io import loadmat
import cv2
mmat=loadmat('mov_AG051514-01-060914 C.mat')['mov']
m=cb.movie(mmat.transpose((2,0,1)),fr=120)
#%%
m.play(backend='opencv',magnification=4)
#%% dense flow: select only the region that you think contains important information. Note: there is a considerable border effect
fig=pl.figure()
pl.imshow(m[0],cmap=pl.cm.gray)
pts = fig.ginput(0, timeout=0)
data = np.zeros(np.shape(m[0]), dtype=np.int32)
pts = np.asarray(pts, dtype=np.int32)
cv2.fillConvexPoly(data, pts, (1,1,1), lineType=cv2.LINE_AA)
#data=np.float32(data)
pl.close()
#%%
#numstdthr=4.
#vect_diff=np.array(np.mean(np.abs(np.diff(m*data,axis=0)),axis=(1,2)))
#thresh=np.mean(vect_diff)+numstdthr*np.std(vect_diff)
t1 = time()
file_res = cb.motion_correct_parallel(fnames[:-3], fr=30, template=None, margins_out=0, max_shift_w=45, max_shift_h=45, backend='ipyparallel', apply_smooth=True)
t2 = time() - t1
print(t2)
#%%   
all_movs=[]
for f in  file_res:
    with np.load(f+'npz') as fl:
        pl.subplot(1,2,1)
        pl.imshow(fl['template'],cmap=pl.cm.gray)
        pl.subplot(1,2,2)
        pl.plot(fl['shifts'])       
        all_movs.append(fl['template'][np.newaxis,:,:])
        pl.pause(2)
        pl.cla()
#%%        
all_movs=cb.movie(np.concatenate(all_movs,axis=0),fr=10)
all_movs,shifts,_,_=all_movs.motion_correct(template=np.median(all_movs,axis=0))
template=np.median(all_movs,axis=0)
np.save('template_total',template)
#pl.imshow(template,cmap=pl.cm.gray,vmax=100)
#%%
file_res=cb.motion_correct_parallel(fnames,40,template=template,margins_out=0,max_shift_w=25, max_shift_h=25,remove_blanks=False)
#%%
for f in  file_res:
    with np.load(f+'npz') as fl:
        pl.subplot(1,2,1)
        pl.imshow(fl['template'],cmap=pl.cm.gray)
        pl.subplot(1,2,2)
        pl.plot(fl['shifts'])       
        pl.pause(0.1)
        pl.cla()
Example #8


"""
@author: agiovann
"""
from glob import glob
import scipy.stats as st
import calblitz as cb
import numpy as np
import pylab as pl
from time import time
#%%
for fl in glob('k36*compress_.tif'):
    print(fl)
    m = cb.load(fl, fr=3)

    img = m.local_correlations(eight_neighbours=True)
    im = cb.movie(img, fr=1)
    im.save(fl[:-4] + 'correlation_image.tif')

    m = np.array(m)

    img = st.skew(m, 0)
    im = cb.movie(img, fr=1)
    im.save(fl[:-4] + 'skew.tif')

    img = st.kurtosis(m, 0)
    im = cb.movie(img, fr=1)
    im.save(fl[:-4] + 'kurtosis.tif')

    img = np.std(m, 0)
    im = cb.movie(img, fr=1)
    im.save(fl[:-4] + 'std.tif')
#%%   
all_movs = []
counter = 0
for f in fls:
    print(f)
    with np.load(f[:-3] + 'npz') as fl:
        pl.subplot(6, 5, counter + 1)
#        pl.imshow(fl['template'],cmap=pl.cm.gray)
#        pl.subplot(1,2,2)
        pl.plot(fl['shifts'])
        counter += 1
        # templates are collected here so that they can be concatenated below
        all_movs.append(fl['template'][np.newaxis, :, :])
#        pl.pause(.1)
#        pl.cla()
#%%        
all_movs=cb.movie(np.concatenate(all_movs,axis=0),fr=10)
all_movs,shifts,corss,_=all_movs.motion_correct(template=all_movs[1],max_shift_w=45, max_shift_h=45)
#%%
template=np.median(all_movs[:],axis=0)
np.save(base_folder+'template_total',template)
pl.imshow(template,cmap=pl.cm.gray,vmax=120)
#%%
all_movs.play(backend='opencv',gain=5,fr=30)
#%%
t1 = time()
file_res=cb.motion_correct_parallel(fnames,30,template=template,margins_out=0,max_shift_w=45, max_shift_h=45,dview=client_[::2],remove_blanks=False)
t2=time()-t1
print(t2)
#%%
fnames=[]
for file in glob(base_folder + 'k31_20160107_MMP_150um_65mW_zoom2p2_000*[0-9].hdf5'):
Example #10
t2 = time() - t1
print(t2)
#%%
all_movs = []
for f in glob.glob(base_folder + '*.hdf5'):
    print(f)
    with np.load(f[:-4] + 'npz') as fl:
        #        pl.subplot(1,2,1)
        #        pl.imshow(fl['template'],cmap=pl.cm.gray)
        #        pl.subplot(1,2,2)
        #        pl.plot(fl['shifts'])
        all_movs.append(fl['template'][np.newaxis, :, :])
#        pl.pause(2)
#        pl.cla()
#%%
all_movs = cb.movie(np.concatenate(all_movs, axis=0), fr=10)
all_movs, shifts, corss, _ = all_movs.motion_correct(template=None,
                                                     max_shift_w=45,
                                                     max_shift_h=45)
#%%
template = np.median(all_movs[:], axis=0)
np.save(base_folder + 'template_total', template)
pl.imshow(template, cmap=pl.cm.gray, vmax=120)
#%%
all_movs.play(backend='opencv', gain=10, fr=10)
#%%
t1 = time()
file_res = cb.motion_correct_parallel(fnames,
                                      30,
                                      template=template,
                                      margins_out=0,
Example #11
    # Specify the number of iterations.
    number_of_iterations = 5000

    # Specify the threshold of the increment
    # in the correlation coefficient between two iterations
    termination_eps = 1e-10

    # Define termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)

    # Run the ECC algorithm. The results are stored in warp_matrix.
    (cc, warp_matrix) = cv2.findTransformECC(im1, im2, warp_matrix, warp_mode, criteria)

    if warp_mode == cv2.MOTION_HOMOGRAPHY:
        # Use warpPerspective for Homography
        im2_aligned = cv2.warpPerspective(im2, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    else:
        # Use warpAffine for Translation, Euclidean and Affine
        im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    
    newm.append(im2_aligned)
    # Show final results
#    cv2.imshow("Image 1", im1)
#    cv2.imshow("Image 2", im2)
#    cv2.imshow("Aligned Image 2", im2_aligned)
#    cv2.waitKey(0)
#    cv2.destroyAllWindows()
#%%
mm=np.concatenate([m,newm],axis=2)
newmn=cb.movie(np.array(mm),fr=m.fr)
pl.imshow(np.median(newmn,axis=0),cmap=pl.cm.gray)
#%%   
all_movs=[]
for f in fls:
    idx = f.find('.')
    with np.load(f[:idx + 1] + 'npz') as fl:
        print(f)
#        pl.subplot(1,2,1)
#        pl.imshow(fl['template'],cmap=pl.cm.gray)
#        pl.subplot(1,2,2)
             
        all_movs.append(fl['template'][np.newaxis,:,:])
#        pl.plot(fl['shifts'])  
#        pl.pause(.001)
#        pl.cla()
#%%
all_movs=cb.movie(np.concatenate(all_movs,axis=0),fr=30)        
all_movs,shifts,_,_=all_movs.motion_correct(template=np.median(all_movs,axis=0))
all_movs.play(backend='opencv',gain=1.,fr=10)
#%%
all_movs=np.array(all_movs)
#%%
num_movies_per_chunk = 50
if num_movies_per_chunk < len(fnames):
    chunks = list(range(0, len(fnames), num_movies_per_chunk))
    chunks[-1] = len(fnames)
else:
    chunks = [0, len(fnames)]
print(chunks)
movie_names=[]

for idx in range(len(chunks)-1):
Example #13
print(t2)

#%%
all_movs = []
for f in fnames:
    print(f)
    with np.load(f[:-3] + 'npz') as fl:
        #        pl.subplot(1,2,1)
        #        pl.imshow(fl['template'],cmap=pl.cm.gray)
        #        pl.subplot(1,2,2)
        #        pl.plot(fl['shifts'])
        all_movs.append(fl['template'][np.newaxis, :, :])
#        pl.pause(.1)
#        pl.cla()
#%%
all_movs = cb.movie(np.concatenate(all_movs, axis=0), fr=10)
all_movs, shifts, corss, _ = all_movs.motion_correct(template=all_movs[1],
                                                     max_shift_w=45,
                                                     max_shift_h=45)
#%%
template = np.median(all_movs[:], axis=0)
np.save(base_folder + 'template_total', template)
pl.imshow(template, cmap=pl.cm.gray, vmax=120)
#%%
all_movs.play(backend='opencv', gain=5, fr=30)
#%%
t1 = time()
file_res = cb.motion_correct_parallel(fnames,
                                      30,
                                      template=template,
                                      margins_out=0,
Example #14
masks_tmp = []
for mask in _masks:
    numPixels = np.sum(np.array(mask))
    if (numPixels > minPixels and numPixels < maxPixels):
        print(numPixels)
        masks_tmp.append(mask > 0)

masks_tmp = np.asarray(masks_tmp, dtype=np.float16)
all_masksForPlot_tmp = [kk * (ii + 1) * 1.0 for ii, kk in enumerate(masks_tmp)]
print(len(all_masksForPlot_tmp))


#%%
# reshape dendrites if required (if the movie was resized)
if fx != 1 or fy != 1:
    mdend = cb.movie(np.array(masks_tmp, dtype=np.float32), fr=1)
    mdend = mdend.resize(fx=1 / fx, fy=1 / fy)
    all_masks = mdend
else:
    all_masks = masks_tmp

all_masksForPlot = [kk * (ii + 1) * 1.0 for ii, kk in enumerate(all_masks)]


#%%
mask_show = np.max(np.asarray(all_masksForPlot_tmp, dtype=np.float16), axis=0)
#loc_corrs=m.local_correlations(eight_neighbours=True)
#pl.subplot(2,1,1)
pl.imshow(loc_corrs,cmap=pl.cm.gray,vmin=0.5,vmax=1)
pl.imshow(mask_show,alpha=.3,vmin=0)
Yr = np.reshape(Yr, (d1 * d2, T), order='F')
# np.save('Y',Y)
np.save('Yr', Yr)
# Y=np.load('Y.npy',mmap_mode='r')
Yr = np.load('Yr.npy', mmap_mode='r')
Y = np.reshape(Yr, (d1, d2, T), order='F')
Cn = cse.utilities.local_correlations(Y)
# n_pixels_per_process=d1*d2/n_processes # how to subdivide the work among processes

pl.imshow(Cn, cmap=pl.cm.gray)

#%% USING FILTERS TO INCREASE SNR
N = 0  # Y.shape[-1]
N1 = 30000

m = cb.movie(np.transpose(np.array(Y[:, :, N:N1]), [2, 0, 1]), fr=30)
# denoise using PCA
# m=m.IPCA_denoise(components=100,batch=10000)

# denoise using median filter
#m=cb.movie(scipy.ndimage.median_filter(m, size=(3,2,2), mode='nearest'),fr=30)

# denoise using bilateral filters
# m=m.bilateral_blur_2D(diameter=10,sigmaColor=10000,sigmaSpace=0)

# denoise using percentile filter
#m=cb.movie(scipy.ndimage.percentile_filter(m, 90, size=(3,2,2), mode='nearest'),fr=30)

# denoise using gaussian filter: USE THIS!!!
m = cb.movie(scipy.ndimage.gaussian_filter(
    m, sigma=(.5, .5, .5), mode='nearest', truncate=2), fr=30)
Example #16
"""
Created on Mon Jul 11 10:09:09 2016

@author: agiovann
"""
from __future__ import print_function
from glob import glob
import scipy.stats as st
import calblitz as cb
import numpy as np
#%%
for fl in glob('k36*compress_.tif'):
    print(fl)
    m = cb.load(fl, fr=3)

    img = m.local_correlations(eight_neighbours=True)
    im = cb.movie(img, fr=1)
    im.save(fl[:-4] + 'correlation_image.tif')

    m = np.array(m)

    img = st.skew(m, 0)
    im = cb.movie(img, fr=1)
    im.save(fl[:-4] + 'skew.tif')

    img = st.kurtosis(m, 0)
    im = cb.movie(img, fr=1)
    im.save(fl[:-4] + 'kurtosis.tif')

    img = np.std(m, 0)
    im = cb.movie(img, fr=1)
    im.save(fl[:-4] + 'std.tif')
Example #17
def save_memmap(filenames,
                base_name='Yr',
                resize_fact=(1, 1, 1),
                remove_init=0,
                idx_xy=None):
    """ Saves efficiently a list of tif files into a memory mappable file
    Parameters
    ----------
        filenames: list
            list of tif files
        base_name: str
            the base yused to build the file name. IT MUST NOT CONTAIN "_"    
        resize_fact: tuple
            x,y, and z downampling factors (0.5 means downsampled by a factor 2) 
        remove_init: int
            number iof frames to remove at the begining of each tif file (used for resonant scanning images if laser in rutned on trial by trial)
        idx_xy: tuple size 2
            for selecting slices of the original FOV, for instance idx_xy=(slice(150,350,None),slice(150,350,None))

    Return
    -------
        fname_new: the name of the mapped file, the format is such that the name will contain the frame dimensions and the number of f

    """
    order = 'F'
    Ttot = 0
    for idx, f in enumerate(filenames):
        print(f)
        if os.path.splitext(f)[-1] == '.hdf5':
            import calblitz as cb
            if idx_xy is None:
                Yr = np.array(cb.load(f))[remove_init:]
            else:
                Yr = np.array(cb.load(f))[remove_init:, idx_xy[0], idx_xy[1]]
        else:
            if idx_xy is None:
                Yr = imread(f)[remove_init:]
            else:
                Yr = imread(f)[remove_init:, idx_xy[0], idx_xy[1]]

        fx, fy, fz = resize_fact
        if fx != 1 or fy != 1 or fz != 1:
            try:
                import calblitz as cb
                Yr = cb.movie(Yr, fr=1)
                Yr = Yr.resize(fx=fx, fy=fy, fz=fz)
            except:
                print(
                    'You need to install the CalBlitz package to resize the movie'
                )
                raise

        [T, d1, d2] = Yr.shape
        Yr = np.transpose(Yr, (1, 2, 0))
        Yr = np.reshape(Yr, (d1 * d2, T), order=order)

        if idx == 0:
            fname_tot = base_name + '_d1_' + str(d1) + '_d2_' + str(
                d2) + '_order_' + str(order)
            big_mov = np.memmap(fname_tot,
                                mode='w+',
                                dtype=np.float32,
                                shape=(d1 * d2, T),
                                order=order)
        else:
            big_mov = np.memmap(fname_tot,
                                dtype=np.float32,
                                mode='r+',
                                shape=(d1 * d2, Ttot + T),
                                order=order)
        #    np.save(fname[:-3]+'npy',np.asarray(Yr))

        big_mov[:, Ttot:Ttot + T] = np.asarray(Yr, dtype=np.float32)
        big_mov.flush()
        Ttot = Ttot + T

    fname_new = fname_tot + '_frames_' + str(Ttot) + '_.mmap'
    os.rename(fname_tot, fname_new)

    return fname_new
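
A minimal usage sketch under the constraints stated in the docstring (the tif names are placeholders; the base name must not contain "_"):

# Hypothetical call: concatenate two tif movies into one F-ordered memory-mapped file,
# dropping the first 10 frames of each and cropping the FOV to a 200x200 window.
import numpy as np
fname_new = save_memmap(['mov1.tif', 'mov2.tif'],
                        base_name='Yr',
                        resize_fact=(1, 1, 1),
                        remove_init=10,
                        idx_xy=(slice(150, 350, None), slice(150, 350, None)))
# the frame dimensions and total frame count needed to reshape the data are encoded in fname_new
Yr = np.memmap(fname_new, dtype=np.float32, mode='r')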
Example #18
def save_memmap(filenames, base_name='Yr', resize_fact=(1, 1, 1), remove_init=0, idx_xy=None):
    """ Efficiently saves a list of tif files into a memory-mappable file
    Parameters
    ----------
        filenames: list
            list of tif files
        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"
        resize_fact: tuple
            x, y, and z downsampling factors (0.5 means downsampled by a factor 2)
        remove_init: int
            number of frames to remove at the beginning of each tif file (used for resonant scanning images if the laser is turned on trial by trial)
        idx_xy: tuple size 2
            for selecting slices of the original FOV, for instance idx_xy=(slice(150,350,None),slice(150,350,None))

    Return
    -------
        fname_new: the name of the mapped file; the format is such that the name contains the frame dimensions and the number of frames

    """
    order = 'F'
    Ttot = 0
    for idx, f in enumerate(filenames):
        print(f)
        if os.path.splitext(f)[-1] == '.hdf5':
            import calblitz as cb
            if idx_xy is None:
                Yr = np.array(cb.load(f))[remove_init:]
            else:
                Yr = np.array(cb.load(f))[remove_init:, idx_xy[0], idx_xy[1]]
        else:
            if idx_xy is None:
                Yr = imread(f)[remove_init:]
            else:
                Yr = imread(f)[remove_init:, idx_xy[0], idx_xy[1]]

        fx, fy, fz = resize_fact
        if fx != 1 or fy != 1 or fz != 1:
            try:
                import calblitz as cb
                Yr = cb.movie(Yr, fr=1)
                Yr = Yr.resize(fx=fx, fy=fy, fz=fz)
            except:
                print('You need to install the CalBlitz package to resize the movie')
                raise

        [T, d1, d2] = Yr.shape
        Yr = np.transpose(Yr, (1, 2, 0))
        Yr = np.reshape(Yr, (d1 * d2, T), order=order)

        if idx == 0:
            fname_tot = base_name + '_d1_' + str(d1) + '_d2_' + str(d2) + '_order_' + str(order)
            big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32, shape=(d1 * d2, T), order=order)
        else:
            big_mov = np.memmap(fname_tot, dtype=np.float32, mode='r+', shape=(d1 * d2, Ttot + T), order=order)
        #    np.save(fname[:-3]+'npy',np.asarray(Yr))

        # a tiny offset is added so that no stored value is exactly zero
        big_mov[:, Ttot:Ttot + T] = np.asarray(Yr, dtype=np.float32) + 1e-10
        big_mov.flush()
        Ttot = Ttot + T

    fname_new = fname_tot + '_frames_' + str(Ttot) + '_.mmap'
    os.rename(fname_tot, fname_new)

    return fname_new
Example #19
def get_behavior_traces(fname,
                        t0,
                        t1,
                        freq,
                        ISI,
                        draw_rois=False,
                        plot_traces=False,
                        mov_filt_1d=True,
                        window_hp=201,
                        window_lp=3,
                        interpolate=True,
                        EXPECTED_ISI=.25):
    """
    From hdf5 movies extract eyelid closure and wheel movement
    
    
    Parameters
    ----------
    fname: str    
        file name of the hdf5 file
        
    t0,t1: float. 
        Times of beginning and end of trials (in general 0 and 8 for our dataset) to build the absolute time vector
    
    freq: float
        frequency used to build the final time vector    
        
    ISI: float
        inter-stimulus interval
        
    draw_rois: bool
        whether to manually draw the eyelid contour

    plot_traces: bool
        whether to plot the traces during extraction        
       
    mov_filt_1d: bool 
        whether to filter the movie after extracting the average or ROIs. The alternative is a 3D filter that can be very computationally expensive
    
    window_lp, window_hp: ints
        number of frames to be used to median filter the data. It is needed because of the light IR artifact coming out of the eye
        
    Returns
    -------
    res: dict
        dictionary with fields 
            'eyelid': eyelid trace
            'wheel': wheel trace
            'time': absolute time vector
            'trials': corresponding indexes of the trials
            'trial_info': for each trial it returns start trial, end trial, time CS, time US, trial type  (CS:0 US:1 CS+US:2)
            'idx_CS_US': idx trial CS US
            'idx_US': idx trial US
            'idx_CS': idx trial CS 
    """
    CS_ALONE = 0
    US_ALONE = 1
    CS_US = 2
    meta_inf = fname[:-7] + 'data.h5'

    time_abs = np.linspace(t0, t1, freq * (t1 - t0))

    T = len(time_abs)
    t_us = 0
    t_cs = 0
    n_samples_ISI = np.int(ISI * freq)
    t_uss = []
    ISIs = []
    eye_traces = []
    wheel_traces = []
    trial_info = []
    tims = []
    with h5py.File(fname) as f:

        with h5py.File(meta_inf) as dt:

            rois = np.asarray(dt['roi'], np.float32)

            trials = list(f.keys())

            trials.sort(key=lambda x: np.int(x.replace('trial_', '')))

            trials_idx = [np.int(x.replace('trial_', '')) - 1 for x in trials]

            trials_idx_ = []

            for tr, idx_tr in zip(trials[:], trials_idx[:]):
                if plot_traces:
                    pl.cla()

                print(tr)

                trial = f[tr]

                mov = np.asarray(trial['mov'])

                if draw_rois:

                    pl.imshow(np.mean(mov, 0))
                    pl.xlabel('Draw eye')
                    pts = pl.ginput(-1)

                    pts = np.asarray(pts, dtype=np.int32)

                    data = np.zeros(np.shape(mov)[1:], dtype=np.int32)
                    #        if CV_VERSION == 2:
                    #lt = cv2.CV_AA
                    #        elif CV_VERSION == 3:
                    lt = cv2.LINE_AA

                    cv2.fillConvexPoly(data, pts, (1, 1, 1), lineType=lt)

                    rois[0] = data

                    pl.close()

                    pl.imshow(np.mean(mov, 0))
                    pl.xlabel('Draw wheel')
                    pts = pl.ginput(-1)

                    pts = np.asarray(pts, dtype=np.int32)

                    data = np.zeros(np.shape(mov)[1:], dtype=np.int32)
                    #        if CV_VERSION == 2:
                    #lt = cv2.CV_AA
                    #        elif CV_VERSION == 3:
                    lt = cv2.LINE_AA

                    cv2.fillConvexPoly(data, pts, (1, 1, 1), lineType=lt)

                    rois[1] = data

                    pl.close()
    #            eye_trace=np.mean(mov*rois[0],axis=(1,2))
    #            mov_trace=np.mean((np.diff(np.asarray(mov,dtype=np.float32),axis=0)**2)*rois[1],axis=(1,2))
                mov = np.transpose(mov, [0, 2, 1])

                mov = mov[:, :, ::-1]

                if mov.shape[0] > 0:

                    ts = np.array(trial['ts'])

                    if np.size(ts) > 0:

                        assert np.std(
                            np.diff(ts)
                        ) < 0.005, 'Time stamps of behaviour are unreliable'

                        if interpolate:

                            new_ts = np.linspace(0, ts[-1, 0] - ts[0, 0],
                                                 np.shape(mov)[0])

                            if dt['trials'][idx_tr, -1] == US_ALONE:

                                t_us = np.maximum(
                                    t_us, dt['trials'][idx_tr, 3] -
                                    dt['trials'][idx_tr, 0])

                                mmm = mov[:n_samples_ISI].copy()

                                mov = mov[:-n_samples_ISI]

                                mov = np.concatenate([mmm, mov])

                            elif dt['trials'][idx_tr, -1] == CS_US:

                                t_cs = np.maximum(
                                    t_cs, dt['trials'][idx_tr, 2] -
                                    dt['trials'][idx_tr, 0])

                                t_us = np.maximum(
                                    t_us, dt['trials'][idx_tr, 3] -
                                    dt['trials'][idx_tr, 0])

                                t_uss.append(t_us)

                                ISI = t_us - t_cs

                                ISIs.append(ISI)

                                n_samples_ISI = np.int(ISI * freq)

                            else:

                                t_cs = np.maximum(
                                    t_cs, dt['trials'][idx_tr, 2] -
                                    dt['trials'][idx_tr, 0])

                            new_ts = new_ts

                            tims.append(new_ts)

                        else:

                            start, end, t_CS, t_US = dt['trials'][
                                idx_tr, :-1] - dt['trials'][idx_tr, 0]

                            f_rate = np.median(np.diff(ts[:, 0]))
                            ISI = t_US - t_CS
                            idx_US = np.int(t_US / f_rate)
                            idx_CS = np.int(t_CS / f_rate)
                            fr_before_US = np.int((t_US - start - .1) / f_rate)
                            fr_after_US = np.int((end - .1 - t_US) / f_rate)
                            idx_abs = np.arange(-fr_before_US, fr_after_US)
                            time_abs = idx_abs * f_rate

                            assert np.abs(ISI - EXPECTED_ISI) < .01, str(
                                np.abs(ISI - EXPECTED_ISI)
                            ) + ': the distance from CS to US is different from what was expected'

#                            trig_US=
#                            new_ts=

                    mov_e = cb.movie(mov * rois[0][::-1].T,
                                     fr=1 / np.mean(np.diff(new_ts)))
                    mov_w = cb.movie(mov * rois[1][::-1].T,
                                     fr=1 / np.mean(np.diff(new_ts)))

                    x_max_w, y_max_w = np.max(np.nonzero(np.max(mov_w, 0)), 1)
                    x_min_w, y_min_w = np.min(np.nonzero(np.max(mov_w, 0)), 1)

                    x_max_e, y_max_e = np.max(np.nonzero(np.max(mov_e, 0)), 1)
                    x_min_e, y_min_e = np.min(np.nonzero(np.max(mov_e, 0)), 1)

                    mov_e = mov_e[:, x_min_e:x_max_e, y_min_e:y_max_e]
                    mov_w = mov_w[:, x_min_w:x_max_w, y_min_w:y_max_w]

                    #                    mpart=mov[:20].copy()
                    #                    md=cse.utilities.mode_robust(mpart.flatten())
                    #                    N=np.sum(mpart<=md)
                    #                    mpart[mpart>md]=md
                    #                    mpart[mpart==0]=md
                    #                    mpart=mpart-md
                    #                    std=np.sqrt(np.sum(mpart**2)/N)
                    #                    thr=md+10*std
                    #
                    #                    thr=np.minimum(255,thr)
                    #                    return mov
                    if mov_filt_1d:

                        mov_e = np.mean(mov_e, axis=(1, 2))
                        window_hp_ = window_hp
                        window_lp_ = window_lp
                        if plot_traces:
                            pl.plot((mov_e - np.mean(mov_e)) /
                                    (np.max(mov_e) - np.min(mov_e)))

                    else:

                        window_hp_ = (window_hp, 1, 1)
                        window_lp_ = (window_lp, 1, 1)

                    bl = signal.medfilt(mov_e, window_hp_)
                    mov_e = signal.medfilt(mov_e - bl, window_lp_)

                    if mov_filt_1d:

                        eye_ = np.atleast_2d(mov_e)

                    else:

                        eye_ = np.atleast_2d(np.mean(mov_e, axis=(1, 2)))

                    wheel_ = np.concatenate([
                        np.atleast_1d(0),
                        np.nanmean(np.diff(mov_w, axis=0)**2, axis=(1, 2))
                    ])

                    if np.abs(new_ts[-1] - time_abs[-1]) > 1:
                        raise Exception(
                            'Time duration is significantly larger or smaller than reference time'
                        )

                    wheel_ = np.squeeze(wheel_)
                    eye_ = np.squeeze(eye_)

                    f1 = scipy.interpolate.interp1d(new_ts,
                                                    eye_,
                                                    bounds_error=False,
                                                    kind='linear')
                    eye_ = np.array(f1(time_abs))

                    f1 = scipy.interpolate.interp1d(new_ts,
                                                    wheel_,
                                                    bounds_error=False,
                                                    kind='linear')
                    wheel_ = np.array(f1(time_abs))

                    if plot_traces:
                        pl.plot((eye_) / (np.nanmax(eye_) - np.nanmin(eye_)),
                                'r')
                        pl.plot(
                            (wheel_ - np.nanmin(wheel_)) / np.nanmax(wheel_),
                            'k')
                        pl.pause(.01)

                    trials_idx_.append(idx_tr)

                    eye_traces.append(eye_)
                    wheel_traces.append(wheel_)

                    trial_info.append(dt['trials'][idx_tr, :])

            res = dict()

            res['eyelid'] = eye_traces
            res['wheel'] = wheel_traces
            res['time'] = time_abs - np.median(t_uss)
            res['trials'] = trials_idx_
            res['trial_info'] = trial_info
            res['idx_CS_US'] = np.where(
                list(map(int,
                         np.array(trial_info)[:, -1] == CS_US)))[0]
            res['idx_US'] = np.where(
                list(map(int,
                         np.array(trial_info)[:, -1] == US_ALONE)))[0]
            res['idx_CS'] = np.where(
                list(map(int,
                         np.array(trial_info)[:, -1] == CS_ALONE)))[0]

            return res
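
A hedged usage sketch for get_behavior_traces; the file name is a placeholder and the timing values mirror the defaults described in the docstring.

# Hypothetical call: extract eyelid and wheel traces from one behaviour hdf5 file.
res = get_behavior_traces('placeholder_trials.h5',
                          t0=0, t1=8,            # trial start/end used to build the time vector
                          freq=30,               # sampling frequency of the rebuilt time vector
                          ISI=.25,               # inter-stimulus interval
                          draw_rois=False,
                          plot_traces=False)
eyelid_traces = res['eyelid']                    # one trace per trial
wheel_traces = res['wheel']
cs_us_trials = res['idx_CS_US']                  # indices of paired CS+US trials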
Example #20
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                number_of_iterations, termination_eps)

    # Run the ECC algorithm. The results are stored in warp_matrix.
    (cc, warp_matrix) = cv2.findTransformECC(im1, im2, warp_matrix, warp_mode,
                                             criteria)

    if warp_mode == cv2.MOTION_HOMOGRAPHY:
        # Use warpPerspective for Homography
        im2_aligned = cv2.warpPerspective(im2,
                                          warp_matrix, (sz[1], sz[0]),
                                          flags=cv2.INTER_LINEAR +
                                          cv2.WARP_INVERSE_MAP)
    else:
        # Use warpAffine for Translation, Euclidean and Affine
        im2_aligned = cv2.warpAffine(im2,
                                     warp_matrix, (sz[1], sz[0]),
                                     flags=cv2.INTER_LINEAR +
                                     cv2.WARP_INVERSE_MAP)

    newm.append(im2_aligned)
    # Show final results
#    cv2.imshow("Image 1", im1)
#    cv2.imshow("Image 2", im2)
#    cv2.imshow("Aligned Image 2", im2_aligned)
#    cv2.waitKey(0)
#    cv2.destroyAllWindows()
#%%
mm = np.concatenate([m, newm], axis=2)
newmn = cb.movie(np.array(mm), fr=m.fr)
pl.imshow(np.median(newmn, axis=0), cmap=pl.cm.gray)
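
The ECC fragments above assume that im1, im2, sz, warp_mode and warp_matrix already exist; a minimal initialization sketch (grayscale float32 frames, translation-only motion model assumed) might look like this:

# Hypothetical setup for the ECC alignment fragment above.
import cv2
import numpy as np

warp_mode = cv2.MOTION_TRANSLATION              # or MOTION_EUCLIDEAN / MOTION_AFFINE / MOTION_HOMOGRAPHY
if warp_mode == cv2.MOTION_HOMOGRAPHY:
    warp_matrix = np.eye(3, 3, dtype=np.float32)
else:
    warp_matrix = np.eye(2, 3, dtype=np.float32)
im1 = np.float32(m[0])                          # template frame from the movie m
im2 = np.float32(m[1])                          # frame to be aligned to the template
sz = im1.shape
newm = []                                       # collects the aligned frames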
Example #21
movs = []
fr = 30
start_time = 0
templates = []
for tif_file in tif_files:
    print(tif_file)
    m = cb.load(tif_file, fr=30, start_time=0, subindices=range(0, 1500, 20))
    min_val_add = np.percentile(m, .01)
    m = m - min_val_add
    movs.append(m)
    templ = np.nanmedian(m, axis=0)
    m, template, shifts, xcorrs = m.motion_correct(max_shift_w=5, max_shift_h=5, show_movie=False, template=templ, method='opencv')
    templates.append(np.median(m, axis=0))

all_movs = cb.concatenate(movs)
m = cb.movie(np.array(templates), fr=1)
m = m.motion_correct(template=m[0])[0]
template = np.median(m, axis=0)
cb.matrixMontage(m, cmap=pl.cm.gray, vmin=0, vmax=1000)

#%%
all_shifts = []
movs = []
for tif_file in tif_files:
    print(tif_file)
    m = cb.load(tif_file, fr=30, start_time=0)
    min_val_add = np.percentile(m, .01)
    m = m - min_val_add
    m, _, shifts, _ = m.motion_correct(template=template, method='opencv')
    movs.append(m)
    all_shifts.append(shifts)
Example #22
import glob
import h5py
import re
import numpy as np
import calblitz as cb
#%% Sue Ann create data for labeling
diameter_bilateral_blur = 4
median_filter_size = (2, 1, 1)
from scipy.ndimage import filters as ft
res=[]
with open('file_list.txt') as f:
    for ln in f:
        ln1=ln[:-1]
        print(ln1)
        with h5py.File(ln1) as hh:
            print(hh.keys())
            mov=np.array(hh['binnedF'],dtype=np.float32)
            mov=cb.movie(mov,fr=3)
            mov=mov.bilateral_blur_2D(diameter=diameter_bilateral_blur)
            mov1=cb.movie(ft.median_filter(mov,median_filter_size),fr=3)
            #mov1=mov1-np.median(mov1,0)
            mov1=mov1.resize(1,1,.3)
            mov1=mov1-cb.utils.mode_robust(mov1,0)
            mov=mov.resize(1,1,.3)
    #        mov=mov-np.percentile(mov,1)
            
            mov.save(ln1[:-3] + '_compress_.tif')
            mov1.save(ln1[:-3] + '_BL_compress_.tif')
            
            res.append(ln1[:-3] + '_compress_.tif')
            res.append(ln1[:-3] + '_BL_compress_.tif')
#%% extract correlation image