#%% Process files sequentially in case of failure.
# Disabled block: collect only the files that do not yet have a matching
# .hdf5 motion-correction result next to them.
if 0:
    fnames1 = []
    for f in fnames:
        # f[:-3] strips the 3-char extension ('tif'); check for the hdf5 twin
        if not os.path.isfile(f[:-3] + 'hdf5'):
            print(1)
            fnames1.append(f)
#%% Motion correct (timed).
t1 = time()
file_res = cb.motion_correct_parallel(fnames1, fr=30, template=None,
                                      margins_out=0, max_shift_w=45,
                                      max_shift_h=45, dview=None,
                                      apply_smooth=True)
t2 = time() - t1
print(t2)
#%% LOGIN TO MASTER NODE
# TYPE salloc -n n_nodes --exclusive
# source activate environment_name
#%% Start the ipyparallel cluster through SLURM and connect a client.
slurm_script = '/mnt/xfs1/home/agiovann/SOFTWARE/Constrained_NMF/SLURM/slurmStart.sh'
cse.utilities.start_server(slurm_script=slurm_script)
# n_processes = 27  # np.maximum(psutil.cpu_count() - 2, 1)  # ~cores minus 1
pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
client_ = Client(ipython_dir=pdir, profile=profile)
#%% Select input files.
# fnames.sort()
fnames = ['file_name.tif']  # you have to eliminate the first element, it cannot be loaded!
print(fnames)
#%% Load the whole chain of movies as a single movie object.
m = cb.load_movie_chain(fnames, fr=30)
#%% Visual sanity check of the raw movie.
m.play(backend='opencv', gain=3., fr=50)
#%% Motion correct in memory (no hdf5 output).
apply_smooth = False  # set to True if SNR too low. Slower but might work better.
m_mc = cb.motion_correct_parallel([m], fr=30, template=None, margins_out=0,
                                  max_shift_w=25, max_shift_h=25,
                                  remove_blanks=True,
                                  apply_smooth=apply_smooth, dview=None,
                                  save_hdf5=False)
m_mc = m_mc[0]
#%% Play the corrected movie, temporally downsampled 5x.
m_mc.resize(1, 1, .2).play(backend='opencv', gain=5., fr=10)
#%% Save the corrected movie.
m_mc.save('all_mov.hdf5')
#%% In what follows there is some redundancy only due to the fact that this
# is meant to run in parallel for many datasets; it can be done much faster.
# idx_x = slice(12, 500, None)
# idx_y = slice(12, 500, None)
# idx_xy = (idx_x, idx_y)
final_frate = 30
downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
# Tail of a plotting loop whose header lies outside this chunk: show the
# current template with precomputed contrast limits (lq/hq percentiles —
# TODO confirm against the loop header).
pl.imshow(template, cmap='gray', vmin=lq, vmax=hq)
pl.pause(.1)
counter += 1
pl.title(img.split('/')[-2])
#%% Process files sequentially in case of failure.
# Disabled block: keep only files without an existing .hdf5 result.
if 0:
    fnames1 = []
    for f in fnames:
        if not os.path.isfile(f[:-3] + 'hdf5'):
            print(1)
            fnames1.append(f)
#%% Motion correct (timed).
t1 = time()
file_res = cb.motion_correct_parallel(fnames1, fr=30, template=None,
                                      margins_out=0, max_shift_w=45,
                                      max_shift_h=45, dview=None,
                                      apply_smooth=True)
t2 = time() - t1
print(t2)
#%% LOGIN TO MASTER NODE
# TYPE salloc -n n_nodes --exclusive
# source activate environment_name
#%% Start the parallel backend: ipyparallel via SLURM, or a local server.
backend = 'local'
if backend == 'slurm':
    slurm_script = '/mnt/xfs1/home/agiovann/SOFTWARE/Constrained_NMF/SLURM/slurmStart.sh'
    cse.utilities.start_server(slurm_script=slurm_script)
    # n_processes = 27  # np.maximum(psutil.cpu_count() - 2, 1)  # ~cores minus 1
    pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
    client_ = Client(ipython_dir=pdir, profile=profile)
else:
    # Restart the local ipyparallel server with n_processes workers.
    cse.utilities.stop_server()
    cse.utilities.start_server(n_processes)
#%%
# low_SNR = False
# if low_SNR:
#     N = 1000
#     mn1 = m.copy().bilateral_blur_2D(diameter=5, sigmaColor=10000, sigmaSpace=0)
#     mn1, shifts, xcorrs, template = mn1.motion_correct()
#     mn2 = mn1.apply_shifts(shifts)
#     # mn1 = cb.movie(np.transpose(np.array(Y_n), [2, 0, 1]), fr=30)
#     mn = cb.concatenate([mn1, mn2], axis=1)
#     mn.play(gain=5., magnification=4, backend='opencv', fr=30)
#%% Motion correct (timed) on the ipyparallel backend.
# NOTE(review): fnames[:-3] drops the last three entries of the list —
# confirm this is intentional and not a leftover of f[:-3] name slicing.
t1 = time()
file_res = cb.motion_correct_parallel(fnames[:-3], fr=30, template=None,
                                      margins_out=0, max_shift_w=45,
                                      max_shift_h=45, backend='ipyparallel',
                                      apply_smooth=True)
t2 = time() - t1
print(t2)
#%% Inspect the template and shifts saved for each corrected file.
all_movs = []
for f in file_res:
    with np.load(f + 'npz') as fl:
        pl.subplot(1, 2, 1)
        pl.imshow(fl['template'], cmap=pl.cm.gray)
        pl.subplot(1, 2, 2)
        pl.plot(fl['shifts'])
        all_movs.append(fl['template'][np.newaxis, :, :])
        pl.pause(2)
        pl.cla()
#%% Stack all per-file templates into a movie.
all_movs = cb.movie(np.concatenate(all_movs, axis=0), fr=10)
# Number of worker processes for the cluster allocation.
n_processes = 112  # np.maximum(psutil.cpu_count() - 2, 1)  # ~cores minus 1
#%%
# low_SNR = False
# if low_SNR:
#     N = 1000
#     mn1 = m.copy().bilateral_blur_2D(diameter=5, sigmaColor=10000, sigmaSpace=0)
#     mn1, shifts, xcorrs, template = mn1.motion_correct()
#     mn2 = mn1.apply_shifts(shifts)
#     # mn1 = cb.movie(np.transpose(np.array(Y_n), [2, 0, 1]), fr=30)
#     mn = cb.concatenate([mn1, mn2], axis=1)
#     mn.play(gain=5., magnification=4, backend='opencv', fr=30)
#%% Motion correct (timed) on the SLURM backend.
t1 = time()
file_res = cb.motion_correct_parallel(fnames, fr=30, template=None,
                                      margins_out=0, max_shift_w=45,
                                      max_shift_h=45, backend='SLURM',
                                      apply_smooth=True)
t2 = time() - t1
print(t2)
#%% Collect the template stored alongside each corrected .hdf5 file.
all_movs = []
for f in glob.glob(base_folder + '*.hdf5'):
    print(f)
    # f[:-4] + 'npz' swaps the 'hdf5' suffix for 'npz' (keeps the dot).
    with np.load(f[:-4] + 'npz') as fl:
        # pl.subplot(1, 2, 1)
        # pl.imshow(fl['template'], cmap=pl.cm.gray)
        # pl.subplot(1, 2, 2)
        # pl.plot(fl['shifts'])
        all_movs.append(fl['template'][np.newaxis, :, :])
        # pl.pause(2)
        # pl.cla()
#%%
# Base name for outputs = longest common prefix of the first and last file
# names (index of the first differing character).
# NOTE(review): raises IndexError if the two names are identical — confirm
# the file list always contains distinct names.
idx_start = [i for i in range(len(fnames[0]))
             if fnames[0][i] != fnames[-1][i]][0]
base_name = fnames[0][:idx_start]
#%%
# low_SNR = False
# if low_SNR:
#     N = 1000
#     mn1 = m.copy().bilateral_blur_2D(diameter=5, sigmaColor=10000, sigmaSpace=0)
#     mn1, shifts, xcorrs, template = mn1.motion_correct()
#     mn2 = mn1.apply_shifts(shifts)
#     # mn1 = cb.movie(np.transpose(np.array(Y_n), [2, 0, 1]), fr=30)
#     mn = cb.concatenate([mn1, mn2], axis=1)
#     mn.play(gain=5., magnification=4, backend='opencv', fr=30)
#%% Motion correct (timed) using the supplied dview.
t1 = time()
file_res = cb.motion_correct_parallel(fnames, fr=30, template=None,
                                      margins_out=0, max_shift_w=45,
                                      max_shift_h=45, dview=dview,
                                      apply_smooth=True)
t2 = time() - t1
print(t2)
#%% Collect the template stored alongside each file in fls.
all_movs = []
for f in fls:
    # Replace everything after the first '.' with 'npz' (keeps the dot).
    idx = f.find('.')
    with np.load(f[:idx + 1] + 'npz') as fl:
        print(f)
        # pl.subplot(1, 2, 1)
        # pl.imshow(fl['template'], cmap=pl.cm.gray)
        # pl.subplot(1, 2, 2)
        all_movs.append(fl['template'][np.newaxis, :, :])
        # pl.plot(fl['shifts'])
        # pl.pause(.001)
def separate_channels(filename):
    # NOTE(review): reconstructed header — only the tail of this function is
    # visible in this chunk. frameRate, filename_red and filename_green are
    # expected to be defined in the unseen beginning; confirm against the
    # full file before applying.
    # load movie
    # for loading only a portion of the movie or only some channels
    # you can use the option: subindices=range(0, 1500, 10)
    m = cb.load(filename, fr=frameRate, subindices=slice(1, None, 2))  # odd frames
    print(m.shape)
    m.save(filename_red)
    m = cb.load(filename, fr=frameRate, subindices=slice(0, None, 2))  # even frames
    m.save(filename_green)
    return (filename_green, filename_red)
#%% Collect and sort all TIFF files in the working directory.
file_names = glob.glob('*.tif')
file_names.sort()
print(file_names)
#%% Split every movie into its two channels.
res = map(separate_channels, file_names)
#%% Keep the green-channel file names (first element of each pair).
greens = [a for a, b in res]
#%% Motion correct the green channel.
file_res = cb.motion_correct_parallel(greens, fr=30, template=None,
                                      margins_out=0, max_shift_w=45,
                                      max_shift_h=45, dview=None,
                                      apply_smooth=True, save_hdf5=True)
#%% Run a second time since it gives better results.
file_res = cb.motion_correct_parallel(greens, fr=30, template=None,
                                      margins_out=0, max_shift_w=25,
                                      max_shift_h=25, dview=None,
                                      apply_smooth=True, save_hdf5=True)
#%% Visually inspect one corrected movie, temporally downsampled.
m = cb.load(file_res[2] + 'hdf5', fr=30)
m.resize(1, 1, .2).play(backend='opencv', gain=3., fr=100)
#%% Export each corrected movie as a TIFF.
for f in file_res:
    m = cb.load(f + 'hdf5', fr=30)
    m.save(f[:-1] + '_mc.tif')
# Show the overall template when running interactively.
if is_interactive:
    pl.imshow(template, cmap=pl.cm.gray, vmax=100)
# Persist per-movie templates and the raw template movies.
# NOTE(review): np.savez placed at top level (not inside the if) — the
# original layout is ambiguous in this chunk; confirm saving is meant to
# happen regardless of interactivity.
np.savez(base_name + '-template_total.npz', template_each=template_each,
         all_movs_each=np.array(all_movs_each), movie_names=movie_names)
#%
if is_interactive:
    for idx, mov in enumerate(all_movs_each):
        mov.play(backend='opencv', gain=50., fr=100)
        # mov.save(str(idx) + 'sam_example.tif')
#%
#%
# Motion correct each movie against its own per-movie template.
file_res = []
for template, fn in zip(template_each, movie_names):
    print(fn)
    file_res.append(cb.motion_correct_parallel(
        fn, 30 * downsample_factor, dview=dview, template=template,
        margins_out=0, max_shift_w=35, max_shift_h=35, remove_blanks=False))
#%
# Interactive inspection of each result's template and shifts.
if is_interactive:
    for f1 in file_res:
        for f in f1:
            with np.load(f + 'npz') as fl:
                pl.subplot(1, 2, 1)
                pl.cla()
                pl.imshow(fl['template'], cmap=pl.cm.gray)
                pl.subplot(1, 2, 2)
                pl.plot(fl['shifts'])
                pl.pause(0.001)
                pl.cla()
#%
# Report cluster size and build a direct view over all engines.
print('Using ' + str(len(c)) + ' processes')
dview = c[:len(c)]
#%% FOR LOADING ALL TIFF FILES IN A FILE AND SAVING THEM ON A SINGLE MEMORY MAPPABLE FILE
# fnames = []
# fnames = glob.glob('*.tif')
# fnames.sort()
fnames = ['file_name.tif']  # you have to eliminate the first element, it cannot be loaded!
print(fnames)
#%% Load the whole chain of movies.
m = cb.load_movie_chain(fnames, fr=30)
#%% Visual sanity check.
m.play(backend='opencv', gain=3., fr=50)
#%% Motion correct in memory (no hdf5 output).
apply_smooth = False  # set to True if SNR too low. Slower but might work better.
m_mc = cb.motion_correct_parallel([m], fr=30, template=None, margins_out=0,
                                  max_shift_w=25, max_shift_h=25,
                                  remove_blanks=True,
                                  apply_smooth=apply_smooth, dview=None,
                                  save_hdf5=False)
m_mc = m_mc[0]
#%% Play the corrected movie, temporally downsampled 5x.
m_mc.resize(1, 1, .2).play(backend='opencv', gain=5., fr=10)
#%% Save the corrected movie.
m_mc.save('all_mov.hdf5')
#%% In what follows there is some redundancy only due to the fact that this
# is meant to run in parallel for many datasets; it can be done much faster.
# idx_x = slice(12, 500, None)
# idx_y = slice(12, 500, None)
# idx_xy = (idx_x, idx_y)
final_frate = 30
downsample_factor = 1  # use .2 or .1 if file is large and you want a quick answer
final_frate = final_frate * downsample_factor
idx_xy = None
base_name = 'Yr'
# Save the corrected movie as memory-mappable file(s).
name_new = cse.utilities.save_memmap_each(['all_mov.hdf5'], dview=None,
                                          base_name=base_name,
                                          resize_fact=(1, 1, downsample_factor),
                                          remove_init=0, idx_xy=idx_xy)