def test_print(self):
    # verbosity 0: nothing is expected to be printed
    conf.set_verbose(0)
    pt.print1("No print")
    pt.print2("No print")
    pt.print_progress(4, 10)
    pt.print_progress(10, 10)
    # verbosity 1: only print1 output is expected
    conf.set_verbose(1)
    pt.print1("Ok")
    pt.print2("No print")
    # verbosity 2: both print1 and print2 output are expected
    conf.set_verbose(2)
    pt.print1("OK")
    pt.print2("OK")
    pt.print_progress(4, 10)
    pt.print_progress(10, 10)
def asmemmaps(basename, video, count = None):
    """Loads multi-frame video into numpy memmaps.

    Actual data is written to numpy files with the provided basename and
    subscripted by source identifier (index), e.g. "basename_0.npy" and
    "basename_1.npy" in case of dual-frame video source.

    Parameters
    ----------
    basename : str
       Base name for the filenames of the videos.
    video : iterable
       A multi-frame iterator object.
    count : int, optional
       Defines how many multi-frames are in the video. If not provided it is
       determined by len().

    Returns
    -------
    out : tuple of arrays
        A tuple of memmapped array(s) representing video(s)
    """
    if count is None:
        try:
            count = len(video)
        except TypeError:
            raise ValueError("You must provide count")

    def _load(array, frame):
        array[...] = frame

    def _empty_arrays(frames):
        out = tuple((np.lib.format.open_memmap(basename + "_{}.npy".format(i), "w+",
                                                shape = (count,) + frame.shape,
                                                dtype = frame.dtype)
                     for i, frame in enumerate(frames)))
        return out

    print1("Writing to memmap...")
    print_progress(0, count)

    frames = next(video)
    out = _empty_arrays(frames)
    [_load(out[i][0], frame) for i, frame in enumerate(frames)]
    for j, frames in enumerate(video):
        print_progress(j + 1, count)
        [_load(out[i][j + 1], frame) for i, frame in enumerate(frames)]

    print_progress(count, count)
    return out
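# Hedged usage sketch (not part of the original source): shows how asmemmaps
# could be called with a dual-frame generator. The basename, frame shape and
# frame count below are illustrative assumptions.
def _example_asmemmaps():
    import numpy as np
    # a dummy dual-camera video: an iterator yielding (frame1, frame2) tuples
    video = ((np.zeros((32, 32)), np.zeros((32, 32))) for _ in range(128))
    # count must be given explicitly because a generator has no len()
    v1, v2 = asmemmaps("example_video", video, count = 128)
    # v1 and v2 are memory-mapped to "example_video_0.npy" and "example_video_1.npy"
    return v1.shape, v2.shape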
def asarrays(video, count = None):
    """Loads multi-frame video into numpy arrays.

    Parameters
    ----------
    video : iterable
       A multi-frame iterator object.
    count : int, optional
       Defines how many frames are in the video. If not provided, the length of
       the video is determined from the length of the iterable. If that is not
       possible, a ValueError is raised.

    Returns
    -------
    out : tuple of arrays
        A tuple of array(s) representing video(s)
    """
    t0 = time.time()

    def _load(array, frame):
        array[...] = frame

    print1("Loading array...")

    if count is None:
        try:
            count = len(video)
        except TypeError:
            raise ValueError("You must provide count")

    print_progress(0, count)

    video = iter(video)
    frames = next(video)
    out = tuple((np.empty(shape = (count,) + frame.shape, dtype = frame.dtype) for frame in frames))
    [_load(out[i][0], frame) for i, frame in enumerate(frames)]
    for j, frames in enumerate(video):
        print_progress(j + 1, count)
        [_load(out[i][j + 1], frame) for i, frame in enumerate(frames)]

    print_progress(count, count)
    print_frame_rate(count, t0)
    return out
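# Hedged usage sketch (not part of the original source): loads a short dummy
# dual-frame video into in-memory arrays. Frame shape and count are
# illustrative assumptions.
def _example_asarrays():
    import numpy as np
    # a list works here because its length can be determined with len()
    video = [(np.zeros((16, 16)), np.zeros((16, 16))) for _ in range(64)]
    v1, v2 = asarrays(video)
    return v1.shape, v2.shape  # both (64, 16, 16)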
def asarrays(video, count=None):
    """Loads multi-frame video into numpy arrays.

    Parameters
    ----------
    video : iterable
        A multi-frame iterator object.
    count : int, optional
        Defines how many frames are in the video. If not provided and the video
        has an undefined length, it will try to load the video using np.asarray.
        This means that the data is copied, since the whole video is converted
        to a single array in one step.

    Returns
    -------
    out : tuple of arrays
        A tuple of array(s) representing video(s)
    """
    def _load(array, frame):
        array[...] = frame

    print("Writing to array...")

    if count is None:
        try:
            count = len(video)
        except TypeError:
            out = np.asarray(video)
            out = tuple((out[:, i] for i in range(out.shape[1])))
            return out

    print_progress(0, count)

    frames = next(video)
    out = tuple((np.empty(shape=(count, ) + frame.shape, dtype=frame.dtype) for frame in frames))
    [_load(out[i][0], frame) for i, frame in enumerate(frames)]
    for j, frames in enumerate(video):
        print_progress(j + 1, count)
        [_load(out[i][j + 1], frame) for i, frame in enumerate(frames)]

    print_progress(count, count)

    return out
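# Hedged usage sketch (not part of the original source): this variant expects
# an iterator when count is given; without count it falls back to np.asarray,
# which copies the whole video at once. Names and shapes are illustrative.
def _example_asarrays_from_iterator():
    import numpy as np
    video = ((np.zeros((16, 16)), np.zeros((16, 16))) for _ in range(64))
    v1, v2 = asarrays(video, count = 64)
    return v1.shape, v2.shape  # both (64, 16, 16)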
def cross_analyze_iter(data, t1, t2, period = 1, level = 4, chunk_size = 256,
                       binning = True, method = "corr", auto_background = False,
                       nlog = None, return_background = False):
    # select the correlation kernel
    if method == "corr":
        f = ccorr
    elif method == "diff":
        f = cdiff
        #binning = False
    else:
        raise ValueError("Unknown method '{}'".format(method))

    half_chunk_size = chunk_size // 2
    assert chunk_size % 2 == 0
    assert level > 2
    n = 2 ** level
    assert n <= half_chunk_size

    # number of delay points of the linear (fast) and logarithmic (slow) parts
    n_fast = period * n
    n_slow = n

    # number of decades (levels) of the multi-tau scheme
    if nlog is None:
        n_decades = 0
        while len(t1) // (chunk_size * 2 ** (n_decades)) > 0:
            n_decades += 1
    else:
        n_decades = nlog + 1
    assert n_decades >= 1

    t_slow = np.arange(len(t1))

    print("Computing...")
    print_progress(0, len(t1))

    for i, d in enumerate(data):
        x1, x2 = d
        if i == 0:
            # allocate output and work buffers on the first frame
            shape = x1.shape
            out_fast = np.zeros(shape[0:-1] + (n_fast,) + shape[-1:], FDTYPE)
            out_fast = _transpose_data(out_fast)
            count_fast = np.zeros((n_fast,), IDTYPE)

            out_slow = np.zeros((n_decades - 1,) + shape[0:-1] + (n_slow,) + shape[-1:], FDTYPE)
            out_slow = _transpose_data(out_slow)
            count_slow = np.zeros((n_decades - 1, n_slow,), IDTYPE)

            # chunked (double-buffered) frame storage, one buffer per decade
            fdata1 = np.empty((n_decades,) + shape[0:-1] + (chunk_size,) + shape[-1:], CDTYPE)
            fdata2 = np.empty((n_decades,) + shape[0:-1] + (chunk_size,) + shape[-1:], CDTYPE)

        _add_data2(i, x1, x2, fdata1, fdata2, binning)

        # process the fast (linear) part whenever half a chunk has been filled
        if i % (half_chunk_size) == half_chunk_size - 1:
            ichunk = i // half_chunk_size

            fstart1 = half_chunk_size * (ichunk % 2)
            fstop1 = fstart1 + half_chunk_size

            fstart2 = half_chunk_size * ((ichunk - 1) % 2)
            fstop2 = fstart2 + half_chunk_size

            istart1 = ichunk * half_chunk_size
            istop1 = istart1 + half_chunk_size

            istart2 = istart1 - half_chunk_size
            istop2 = istop1 - half_chunk_size

            if auto_background == True:
                # background is estimated from the first half-chunk only
                if ichunk == 0:
                    bg1 = np.mean(fdata1[0,...,fstart1:fstop1,:], axis = -2)
                    bg2 = np.mean(fdata2[0,...,fstart1:fstop1,:], axis = -2)

                np.subtract(fdata1[0,...,fstart1:fstop1,:], bg1[...,None,:], fdata1[0,...,fstart1:fstop1,:])
                np.subtract(fdata2[0,...,fstart1:fstop1,:], bg2[...,None,:], fdata2[0,...,fstart1:fstop1,:])

            if return_background == True:
                out_bg1 = np.mean(fdata1[0,...,fstart1:fstop1,:], axis = -2)
                out_bg2 = np.mean(fdata2[0,...,fstart1:fstop1,:], axis = -2)

            # correlate the current half-chunk with itself...
            f(fdata1[0,...,fstart1:fstop1,:], fdata2[0,...,fstart1:fstop1,:], t1[istart1:istop1], t2[istart1:istop1], -2, out = (out_fast, count_fast))

            # ... and with the previous half-chunk (both orderings)
            if istart2 >= 0:
                f(fdata1[0][...,fstart1:fstop1,:], fdata2[0][...,fstart2:fstop2,:], t1[istart1:istop1], t2[istart2:istop2], -2, out = (out_fast, count_fast))
                f(fdata1[0][...,fstart2:fstop2,:], fdata2[0][...,fstart1:fstop1,:], t1[istart2:istop2], t2[istart1:istop1], -2, out = (out_fast, count_fast))

            # process the slow (binned) decades whenever enough frames have accumulated
            for j in range(1, n_decades):
                if i % (half_chunk_size * 2 ** j) == half_chunk_size * 2 ** j - 1:
                    ichunk = i // (half_chunk_size * 2 ** j)

                    fstart1 = half_chunk_size * (ichunk % 2)
                    fstop1 = fstart1 + half_chunk_size

                    fstart2 = half_chunk_size * ((ichunk - 1) % 2)
                    fstop2 = fstart2 + half_chunk_size

                    istart1 = ichunk * half_chunk_size
                    istop1 = istart1 + half_chunk_size

                    istart2 = istart1 - half_chunk_size
                    istop2 = istop1 - half_chunk_size

                    if auto_background == True:
                        # bg1 = np.mean(fdata1[j,...,fstart1:fstop1,:], axis = -2)
                        # bg2 = np.mean(fdata2[j,...,fstart1:fstop1,:], axis = -2)
                        np.subtract(fdata1[j,...,fstart1:fstop1,:], bg1[...,None,:], fdata1[j,...,fstart1:fstop1,:])
                        np.subtract(fdata2[j,...,fstart1:fstop1,:], bg2[...,None,:], fdata2[j,...,fstart1:fstop1,:])

                    f(fdata1[j,...,fstart1:fstop1,:], fdata2[j,...,fstart1:fstop1,:], t_slow[istart1:istop1], t_slow[istart1:istop1], -2, out = (out_slow[j-1], count_slow[j-1]))

                    if istart2 >= 0:
                        f(fdata1[j,...,fstart1:fstop1,:], fdata2[j,...,fstart2:fstop2,:], t_slow[istart1:istop1], t_slow[istart2:istop2], -2, out = (out_slow[j-1], count_slow[j-1]))
                        f(fdata1[j,...,fstart2:fstop2,:], fdata2[j,...,fstart1:fstop1,:], t_slow[istart2:istop2], t_slow[istart1:istop1], -2, out = (out_slow[j-1], count_slow[j-1]))
                else:
                    break

        print_progress(i + 1, len(t1))

    # results are accumulated in place and yielded once the data stream is exhausted
    if return_background == True:
        yield ((out_fast, count_fast), (out_slow, count_slow)), (out_bg1, out_bg2)
    else:
        yield ((out_fast, count_fast), (out_slow, count_slow))
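# Hedged usage sketch (not part of the original source): demonstrates how the
# generator could be consumed. The frame shape, frame count and analysis
# parameters are illustrative assumptions; `data` is expected to yield
# (x1, x2) pairs (e.g. Fourier transformed frames) and t1, t2 are the frame
# times of the two cameras.
def _example_cross_analyze_iter():
    import numpy as np
    nframes = 512
    t1 = np.arange(nframes)
    t2 = np.arange(nframes)
    data = ((np.zeros((8, 8), "complex64"), np.zeros((8, 8), "complex64")) for _ in range(nframes))
    # iterate the generator; the accumulated (correlation, count) pairs of the
    # fast and slow parts are obtained from the last yielded value
    for fast, slow in cross_analyze_iter(data, t1, t2, level = 4, chunk_size = 128):
        pass
    (out_fast, count_fast), (out_slow, count_slow) = fast, slow
    return out_fast.shape, out_slow.shape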