def test_corr_regular_2_mask(self):
    """Masked cross-correlation with norm=2: the "fft" and "corr" methods
    must agree after normalization, for every mode/scale combination."""
    for scale in (True, False):
        for mode in ("corr", "diff"):
            bg, var = core.stats(test_data1, test_data2)
            data = core.ccorr(test_data1, test_data2, norm=2, method="fft")
            self.out = core.normalize(data, bg, var, norm=2, mode=mode,
                                      scale=scale, mask=test_mask)
            data = core.ccorr(test_data1, test_data2, norm=2, method="corr")
            out_other = core.normalize(data, bg, var, norm=2, mode=mode,
                                       scale=scale, mask=test_mask)
            # np.allclose for consistency with the other tests in this file,
            # which all compare through the numpy namespace
            self.assertTrue(np.allclose(self.out, out_other))
def test_corr_regular_2(self):
    """Cross-correlation with norm=2: the "fft" and "corr" methods must
    agree after normalization, over every axis/mode/scale combination."""
    for scale in (True, False):
        for mode in ("corr", "diff"):
            for axis in (0, 1, 2):
                bg, var = core.stats(test_data1, test_data2, axis=axis)
                reference = core.ccorr(test_data1, test_data2, norm=2,
                                       method="fft", axis=axis)
                self.out = core.normalize(reference, bg, var, norm=2,
                                          mode=mode, scale=scale)
                candidate = core.ccorr(test_data1, test_data2, norm=2,
                                       method="corr", axis=axis)
                out_other = core.normalize(candidate, bg, var, norm=2,
                                           mode=mode, scale=scale)
                self.assertTrue(allclose(self.out, out_other))
def test_auto_equivalence_1(self):
    """acorr followed by normalize must equal the iterative iacorr
    pipeline for norm=1, for every computation method."""
    for method in ("corr", "fft", "diff"):
        bg, var = core.stats(test_data1, axis=0)
        batch = core.acorr(test_data1, n=8, norm=1, method=method)
        out1 = core.normalize(batch, bg, var, norm=1)
        iterative, bg, var = core.iacorr(test_data1, n=8, norm=1,
                                         method=method)
        out2 = core.normalize(iterative, bg, var, norm=1)
        self.assertTrue(np.allclose(out1, out2))
def test_auto_equivalence_2(self):
    """ccorr of a signal with itself must equal the iterative iacorr
    pipeline for norm=2 (method "corr" only)."""
    for method in ("corr",):
        bg, var = core.stats(test_data1, axis=0)
        self_corr = core.ccorr(test_data1, test_data1, n=8, norm=2,
                               method=method)
        out1 = core.normalize(self_corr, bg, var, norm=2)
        iterative, bg, var = core.iacorr(test_data1, n=8, norm=2,
                                         method=method)
        out2 = core.normalize(iterative, bg, var, norm=2)
        self.assertTrue(np.allclose(out1, out2))
def test_cross_equivalence(self):
    """In-memory ccorr+normalize must match the chunked out-of-memory
    iccorr pipeline for every computation method."""
    for method in ("corr", "diff", "fft"):
        bg, var = core.stats(test_data1, test_data2, axis=0)
        batch = core.ccorr(test_data1, test_data2, n=8, norm=1,
                           method=method)
        out1 = core.normalize(batch, bg, var)
        vid = fromarrays((test_data1, test_data2))
        chunked, bg, var = core.iccorr(vid, count=len(test_data1),
                                       chunk_size=16, n=8, norm=1,
                                       method=method)
        out2 = core.normalize(chunked, bg, var)
        self.assertTrue(np.allclose(out1, out2))
def calculate():
    """Run NRUN simulated cross-correlation experiments, normalizing each
    run with every tested norm flag.

    Returns
    -------
    out : ndarray
        Shape (NRUN, 12) + data shape; out[i, norm] holds run i normalized
        with the given norm flag (unused norm indices stay uninitialized).
    bgs, variances : list
        Per-run background and variance estimates from stats().
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars` to avoid shadowing the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        t1, t2 = video_simulator.t1, video_simulator.t2
        video = multiply(video_simulator.video, window_video)
        #: if the intensity of the light source flickers you can normalize
        #: each frame to the intensity of the frame
        # video = normalize_video(video)
        #: perform rfft2 and crop results, to take only the first kimax and
        #: first kjmax wavenumbers
        fft = rfft2(video, kimax=KIMAX, kjmax=0)
        #: you could also normalize each frame with respect to the [0,0]
        #: component of the fft (equivalent to normalize_video)
        # fft = normalize_fft(fft)
        f1, f2 = asarrays(fft, NFRAMES)
        bg, var = stats(f1, f2)  # fixed: stats() was redundantly called twice
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)
        bgs.append(bg)
        variances.append(var)
        # 5 and 7 are redundant, but we calculate them for easier indexing
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            if norm in (7, 11):
                # weighted (subtracted and compensated)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            elif norm in (3,):
                # weighted prime
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def calculate():
    """Run NRUN simulated cross-correlation experiments, normalizing each
    run with every tested norm flag.

    Returns
    -------
    out : ndarray
        Shape (NRUN, 12) + data shape; out[i, norm] holds run i normalized
        with the given norm flag (unused norm indices stay uninitialized).
    bgs, variances : list
        Per-run background and variance estimates from stats().
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars` to avoid shadowing the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        t1, t2 = video_simulator.t1, video_simulator.t2
        video = multiply(video_simulator.video, window_video)
        # crop to the first KIMAX wavenumbers along i, only kj=0 along j
        fft = rfft2(video, kimax=KIMAX, kjmax=0)
        f1, f2 = asarrays(fft, NFRAMES)
        bg, var = stats(f1, f2)  # fixed: stats() was redundantly called twice
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)
        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            if norm in (7, 11):
                # weighted (subtracted)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            elif norm in (3,):
                # weighted prime (baseline)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def calculate():
    """Run NRUN simulated auto-correlation experiments (random frame
    times), normalizing each run with every tested norm flag.

    Returns
    -------
    out : ndarray
        Shape (NRUN, 12) + data shape; out[i, norm] holds run i normalized
        with the given norm flag (unused norm indices stay uninitialized).
    bgs, variances : list
        Per-run background and variance estimates from stats().
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars` to avoid shadowing the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        t = video_simulator.t
        video = multiply(video_simulator.video, window_video)
        #: perform rfft2 and crop results, to take only the first kimax and
        #: first kjmax wavenumbers
        fft = rfft2(video, kimax=KIMAX, kjmax=0)
        fft_array, = asarrays(fft, NFRAMES_RANDOM)
        # n is the number of delay points in physical (non-random) time units
        data = acorr(fft_array, t=t, n=int(NFRAMES / DT_RANDOM))
        bg, var = stats(fft_array)
        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            if norm in (7, 11):
                # weighted (subtracted)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            elif norm in (3,):
                # weighted prime (baseline)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def acorr_save(fft_array, path_out, method='diff', mode='diff'):
    """Auto-correlate fft_array, normalize the result, and save the
    linear-time data under path_out.

    Adapted from the examples in the cddm package. Returns a metadata
    dict together with the normalized data.
    """
    #: auto correlation with default parameters
    data = acorr(fft_array, method=method)
    bg, var = stats(fft_array)
    #: normalization and merging of the raw correlation data
    data_lin = normalize(data, bg, var, scale=True, mode=mode)
    np.save(path_out / f'auto_correlate_data_lin_{method}_{mode}.npy', data_lin)
    # Optional log-space resampling, kept for reference:
    # x, y = log_average(data_lin, size=16)
    # np.save(path_out / 'auto_correlate_t.npy', x)
    # np.save(path_out / 'auto_correlate_data.npy', y)
    meta = {
        'bg': bg,
        'var': var,
        'data_lin_shape': data_lin.shape,
        # 't_shape': x.shape,
        # 'data_shape': y.shape,
    }
    return (meta, data_lin)  # , x, y)
def calculate():
    """Run NRUN simulated auto-correlation experiments (default acorr
    parameters), normalizing each run with every tested norm flag.

    Returns
    -------
    out : ndarray
        Shape (NRUN, 12) + data shape; out[i, norm] holds run i normalized
        with the given norm flag (unused norm indices stay uninitialized).
    bgs, variances : list
        Per-run background and variance estimates from stats().
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars` to avoid shadowing the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        video = multiply(video_simulator.video, window_video)
        # NOTE(review): kimax is hard-coded to 51 here while sibling
        # scripts use a KIMAX constant — confirm this is intentional
        fft = rfft2(video, kimax=51, kjmax=0)
        fft_array, = asarrays(fft, NFRAMES_RANDOM)
        data = acorr(fft_array)
        bg, var = stats(fft_array)
        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def test_ccorr_regular_3_mask(self):
    """Masked cross-correlation with norm=3: the "fft", "corr" and
    "diff" methods must all agree after normalization."""
    axis = 0
    for scale in (True, False):
        for mode in ("corr", "diff"):
            bg, var = core.stats(test_data1, test_data2, axis=axis)
            reference = core.ccorr(test_data1, test_data2, norm=3,
                                   method="fft", axis=axis)
            self.out = core.normalize(reference, bg, var, norm=3,
                                      mode=mode, scale=scale,
                                      mask=test_mask)
            # compare the remaining computation methods against "fft"
            for other in ("corr", "diff"):
                data = core.ccorr(test_data1, test_data2, norm=3,
                                  method=other, axis=axis)
                out_other = core.normalize(data, bg, var, norm=3,
                                           mode=mode, scale=scale,
                                           mask=test_mask)
                self.assertTrue(np.allclose(self.out, out_other))
#fft = normalize_fft(fft) #load in numpy array fft_array, = asarrays(fft, NFRAMES_RANDOM) if __name__ == "__main__": import os.path as p #: now perform auto correlation calculation with default parameters data = acorr(fft_array, t = video_simulator.t, n = int(NFRAMES/DT_RANDOM)) bg, var = stats(fft_array) for norm in (1,2,3,5,6,7,9,10,11): #: perform normalization and merge data data_lin = normalize(data, bg, var, scale = True, norm = norm) #: change size, to define time resolution in log space x,y = log_average(data_lin, size = 16) #: save the normalized data to numpy files np.save(p.join(DATA_PATH, "corr_random_t.npy"),x*DT_RANDOM) np.save(p.join(DATA_PATH, "corr_random_data_norm{}.npy".format(norm)),y) #: inspect the data viewer = CorrViewer(scale = True) viewer.set_data(data,bg, var) viewer.set_mask(k = 25, angle = 0, sector = 30) viewer.plot() viewer.show()
""" Demonstrates the use and equivalence of method and mode options """ from examples.auto_correlate import fft_array from cddm.core import acorr, normalize, stats import matplotlib.pyplot as plt bg, var = stats(fft_array) for method in ("corr","diff"): if method == "corr": data = acorr(fft_array, method = "fft") #fft,so that it is faster else: data = acorr(fft_array, method = "diff", n = 256) for mode in ("diff", "corr"): data_lin = normalize(data, bg, var, mode = mode, norm = 2, scale = True) plt.semilogx(data_lin[4,12], label = "mode = {}; method = {}".format(mode, method)) plt.legend() plt.show()