def test_normalize(self):
    """normalize_fft must reproduce the reference normalized spectrum,
    both out-of-place (default) and with inplace=True."""
    # out-of-place normalization
    frames = fromarrays((self.vid,))
    spectrum = rfft2(frames)
    result, = asarrays(normalize_fft(spectrum), 128)
    self.assertTrue(np.allclose(result, self.fft_norm))
    # in-place normalization must give the identical result
    frames = fromarrays((self.vid,))
    spectrum = rfft2(frames)
    result, = asarrays(normalize_fft(spectrum, inplace = True), 128)
    self.assertTrue(np.allclose(result, self.fft_norm))
def test_rfft2_scipy(self):
    """rfft2 with the scipy backend: full transform, then cropped variants."""
    set_rfft2lib("scipy")
    frames = fromarrays((self.vid,))
    out, = asarrays(rfft2(frames), 128)
    self.assertTrue(np.allclose(out, self.fft))
    # cropped transforms keep only the first kimax/kjmax wavenumbers
    for kimax, kjmax in ((5, 6), (7, 7), (4, 4)):
        frames = fromarrays((self.vid,))
        out, = asarrays(rfft2(frames, kimax = kimax, kjmax = kjmax), 128)
        reference = self.fft[:, :, 0:kjmax + 1]
        # positive and negative i-wavenumber halves, compared separately
        self.assertTrue(np.allclose(out[:, :kimax + 1], reference[:, :kimax + 1]))
        self.assertTrue(np.allclose(out[:, -kimax:], reference[:, -kimax:]))
def calculate():
    """Run NRUN seeded simulations and collect normalized autocorrelation data.

    Returns
    -------
    out : ndarray
        Shape (NRUN, 12) + data shape; filled only at the computed norm
        indices (1,2,3,5,6,7,9,10,11), other slots stay uninitialized.
    bgs : list
        Per-run background estimates from stats().
    variances : list
        Per-run variance estimates from stats().
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars` to avoid shadowing the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        video = multiply(video_simulator.video, window_video)
        fft = rfft2(video, kimax=51, kjmax=0)
        fft_array, = asarrays(fft, NFRAMES_RANDOM)
        data = acorr(fft_array)
        bg, var = stats(fft_array)
        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                # lazily allocate once the per-norm result shape is known
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def calculate():
    """Run NRUN seeded dual-camera simulations and collect normalized
    cross-correlation data.

    Returns
    -------
    out : ndarray
        Shape (NRUN, 12) + data shape; filled only at the computed norm
        indices (1,2,3,5,6,7,9,10,11).
    bgs : list
        Per-run background estimates from stats().
    variances : list
        Per-run variance estimates from stats().
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars` to avoid shadowing the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        t1, t2 = video_simulator.t1, video_simulator.t2
        video = multiply(video_simulator.video, window_video)
        #: if the intensity of the light source flickers you can normalize each frame to the intensity of the frame
        #video = normalize_video(video)
        #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
        fft = rfft2(video, kimax = KIMAX, kjmax = 0)
        #: you can also normalize each frame with respect to the [0,0] component of the fft
        #: this is therefore equivalent to normalize_video
        #fft = normalize_fft(fft)
        f1, f2 = asarrays(fft, NFRAMES)
        # stats() was previously called twice in a row here; once is sufficient
        bg, var = stats(f1, f2)
        data = ccorr(f1, f2, t1 = t1, t2 = t2, n = NFRAMES)
        #: now perform auto correlation calculation with default parameters and show live
        #data, bg, var = iacorr(fft, t, auto_background = True, n = NFRAMES)
        bgs.append(bg)
        variances.append(var)
        # 5 and 7 are redundant, but computing them makes indexing easier
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            if norm in (7, 11):
                # weighted (subtracted and compensated)
                y = normalize(data, bg, var, norm = norm, scale = True,
                              weight = np.moveaxis(w, 0, -1))
            elif norm in (3,):
                # weighted prime
                y = normalize(data, bg, var, norm = norm, scale = True,
                              weight = np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm = norm, scale = True)
            if out is None:
                # lazily allocate once the per-norm result shape is known
                out = np.empty(shape = (NRUN, 12) + y.shape, dtype = y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def test_rfft2_numpy(self):
    """rfft2 with the numpy backend: full transform, cropped variants,
    and out-of-range crop parameters."""
    set_rfft2lib("numpy")
    frames = fromarrays((self.vid,))
    out, = asarrays(rfft2(frames), 128)
    self.assertTrue(np.allclose(out, self.fft))
    # cropping both axes at once
    for kimax, kjmax in ((5, 6), (7, 7), (4, 4)):
        frames = fromarrays((self.vid,))
        out, = asarrays(rfft2(frames, kimax = kimax, kjmax = kjmax), 128)
        reference = self.fft[:, :, 0:kjmax + 1]
        self.assertTrue(np.allclose(out[:, :kimax + 1], reference[:, :kimax + 1]))
        self.assertTrue(np.allclose(out[:, -kimax:], reference[:, -kimax:]))
    # cropping the j axis only
    frames = fromarrays((self.vid,))
    out, = asarrays(rfft2(frames, kimax = None, kjmax = 6), 128)
    self.assertTrue(np.allclose(out, self.fft[:, :, 0:7]))
    # cropping the i axis only
    frames = fromarrays((self.vid,))
    out, = asarrays(rfft2(frames, kimax = 6), 128)
    self.assertTrue(np.allclose(out[:, 0:7, :], self.fft[:, 0:7, :]))
    # requesting more wavenumbers than the frame holds must raise
    with self.assertRaises(ValueError):
        frames = fromarrays((self.vid,))
        out, = asarrays(rfft2(frames, kimax = 16), 128)
    with self.assertRaises(ValueError):
        frames = fromarrays((self.vid,))
        out, = asarrays(rfft2(frames, kjmax = 17), 128)
def calculate():
    """Run NRUN seeded dual-camera simulations and collect normalized
    cross-correlation data (weighted variants included).

    Returns
    -------
    out : ndarray
        Shape (NRUN, 12) + data shape; filled only at the computed norm
        indices (1,2,3,5,6,7,9,10,11).
    bgs : list
        Per-run background estimates from stats().
    variances : list
        Per-run variance estimates from stats().
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars` to avoid shadowing the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        t1, t2 = video_simulator.t1, video_simulator.t2
        video = multiply(video_simulator.video, window_video)
        fft = rfft2(video, kimax=KIMAX, kjmax=0)
        f1, f2 = asarrays(fft, NFRAMES)
        # stats() was previously called twice in a row here; once is sufficient
        bg, var = stats(f1, f2)
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)
        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            # weighted (subtracted)
            if norm in (7, 11):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            # weighted prime (baseline)
            elif norm in (3, ):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                # lazily allocate once the per-norm result shape is known
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def calculate():
    """Run NRUN seeded random-time simulations and collect normalized
    autocorrelation data (weighted variants included).

    Returns
    -------
    out : ndarray
        Shape (NRUN, 12) + data shape; filled only at the computed norm
        indices (1,2,3,5,6,7,9,10,11).
    bgs : list
        Per-run background estimates from stats().
    variances : list
        Per-run variance estimates from stats().
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars` to avoid shadowing the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        t = video_simulator.t
        video = multiply(video_simulator.video, window_video)
        #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
        fft = rfft2(video, kimax=KIMAX, kjmax=0)
        fft_array, = asarrays(fft, NFRAMES_RANDOM)
        data = acorr(fft_array, t=t, n=int(NFRAMES / DT_RANDOM))
        bg, var = stats(fft_array)
        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            # weighted (subtracted)
            if norm in (7, 11):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            # weighted prime (baseline)
            elif norm in (3, ):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                # lazily allocate once the per-norm result shape is known
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
#: single-camera DDM preprocessing: load simulated frames, window them,
#: take the cropped rfft2 and store the result to disk.
vid = np.load("simple_brownian_ddm_video.npy")
nframes = len(vid)
#obtain frames iterator
video = fromarrays((vid, ))
##apply blackman window
window = blackman(SHAPE)
video = apply_window(video, (window, ))
#perform rfft2 and crop data
video = rfft2(video, kisize=64, kjsize=64)
#load all frames into numpy array
#video, = asmemmaps("brownian_single_camera_fft", video, nframes)
#compute and create numpy array
video, = asarrays(video, nframes)
np.save("simple_brownian_ddm_fft.npy", video)

#: dual-camera (cross-DDM) variant: the same window/rfft2 preprocessing
#: applied to both camera streams at once.
v1 = np.load("simple_brownian_cddm_video_0.npy")
v2 = np.load("simple_brownian_cddm_video_1.npy")
nframes = len(v1)
#obtain frames iterator
video = fromarrays((v1, v2))
##apply blackman window
window = blackman(SHAPE)
video = apply_window(video, (window, window))
#perform rfft2 and crop data
video = rfft2(video, kisize=64, kjsize=64)
#load all frames into numpy array
#video, = asmemmaps("brownian_single_camera_fft", video, nframes)
video = multiply(video_simulator.video, window_video) else: video = video_simulator.video #: if the intesity of light source flickers you can normalize each frame to the intensity of the frame #video = normalize_video(video) #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers. fft = rfft2(video, kimax = KIMAX, kjmax = KJMAX) #: you can also normalize each frame with respect to the [0,0] component of the fft #: this it therefore equivalent to normalize_video #fft = normalize_fft(fft) #load in numpy array fft_array, = asarrays(fft, NFRAMES_RANDOM) if __name__ == "__main__": import os.path as p #: now perform auto correlation calculation with default parameters data = acorr(fft_array, t = video_simulator.t, n = int(NFRAMES/DT_RANDOM)) bg, var = stats(fft_array) for norm in (1,2,3,5,6,7,9,10,11): #: perform normalization and merge data data_lin = normalize(data, bg, var, scale = True, norm = norm) #: change size, to define time resolution in log space x,y = log_average(data_lin, size = 16)
def setUp(self):
    """Build a random 31x32 test video plus reference FFT fixtures."""
    source = random_video((31, 32), count = 128, dtype = "uint8", max_value = 255)
    self.vid, = asarrays(source, count = 128)
    # reference spectrum computed directly with numpy's rfft2
    self.fft = npfft.rfft2(self.vid)
    # each frame normalized by its own zero-frequency ([0,0]) component
    dc = self.fft[:, 0, 0]
    self.fft_norm = self.fft / dc[:, None, None]
#:perform the actual multiplication video = multiply(video_simulator.video, window_video) #: if the intesity of light source flickers you can normalize each frame to the intensity of the frame #video = normalize_video(video) #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers. fft = rfft2(video, kimax=KIMAX, kjmax=KJMAX) #: you can also normalize each frame with respect to the [0,0] component of the fft #: this it therefore equivalent to normalize_video #fft = normalize_fft(fft) #load in numpy array fft_array, = asarrays(fft, NFRAMES) if __name__ == "__main__": import os.path as p #: now perform auto correlation calculation with default parameters data = acorr(fft_array) bg, var = stats(fft_array) #: perform normalization and merge data data_lin = normalize(data, bg, var, scale=True) #: inspect the data viewer = DataViewer() viewer.set_data(data_lin) viewer.set_mask(k=25, angle=0, sector=30)
vid1 = sim.particles_video(x1, shape=SIMSHAPE, sigma=5, intensity=5, background=0) #vid2 = sim.particles_video(x2, shape = SIMSHAPE, sigma = 30, intensity = 30, background = 0) return (_crop_frames((frame, )) for frame in vid1) #return ((frame1+frame2,frame1+frame2) for frame1,frame2 in zip(vid1,vid2)) if __name__ == "__main__": print("Computing ddm video...") vid = get_video(seed=0) vid, = video.asarrays(vid, NFRAMES_SINGLE) print("Writing to HD ...") np.save("simple_brownian_ddm_video.npy", vid) print("Computing cddm video...") vid = get_dual_video(seed=0) #we can write directly to HD by creating and writing to numpy memmap v1, v2 = video.asmemmaps("simple_brownian_cddm_video", vid, NFRAMES_DUAL) np.save("simple_brownian_cddm_t1.npy", t1) np.save("simple_brownian_cddm_t2.npy", t2) #print("Writing to HD ...") #np.save("brownian_dual_camera_0.npy",v1) #np.save("brownian_dual_camera_1.npy",v2)
window_video = ((window,window),)*NFRAMES_DUAL #:perform the actual multiplication if APPLY_WINDOW: video = multiply(dual_video.video, window_video) else: video = dual_video.video #: if the intesity of light source flickers you can normalize each frame to the intensity of the frame #video = normalize_video(video) #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers. fft = rfft2(video, kimax = KIMAX, kjmax = KJMAX) #load in numpy array fft1,fft2 = asarrays(fft, NFRAMES_DUAL) if __name__ == "__main__": import os.path as p #: now perform cross correlation calculation with default parameters data = ccorr(fft1,fft2, t1 = t1,t2 = t2, n = NFRAMES_DUAL) bg, var = stats(fft1,fft2) for norm in range(8): #: perform normalization and merge data data_lin = normalize(data, bg, var, scale = True, norm = norm) if norm == 6: np.save(p.join(DATA_PATH, "corr_dual_linear.npy"),data_lin)
def test_mask(self):
    """mask() must match direct boolean fancy-indexing of the frames."""
    selector = vid[0] > 0.
    frames = fromarrays((vid, ))
    result, = asarrays(mask(frames, selector), 128)
    self.assertTrue(np.allclose(result, vid[:, selector]))
def test_crop(self):
    """crop() rejects a malformed ROI and slices frames correctly."""
    frames = fromarrays((vid, ))
    # a bare scalar where an (start, stop) pair is expected must raise
    with self.assertRaises(ValueError):
        list(crop(frames, roi=((0, 4), 0, 2)))
    # the same iterator is reused after the failed call, as in the original test
    cropped, = asarrays(crop(frames, roi=((0, 2), (0, 2))), 128)
    self.assertTrue(np.allclose(cropped[:], vid[:, 0:2, 0:2]))
"""tests for video processing functions""" import unittest import numpy as np from cddm.video import subtract, multiply, normalize_video, random_video, asmemmaps,\ asarrays, fromarrays, load, crop, show_video, show_fft, show_diff, play, add, \ mask from cddm.conf import FDTYPE, set_showlib from cddm.window import blackman video = random_video((32, 8), count=128, dtype="uint8", max_value=255) vid, = asarrays(video, count=128) bg = vid.mean(0) window = blackman((32, 8)) vid_subtract = vid - bg[None, ...] vid_multiply = vid * window[None, ...] vid_add = vid + bg[None, ...] vid_normalize = vid / (vid.mean((1, 2))[:, None, None]) vid_multiple = vid_subtract * window[None, ...] vid_multiple = vid_multiple / (vid_multiple.mean((1, 2))[:, None, None]) class TestVideo(unittest.TestCase): def setUp(self): pass def test_memmaps(self): video = fromarrays((vid, )) with self.assertRaises(ValueError):
video = multiply(video_simulator.video, window_video) else: video = video_simulator.video #: if the intesity of light source flickers you can normalize each frame to the intensity of the frame #video = normalize_video(video) #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers. fft = rfft2(video, kimax=KIMAX, kjmax=KJMAX) #: you can also normalize each frame with respect to the [0,0] component of the fft #: this it therefore equivalent to normalize_video #fft = normalize_fft(fft) #load in numpy array fft_array, = asarrays(fft, NFRAMES_FAST) if __name__ == "__main__": import os.path as p #: now perform auto correlation calculation with default parameters data = acorr(fft_array, n=int(NFRAMES / DT_FAST), method="fft") bg, var = stats(fft_array) for norm in range(8): #: perform normalization and merge data data_lin = normalize(data, bg, var, scale=True, norm=norm) if norm == 6: np.save(p.join(DATA_PATH, "corr_fast_linear.npy"), data_lin)
video = multiply(video_simulator.video, window_video) else: video = video_simulator.video #: if the intesity of light source flickers you can normalize each frame to the intensity of the frame #video = normalize_video(video) #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers. fft = rfft2(video, kimax=KIMAX, kjmax=KJMAX) #: you can also normalize each frame with respect to the [0,0] component of the fft #: this it therefore equivalent to normalize_video #fft = normalize_fft(fft) #load in numpy array fft_array, = asarrays(fft, NFRAMES_STANDARD) if __name__ == "__main__": import os.path as p #: now perform auto correlation calculation with default parameters data = acorr(fft_array, n=int(NFRAMES / DT_STANDARD), method="fft") bg, var = stats(fft_array) for norm in range(8): #: perform normalization and merge data data_lin = normalize(data, bg, var, scale=True, norm=norm) if norm == 6: np.save(p.join(DATA_PATH, "corr_standard_linear.npy"), data_lin) #: perform log averaging