def get_video(n=NFRAMES_SINGLE, seed=None):
    """Build a simulated single-camera video of small fast Brownian particles.

    Parameters
    ----------
    n : int
        Number of frames to simulate. Defaults to NFRAMES_SINGLE.
    seed : int, optional
        If given, seeds the random generators via ``sim.seed`` for
        reproducible output.

    Returns
    -------
    generator
        Yields cropped one-frame tuples, one per simulated frame.
    """
    # We use a mix of numba and numpy random generators... if you want to
    # seed, both need to be set! sim.seed does exactly that.
    if seed is not None:
        sim.seed(seed)
    # Build particle coordinates of smaller, faster particles.
    # FIX: pass the `n` argument through instead of the hard-coded
    # NFRAMES_SINGLE, so the frame count is actually configurable.
    x1 = sim.brownian_particles(shape=SIMSHAPE, n=n, delta=1., dt=1,
                                particles=NPARTICLES)
    vid1 = sim.particles_video(x1, shape=SIMSHAPE, sigma=5, intensity=5,
                               background=0)
    return (_crop_frames((frame,)) for frame in vid1)
def calculate():
    """Run NRUN seeded simulations, auto-correlate and normalize each one.

    For every run the video simulator is reloaded with a fresh seed, the
    windowed video is FFT'd and auto-correlated, and the result is
    normalized with several norm flags.

    Returns
    -------
    out : np.ndarray
        Shape ``(NRUN, 12) + data.shape``, indexed as ``out[run, norm]``;
        only the computed norm indices are filled.
    bgs : list
        Per-run background estimates from ``stats``.
    variances : list
        Per-run variance estimates from ``stats``.
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars`, which shadows the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        video = multiply(video_simulator.video, window_video)
        fft = rfft2(video, kimax=51, kjmax=0)
        fft_array, = asarrays(fft, NFRAMES_RANDOM)
        data = acorr(fft_array)
        bg, var = stats(fft_array)
        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                # 12 slots so `norm` can be used directly as the index.
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def calculate():
    """Run NRUN seeded dual-camera simulations and cross-correlate them.

    Returns
    -------
    out : np.ndarray
        Shape ``(NRUN, 12) + data.shape``, indexed as ``out[run, norm]``;
        only the computed norm indices are filled.
    bgs : list
        Per-run background estimates from ``stats``.
    variances : list
        Per-run variance estimates from ``stats``.
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars`, which shadows the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        t1, t2 = video_simulator.t1, video_simulator.t2
        video = multiply(video_simulator.video, window_video)
        #: if the intensity of the light source flickers you can normalize
        #: each frame to the intensity of the frame
        #video = normalize_video(video)
        #: perform rfft2 and crop results, to take only first kimax and
        #: first kjmax wavenumbers.
        fft = rfft2(video, kimax=KIMAX, kjmax=0)
        #: you can also normalize each frame with respect to the [0,0]
        #: component of the fft; this is therefore equivalent to
        #: normalize_video
        #fft = normalize_fft(fft)
        f1, f2 = asarrays(fft, NFRAMES)
        # FIX: stats(f1, f2) was computed twice in a row; one call suffices.
        bg, var = stats(f1, f2)
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)
        bgs.append(bg)
        variances.append(var)
        # 5 and 7 are redundant, but we are calculating them for easier
        # indexing.
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            # weighted (subtracted and compensated)
            if norm in (7, 11):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            # weighted prime
            elif norm in (3,):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                # 12 slots so `norm` can be used directly as the index.
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def calculate():
    """Run NRUN seeded dual-camera simulations and cross-correlate them.

    Returns
    -------
    out : np.ndarray
        Shape ``(NRUN, 12) + data.shape``, indexed as ``out[run, norm]``;
        only the computed norm indices are filled.
    bgs : list
        Per-run background estimates from ``stats``.
    variances : list
        Per-run variance estimates from ``stats``.
    """
    out = None
    bgs = []
    variances = []  # renamed from `vars`, which shadows the builtin
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        seed(i)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        t1, t2 = video_simulator.t1, video_simulator.t2
        video = multiply(video_simulator.video, window_video)
        fft = rfft2(video, kimax=KIMAX, kjmax=0)
        f1, f2 = asarrays(fft, NFRAMES)
        # FIX: stats(f1, f2) was computed twice in a row; one call suffices.
        bg, var = stats(f1, f2)
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)
        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            # weighted (subtracted)
            if norm in (7, 11):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            # weighted prime (baseline)
            elif norm in (3,):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                # 12 slots so `norm` can be used directly as the index.
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y
    return out, bgs, variances
def calculate(binning=1):
    """Run NRUN dual-camera simulations through multi-tau cross-correlation.

    Parameters
    ----------
    binning : int
        Passed to ``iccorr_multi`` and ``log_merge``; enables/disables
        averaging (binning) of linear-spaced data when merging.

    Returns
    -------
    x : np.ndarray
        Merged (log-spaced) time axis from the last run.
    out : np.ndarray
        Shape ``(NRUN, 16) + y.shape``, indexed as ``out[run, norm]``;
        only the computed norm indices are filled.
    """
    out = None
    for i in range(NRUN):
        print("Run {}/{}".format(i + 1, NRUN))
        importlib.reload(dual_video_simulator)  # recreates iterator
        # reset seed... because we use seed(0) in dual_video_simulator
        seed(i)
        t1, t2 = dual_video_simulator.t1, dual_video_simulator.t2
        video = multiply(dual_video_simulator.video, window_video)
        #: if the intensity of the light source flickers you can normalize
        #: each frame to the intensity of the frame
        #video = normalize_video(video)
        #: perform rfft2 and crop results, to take only first kimax and
        #: first kjmax wavenumbers.
        fft = rfft2(video, kimax=51, kjmax=0)
        #: you can also normalize each frame with respect to the [0,0]
        #: component of the fft; this is therefore equivalent to
        #: normalize_video
        #fft = normalize_fft(fft)
        #: now perform cross correlation calculation with default
        #: parameters and show live
        data, bg, var = iccorr_multi(fft, t1, t2, level_size=16,
                                     binning=binning, period=PERIOD,
                                     auto_background=True)
        # perform normalization and merge data
        # 5 and 7 are redundant, but we are calculating them for easier
        # indexing.
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15):
            fast, slow = normalize_multi(data, bg, var, norm=norm, scale=True)
            # we merge with binning (averaging) of linear data
            # enabled/disabled
            x, y = log_merge(fast, slow, binning=binning)
            if out is None:
                out = np.empty(shape=(NRUN, 16) + y.shape, dtype=y.dtype)
            # FIX: the first-allocation branch previously wrote out[0, norm];
            # use out[i, norm] unconditionally (identical here since out is
            # only None when i == 0, but consistent and clearer).
            out[i, norm] = y
    return x, out
def calculate():
    """Auto-correlate NRUN seeded random-trigger simulations.

    Each run reloads the simulator with its own seed, windows the video,
    FFTs it, auto-correlates with the random acquisition times, and
    normalizes with several norm flags (weighted variants use the global
    weights ``w`` and ``wp``).

    Returns a triple ``(out, bgs, variances)`` where ``out`` has shape
    ``(NRUN, 12) + y.shape`` indexed as ``out[run, norm]``, and ``bgs`` /
    ``variances`` collect the per-run ``stats`` results.
    """
    out = None
    bgs, variances = [], []
    # weight table: norms 7/11 use w, norm 3 uses wp, the rest are unweighted
    weight_for = {7: w, 11: w, 3: wp}
    for run in range(NRUN):
        print("Run {}/{}".format(run + 1, NRUN))
        seed(run)
        importlib.reload(video_simulator)  # recreates iterator with new seed
        t = video_simulator.t
        video = multiply(video_simulator.video, window_video)
        #: perform rfft2 and crop results, to take only first kimax and
        #: first kjmax wavenumbers.
        fft = rfft2(video, kimax=KIMAX, kjmax=0)
        fft_array, = asarrays(fft, NFRAMES_RANDOM)
        data = acorr(fft_array, t=t, n=int(NFRAMES / DT_RANDOM))
        bg, var = stats(fft_array)
        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            weight = weight_for.get(norm)
            if weight is None:
                y = normalize(data, bg, var, norm=norm, scale=True)
            else:
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(weight, 0, -1))
            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[run, norm] = y
    return out, bgs, variances
def get_dual_video(seed=None):
    """Build a simulated dual-camera video: many small fast particles plus
    one large, slow, drifting particle.

    Parameters
    ----------
    seed : int, optional
        If given, seeds the random generators via ``sim.seed``. We use a
        mix of numba and numpy random generators, and both need seeding
        for reproducibility — ``sim.seed`` sets both.

    Returns
    -------
    generator
        Yields cropped two-frame tuples (one frame per camera).
    """
    if seed is not None:
        sim.seed(seed)
    # trajectories of the smaller, faster particles
    fast = sim.brownian_particles(shape=SIMSHAPE, n=SIMFRAMES, delta=4.,
                                  dt=1, particles=NPARTICLES)
    # trajectory of a single large, slow particle with a small drift
    slow = sim.brownian_particles(shape=SIMSHAPE, n=SIMFRAMES, delta=0.02,
                                  dt=1, particles=1,
                                  velocity=((0.01, 0),), x0=((0, 256),))
    fast_vid = sim.particles_video(fast, t1=t1, t2=t2, shape=SIMSHAPE,
                                   sigma=5, intensity=5, background=0)
    slow_vid = sim.particles_video(slow, t1=t1, t2=t2, shape=SIMSHAPE,
                                   sigma=30, intensity=30, background=0)
    # sum the two videos frame-pair by frame-pair (cameras 0 and 1)
    combined = (tuple(a + b for a, b in zip(pair1, pair2))
                for pair1, pair2 in zip(fast_vid, slow_vid))
    #dual_vid = intensity_jitter(video, t1, t2)
    return (_crop_frames(frames) for frames in combined)
""" """ from cddm.viewer import CorrViewer from cddm.video import multiply, asarrays from cddm.window import blackman from cddm.fft import rfft2 from cddm.core import acorr, normalize, stats from cddm.multitau import log_average from cddm.sim import seed import importlib import numpy as np seed(0) #: see video_simulator for details, loads sample video import examples.paper.simple_video.random_video as video_simulator importlib.reload(video_simulator) #recreates iterator from examples.paper.conf import KIMAX, KJMAX, SHAPE, NFRAMES_RANDOM, NFRAMES, DATA_PATH, APPLY_WINDOW, DT_RANDOM #: create window for multiplication... window = blackman(SHAPE) #: we must create a video of windows for multiplication window_video = ((window,),)*NFRAMES_RANDOM #:perform the actual multiplication
def setUp(self):
    """Seed the simulation RNGs before each test so results are reproducible."""
    seed(0)