コード例 #1
0
ファイル: test_fft.py プロジェクト: inwwin/cddm
 def test_normalize(self):
     """normalize_fft must reproduce the reference result both when it
     returns a new array and when it operates in place."""
     # out-of-place normalization
     frames = fromarrays((self.vid,))
     result, = asarrays(normalize_fft(rfft2(frames)), 128)
     self.assertTrue(np.allclose(result, self.fft_norm))
     # in-place normalization must yield the same values
     frames = fromarrays((self.vid,))
     result, = asarrays(normalize_fft(rfft2(frames), inplace = True), 128)
     self.assertTrue(np.allclose(result, self.fft_norm))
コード例 #2
0
ファイル: test_fft.py プロジェクト: inwwin/cddm
 def test_rfft2_scipy(self):
     """rfft2 with the scipy backend matches the reference FFT,
     with and without wavenumber cropping."""
     set_rfft2lib("scipy")
     frames = fromarrays((self.vid,))
     out, = asarrays(rfft2(frames), 128)
     self.assertTrue(np.allclose(out, self.fft))

     # cropped transforms must equal the matching slices of the full FFT
     for ki, kj in ((5, 6), (7, 7), (4, 4)):
         frames = fromarrays((self.vid,))
         out, = asarrays(rfft2(frames, kimax = ki, kjmax = kj), 128)
         self.assertTrue(np.allclose(out[:, 0:ki + 1], self.fft[:, 0:ki + 1, 0:kj + 1]))
         self.assertTrue(np.allclose(out[:, -ki:], self.fft[:, -ki:, 0:kj + 1]))
コード例 #3
0
def calculate():
    """Run NRUN auto-correlation simulations and collect normalized data.

    Returns
    -------
    out : ndarray
        Array of shape ``(NRUN, 12) + data.shape`` holding normalized
        correlation results, indexed by run and norm flag.  NOTE: slots
        for norms not in the computed set stay uninitialized (np.empty).
    bgs : list
        Per-run background estimates from ``stats``.
    variances : list
        Per-run variance estimates from ``stats``.
    """
    out = None
    bgs = []
    variances = []  # renamed from ``vars`` to avoid shadowing the builtin

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        # fresh seed plus a module reload gives an independent realization
        # of the random video for each run
        seed(i)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        video = multiply(video_simulator.video, window_video)
        fft = rfft2(video, kimax=51, kjmax=0)

        fft_array, = asarrays(fft, NFRAMES_RANDOM)

        data = acorr(fft_array)
        bg, var = stats(fft_array)

        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                # allocate once the per-norm output shape is known
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return out, bgs, variances
コード例 #4
0
def calculate():
    """Run NRUN cross-correlation simulations and collect normalized data.

    Returns
    -------
    out : ndarray
        Array of shape ``(NRUN, 12) + data.shape`` of normalized results,
        indexed by run and norm flag.  NOTE: slots for norms not in the
        computed set stay uninitialized (np.empty).
    bgs : list
        Per-run background estimates from ``stats``.
    variances : list
        Per-run variance estimates from ``stats``.
    """
    out = None
    bgs = []
    variances = []  # renamed from ``vars`` to avoid shadowing the builtin

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        seed(i)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        t1, t2 = video_simulator.t1, video_simulator.t2

        video = multiply(video_simulator.video, window_video)

        #: if the intensity of the light source flickers you can normalize
        #: each frame to the intensity of the frame
        #video = normalize_video(video)

        #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
        fft = rfft2(video, kimax=KIMAX, kjmax=0)

        #: you can also normalize each frame with respect to the [0,0] component of the fft
        #: this is therefore equivalent to normalize_video
        #fft = normalize_fft(fft)

        f1, f2 = asarrays(fft, NFRAMES)
        # stats() was called twice with identical arguments in the original;
        # one call is sufficient (the result is deterministic for fixed input).
        bg, var = stats(f1, f2)
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)

        bgs.append(bg)
        variances.append(var)

        #: norms 5 and 7 are redundant, but we compute them for easier indexing
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            if norm in (7, 11):
                # weighted (subtracted and compensated)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            elif norm == 3:
                # weighted prime
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)

            if out is None:
                # allocate once the per-norm output shape is known
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return out, bgs, variances
コード例 #5
0
ファイル: test_fft.py プロジェクト: inwwin/cddm
    def test_rfft2_numpy(self):
        """rfft2 with the numpy backend reproduces the reference FFT for
        every crop mode, and rejects out-of-range crop sizes."""
        set_rfft2lib("numpy")

        def transform(**kw):
            # rebuild the frame iterator each time and collect the fft
            frames = fromarrays((self.vid,))
            out, = asarrays(rfft2(frames, **kw), 128)
            return out

        # full (uncropped) transform
        self.assertTrue(np.allclose(transform(), self.fft))

        # symmetric crops: positive and negative ki halves match the
        # corresponding slices of the full FFT
        for ki, kj in ((5, 6), (7, 7), (4, 4)):
            out = transform(kimax = ki, kjmax = kj)
            self.assertTrue(np.allclose(out[:, 0:ki + 1], self.fft[:, 0:ki + 1, 0:kj + 1]))
            self.assertTrue(np.allclose(out[:, -ki:], self.fft[:, -ki:, 0:kj + 1]))

        # crop along j only
        self.assertTrue(np.allclose(transform(kimax = None, kjmax = 6), self.fft[:, :, 0:7]))

        # crop along i only
        out = transform(kimax = 6)
        self.assertTrue(np.allclose(out[:, 0:7, :], self.fft[:, 0:7, :]))

        # crop sizes beyond the available wavenumbers must raise
        with self.assertRaises(ValueError):
            transform(kimax = 16)
        with self.assertRaises(ValueError):
            transform(kjmax = 17)
コード例 #6
0
def calculate():
    """Run NRUN cross-correlation simulations and collect normalized data.

    Returns
    -------
    out : ndarray
        Array of shape ``(NRUN, 12) + data.shape`` of normalized results,
        indexed by run and norm flag.  NOTE: slots for norms not in the
        computed set stay uninitialized (np.empty).
    bgs : list
        Per-run background estimates from ``stats``.
    variances : list
        Per-run variance estimates from ``stats``.
    """
    out = None
    bgs = []
    variances = []  # renamed from ``vars`` to avoid shadowing the builtin

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        seed(i)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        t1, t2 = video_simulator.t1, video_simulator.t2

        video = multiply(video_simulator.video, window_video)

        fft = rfft2(video, kimax=KIMAX, kjmax=0)

        f1, f2 = asarrays(fft, NFRAMES)
        # stats() was called twice with identical arguments in the original;
        # one call is sufficient (the result is deterministic for fixed input).
        bg, var = stats(f1, f2)
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)

        bgs.append(bg)
        variances.append(var)

        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            if norm in (7, 11):
                # weighted (subtracted)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            elif norm == 3:
                # weighted prime (baseline)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)

            if out is None:
                # allocate once the per-norm output shape is known
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return out, bgs, variances
コード例 #7
0
def calculate(binning=1):
    """Run NRUN dual-camera simulations and return merged correlation data.

    Parameters
    ----------
    binning : int, optional
        Forwarded to ``iccorr_multi`` and ``log_merge``; enables or
        disables averaging (binning) of the linear-scale data.

    Returns
    -------
    x : ndarray
        Log-merged time axis (from the last run; all runs share it).
    out : ndarray
        Array of shape ``(NRUN, 16) + y.shape`` of normalized results,
        indexed by run and norm flag.  NOTE: slots for norms not in the
        computed set stay uninitialized (np.empty).
    """
    out = None

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        importlib.reload(dual_video_simulator)  #recreates iterator

        # reset seed... because we use seed(0) in dual_video_simulator
        seed(i)

        t1, t2 = dual_video_simulator.t1, dual_video_simulator.t2

        video = multiply(dual_video_simulator.video, window_video)

        #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
        fft = rfft2(video, kimax=51, kjmax=0)

        #: perform cross correlation with the iterative multitau algorithm
        data, bg, var = iccorr_multi(fft,
                                     t1,
                                     t2,
                                     level_size=16,
                                     binning=binning,
                                     period=PERIOD,
                                     auto_background=True)

        #: norms 5 and 7 are redundant, but we compute them for easier indexing
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15):

            fast, slow = normalize_multi(data, bg, var, norm=norm, scale=True)

            # merge with binning (averaging) of linear data enabled/disabled
            x, y = log_merge(fast, slow, binning=binning)

            if out is None:
                # out is None only on the first pass (i == 0), so the
                # original's separate ``out[0, norm]`` branch was redundant;
                # a single assignment path covers both cases.
                out = np.empty(shape=(NRUN, 16) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return x, out
コード例 #8
0
def calculate():
    """Run NRUN random-triggering auto-correlation simulations.

    Returns
    -------
    out : ndarray
        Array of shape ``(NRUN, 12) + data.shape`` of normalized results,
        indexed by run and norm flag.  NOTE: slots for norms not in the
        computed set stay uninitialized (np.empty).
    bgs : list
        Per-run background estimates from ``stats``.
    variances : list
        Per-run variance estimates from ``stats``.
    """
    out = None
    bgs = []
    variances = []  # renamed from ``vars`` to avoid shadowing the builtin

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        # fresh seed plus a module reload gives an independent realization
        seed(i)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        t = video_simulator.t

        video = multiply(video_simulator.video, window_video)

        #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
        fft = rfft2(video, kimax=KIMAX, kjmax=0)

        fft_array, = asarrays(fft, NFRAMES_RANDOM)

        data = acorr(fft_array, t=t, n=int(NFRAMES / DT_RANDOM))
        bg, var = stats(fft_array)

        bgs.append(bg)
        variances.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            if norm in (7, 11):
                # weighted (subtracted)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            elif norm == 3:
                # weighted prime (baseline)
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                # allocate once the per-norm output shape is known
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return out, bgs, variances
コード例 #9
0
def fft_save(frames_iter, path_out, size, count=None, kmax=None):
    """Compute the cropped rfft2 of a video and stream it to disk.

    Parameters
    ----------
    frames_iter : iterable
        Iterator over video frames (as consumed by ``rfft2``).
    path_out : str
        Base path for the output memory map(s), passed to ``asmemmaps``.
    size : int
        Linear frame size in pixels; used to derive the default ``kmax``.
    count : int, optional
        Number of frames; forwarded to ``asmemmaps``.
    kmax : int, optional
        Maximum wavenumber kept in both directions.  Defaults to
        ``size // 32`` (ignore features smaller than 32 pixels; my avi
        file has JPEG artifacts of size ~8 pixels).

    Returns
    -------
    dict
        Metadata with the ``kmax`` actually used.
    """
    # Fixed: the original tested ``if not kmax``, which silently replaced
    # an explicit ``kmax=0`` (a valid crop size for rfft2) with the
    # default.  Only substitute when kmax is truly unset.
    if kmax is None:
        kmax = size // 32

    #: perform rfft2 and crop results, keeping only the first kmax wavenumbers.
    fft = rfft2(frames_iter, kimax=kmax, kjmax=kmax)

    #: stream the transformed frames straight to disk as memory maps
    #: instead of holding the whole array in RAM.
    asmemmaps(path_out, fft, count=count)

    return {
        'kmax': kmax
    }
コード例 #10
0
ファイル: auto_correlate_multi.py プロジェクト: inwwin/cddm
# NOTE(review): this excerpt relies on names presumably imported earlier in
# the file (blackman, multiply, rfft2, iacorr_multi, MultitauViewer, seed,
# and the constants SHAPE/NFRAMES/KIMAX/KJMAX) -- confirm against full file.
importlib.reload(video_simulator)  #recreates iterator

#: create window for multiplication (apodization before the FFT)
window = blackman(SHAPE)

#: we must create a video of windows for multiplication
window_video = ((window, ), ) * NFRAMES

#: perform the actual multiplication
video = multiply(video_simulator.video, window_video)

#: if the intensity of the light source flickers you can normalize each
#: frame to the intensity of the frame
#video = normalize_video(video)

#: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
fft = rfft2(video, kimax=KIMAX, kjmax=KJMAX)

#: you can also normalize each frame with respect to the [0,0] component
#: of the fft; this is therefore equivalent to normalize_video
#fft = normalize_fft(fft)

if __name__ == "__main__":
    import os.path as p

    #: now perform auto correlation calculation with default parameters using iterative algorithm
    data, bg, var = iacorr_multi(fft, count=NFRAMES)

    #: inspect the data interactively
    viewer = MultitauViewer(scale=True)
    viewer.set_data(data, bg, var)
    # restrict the view to wavevectors near k=25, angle 0, 30-degree sector
    viewer.set_mask(k=25, angle=0, sector=30)
コード例 #11
0
ファイル: simple_brownian_fft.py プロジェクト: arkomatej/cddm
#setting this to 2 shows progress bar
conf.set_verbose(2)

# frame shape of the simulated videos
SHAPE = (512, 512)

# --- single-camera (DDM) video ---
vid = np.load("simple_brownian_ddm_video.npy")
nframes = len(vid)

#obtain frames iterator
video = fromarrays((vid, ))
## apply blackman window (reduces FFT edge leakage)
window = blackman(SHAPE)
video = apply_window(video, (window, ))
#perform rfft2 and crop data to the first 64 wavenumbers in each direction
video = rfft2(video, kisize=64, kjsize=64)
#load all frames into numpy array
#video, = asmemmaps("brownian_single_camera_fft", video, nframes)

#compute and create numpy array, then persist the FFT data
video, = asarrays(video, nframes)
np.save("simple_brownian_ddm_fft.npy", video)

# --- dual-camera (cross-DDM) videos ---
v1 = np.load("simple_brownian_cddm_video_0.npy")
v2 = np.load("simple_brownian_cddm_video_1.npy")
nframes = len(v1)

#obtain frames iterator over pairs of frames (one per camera)
video = fromarrays((v1, v2))
## apply blackman window -- processing continues past this excerpt
window = blackman(SHAPE)
コード例 #12
0
ファイル: cross_correlate_live.py プロジェクト: gmnamra/cddm
from cddm import conf
import numpy as np

#video iterator
from simple_brownian_video import get_dual_video
#trigger parameters
from simple_brownian_video import t1, t2, PERIOD, SHAPE

# NOTE(review): show_video, show_fft, play, rfft2, iccorr_multi and plt are
# presumably imported earlier in the file (not visible in this excerpt) --
# confirm against the full file.

#setting this to 2 shows progress bar
conf.set_verbose(2)
#obtain frames iterator
dual_video = get_dual_video()
#apply blackman window
#dual_video = apply_window(dual_video, blackman(SHAPE))
# attach live visualization of the raw frames
dual_video = show_video(dual_video)
# perform rfft2 and crop to the first 64 wavenumbers in each direction
fdual_video = rfft2(dual_video, kisize=64, kjsize=64)
fdual_video = show_fft(fdual_video)

# drive the iterator and refresh the display at the given frame rate
fdual_video = play(fdual_video, fps=100)

# iterative multitau cross-correlation with live display; background is
# NOT auto-subtracted here, so it is returned separately for later use
data, bg = iccorr_multi(fdual_video,
                        t1,
                        t2,
                        period=PERIOD,
                        level=5,
                        chunk_size=256,
                        show=True,
                        auto_background=False,
                        binning=True,
                        return_background=True)
plt.figure()