Example #1
def calculate():
    out = None
    bgs = []
    vars = []
    
    for i in range(NRUN):
        
        print("Run {}/{}".format(i+1,NRUN))
        
        seed(i)
        importlib.reload(video_simulator) #recreates iterator with new seed
        
        t1,t2 = video_simulator.t1,video_simulator.t2
        
        video = multiply(video_simulator.video, window_video)
        
        #: if the intensity of the light source flickers, you can normalize each frame to the intensity of the frame
        #video = normalize_video(video)
        
        #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
        fft = rfft2(video, kimax = KIMAX, kjmax = 0)
        
        #: you can also normalize each frame with respect to the [0,0] component of the fft
        #: this is therefore equivalent to normalize_video
        #fft = normalize_fft(fft)
        
        f1, f2 = asarrays(fft,NFRAMES)
        bg, var = stats(f1,f2)
        data = ccorr(f1,f2, t1 = t1,t2=t2, n = NFRAMES)

    
        #: alternatively, perform the auto correlation calculation iteratively with default parameters
        #data, bg, var = iacorr(fft, t,  auto_background = True, n = NFRAMES)
        #: collect background and variance, then perform normalization and merge data
        bgs.append(bg)
        vars.append(var)
        
        # norms 5 and 7 are redundant, but we calculate them anyway for easier indexing
        for norm in (1,2,3,5,6,7,9,10,11):
            # weighted (subtracted and compensated)
            if norm in (7,11):
                y = normalize(data, bg, var, norm = norm, scale = True, weight = np.moveaxis(w,0,-1))
            #weighted prime
            elif norm in (3,):
                y = normalize(data, bg, var, norm = norm, scale = True, weight = np.moveaxis(wp,0,-1))
            else:
                y = normalize(data, bg, var, norm = norm, scale = True)

            if out is None:
                out = np.empty(shape = (NRUN,12)+ y.shape, dtype = y.dtype)
            out[i,norm] = y
        
    return out, bgs, vars
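
A minimal post-processing sketch for the stacked output above (not part of the original script): it assumes `calculate()` is runnable in this environment and that matplotlib is available, and that the trailing axes of the normalized data are (k_i, k_j, delay) with a k_j axis of length 1 because kjmax = 0; the inspected index `k` and the selected norms are purely illustrative.

import numpy as np
import matplotlib.pyplot as plt

out, bgs, vars = calculate()

#: average the normalized correlation over the NRUN independent runs
mean = np.nanmean(out, axis=0)

k = 16  #: hypothetical k_i index to inspect
for norm in (2, 3, 6):
    plt.semilogx(mean[norm, k, 0, 1:], label="norm = {}".format(norm))
plt.xlabel("delay (frames)")
plt.ylabel("g")
plt.legend()
plt.show()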
Example #2
def calculate():
    out = None
    bgs = []
    vars = []

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        seed(i)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        t1, t2 = video_simulator.t1, video_simulator.t2

        video = multiply(video_simulator.video, window_video)

        fft = rfft2(video, kimax=KIMAX, kjmax=0)

        f1, f2 = asarrays(fft, NFRAMES)
        bg, var = stats(f1, f2)
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)

        bgs.append(bg)
        vars.append(var)

        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            # weighted (subtracted)
            if norm in (7, 11):
                y = normalize(data,
                              bg,
                              var,
                              norm=norm,
                              scale=True,
                              weight=np.moveaxis(w, 0, -1))
            # weighted prime (baseline)
            elif norm in (3, ):
                y = normalize(data,
                              bg,
                              var,
                              norm=norm,
                              scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)

            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return out, bgs, vars
Example #3
def acorr_save(fft_array, path_out, method='diff', mode='diff'):
    # This code is adapted from the examples in the cddm package

    #: now perform auto correlation calculation with default parameters
    data = acorr(fft_array, method=method)
    bg, var = stats(fft_array)

    #: perform normalization and merge data
    data_lin = normalize(data, bg, var, scale=True, mode=mode)

    np.save(path_out / f'auto_correlate_data_lin_{method}_{mode}.npy',
            data_lin)

    # #: change size, to define time resolution in log space
    # x, y = log_average(data_lin, size=16)

    # #: save the normalized data to numpy files
    # np.save(path_out / 'auto_correlate_t.npy', x)
    # np.save(path_out / 'auto_correlate_data.npy', y)

    return (
        {
            'bg': bg,
            'var': var,
            'data_lin_shape': data_lin.shape,
            # 't_shape': x.shape,
            # 'data_shape': y.shape,
        },
        data_lin)  # , x, y)
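
A hedged usage sketch for `acorr_save` (not part of the original code): `fft_array` is assumed to have been produced as in the surrounding examples via `asarrays(rfft2(video, kimax=KIMAX, kjmax=KJMAX), NFRAMES)`, and the output directory name is illustrative.

from pathlib import Path

path_out = Path("results")   #: assumed output directory
path_out.mkdir(exist_ok=True)

meta, data_lin = acorr_save(fft_array, path_out, method="fft", mode="corr")
print(meta["bg"].shape, meta["var"].shape, meta["data_lin_shape"])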
Example #4
 def test_equivalence_diff_3(self):
     norm = 3
     bg, var = stats(self.test_data1)
     data = multitau.acorr_multi(self.test_data1,
                                 level_size=16,
                                 norm=1,
                                 method="corr",
                                 binning=0)
     data = multitau.normalize_multi(data, bg, var, norm=1)
     x_, out0 = multitau.log_merge(*data)
     data = multitau.ccorr_multi(self.test_data1,
                                 self.test_data1,
                                 level_size=16,
                                 norm=norm,
                                 method="diff",
                                 binning=0)
     data = multitau.normalize_multi(data, bg, var, norm=norm)
     x_, out = multitau.log_merge(*data)
     self.assertTrue(np.allclose(out0, out))
     data, bg, var = multitau.iacorr_multi(fromarrays((self.test_data1, )),
                                           count=64,
                                           level_size=16,
                                           norm=1,
                                           method="diff",
                                           binning=0)
     data = multitau.normalize_multi(data, bg, var, norm=1)
     x_, out = multitau.log_merge(*data)
     self.assertTrue(np.allclose(out0, out))
Example #5
def calculate():
    out = None
    bgs = []
    vars = []

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        seed(i)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        video = multiply(video_simulator.video, window_video)
        fft = rfft2(video, kimax=51, kjmax=0)

        fft_array, = asarrays(fft, NFRAMES_RANDOM)

        data = acorr(fft_array)
        bg, var = stats(fft_array)

        bgs.append(bg)
        vars.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return out, bgs, vars
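
As a follow-up sketch (not in the original script), one run's linearly spaced output can be resampled onto a logarithmic delay axis, as the later examples do; the import path assumed for `log_average` (cddm.multitau) and the chosen norm and size are illustrative assumptions.

from cddm.multitau import log_average

out, bgs, vars = calculate()

#: resample the norm = 6 result of the first run onto a log-spaced delay axis
x, y = log_average(out[0, 6], size=16)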
Example #6
    def test_corr_regular_2_mask(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                bg, var = core.stats(test_data1, test_data2)
                data = core.ccorr(test_data1, test_data2, norm=2, method="fft")
                self.out = core.normalize(data,
                                          bg,
                                          var,
                                          norm=2,
                                          mode=mode,
                                          scale=scale,
                                          mask=test_mask)

                data = core.ccorr(test_data1,
                                  test_data2,
                                  norm=2,
                                  method="corr")
                out_other = core.normalize(data,
                                           bg,
                                           var,
                                           norm=2,
                                           mode=mode,
                                           scale=scale,
                                           mask=test_mask)

                self.assertTrue(allclose(self.out, out_other))
Example #7
    def test_corr_regular_2(self):
        for scale in (True, False):
            for mode in ("corr", "diff"):
                for axis in (0, 1, 2):
                    bg, var = core.stats(test_data1, test_data2, axis=axis)
                    data = core.ccorr(test_data1,
                                      test_data2,
                                      norm=2,
                                      method="fft",
                                      axis=axis)
                    self.out = core.normalize(data,
                                              bg,
                                              var,
                                              norm=2,
                                              mode=mode,
                                              scale=scale)

                    data = core.ccorr(test_data1,
                                      test_data2,
                                      norm=2,
                                      method="corr",
                                      axis=axis)
                    out_other = core.normalize(data,
                                               bg,
                                               var,
                                               norm=2,
                                               mode=mode,
                                               scale=scale)

                    self.assertTrue(allclose(self.out, out_other))
Example #8
 def test_auto_equivalence_1(self):
     for method in ("corr","fft","diff"):
         bg,var = core.stats(test_data1, axis = 0)
         data1 = core.acorr(test_data1, n = 8, norm = 1, method = method)
         out1 = core.normalize(data1, bg, var, norm = 1)
         data2,bg,var = core.iacorr(test_data1, n = 8, norm = 1, method = method)
         out2 = core.normalize(data2, bg, var, norm = 1)  
         self.assertTrue(np.allclose(out1, out2))    
Example #9
 def test_auto_equivalence_2(self):
     for method in ("corr",):
         bg,var = core.stats(test_data1, axis = 0)
         data1 = core.ccorr(test_data1,test_data1, n = 8, norm = 2, method = method)
         out1 = core.normalize(data1, bg, var, norm = 2)
         data2,bg,var = core.iacorr(test_data1, n = 8, norm = 2, method = method)
         out2 = core.normalize(data2, bg, var, norm = 2)  
         self.assertTrue(np.allclose(out1, out2))    
Example #10
 def test_cross_equivalence(self):
     for method in ("corr","diff","fft"):
         bg,var = core.stats(test_data1, test_data2, axis = 0)
         data = core.ccorr(test_data1, test_data2,n = 8, norm = 1, method = method)
         out1 = core.normalize(data, bg, var)
         vid = fromarrays((test_data1, test_data2))
         data,bg,var = core.iccorr(vid, count = len(test_data1),chunk_size = 16,n = 8, norm = 1, method = method)
         out2 = core.normalize(data, bg, var)  
         self.assertTrue(np.allclose(out1, out2))              
Example #11
def calculate():
    out = None
    bgs = []
    vars = []

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        seed(i)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        t = video_simulator.t

        video = multiply(video_simulator.video, window_video)

        #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
        fft = rfft2(video, kimax=KIMAX, kjmax=0)

        fft_array, = asarrays(fft, NFRAMES_RANDOM)

        data = acorr(fft_array, t=t, n=int(NFRAMES / DT_RANDOM))
        bg, var = stats(fft_array)

        bgs.append(bg)
        vars.append(var)
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            # weighted (subtracted)
            if norm in (7, 11):
                y = normalize(data,
                              bg,
                              var,
                              norm=norm,
                              scale=True,
                              weight=np.moveaxis(w, 0, -1))
            # weighted prime (baseline)
            elif norm in (3, ):
                y = normalize(data,
                              bg,
                              var,
                              norm=norm,
                              scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)
            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return out, bgs, vars
Example #12
 def test_equivalence_norm_2(self):
     norm = 2
     bg, var = stats(self.test_data1)
     data= multitau.acorr_multi(self.test_data1, level_size = 16, norm = norm)
     data = multitau.normalize_multi(data,bg,var, norm = norm)
     x_, out0 = multitau.log_merge(*data)
     data = multitau.ccorr_multi(self.test_data1,self.test_data1, level_size = 16, norm = norm)
     data = multitau.normalize_multi(data,bg,var, norm = norm)
     x_, out = multitau.log_merge(*data)
     self.assertTrue(np.allclose(out0,out))
     
     data,bg,var = multitau.iacorr_multi(fromarrays((self.test_data1,)),count = 64, level_size = 16,  norm = norm)
     data = multitau.normalize_multi(data,bg,var, norm = norm)
     x_, out = multitau.log_merge(*data)
     self.assertTrue(np.allclose(out0,out))
Example #13
 def test_ccorr_regular_3_mask(self):
     for scale in (True, False):
         for mode in ("corr", "diff"):
             axis = 0
             bg,var = core.stats(test_data1, test_data2, axis = axis)
             data = core.ccorr(test_data1, test_data2, norm = 3, method = "fft", axis = axis)
             self.out = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale, mask = test_mask)
 
             data = core.ccorr(test_data1, test_data2, norm = 3, method = "corr", axis = axis)
             out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale, mask = test_mask)
             
             self.assertTrue(np.allclose(self.out, out_other))
 
             data = core.ccorr(test_data1, test_data2, norm = 3, method = "diff", axis = axis)
             out_other = core.normalize(data, bg, var, norm = 3, mode = mode, scale = scale, mask = test_mask)
             
             self.assertTrue(np.allclose(self.out, out_other))
Example #14
#: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
fft = rfft2(video, kimax = KIMAX, kjmax = KJMAX)

#: you can also normalize each frame with respect to the [0,0] component of the fft
#: this is therefore equivalent to normalize_video
#fft = normalize_fft(fft)

#: load into a numpy array
fft_array, = asarrays(fft, NFRAMES_RANDOM)

if __name__ == "__main__":
    import os.path as p

    #: now perform auto correlation calculation with default parameters 
    data = acorr(fft_array, t = video_simulator.t, n = int(NFRAMES/DT_RANDOM))
    bg, var = stats(fft_array)
    
    for norm in (1,2,3,5,6,7,9,10,11):
    
        #: perform normalization and merge data
        data_lin = normalize(data, bg, var, scale = True, norm = norm)
    
        #: change size, to define time resolution in log space
        x,y = log_average(data_lin, size = 16)
        
        #: save the normalized data to numpy files
        np.save(p.join(DATA_PATH, "corr_random_t.npy"),x*DT_RANDOM)
        np.save(p.join(DATA_PATH, "corr_random_data_norm{}.npy".format(norm)),y)

    
    #: inspect the data
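
    #: the block below is a hypothetical inspection sketch, not part of the
    #: original script; it assumes matplotlib is available and KIMAX >= 16
    import matplotlib.pyplot as plt

    x = np.load(p.join(DATA_PATH, "corr_random_t.npy"))
    y = np.load(p.join(DATA_PATH, "corr_random_data_norm6.npy"))

    k = 16  #: illustrative k_i index (the k_j = 0 row is plotted)
    plt.semilogx(x[1:], y[k, 0, 1:], label="norm = 6")
    plt.xlabel("time")
    plt.ylabel("g")
    plt.legend()
    plt.show()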
Example #15
#: if the intensity of the light source flickers, you can normalize each frame to the intensity of the frame
#video = normalize_video(video)

#: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
fft = rfft2(video, kimax = KIMAX, kjmax = KJMAX)

#: load into numpy arrays
fft1,fft2 = asarrays(fft, NFRAMES_DUAL)

if __name__ == "__main__":
    import os.path as p

    #: now perform cross correlation calculation with default parameters 
    data = ccorr(fft1,fft2, t1 = t1,t2 = t2, n = NFRAMES_DUAL)
    bg, var = stats(fft1,fft2)
    
    for norm in range(8):
    
        #: perform normalization and merge data
        data_lin = normalize(data, bg, var, scale = True, norm = norm)

        if norm == 6:
            np.save(p.join(DATA_PATH, "corr_dual_linear.npy"),data_lin)

        #: change size, to define time resolution in log space
        x,y = log_average(data_lin, size = 16)
        
        #: save the normalized data to numpy files
        np.save(p.join(DATA_PATH, "corr_dual_t.npy"),x)
        np.save(p.join(DATA_PATH, "corr_dual_data_norm{}.npy".format(norm)),y)