Example 1
 def test_equivalence_diff_3(self):
     norm = 3
     bg, var = stats(self.test_data1)
     data = multitau.acorr_multi(self.test_data1,
                                 level_size=16,
                                 norm=1,
                                 method="corr",
                                 binning=0)
     data = multitau.normalize_multi(data, bg, var, norm=1)
     x_, out0 = multitau.log_merge(*data)
     data = multitau.ccorr_multi(self.test_data1,
                                 self.test_data1,
                                 level_size=16,
                                 norm=norm,
                                 method="diff",
                                 binning=0)
     data = multitau.normalize_multi(data, bg, var, norm=norm)
     x_, out = multitau.log_merge(*data)
     self.assertTrue(np.allclose(out0, out))
     data, bg, var = multitau.iacorr_multi(fromarrays((self.test_data1, )),
                                           count=64,
                                           level_size=16,
                                           norm=1,
                                           method="diff",
                                           binning=0)
     data = multitau.normalize_multi(data, bg, var, norm=1)
     x_, out = multitau.log_merge(*data)
     self.assertTrue(np.allclose(out0, out))
Example 2
 def test_equivalence_norm_2(self):
     norm = 2
     bg, var = stats(self.test_data1)
     data = multitau.acorr_multi(self.test_data1, level_size=16, norm=norm)
     data = multitau.normalize_multi(data, bg, var, norm=norm)
     x_, out0 = multitau.log_merge(*data)
     data = multitau.ccorr_multi(self.test_data1,
                                 self.test_data1,
                                 level_size=16,
                                 norm=norm)
     data = multitau.normalize_multi(data, bg, var, norm=norm)
     x_, out = multitau.log_merge(*data)
     self.assertTrue(np.allclose(out0, out))

     data, bg, var = multitau.iacorr_multi(fromarrays((self.test_data1,)),
                                           count=64,
                                           level_size=16,
                                           norm=norm)
     data = multitau.normalize_multi(data, bg, var, norm=norm)
     x_, out = multitau.log_merge(*data)
     self.assertTrue(np.allclose(out0, out))
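
For reference, a minimal standalone sketch of the pattern both tests exercise; the `video_fft` input and the `cddm.core` import path for `stats` are assumptions, not taken from the snippets above:

import numpy as np
from cddm.core import stats  # assumed location of the stats helper
from cddm import multitau

# video_fft: hypothetical (..., N) array of FFT'd video frames
bg, var = stats(video_fft)
data = multitau.acorr_multi(video_fft, level_size=16, norm=2)
data = multitau.normalize_multi(data, bg, var, norm=2)
x, out = multitau.log_merge(*data)
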
Example 3
 def _get_avg_data(self):
     data = normalize_multi(self.data,
                            self.background,
                            self.variance,
                            norm=self.norm,
                            scale=self.scale,
                            mask=self.mask)
     t, data = log_merge(*data)
     avg_data = np.nanmean(data, axis=-2)
     return t, avg_data
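
The same three-step reduction works outside a viewer class; a minimal sketch, assuming `data`, `bg` and `var` come from an earlier multitau calculation and `mask` is a hypothetical boolean mask over the k-plane:

import numpy as np
from cddm.multitau import normalize_multi, log_merge

norm_data = normalize_multi(data, bg, var, norm=2, scale=True, mask=mask)
t, merged = log_merge(*norm_data)
avg = np.nanmean(merged, axis=-2)  # average over the selected k values
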
Example 4
def calculate(binning=1):
    out = None

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        importlib.reload(dual_video_simulator)  # recreates the video iterator

        # reset the seed, because seed(0) is called in dual_video_simulator
        seed(i)

        t1, t2 = dual_video_simulator.t1, dual_video_simulator.t2

        video = multiply(dual_video_simulator.video, window_video)

        #: if the intensity of the light source flickers, you can normalize each
        #: frame to the mean intensity of the frame
        #video = normalize_video(video)

        #: perform rfft2 and crop the result, taking only the first kimax and kjmax wavenumbers
        fft = rfft2(video, kimax=51, kjmax=0)

        #: you can also normalize each frame with respect to the [0,0] component of the fft;
        #: this is therefore equivalent to normalize_video
        #fft = normalize_fft(fft)

        #: now perform the cross-correlation calculation with default parameters
        data, bg, var = iccorr_multi(fft,
                                     t1,
                                     t2,
                                     level_size=16,
                                     binning=binning,
                                     period=PERIOD,
                                     auto_background=True)
        #: perform normalization and merge the data

        # norms 5 and 7 are redundant, but we calculate them for easier indexing
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15):

            fast, slow = normalize_multi(data, bg, var, norm=norm, scale=True)

            #we merge with binning (averaging) of linear data enabled/disabled
            x, y = log_merge(fast, slow, binning=binning)

            if out is None:
                out = np.empty(shape=(NRUN, 16) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return x, out
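
A minimal usage sketch for calculate(); the averaging step is illustrative and assumes the module-level names (NRUN, PERIOD, window_video, dual_video_simulator) are defined as in the surrounding example:

x, out = calculate(binning=1)
# out has shape (NRUN, 16) + y.shape; e.g. average the norm = 6 curves over runs
mean_curve = np.nanmean(out[:, 6], axis=0)
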
Example 5
"""Plots the raw multilevel correlation data saved by the auto_correlate_multi.py
example. You must first run:

$ auto_correlate_multi.py
"""
from cddm.multitau import log_merge
import matplotlib.pyplot as plt
import os.path as p
from examples.conf import DATA_PATH

#: load the normalized data from the numpy files
import numpy as np
lin_data = np.load(p.join(DATA_PATH, "auto_correlate_multi_raw_fast.npy"))
multi_level = np.load(p.join(DATA_PATH, "auto_correlate_multi_raw_slow.npy"))

(i, j) = (4, 12)

x, y = log_merge(lin_data, multi_level)
plt.semilogx(x[1:], y[i, j, 1:], "k-", label="merged")

x = np.arange(lin_data.shape[-1])
plt.semilogx(x[1:], lin_data[i, j, 1:], "o", fillstyle="none", label="linear (level 0)")

for n, data in enumerate(multi_level):
    x = x * 2
    plt.semilogx(x[1:], data[i, j, 1:], "o", fillstyle="none", label="multi (level {})".format(n + 1))


plt.title("Multilevel data @ k = ({},{})".format(i,j))
plt.xlabel("t")
plt.ylabel("G / Var")
plt.legend()
plt.show()
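
The loop above doubles the time axis once per level; a small illustrative helper (hypothetical, not part of cddm) that builds the level-n time axis directly:

def level_times(n_points, level):
    # level 0 is the linear data, sampled at t = 0, 1, 2, ...;
    # each higher multitau level doubles the sampling interval
    return np.arange(n_points) * 2**level
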
Example 6
#: you can also normalize each frame with respect to the [0,0] component of the fft;
#: this is therefore equivalent to normalize_video
#fft = normalize_fft(fft)

if __name__ == "__main__":
    import os.path as p
    # assumed imports: these names are taken to come from the cddm package and
    # the examples' conf module, as in the other snippets on this page; `fft`
    # itself is computed earlier in the example (rfft2 of the simulated video)
    import numpy as np
    from cddm.multitau import iacorr_multi, normalize_multi, log_merge
    from cddm.viewer import MultitauViewer
    from examples.conf import DATA_PATH, NFRAMES

    #: now perform auto correlation calculation with default parameters using iterative algorithm
    data, bg, var = iacorr_multi(fft, count=NFRAMES)

    #: inspect the data
    viewer = MultitauViewer(scale=True)
    viewer.set_data(data, bg, var)
    viewer.set_mask(k=25, angle=0, sector=30)
    viewer.plot()
    viewer.show()

    #: perform normalization and merge the data
    fast, slow = normalize_multi(data, bg, var, scale=True)

    #: save the normalized raw data to numpy files
    np.save(p.join(DATA_PATH, "auto_correlate_multi_raw_fast.npy"), fast)
    np.save(p.join(DATA_PATH, "auto_correlate_multi_raw_slow.npy"), slow)

    x, y = log_merge(fast, slow)

    #: save the normalized merged data to numpy files
    np.save(p.join(DATA_PATH, "auto_correlate_multi_t.npy"), x)
    np.save(p.join(DATA_PATH, "auto_correlate_multi_data.npy"), y)
Example 7

import numpy as np
import os.path as p
import matplotlib.pyplot as plt
from cddm.multitau import log_merge
#: assumed import path for the denoise/decreasing helpers
from cddm.avg import denoise, decreasing
from examples.conf import DATA_PATH

#: norm = 5 data normalized with scale = True
lin_5 = np.load(p.join(DATA_PATH, "cross_correlate_multi_raw_fast_norm_5.npy"))
multi_5 = np.load(p.join(DATA_PATH, "cross_correlate_multi_raw_slow_norm_5.npy"))

#: norm = 6 data normalized with scale = True
lin_6 = np.load(p.join(DATA_PATH, "cross_correlate_multi_raw_fast_norm_6.npy"))
multi_6 = np.load(p.join(DATA_PATH, "cross_correlate_multi_raw_slow_norm_6.npy"))

#: take (i,j) k value
(i, j) = (12, 2)

f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)

x, y = log_merge(lin_6, multi_6)

#: denoised data, used as an estimator for the weight
yd = denoise(decreasing(np.clip(denoise(y), 0, 1)))

x, y = log_merge(lin_5, multi_5)
ax2.semilogx(x, y[i, j], label="norm 5")

x, y = log_merge(lin_6, multi_6)
ax2.semilogx(x, y[i, j], label="norm 6")

ax1.semilogx(x, yd[i, j], label="g1")

#: now calculate weights and do weighted sum.

#: x values for the interpolator
Example 8

import numpy as np
import os.path as p
import matplotlib.pyplot as plt
from cddm.multitau import log_merge
#: assumed import path for the denoise/decreasing helpers
from cddm.avg import denoise, decreasing
from examples.conf import DATA_PATH

#: norm = 2 data normalized with scale = True
lin_2 = np.load(p.join(DATA_PATH, "cross_correlate_multi_raw_fast_norm_2.npy"))
multi_2 = np.load(
    p.join(DATA_PATH, "cross_correlate_multi_raw_slow_norm_2.npy"))

#: norm = 3 data normalized with scale = True
lin_3 = np.load(p.join(DATA_PATH, "cross_correlate_multi_raw_fast_norm_3.npy"))
multi_3 = np.load(
    p.join(DATA_PATH, "cross_correlate_multi_raw_slow_norm_3.npy"))

#: take (i,j) k value
(i, j) = (12, 2)

f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)

x, y = log_merge(lin_3, multi_3)

#: denoised data, used as an estimator for the weight
yd = denoise(decreasing(np.clip(denoise(y), 0, 1)))

x, y = log_merge(lin_2, multi_2)
ax2.semilogx(x, y[i, j], label="norm 2")

x, y = log_merge(lin_3, multi_3)
ax2.semilogx(x, y[i, j], label="norm 3")

ax1.semilogx(x, yd[i, j], label="g1")

#: now calculate weights and do weighted sum.

#: x values for the interpolator
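
Both snippets stop where the weighting begins; a purely illustrative continuation, using a hypothetical weight derived from the denoised g1 estimate (the actual weighting scheme of the cddm examples may differ):

#: hypothetical weight in [0, 1], one value per delay time
w = np.clip(yd[i, j], 0., 1.)**2
x, y2 = log_merge(lin_2, multi_2)
x, y3 = log_merge(lin_3, multi_3)
y_weighted = w * y2[i, j] + (1. - w) * y3[i, j]
ax2.semilogx(x, y_weighted, label="weighted (illustrative)")
ax1.legend()
ax2.legend()
plt.show()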