Example #1
0
 def test_multiply(self):
     """Multiplying a single-camera video by a two-window tuple must raise;
     a matching single-window tuple must reproduce the reference frames."""
     # Window count mismatch (two windows for one camera) -> ValueError.
     video = fromarrays((vid, ))
     with self.assertRaises(ValueError):
         list(multiply(video, ((window, window), ) * 128))
     # Matching single-window tuple: compare against the precomputed result.
     video = fromarrays((vid, ))
     results = multiply(video, ((window, ), ) * 128)
     for computed, expected in zip(results, vid_multiply):
         self.assertTrue(np.allclose(computed[0], expected))
def calculate():
    """Run ``NRUN`` seeded auto-correlation simulations.

    For each run the simulated video is windowed, Fourier transformed,
    auto-correlated and normalized with several norm flags.

    Returns
    -------
    tuple
        ``(out, bgs, vars)`` where ``out`` has shape
        ``(NRUN, 12) + y.shape`` indexed by run and norm flag, and
        ``bgs``/``vars`` collect the per-run background and variance
        estimates from ``stats``.
    """
    results = None
    backgrounds = []
    variances = []

    for run in range(NRUN):
        print("Run {}/{}".format(run + 1, NRUN))

        # Reseed and reload so the simulator iterator restarts deterministically.
        seed(run)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        windowed = multiply(video_simulator.video, window_video)
        fft = rfft2(windowed, kimax=51, kjmax=0)
        fft_array, = asarrays(fft, NFRAMES_RANDOM)

        data = acorr(fft_array)
        bg, var = stats(fft_array)
        backgrounds.append(bg)
        variances.append(var)

        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            y = normalize(data, bg, var, norm=norm, scale=True)
            # Allocate the output lazily, once the result shape is known.
            if results is None:
                results = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            results[run, norm] = y

    return results, backgrounds, variances
Example #3
0
    def test_multiple(self):
        """A chain of subtract/multiply/add/subtract/normalize operations
        must reproduce the precomputed reference frames."""
        video = fromarrays((vid, ))
        video = subtract(video, ((bg, ), ) * 128, dtype=FDTYPE)
        video = multiply(video, ((window, ), ) * 128, inplace=True)
        video = add(video, ((bg, ), ) * 128, inplace=True)
        video = subtract(video, ((bg, ), ) * 128, inplace=True)
        processed = normalize_video(video, inplace=True)

        # Frame-by-frame comparison against the expected video.
        for computed, expected in zip(processed, vid_multiple):
            self.assertTrue(np.allclose(computed[0], expected))
Example #4
0
def calculate():
    """Run ``NRUN`` seeded cross-correlation simulations and normalize.

    Each run windows the simulated dual-time video, Fourier transforms it,
    cross-correlates the two streams and normalizes with several norm flags.

    Returns
    -------
    tuple
        ``(out, bgs, vars)`` where ``out`` has shape
        ``(NRUN, 12) + y.shape`` indexed by run and norm flag, and
        ``bgs``/``vars`` collect the per-run background and variance
        estimates from ``stats``.
    """
    out = None
    bgs = []
    vars = []

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        seed(i)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        t1, t2 = video_simulator.t1, video_simulator.t2

        video = multiply(video_simulator.video, window_video)

        #: if the intensity of the light source flickers you can normalize
        #: each frame to the intensity of the frame
        #video = normalize_video(video)

        #: perform rfft2 and crop results, to take only first kimax and
        #: first kjmax wavenumbers.
        fft = rfft2(video, kimax=KIMAX, kjmax=0)

        #: you can also normalize each frame with respect to the [0,0]
        #: component of the fft; this is therefore equivalent to
        #: normalize_video
        #fft = normalize_fft(fft)

        f1, f2 = asarrays(fft, NFRAMES)
        # BUGFIX: stats(f1, f2) was previously computed twice in a row;
        # a single call is sufficient and halves that cost.
        bg, var = stats(f1, f2)
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)

        #: now perform cross correlation calculation with default parameters
        #data, bg, var = iacorr(fft, t, auto_background = True, n = NFRAMES)
        #perform normalization and merge data
        bgs.append(bg)
        vars.append(var)

        #5 and 7 are redundant, but we are calculating them for easier indexing
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            # weighted (subtracted and compensated)
            if norm in (7, 11):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(w, 0, -1))
            # weighted prime
            elif norm in (3, ):
                y = normalize(data, bg, var, norm=norm, scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)

            # Allocate the output lazily, once the result shape is known.
            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return out, bgs, vars
Example #5
0
def calculate():
    """Run ``NRUN`` seeded cross-correlation simulations and normalize.

    Returns
    -------
    tuple
        ``(out, bgs, vars)`` where ``out`` has shape
        ``(NRUN, 12) + y.shape`` indexed by run and norm flag, and
        ``bgs``/``vars`` collect the per-run background and variance
        estimates from ``stats``.
    """
    out = None
    bgs = []
    vars = []

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        seed(i)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        t1, t2 = video_simulator.t1, video_simulator.t2

        video = multiply(video_simulator.video, window_video)

        fft = rfft2(video, kimax=KIMAX, kjmax=0)

        f1, f2 = asarrays(fft, NFRAMES)
        # BUGFIX: stats(f1, f2) was previously called twice in a row;
        # one call is sufficient.
        bg, var = stats(f1, f2)
        data = ccorr(f1, f2, t1=t1, t2=t2, n=NFRAMES)

        bgs.append(bg)
        vars.append(var)

        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            # weighted (subtracted)
            if norm in (7, 11):
                y = normalize(data,
                              bg,
                              var,
                              norm=norm,
                              scale=True,
                              weight=np.moveaxis(w, 0, -1))
            # weighted prime (baseline)
            elif norm in (3, ):
                y = normalize(data,
                              bg,
                              var,
                              norm=norm,
                              scale=True,
                              weight=np.moveaxis(wp, 0, -1))
            else:
                y = normalize(data, bg, var, norm=norm, scale=True)

            # Allocate the output lazily, once the result shape is known.
            if out is None:
                out = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            out[i, norm] = y

    return out, bgs, vars
Example #6
0
def calculate(binning=1):
    """Run ``NRUN`` dual-camera simulations with ``iccorr_multi``.

    Parameters
    ----------
    binning : int, optional
        Forwarded to ``iccorr_multi`` and ``log_merge``; enables/disables
        binning (averaging) of the linear-scale data when merging.

    Returns
    -------
    tuple
        ``(x, out)`` where ``x`` is the delay-time axis produced by the
        final ``log_merge`` call and ``out`` has shape
        ``(NRUN, 16) + y.shape`` indexed by run and norm flag.
        NOTE(review): with ``NRUN == 0`` this would raise ``NameError``
        on ``x`` — presumably NRUN is always positive; confirm.
    """
    out = None

    for i in range(NRUN):

        print("Run {}/{}".format(i + 1, NRUN))

        importlib.reload(dual_video_simulator)  #recreates iterator

        #reset seed... because we use seed(0) in dual_video_simulator
        seed(i)

        t1, t2 = dual_video_simulator.t1, dual_video_simulator.t2

        video = multiply(dual_video_simulator.video, window_video)

        #: if the intensity of the light source flickers you can normalize
        #: each frame to the intensity of the frame
        #video = normalize_video(video)

        #: perform rfft2 and crop results, to take only first kimax and
        #: first kjmax wavenumbers.
        fft = rfft2(video, kimax=51, kjmax=0)

        #: you can also normalize each frame with respect to the [0,0]
        #: component of the fft; this is therefore equivalent to
        #: normalize_video
        #fft = normalize_fft(fft)

        #: now perform cross correlation calculation with default parameters
        #: and show live
        data, bg, var = iccorr_multi(fft,
                                     t1,
                                     t2,
                                     level_size=16,
                                     binning=binning,
                                     period=PERIOD,
                                     auto_background=True)
        #perform normalization and merge data

        #5 and 7 are redundant, but we are calculating them for easier indexing
        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15):

            fast, slow = normalize_multi(data, bg, var, norm=norm, scale=True)

            #we merge with binning (averaging) of linear data enabled/disabled
            x, y = log_merge(fast, slow, binning=binning)

            if out is None:
                out = np.empty(shape=(NRUN, 16) + y.shape, dtype=y.dtype)
            # FIX: the original used ``out[0, norm]`` on the allocating branch
            # and ``out[i, norm]`` otherwise; allocation only happens when
            # i == 0, so the two were equivalent — a single uniform
            # assignment is clearer and less error-prone.
            out[i, norm] = y

    return x, out
def calculate():
    """Run ``NRUN`` seeded auto-correlation simulations on random-time video.

    Returns
    -------
    tuple
        ``(out, bgs, vars)`` where ``out`` has shape
        ``(NRUN, 12) + y.shape`` indexed by run and norm flag, and
        ``bgs``/``vars`` collect the per-run background and variance
        estimates from ``stats``.
    """
    results = None
    backgrounds = []
    variances = []

    for run in range(NRUN):

        print("Run {}/{}".format(run + 1, NRUN))

        # Reseed and reload so the simulator iterator restarts deterministically.
        seed(run)
        importlib.reload(video_simulator)  #recreates iterator with new seed

        t = video_simulator.t

        windowed = multiply(video_simulator.video, window_video)

        #: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
        fft = rfft2(windowed, kimax=KIMAX, kjmax=0)

        fft_array, = asarrays(fft, NFRAMES_RANDOM)

        data = acorr(fft_array, t=t, n=int(NFRAMES / DT_RANDOM))
        bg, var = stats(fft_array)

        backgrounds.append(bg)
        variances.append(var)

        for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
            # Choose the weighting array for the weighted norms:
            # 7/11 -> weighted (subtracted), 3 -> weighted prime (baseline).
            if norm in (7, 11):
                extra = {"weight": np.moveaxis(w, 0, -1)}
            elif norm == 3:
                extra = {"weight": np.moveaxis(wp, 0, -1)}
            else:
                extra = {}
            y = normalize(data, bg, var, norm=norm, scale=True, **extra)

            # Allocate the output lazily, once the result shape is known.
            if results is None:
                results = np.empty(shape=(NRUN, 12) + y.shape, dtype=y.dtype)
            results[run, norm] = y

    return results, backgrounds, variances
Example #8
0
# Combine the two simulated camera streams into a single dual-frame video.
# NOTE(review): ``add``/``crop``/``multiply``/``adc`` and the SHAPE/*_PATH
# constants are presumably imported earlier in this file (cddm package and a
# conf module) — confirm against the full script.
video = add(video1, video2)

#video = (move_pixels(frames) for frames in video)

#: crop video to selected region of interest
video = crop(video, roi=((0, SHAPE[0]), (0, SHAPE[1])))

# apply dust particles
if APPLY_DUST:
    # Take the red channel of each dust image, cropped to the video shape.
    dust1 = plt.imread(DUST1_PATH)[0:SHAPE[0], 0:SHAPE[1],
                                   0]  #float normalized to (0,1)
    dust2 = plt.imread(DUST2_PATH)[0:SHAPE[0], 0:SHAPE[1],
                                   0]  #float normalized to (0,1)

    # Repeat the same (dust1, dust2) pair for every frame of the dual video.
    dust = ((dust1, dust2), ) * NFRAMES_DUAL
    video = multiply(video, dust, dtype="uint16")

# Both cameras share the same noise model.
noise_model = (NOISE_MODEL, NOISE_MODEL)

# Digitize each frame with adc(); the keyword names suggest it applies the
# noise model, clips at saturation and quantizes to bit_depth — confirm
# against the adc implementation.
video = (tuple((adc(f,
                    noise_model=noise_model[i],
                    saturation=SATURATION,
                    readout_noise=READOUT_NOISE,
                    bit_depth=BIT_DEPTH) for i, f in enumerate(frames)))
         for frames in video)

#video = load(video, NFRAMES_DUAL)
if __name__ == "__main__":

    #: no need to load video, but this way we load video into memory, and we
Example #9
0
import numpy as np

from examples.conf import NFRAMES, SHAPE, KIMAX, KJMAX, DATA_PATH
#: see video_simulator for details, loads sample video
import examples.video_simulator as video_simulator
import importlib
importlib.reload(video_simulator)  #recreates iterator

#: create window for multiplication...
# NOTE(review): ``blackman``, ``multiply`` and ``rfft2`` are presumably
# imported from the cddm package earlier in the file — confirm.
window = blackman(SHAPE)

#: we must create a video of windows for multiplication
# One single-frame window tuple per video frame.
window_video = ((window, ), ) * NFRAMES

#:perform the actual multiplication
video = multiply(video_simulator.video, window_video)

#: if the intesity of light source flickers you can normalize each frame to the intensity of the frame
#video = normalize_video(video)

#: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
fft = rfft2(video, kimax=KIMAX, kjmax=KJMAX)

#: you can also normalize each frame with respect to the [0,0] component of the fft
#: this it therefore equivalent to  normalize_video
#fft = normalize_fft(fft)

if __name__ == "__main__":
    import os.path as p

    #: now perform auto correlation calculation with default parameters using iterative algorithm
Example #10
0
File: full_video.py  Project: inwwin/cddm
                              background=BACKGROUND,
                              dt=DT_FULL,
                              sigma=SIGMA,
                              delta=DELTA,
                              intensity=INTENSITY,
                              dtype="uint16")

#: crop video to selected region of interest
video = crop(video, roi=((0, SHAPE[0]), (0, SHAPE[1])))

#: apply dust particles
if APPLY_DUST:
    # Red channel of the dust image, cropped to the video shape.
    dust = plt.imread(DUST1_PATH)[0:SHAPE[0], 0:SHAPE[1],
                                  0]  #float normalized to (0,1)
    # Repeat the same dust frame for every frame of the video.
    dust = ((dust, ), ) * NFRAMES_FULL
    video = multiply(video, dust)

# Digitize each frame with adc(); the keyword names suggest it applies the
# noise model, clips at saturation and quantizes to bit_depth — confirm
# against the adc implementation.
video = (tuple((adc(f,
                    noise_model=NOISE_MODEL,
                    saturation=SATURATION,
                    readout_noise=READOUT_NOISE,
                    bit_depth=BIT_DEPTH) for f in frames)) for frames in video)

if __name__ == "__main__":
    #: no need to load video, but this way we load video into memory, and we
    #: can scroll back and forth with the viewer. Uncomment the line below.
    #video = load(video, NFRAMES) # loads and displays progress bar

    #: VideoViewer either expects a multi_frame iterator, or a numpy array
    viewer = VideoViewer(video,
                         count=NFRAMES_FULL,
Example #11
0
# Irregular acquisition times of the two cameras from the simulator module.
t1, t2 = dual_video_simulator.t1, dual_video_simulator.t2 

#: create window for multiplication...
window = blackman(SHAPE)

#: we must create a video of windows for multiplication
# One (window, window) pair per dual-camera frame.
window_video = ((window,window),)*NFRAMES

video = dual_video_simulator.video
#video = load(video,count = NFRAMES)

#video = show_diff(video, dt = (5,7,8,9,10), t1= t1, t2 = t2)
video = show_video(video)

#:perform the actual multiplication
video = multiply(video, window_video)

#: if the intesity of light source flickers you can normalize each frame to the intensity of the frame
#video = normalize_video(video)

#: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
fft = rfft2(video, kimax = KIMAX, kjmax = KJMAX)

#: you can also normalize each frame with respect to the [0,0] component of the fft
#: this it therefore equivalent to  normalize_video
#fft = normalize_fft(fft)

# NOTE(review): presumably drives the fft iterator from a background thread
# so the live viewers above stay responsive — confirm against play_threaded.
fft = play_threaded(fft)

if __name__ == "__main__":
    import os.path as p
Example #12
0
#: see video_simulator for details, loads sample video
import examples.paper.simple_video.dual_video as dual_video

# Reload so the module-level video iterator is recreated fresh.
importlib.reload(dual_video) #recreates iterator

# Irregular acquisition times of the two cameras.
t1, t2 = dual_video.t1, dual_video.t2 

#: create window for multiplication...
# NOTE(review): ``blackman``, ``multiply``, ``rfft2`` and ``asarrays`` are
# presumably imported from the cddm package earlier in the file — confirm.
window = blackman(SHAPE)

#: we must create a video of windows for multiplication
# One (window, window) pair per dual-camera frame.
window_video = ((window,window),)*NFRAMES_DUAL

#:perform the actual multiplication
if APPLY_WINDOW:
    video = multiply(dual_video.video, window_video)
else:
    video = dual_video.video

#: if the intesity of light source flickers you can normalize each frame to the intensity of the frame
#video = normalize_video(video)

#: perform rfft2 and crop results, to take only first kimax and first kjmax wavenumbers.
fft = rfft2(video, kimax = KIMAX, kjmax = KJMAX)

#load in numpy array
fft1,fft2 = asarrays(fft, NFRAMES_DUAL)

if __name__ == "__main__":
    import os.path as p