Example 1
#: assumed imports, not shown in this snippet (see Example 3 for the originals)
import scipy.ndimage as nd
import matplotlib.pyplot as plt
from cddm.sim import simple_brownian_video, create_random_times1
from cddm.video import crop, multiply


def move_pixels(frames, ni=10):
    #: zoom the first camera frame by 4% and crop back to a 512 x 512 window;
    #: the second camera frame is left unchanged
    f1, f2 = frames
    return nd.zoom(f1, 1.04)[10:512 + 10, 10:512 + 10], f2


#: random times according to Eq. 7 from the Soft Matter paper
t1, t2 = create_random_times1(NFRAMES_DUAL, n=N_PARAMETER)

#: this creates a Brownian motion frame iterator.
#: each element of the iterator is a tuple of two numpy arrays (one frame per camera)
video = simple_brownian_video(t1,
                              t2,
                              shape=SIMSHAPE,
                              background=BACKGROUND,
                              num_particles=NUM_PARTICLES,
                              dt=DT_DUAL,
                              sigma=SIGMA,
                              delta=DELTA,
                              intensity=INTENSITY,
                              dtype="uint16")

#video = (move_pixels(frames) for frames in video)

#: crop video to selected region of interest
video = crop(video, roi=((0, SHAPE[0]), (0, SHAPE[1])))

#: apply dust particles
if APPLY_DUST:
    dust1 = plt.imread(DUST1_PATH)[0:SHAPE[0], 0:SHAPE[1], 0]  #: float normalized to (0, 1)
    dust2 = plt.imread(DUST2_PATH)[0:SHAPE[0], 0:SHAPE[1], 0]
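The listing is cut off here. Judging from the one-component and dual-camera scripts in Examples 3 and 4 below, a plausible continuation (an assumption, not shown in this snippet) broadcasts the two dust images over all frames and multiplies them into the dual-camera video:

    #: assumed continuation, mirroring Examples 3 and 4 below
    dust = ((dust1, dust2), ) * NFRAMES_DUAL
    video = multiply(video, dust)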
Example 2
#: assumed imports as in Example 1 (scipy.ndimage as nd, cddm.sim, cddm.video, matplotlib)
def move_pixels(frames, ni=10):
    #: zoom the first camera frame by 4% and crop back to a 512 x 512 window
    f1, f2 = frames
    return nd.zoom(f1, 1.04)[10:512 + 10, 10:512 + 10], f2


#: random times according to Eq. 7 from the Soft Matter paper
t1, t2 = create_random_times1(NFRAMES_DUAL, n=N_PARAMETER)

#: this creates a Brownian motion frame iterator.
#: each element of the iterator is a tuple of two numpy arrays (one frame per camera)
video1 = simple_brownian_video(t1,
                               t2,
                               shape=SIMSHAPE,
                               background=BACKGROUND,
                               num_particles=NUM_PARTICLES1,
                               dt=DT_DUAL,
                               sigma=SIGMA1,
                               delta=DELTA1,
                               intensity=INTENSITY1,
                               dtype="uint16")

video2 = simple_brownian_video(t1,
                               t2,
                               shape=SIMSHAPE,
                               background=0,
                               num_particles=NUM_PARTICLES2,
                               dt=DT_DUAL,
                               sigma=SIGMA2,
                               delta=DELTA2,
                               intensity=INTENSITY2,
                               dtype="uint16")
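The snippet ends before the two component videos are combined. As a minimal sketch of one possible continuation (plain Python, not a documented cddm helper), the matching camera frames of the two components can be summed element-wise:

#: assumed continuation: sum matching camera frames of the two components
video = (tuple(f1 + f2 for f1, f2 in zip(frames1, frames2))
         for frames1, frames2 in zip(video1, video2))

Note that summing uint16 frames can wrap around on overflow, so in practice the frames would be cast to a wider dtype before adding.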
Example 3
"""
from cddm.sim import simple_brownian_video, seed, adc
from cddm.viewer import VideoViewer
from cddm.video import load, crop, multiply
from examples.paper.one_component.conf import NFRAMES_FULL, SIMSHAPE, BACKGROUND, DELTA, DT_FULL, \
    INTENSITY, PERIOD, SIGMA, SHAPE, DUST1_PATH, BIT_DEPTH, VMAX, NOISE_MODEL, SATURATION, READOUT_NOISE, APPLY_DUST
import matplotlib.pyplot as plt

import numpy as np

#: this creates a Brownian motion frame iterator.
#: each element of the iterator is a tuple holding a single numpy array (frame)
video = simple_brownian_video(range(NFRAMES_FULL),
                              shape=SIMSHAPE,
                              background=BACKGROUND,
                              dt=DT_FULL,
                              sigma=SIGMA,
                              delta=DELTA,
                              intensity=INTENSITY,
                              dtype="uint16")

#: crop video to selected region of interest
video = crop(video, roi=((0, SHAPE[0]), (0, SHAPE[1])))

#: apply dust particles
if APPLY_DUST:
    dust = plt.imread(DUST1_PATH)[0:SHAPE[0], 0:SHAPE[1], 0]  #: float normalized to (0, 1)
    dust = ((dust, ), ) * NFRAMES_FULL
    video = multiply(video, dust)

video = (tuple((adc(f,
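The listing stops mid-way through the ADC (analog-to-digital conversion) step, so the full cddm.sim.adc call is not shown. As an illustration only, here is a simplified NumPy stand-in (its name, parameters, and behaviour are assumptions, not the cddm API) showing how such a per-frame conversion fits the generator pattern used above:

def quantize(frame, bit_depth=8):
    """Simplified stand-in for an ADC step: clip to the sensor range and round.

    This is NOT cddm.sim.adc; it only illustrates the per-frame generator pattern.
    """
    vmax = 2 ** bit_depth - 1
    return np.clip(np.round(frame), 0, vmax).astype("uint16")

#: apply the stand-in conversion frame by frame, keeping the tuple-of-frames structure
video = (tuple(quantize(f) for f in frames) for frames in video)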
Example 4
#: configuration constants (uppercase names) come from the example settings
from examples.conf import NFRAMES, N_PARAMETER, SIMSHAPE, BACKGROUND, DELTA, \
    INTENSITY, SIGMA, SHAPE, DUST1_PATH, DUST2_PATH

#: assumed imports, not shown in this snippet (see Example 3 for the originals)
from cddm.sim import simple_brownian_video, create_random_times1, seed
from cddm.video import crop, multiply
import matplotlib.pyplot as plt

#: set seed for the random number generator, so that each run is the same
seed(0)

#: random times according to Eq. 7 from the Soft Matter paper
t1, t2 = create_random_times1(NFRAMES, n=N_PARAMETER)

#: this creates a Brownian motion frame iterator.
#: each element of the iterator is a tuple of two numpy arrays (one frame per camera)
video = simple_brownian_video(t1,
                              t2,
                              shape=SIMSHAPE,
                              background=BACKGROUND,
                              sigma=SIGMA,
                              delta=DELTA,
                              intensity=INTENSITY)

#: crop video to selected region of interest
video = crop(video, roi=((0, SHAPE[0]), (0, SHAPE[1])))

#: apply dust particles
dust1 = plt.imread(DUST1_PATH)[..., 0]  #float normalized to (0,1)
dust2 = plt.imread(DUST2_PATH)[..., 0]
dust = ((dust1, dust2), ) * NFRAMES

video = multiply(video, dust)

if __name__ == "__main__":
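The body of the __main__ guard is cut off. As a minimal, hedged sketch of what could follow (for illustration, not the original script), one can pull the first dual-camera frame pair from the iterator and display it with matplotlib:

    #: assumed body, for illustration only: preview the first dual-camera frame pair
    frame1, frame2 = next(iter(video))
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.imshow(frame1, cmap="gray")
    ax1.set_title("camera 1")
    ax2.imshow(frame2, cmap="gray")
    ax2.set_title("camera 2")
    plt.show()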
Example 5
Visualization takes place during the iteration over the iterator constructed
with the play function. Note that you can use the video for further processing
(correlation analysis).

"""
from conf import SHAPE, NFRAMES, BACKGROUND

from cddm.video import show_video, show_fft, play, show_diff, multiply
from cddm.sim import simple_brownian_video
from cddm.conf import set_showlib

import matplotlib.pyplot as plt

# test dual-camera video (regularly spaced)
video = simple_brownian_video(range(NFRAMES),
                              range(NFRAMES),
                              shape=SHAPE,
                              background=BACKGROUND)

#: apply dust particles
dust1 = plt.imread('dust1.png')[..., 0]  #float normalized to (0,1)
dust2 = plt.imread('dust2.png')[..., 0]
dust = ((dust1, dust2), ) * NFRAMES
video = multiply(video, dust)

video = show_video(video)
video = show_diff(video)
video = show_fft(video, mode="real")

#: set fps to your required FPS. The video display is updated only if visualization
#: is fast enough not to interfere with the acquisition.
#: here video is again a valid video iterator; no visualization has taken place yet
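The snippet is cut off before the playback itself. Based on the comment above and the imports at the top of the example, a plausible ending (the fps value and the choice of plotting backend are assumptions) is:

set_showlib("matplotlib")  #: assumed backend choice

video = play(video, fps=100)  #: fps=100 is a placeholder value

#: iterating over the returned iterator performs the visualization
for frames in video:
    pass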
Example 6
 def test_dual_video(self):
     #: both random-time generators are exercised; the second call overwrites the first
     t1, t2 = create_random_times1(32, 2)
     t1, t2 = create_random_times2(32, 2)
     video = simple_brownian_video(t1, t2)
     #: drain the iterator to check that frame generation raises no errors
     for frames in video:
         pass
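A hedged extension of this test (hypothetical, not part of the original suite) could also assert that a dual-camera video yields a pair of equally shaped frames per time step:

 def test_dual_video_structure(self):
     #: hypothetical extra check, not in the original test suite
     t1, t2 = create_random_times1(32, 2)
     video = simple_brownian_video(t1, t2, shape=(64, 64))
     for frames in video:
         self.assertEqual(len(frames), 2)
         self.assertEqual(frames[0].shape, (64, 64))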
Example 7
 def test_video(self):
     #: single-camera video over 10 frames, with sigma left unset (None)
     video = simple_brownian_video(range(10), sigma=None)
     #: drain the iterator to check that frame generation raises no errors
     for frames in video:
         pass
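Similarly, a hypothetical companion test (not in the original) could check that a single-camera video yields one frame per time step:

 def test_video_structure(self):
     #: hypothetical extra check, not in the original test suite
     video = simple_brownian_video(range(10), shape=(64, 64))
     count = 0
     for frames in video:
         self.assertEqual(len(frames), 1)
         self.assertEqual(frames[0].shape, (64, 64))
         count += 1
     self.assertEqual(count, 10)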