# Example #1
# --- per-frame intensity statistics and frame-difference 'energy' over the ROI ---
print('ROI: x={0:d}...{1:d}  y={2:d}...{3:d}'.format(ROI_x1,
                                                     ROI_x2,
                                                     ROI_y1,
                                                     ROI_y2))
# clamp the requested end frame to the actual file length
if (frm_end > tiflen) or (frm_end < 1):
    frm_end = tiflen

print('selected frames: start={0:d}  end={1:d}'.format(frm_start,
                                                       frm_end))
Nframes = frm_end - frm_start
imgItot = np.zeros(Nframes)    # total intensity of each ROI frame
E_dimg = np.zeros(Nframes-1)   # 'energy' of each subsequent-frame difference
previmg = None  # previous ROI frame; set on the first loop iteration
# loop over selected images in file
# tqdm just makes a progress bar (useful for large files!)
for i in tqdm(range(Nframes)):
    imgfull = tif.pages[i + frm_start].asarray()
    # get ROI only
    img = imgfull[ROI_y1:ROI_y2,ROI_x1:ROI_x2]
    # The total intensity is simply the sum over all
    # pixels in an image frame
    imgItot[i] = np.sum(img)
    # Here we calculate the difference between two
    # subsequent frames (frame pair #0 = frame#0 and frame#1)
    # and then take the 'energy' (sum over squares)
    # This helps in detecting any glitches in the video (skipped
    # frames etc.)
    if i>0:
        dimg = (img - previmg)
        E_dimg[i-1] = np.sum(dimg**2)
    # BUGFIX: previmg must be refreshed on EVERY iteration. In the
    # original it was only assigned inside the `if i>0` branch, so the
    # very first frame pair (i=1) was differenced against the scalar
    # initializer 0.0 instead of against frame #0, corrupting E_dimg[0].
    previmg = img
print('frame shape: {0:d} x {1:d}'.format(*img.shape))
# enable full image processing
if ROI_size < 0:
    ROI_x2 = img.shape[1]
    ROI_y2 = img.shape[0]

# clamp the preview window to the file length
frm_prevend = frm_start + frm_Npreview
if frm_prevend > tiflen:
    frm_prevend = tiflen
frm_Npreview = frm_prevend - frm_start

vidshape = (frm_Npreview, img.shape[0], img.shape[1])
vid = np.zeros(vidshape)

print('loading preview frames into memory')
for i in tqdm(range(frm_Npreview)):
    img = tif.pages[frm_start + i].asarray()
    # copy all pixels divided by ROIcontrast (low intensity)
    vid[i, :, :] = img[:, :] / ROIcontrast
    # only copy ROI zone at full intensity
    vid[i, ROI_y1:ROI_y2, ROI_x1:ROI_x2] = img[ROI_y1:ROI_y2, ROI_x1:ROI_x2]

tif.close()

#%% display video

vmx = vid.max() / vid_overdrive  # avoid autoscale of colormap
vf_redraw_init = False
vf_redraw_img = None

# Example #3
# CALCULATE VIDEO (IMAGE) STRUCTURE FUNCTION
# video was saved using: np.savez_compressed(videof, img=ims)
ims = np.load(videof)['img']

ISE_Nbuf = 50
ISE_Npx = ims.shape[1]
Nt = ims.shape[0]

# feed every frame to all three DDM engine implementations so that
# their results can be compared against the reference engine
#TODO: this could use some multiprocessing!
ISE1 = ImageStructureEngine(ISE_Npx, ISE_Nbuf)
ISE2 = ImageStructureEngine2(ISE_Npx, ISE_Nbuf)
ISE3 = ImageStructureEngine3(ISE_Npx, ISE_Nbuf)

for it in tqdm(range(Nt)):
    frame = ims[it]
    for engine in (ISE1, ISE2, ISE3):
        engine.push(frame)

ISF1 = ImageStructureFunction.fromImageStructureEngine(ISE1)
ISF2 = ImageStructureFunction.fromImageStructureEngine(ISE2)
ISF3 = ImageStructureFunction.fromImageStructureEngine(ISE3)

# agreement checks: boolean closeness plus squared-deviation diagnostics
good2 = np.allclose(ISF1.ISF, ISF2.ISF)
good3 = np.allclose(ISF1.ISF, ISF3.ISF)
devsq2 = np.sum((ISF2.ISF - ISF1.ISF)**2)
devsq3 = np.sum((ISF3.ISF - ISF1.ISF)**2)
sq = np.sum(ISF1.ISF**2)

print('Engine type#2 gives same result as Reference Engine: ', good2)
# Example #4
#
# SIMULATION (2D)
#
#set initial particle coordinates
x0 = random_coordinates(sim.Np, sim.bl_x)
y0 = random_coordinates(sim.Np, sim.bl_y)
#create array of coordinates of the particles at different timesteps
x1 = brownian_softbox(x0, sim.Nt, sim.dt, sim.D, sim.bl_x)
y1 = brownian_softbox(y0, sim.Nt, sim.dt, sim.D, sim.bl_y)

#
# make the synthetic image stack (video)
#
ims = []
for it in tqdm(range(sim.Nt)):
    img = imgsynth2(x1[:, it],
                    y1[:, it],
                    sim.img_w,
                    -sim.img_border,
                    -sim.img_border,
                    sim.bl_x + sim.img_border,
                    sim.bl_y + sim.img_border,
                    sim.img_Npx,
                    sim.img_Npx,
                    subpix=2)
    if sim.img_I_offset is not None:
        img += sim.img_I_offset
    if not (sim.img_I_noise <= 0.0):
        imgnoise = PRNG.normal(loc=0.0, scale=sim.img_I_noise, size=img.shape)
        img += imgnoise
    # BUGFIX: the synthesized frame was computed but never stored, so
    # `ims` stayed empty; append it to actually accumulate the video
    # (matches the parallel implementation in generate_datafile).
    ims.append(img)
# Example #5
def generate_datafile(fname):
    """Simulate 2D Brownian motion, synthesize a video, and save it.

    Runs a Brownian-dynamics simulation of ``Np`` particles in a square
    soft box, renders each time step into a synthetic image via
    ``imgsynth2``, and writes the image stack to a compressed ``.npz``
    file under the key ``'img'``.

    Parameters
    ----------
    fname : str or path-like
        Output file name passed to ``np.savez_compressed``.
    """
    # STEP 1: Brownian simulation and Video synthesis
    # ==============================
    # SIMULATION/ANALYSIS PARAMETERS
    # ==============================
    # NOTE(review): unused analysis-stage constants from the original
    # (ISE_Nbuf, ISE_Npx, ISF_fpn, img_l, um_p_pix, s_p_frame) were dead
    # code in this function and have been removed.

    # SIMULATION parameters
    # D  [µm2 s-1]  Fickian diffusion coefficient of the particles
    # Np []         number of particles
    # bl [µm]       length of simulation box sides (square box)
    # Nt []         number of time steps => number of frames
    # T  [s]        total time
    D = 0.1
    Np = 200

    bl = 200.
    bl_x = bl  # simulation box side length in x direction [µm]
    bl_y = bl  # simulation box side length in y direction [µm]

    Nt = 300
    T = 400.

    # IMAGE SYNTHESIS parameters
    # img_border [µm]  width of border around simulation box (may be negative!)
    # img_w      [µm]  width parameter of 2D Gaussian to simulate optical transfer function
    # img_Npx    []    image size in pixels (square: img_Npx x img_Npx)
    img_border = 16.
    img_w = 2.
    img_Npx = 256

    # frame period [s], derived from the simulation settings
    dt = T / Nt

    # SIMULATION (2D)

    #set initial particle coordinates
    x0 = random_coordinates(Np, bl_x)
    y0 = random_coordinates(Np, bl_y)
    #create array of coordinates of the particles at different timesteps
    x1 = brownian_softbox(x0, Nt, dt, D, bl_x)
    y1 = brownian_softbox(y0, Nt, dt, D, bl_y)

    #make the synthetic image stack
    ims = []
    for it in tqdm(range(Nt)):
        img = imgsynth2(x1[:, it],
                        y1[:, it],
                        img_w,
                        -img_border,
                        -img_border,
                        bl_x + img_border,
                        bl_y + img_border,
                        img_Npx,
                        img_Npx,
                        subpix=2)
        ims.append(img)

    #save video
    np.savez_compressed(fname, img=ims)
#
# GET SIMULATION/ANALYSIS PARAMETERS
#
sim = sim_params()
ImageStructureEngine = ImageStructureEngineSelector(sim.ISE_type)

# CALCULATE VIDEO (IMAGE) STRUCTURE FUNCTION

# LOAD VIDEO
# video was saved using: np.savez_compressed(videof, img=ims)
ims = np.load(sim.vidfpn)['img']

# feed every selected frame through the ImageStructureEngine
ISE1 = ImageStructureEngine(sim.ISE_Npx, sim.ISE_Nbuf)
for frame_idx in tqdm(range(sim.Nframes)):
    ISE1.push(ims[frame_idx])
# NOTE(review): bare attribute access with no visible effect — looks like
# a leftover notebook inspection; kept in case ISFcount is a property
# with side effects — TODO confirm and remove if inert
ISE1.ISFcount

# write result file
ISE1.save(sim.ISE_outfpn)

#
# quick plot of radially average image structure function
#
IA = ImageStructureFunction.fromImageStructureEngine(ISE1)
ntau = len(IA.tauf)
IAqtau = np.zeros((ntau, len(IA.u)))
for tau_idx in range(ntau):
    IAqtau[tau_idx, :] = IA.radavg(tau_idx)

plt.figure("radially averaged (video) image structure function")