def func(args):
    """A function to process each image pair."""

    # this line is REQUIRED for multiprocessing to work
    # always use it in your custom function

    file_a, file_b, counter = args

    #####################
    # Here goes your code
    #####################

    # read images into numpy arrays
    frame_a = tools.imread(os.path.join(path, file_a))
    frame_b = tools.imread(os.path.join(path, file_b))

    frame_a = (frame_a * 1024).astype(np.int32)
    frame_b = (frame_b * 1024).astype(np.int32)

    # process image pair with extended search area piv algorithm.
    u, v, sig2noise = process.extended_search_area_piv( frame_a, frame_b, \
        window_size=64, overlap=32, dt=0.02, search_area_size=128, sig2noise_method='peak2peak')
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.5)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    # get the coordinates of the window centers
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=64,
                                   overlap=32)
    # save to a file
    tools.save(x, y, u, v, mask, 'test2_%03d.txt' % counter)
    tools.display_vector_field('test2_%03d.txt' % counter)
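# Usage sketch (an assumption, not part of the original snippet): OpenPIV
# versions that ship the `process` module also provide tools.Multiprocesser,
# which globs matching image pairs in data_dir and calls
# func((file_a, file_b, counter)) for each pair across several workers.
# data_dir reuses the module-level `path` that func already assumes; the glob
# patterns are placeholders.
if __name__ == '__main__':
    task = tools.Multiprocesser(data_dir=path,
                                pattern_a='exp1_*_a.bmp',
                                pattern_b='exp1_*_b.bmp')
    task.run(func=func, n_cpus=4)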
Example #2
def process_node(i):
    DeltaFrame = 1
    winsize = 50  # pixels
    searchsize = 50  #pixels
    overlap = 25  # pixels
    dt = DeltaFrame * 1. / fps  # sec
    frame_a = tools.imread(fileNameList[i])
    frame_b = tools.imread(fileNameList[i + DeltaFrame])
    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize,
                                   overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=5,
                                      kernel_size=5)
    tools.save(x, y, u2, v2, mask,
               '../muscle10fpsbotleft_results/' + str(i) + '.txt')
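# Usage sketch (an assumption added for clarity): process_node reads the
# module-level globals fps and fileNameList, so a minimal driver could look
# like this. The frame rate, file pattern and output folder are placeholders.
import glob
import os

fps = 10  # recording frame rate (placeholder)
fileNameList = sorted(glob.glob('../muscle10fpsbotleft/*.tif'))
os.makedirs('../muscle10fpsbotleft_results', exist_ok=True)

for i in range(len(fileNameList) - 1):
    process_node(i)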
def openpiv_default_run(im1, im2):
    """ default settings for OpenPIV analysis using
    extended_search_area_piv algorithm for two images
    
    Inputs:
        im1,im2 : str,str = path of two image
    """
    frame_a = tools.imread(im1)
    frame_b = tools.imread(im2)

    u, v, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=32,
        overlap=8,
        dt=1,
        search_area_size=64,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=32,
                                   overlap=8)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=1)
    tools.save(x, y, u, v, mask, list_of_images[0] + '.txt')
    fig, ax = tools.display_vector_field(list_of_images[0] + '.txt',
                                         on_img=True,
                                         image_name=list_of_images[0],
                                         scaling_factor=1,
                                         ax=None)
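# Usage sketch (an assumption added for clarity): openpiv_default_run also
# relies on a module-level list_of_images when saving and plotting the result,
# so that global has to exist before the call. File names are placeholders.
list_of_images = ['exp1_001_a.bmp', 'exp1_001_b.bmp']
openpiv_default_run(list_of_images[0], list_of_images[1])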
Example #4
def two_images(image_1, image_2, search_area_size=64, window_size=32, overlap=16, dt=0.02):
    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))

    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a  = tools.imread( 'image_1.bmp' )
    frame_b  = tools.imread( 'image_2.bmp' )
    frame_a = (frame_a*1024).astype(np.int32)
    frame_b = (frame_b*1024).astype(np.int32)

    if not search_area_size:
        search_area_size = 64
    if not window_size:
        window_size = 32
    if not overlap:
        overlap = 16
    if not dt:
        dt = 0.02

    u, v, sig2noise = process.extended_search_area_piv( frame_a, frame_b, window_size=window_size, 
        overlap=overlap, dt=dt, search_area_size=search_area_size, sig2noise_method='peak2peak' )
    x, y = process.get_coordinates( image_size=frame_a.shape, window_size=window_size, overlap=overlap )
    u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 1.3 )
    u, v, mask = validation.global_val( u, v, (-1000, 2000), (-1000, 1000) )
    u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )

    file_name_text = 'result.txt'
    file_name_png = 'result.png'
    if os.path.isfile(file_name_text):
        os.remove(file_name_text)
    if os.path.isfile(file_name_png):
        os.remove(file_name_png)
    tools.save(x, y, u, v, mask, file_name_text)
    a = np.loadtxt(file_name_text)
    fig = plt.figure()
    invalid = a[:,4].astype('bool')
    fig.canvas.set_window_title('Vector field, '+str(np.count_nonzero(invalid))+' wrong vectors')
    valid = ~invalid
    plt.quiver(a[invalid,0],a[invalid,1],a[invalid,2],a[invalid,3],color='r',scale=100, width=0.0025)
    plt.quiver(a[valid,0],a[valid,1],a[valid,2],a[valid,3],color='b',scale=100, width=0.0025)
    plt.draw()
    plt.savefig(file_name_png, format="png")
 
    with open(file_name_text, "rb") as resultFileText:
        file_reader_text = resultFileText.read()
        text_encode = base64.encodebytes(file_reader_text)  # base64.encodestring was removed in Python 3.9
        base64_string_text = str(text_encode, 'utf-8')
    
    with open(file_name_png, "rb") as resultFilePng:
        file_reader_image = resultFilePng.read()
        image_encode = base64.encodebytes(file_reader_image)  # base64.encodestring was removed in Python 3.9
        base64_string_image = str(image_encode, 'utf-8')
    
    return base64_string_text, base64_string_image
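# Usage sketch (an assumption added for clarity): two_images expects the raw
# image bytes base64-encoded, as they would arrive at a web service endpoint.
# File names are placeholders.
with open('exp1_001_a.bmp', 'rb') as f1, open('exp1_001_b.bmp', 'rb') as f2:
    img1_b64 = base64.b64encode(f1.read())
    img2_b64 = base64.b64encode(f2.read())

result_txt_b64, result_png_b64 = two_images(img1_b64, img2_b64,
                                            search_area_size=64,
                                            window_size=32,
                                            overlap=16,
                                            dt=0.02)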
Example #5
    def _piv_frame(self, img1, img2, show=False, **kwargs):
        """
        calculate velocity using piv method on two frames
        """
        from openpiv.process import extended_search_area_piv, get_coordinates
        # from openpiv.scaling import uniform

        if self._debug:
            print('... [PIV] window size: {}'.format(self._windowSize))
            print('... [PIV] overlap: {}'.format(self._overlap))
            print('... [PIV] search area size: {}'.format(self._searchArea))
            print('... [PIV] threshold: {}'.format(self._piv_threshold))

        u, v, sig2noise = extended_search_area_piv(
            img1,
            img2,
            window_size=self._windowSize,
            overlap=self._overlap,
            dt=self._exposuretime,
            search_area_size=self._searchArea,
            sig2noise_method='peak2peak')
        self._pivx, self._pivy = get_coordinates(image_size=img1.shape,
                                                 window_size=self._windowSize,
                                                 overlap=self._overlap)
        #self._pivy = np.flipud(self._pivy)
        #self._pivx, self._pivy, u, v = uniform(self._pivx, self._pivy, u, v, scaling_factor=self._mpp)

        if show:
            from openpiv.validation import sig2noise_val
            from openpiv.filters import replace_outliers
            u, v, mask = sig2noise_val(u,
                                       v,
                                       sig2noise,
                                       threshold=self._piv_threshold)
            u, v = replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
            # show quiver plot
            plt.figure(figsize=(12, 6))
            plt.imshow(img1)
            plt.quiver(self._pivx, self._pivy, u, v, color='w', pivot='mid')
            plt.savefig(self._fname[:-4] + '_piv.png', dpi=100)

        if self._debug:
            print(
                "... [PIV] mean velocity [um/sec]: ({:4.2f}, {:4.2f})".format(
                    np.mean(u) * self._mpp,
                    np.mean(v) * self._mpp))
            print("... [PIV] mean velocity [pixel/frame]: ({:4.2f}, {:4.2f})".
                  format(
                      np.mean(u) * self._exposuretime,
                      np.mean(v) * self._exposuretime))

        return (u, v, sig2noise)
Example #6
def test_process_extended_search_area():
    """ test of the extended area PIV from Cython """
    frame_a = np.zeros((64,64))
    frame_a = random_noise(frame_a)
    frame_a = img_as_ubyte(frame_a)
    frame_b = np.roll(np.roll(frame_a,3,axis=1),2,axis=0)
    u,v = process.extended_search_area_piv(frame_a.astype(np.int32),
                                           frame_b.astype(np.int32),
window_size=16,search_area_size=32,dt=1,overlap=0)
    # frame_b is frame_a shifted by +3 px along axis 1 and +2 px along axis 0,
    # so the test expects u = 3 and v = -2 (note the sign convention of v)
    assert(np.max(np.abs(u[:-1,:-1]-3)+np.abs(v[:-1,:-1]+2)) <= 0.2)
Example #7
def test_process_extended_search_area():
    """ test of the extended area PIV from Cython """
    frame_a = np.zeros((64, 64))
    frame_a = random_noise(frame_a)
    frame_a = img_as_ubyte(frame_a)
    frame_b = np.roll(np.roll(frame_a, 3, axis=1), 2, axis=0)
    u, v = process.extended_search_area_piv(frame_a.astype(np.int32),
                                            frame_b.astype(np.int32),
                                            window_size=16,
                                            search_area_size=32,
                                            dt=1,
                                            overlap=0)
    # frame_b is frame_a shifted by +3 px along axis 1 and +2 px along axis 0,
    # so the test expects u = 3 and v = -2 (note the sign convention of v)
    assert (np.max(np.abs(u[:-1, :-1] - 3) + np.abs(v[:-1, :-1] + 2)) <= 0.2)
Example #8
def analyzer(frame_a, frame_b, text, plot, num_scene, pathout, scal, zre, xre,
             dt):

    winsize = 16  # pixels
    searchsize = 32  # pixels, search in image b
    overlap = 8  # pixels

    frame_a = cv2.adaptiveThreshold(frame_a, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    frame_b = cv2.adaptiveThreshold(frame_b, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    #frame_a = cv2.adaptiveThreshold(frame_a,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    #frame_b = cv2.adaptiveThreshold(frame_b,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)

    plt.imshow(np.c_[frame_a, frame_b], cmap='gray')
    plt.savefig(pathout + '/filtered' + str(num_scene) + '.png', dpi=800)

    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize,
                                   overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=2)
    x, y, u3, v3 = scaling.uniform(
        x, y, u2, v2, scaling_factor=scal)  # scaling_factor (pixel per meter)

    u3 = np.flip(u3, axis=0)
    v3 = np.flip(v3, axis=0)

    xre = np.linspace(0, xre / 100, len(x[0, :]))
    zre = np.linspace(0, zre / 100, len(x[:, 0]))

    if plot == 1:
        piv_plotting(xre, zre, u3, v3, num_scene, pathout)

    if text == 0:
        tools.save(x, y, u3, v3, mask,
                   pathout + '/piv' + str(num_scene) + '.txt')
Example #9
    def process_frame(self, frame_a, frame_b, s2n_thresh=1.3):
        frame_a = frame_a.astype(np.int32)
        frame_b = frame_b.astype(np.int32)

        u, v, sig2noise = process.extended_search_area_piv(
            frame_a,
            frame_b,
            window_size=self.window_size,
            overlap=self.overlap,
            dt=1,
            search_area_size=self.search_area_size,
            sig2noise_method='peak2peak')
        #u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = s2n_thresh )

        return u, v
def update(winsize, overlap, s2n, s2n_method):
    u, v, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=1.0,
        search_area_size=64,
        sig2noise_method=s2n_method)
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize,
                                   overlap=overlap)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=s2n)
    # u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
    # x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 1.0 )
    # tools.save(x, y, u, v, mask, 'tutorial-part3.txt' )
    fig, ax = plt.subplots()
    ax.imshow(tools.imread('20110919 exp3 x10 stream 488570.TIF'),
              cmap=plt.cm.gray,
              origin='upper')
    ax.quiver(x, np.flipud(y), u / 10., v / 10., scale=10, color='r', lw=3)
    plt.show()
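# Usage sketch (an assumption added for clarity): in a notebook this update()
# callback is typically wired to interactive sliders, for example with
# ipywidgets.interact; the parameter ranges below are placeholders.
from ipywidgets import interact

interact(update,
         winsize=(16, 64, 8),
         overlap=(0, 12, 4),
         s2n=(1.0, 2.0, 0.1),
         s2n_method=['peak2peak', 'peak2mean'])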
# %%
# for whatever reason the shape of frame_a is (3, 284, 256),
# so we first transpose to a channels-last RGB image and then convert to grayscale

# frame_a = img_as_uint(rgb2gray(frame_a))
# frame_b = img_as_uint(rgb2gray(frame_b))
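# A sketch of the conversion described above (an assumption; requires
# scikit-image): if the frames really are channel-first RGB, move the channel
# axis last, convert to grayscale, and rescale to an unsigned-integer image.
from skimage.color import rgb2gray
from skimage import img_as_uint

if frame_a.ndim == 3 and frame_a.shape[0] == 3:
    frame_a = img_as_uint(rgb2gray(np.transpose(frame_a, (1, 2, 0))))
    frame_b = img_as_uint(rgb2gray(np.transpose(frame_b, (1, 2, 0))))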
plt.imshow(np.c_[frame_a, frame_b], cmap=plt.cm.gray)

# %%
# Use Cython version: process.pyx

u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=32,
    overlap=8,
    dt=.1,
    sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=32,
                               overlap=8)

u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v = filters.replace_outliers(u,
                                v,
                                method='localmean',
                                max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
#!/usr/bin/env ipython
import sys

if 'OpenPIV' not in sys.path:
    sys.path.append('/Users/alex/Documents/OpenPIV/alexlib/openpiv-python')

from openpiv import tools, validation, process, filters, scaling, pyprocess
import numpy as np

frame_a = tools.imread('exp1_001_a.bmp')
frame_b = tools.imread('exp1_001_b.bmp')

u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=24,
    overlap=12,
    dt=0.02,
    search_area_size=64,
    sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24,
                               overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=2.5)
u, v = filters.replace_outliers(u,
                                v,
                                method='localmean',
                                max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001.txt')
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)
# frame_a = img_as_ubyte(resize(frame_a,(1024,256)))
# frame_b = img_as_ubyte(resize(frame_b,(1024,256)))

frame_a = frame_a[:512, -200:]
frame_b = frame_b[:512, -200:]

#frame_a = frame_a[:256,:]
#frame_b = frame_b[:256,:]

plt.figure()
plt.imshow(np.c_[frame_a, frame_b], cmap=plt.cm.gray)
plt.show()

u, v = process.extended_search_area_piv(frame_a.astype(np.int32),
                                        frame_b.astype(np.int32),
                                        window_size=24,
                                        overlap=12,
                                        search_area_size=32,
                                        dt=1.)

plt.figure()
plt.quiver(u, v)
plt.axis('equal')
plt.show()

u1, v1 = pyprocess.piv(frame_a,
                       frame_b,
                       window_size=32,
                       search_size=48,
                       overlap=24)

# x, y = process.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=0)
Example #14
#!/usr/bin/env ipython
import sys

if 'OpenPIV' not in sys.path:
    sys.path.append('/Users/alex/Documents/OpenPIV/alexlib/openpiv-python')

from openpiv import tools, validation, process, filters, scaling, pyprocess
import numpy as np

frame_a  = tools.imread( 'exp1_001_a.bmp' )
frame_b  = tools.imread( 'exp1_001_b.bmp' )

u, v, sig2noise = process.extended_search_area_piv( frame_a.astype(np.int32), 
frame_b.astype(np.int32), window_size=24, overlap=12, dt=0.02, search_area_size=64, 
sig2noise_method='peak2peak' )
x, y = process.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=12 )
u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 2.5 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
tools.save(x, y, u, v, mask, 'exp1_001.txt' )
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)



u, v, s2n= pyprocess.piv(frame_a, frame_b, corr_method='fft', window_size=24, overlap=12, 
dt=0.02, sig2noise_method='peak2peak' )
x, y = pyprocess.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=12 )
u, v, mask = validation.sig2noise_val( u, v, s2n, threshold = 2.5 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2.5)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
tools.save(x, y, u, v, mask, 'exp1_002.txt' )
Example #15
File: image.py  Project: davidbhr/arnold
def piv_analysis(contr,
                 relax,
                 outfolder,
                 scale,
                 winsize_um=10,
                 overlap_um=None,
                 searchsize_um=None,
                 drift_correction=True,
                 threshold=1.2,
                 scale_quiver=None):
    """
    Computes deformations between 2 images by crosscorrelation using openpiv (must be installed - see Readme).
    Saves several quiver plots and the maximal found deformation for later analysis.
    
    contr: Path to active/contracted image file to calculate deformations between contracted and relaxed state
    relax: Path to relaxed image file to calculate deformations between contracted and relaxed state
    scale: resolution of image in µm per pixel
    winsize_um: size of the search window to be applied in µm 
    drift_correction: Applies a drift correction before piv analysis
    threshold: filters displacement
    scale_quiver: can be used to scale the arrows in quiver plot (only for visualization)
                Default is None meaning automatically scaling ,  scale is inverse
  
    """
    from openpiv import tools, process, validation, filters, scaling

    winsize = int(winsize_um / scale)  # in pixels

    # if not specified, use defaults
    if not overlap_um:
        overlap = winsize // 2
    else:
        overlap = int(overlap_um / scale)

    if not searchsize_um:
        searchsize = winsize
    else:
        searchsize = int(searchsize_um / scale)

    print(winsize, overlap, searchsize)
    # an odd window size causes problems with the overlap, therefore decrease it by one pixel and warn the user
    if not (winsize % 2) == 0:
        print(
            'An odd window size causes problems with the overlap: winsize changed to '
            + str((winsize - 1) * scale) + ' um')
        winsize -= 1

    # creates folder if it doesn't exist
    if not os.path.exists(outfolder):
        os.makedirs(outfolder)

    #read in images
    relax = tools.imread(relax)
    contr = tools.imread(contr)

    # convert for openpiv
    relax = relax.astype(np.int32)
    contr = contr.astype(np.int32)

    dt = 1  # sec

    u0, v0, sig2noise = process.extended_search_area_piv(
        relax,
        contr,
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = process.get_coordinates(image_size=relax.shape,
                                   window_size=winsize,
                                   overlap=overlap)

    # drift correction
    if drift_correction:
        u0 -= np.nanmean(u0)
        v0 -= np.nanmean(v0)

    # filtered deformations
    u1, v1, mask = validation.sig2noise_val(u0,
                                            v0,
                                            sig2noise,
                                            threshold=threshold)

    # save deformations + coords
    np.save(outfolder + "/u.npy", u1)
    np.save(outfolder + "/v.npy", v1)
    np.save(outfolder + "/x.npy", x)
    np.save(outfolder + "/y.npy", y)

    # show filtered+unfiltered data
    plt.figure(figsize=(6, 3))
    plt.subplot(121)
    plt.quiver(x,
               y[::-1],
               u0,
               v0,
               alpha=1,
               facecolor='orange',
               scale_units='xy',
               scale=scale_quiver)
    plt.title('unfiltered')
    plt.subplot(122)
    plt.title('filtered')
    plt.quiver(x,
               y[::-1],
               u1,
               v1,
               alpha=1,
               facecolor='b',
               scale_units='xy',
               scale=scale_quiver)
    plt.savefig(outfolder + '/filtered+unfilterd.png',
                bbox_inches='tight',
                pad_inches=0)
    plt.close()
    # save overlay
    # different color channels
    plt.figure()
    overlay = np.zeros((contr.shape[0], contr.shape[1], 3), 'uint8')
    overlay[..., 1] = contr  #1
    overlay[..., 2] = relax  #2
    plt.imshow(overlay, origin='upper'
               )  #extent=[0, contr.shape[0], contr.shape[1], 0])  # turn Y
    plt.quiver(x,
               y,
               u1,
               v1,
               facecolor='orange',
               alpha=1,
               scale_units='xy',
               scale=scale_quiver)
    plt.axis('off')
    plt.savefig(outfolder + '/overlay-filtered.png',
                bbox_inches='tight',
                pad_inches=0)
    # difference image
    plt.figure()
    plt.imshow(np.abs(contr - relax), cmap='viridis', origin='upper')
    plt.quiver(x,
               y,
               u1,
               v1,
               facecolor='orange',
               alpha=1,
               scale_units='xy',
               scale=scale_quiver)
    plt.axis('off')
    plt.savefig(outfolder + '/difference-img-filtered.png',
                bbox_inches='tight',
                pad_inches=0)
    plt.close()

    # save deformation and image data
    deformation_unfiltered = np.sqrt(u0**2 + v0**2)
    deformation_filtered = np.sqrt(u1**2 + v1**2)
    maxdefo_um_unfiltered = np.nanmax(deformation_unfiltered) * scale
    maxdefo_um_filtered = np.nanmax(deformation_filtered) * scale
    np.savetxt(outfolder + '/maxdefo_um_unfiltered.txt',
               [maxdefo_um_unfiltered])
    np.savetxt(outfolder + '/maxdefo_um_filtered.txt', [maxdefo_um_filtered])
    print('Maximal deformation unfiltered: ', maxdefo_um_unfiltered,
          'Maximal deformation filtered: ', maxdefo_um_filtered)
    plt.imsave(outfolder + '/raw_contr.png', contr, cmap='gray')
    plt.imsave(outfolder + '/raw_relax.png', relax, cmap='gray')
    plt.close()

    return
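# Usage sketch (an assumption added for clarity; file names, output folder and
# resolution are placeholders): compare a contracted and a relaxed recording
# acquired at 0.65 µm per pixel with a 10 µm interrogation window.
piv_analysis('cell_contracted.tif',
             'cell_relaxed.tif',
             outfolder='piv_output',
             scale=0.65,
             winsize_um=10,
             drift_correction=True,
             threshold=1.2)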
Example #16
frame_b =  im_b[380:1980,0:1390]
plt.imshow(np.c_[frame_a,frame_b],cmap='gray')

# Process the original cropped image and see the OpenPIV result:

# typical parameters:
window_size = 32 #pixels
overlap = 16 # pixels
search_area_size = 64 # pixels 
frame_rate = 40 # fps

# process once with the original cropped images (the analysis is repeated later with the masked images, for comparison)
u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=window_size,
    overlap=overlap,
    dt=1./frame_rate,
    search_area_size=search_area_size,
    sig2noise_method='peak2peak')
x, y = process.get_coordinates( image_size = frame_a.shape, window_size = window_size, overlap = overlap )
u, v, mask = validation.global_val( u, v, (-300.,300.),(-300.,300.))
u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 1.1 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter = 3, kernel_size = 3)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
# save to a file
tools.save(x, y, u, v, mask, 'test.txt', fmt='%9.6f', delimiter='\t')
tools.display_vector_field('test.txt', scale=50, width=0.002)



# masking using a suboptimal choice of methods or parameters:
Example #17
frame_b  = tools.imread( '../test1/exp1_001_b.bmp' )

# %%
fig,ax = plt.subplots(1,2)
ax[0].imshow(frame_a,cmap=plt.cm.gray)
ax[1].imshow(frame_b,cmap=plt.cm.gray)


# %%
winsize = 24 # pixels
searchsize = 64  # pixels, search in image B
overlap = 12 # pixels
dt = 0.02 # sec


u0, v0, sig2noise = process.extended_search_area_piv( frame_a.astype(np.int32), frame_b.astype(np.int32), window_size=winsize, overlap=overlap, dt=dt, search_area_size=searchsize, sig2noise_method='peak2peak' )

# %%
x, y = process.get_coordinates( image_size=frame_a.shape, window_size=winsize, overlap=overlap )

# %%
u1, v1, mask = validation.sig2noise_val( u0, v0, sig2noise, threshold = 1.3 )

# %%
u2, v2 = filters.replace_outliers( u1, v1, method='localmean', max_iter=10, kernel_size=2)

# %%
x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor = 96.52 )

# %%
tools.save(x, y, u3, v3, mask, 'exp1_001.txt' )
Example #18
    def run(self):
        self.is_to_stop = False
        self.piv.piv_results_list = []

        for i in range(0, len(self.frames_list) - 1, abs(self.jump)):
            if self.piv.xy_zoom[0][0] and self.piv.xy_zoom[1][0]:
                """try:"""
                frame_a = self.frames_list[i][2][
                    int(self.piv.xy_zoom[1][0]):int(self.piv.xy_zoom[1][1]),
                    int(self.piv.xy_zoom[0][0]):int(self.piv.xy_zoom[0][1])]

                frame_b = self.frames_list[i + 1][2][
                    int(self.piv.xy_zoom[1][0]):int(self.piv.xy_zoom[1][1]),
                    int(self.piv.xy_zoom[0][0]):int(self.piv.xy_zoom[0][1])]
                """
                except ValueError:
                    frame_a = self.frames_list[i][2][int(self.frames_list[i][2].shape[1] - self.piv.xy_zoom[1][0]): int(
                        self.frames_list[i][2].shape[1] - self.piv.xy_zoom[1][1]),
                              int(self.piv.xy_zoom[0][0]): int(
                                  self.piv.xy_zoom[0][1])]

                    frame_b = self.frames_list[i + 1][2][
                              int(self.frames_list[i + 1][2].shape[1] - self.piv.xy_zoom[1][0]): int(
                                  self.frames_list[i + 1][2].shape[1] - self.piv.xy_zoom[1][1]),
                              int(self.piv.xy_zoom[0][0]): int(
                                  self.piv.xy_zoom[0][1])]
                """
            else:
                frame_a = self.frames_list[i][2]

                frame_b = self.frames_list[i + 1][2]

            try:
                self.u, self.v, self.sig2noise = extended_search_area_piv(
                    frame_a.astype(np.int32),
                    frame_b.astype(np.int32),
                    window_size=self.winsize,
                    overlap=self.overlap,
                    dt=self.dt,
                    search_area_size=self.searchsize,
                    sig2noise_method='peak2peak')
                self.x, self.y = get_coordinates(image_size=frame_a.shape,
                                                 window_size=self.winsize,
                                                 overlap=self.overlap)
                self.u, self.v, self.mask = sig2noise_val(self.u,
                                                          self.v,
                                                          self.sig2noise,
                                                          threshold=1.0)
                self.u, self.v = replace_outliers(self.u,
                                                  self.v,
                                                  method='localmean',
                                                  max_iter=10,
                                                  kernel_size=2)
                # self.x, self.y, self.u, self.v = uniform(self.x, self.y, self.u, self.v, scaling_factor=5)

                if self.piv.xy_zoom[0][0] and self.piv.xy_zoom[1][0]:
                    self.x += int(self.piv.xy_zoom[0][0])
                    self.y += int(self.piv.xy_zoom[1][0])

                self.y = np.flip(self.y, 0)
                self.x = np.flip(self.x, 0)

            except ValueError:
                if self.searchsize < self.winsize:
                    print("0")
                    self.error_message.setText(
                        "the search size cannot be smaller than the window size"
                    )
                elif self.overlap > self.winsize:
                    print("1")
                    self.error_message.setText(
                        "Overlap has to be smaller than the window_size")
                else:
                    print("2")
                    self.error_message.setText("ROI window to small")
                self.error_message.exec()
                break
            self.piv.piv_results_list.append(
                [self.x, self.y, self.u, self.v, self.mask])
            self.piv.piv_images_list[i][3] = self.piv.piv_results_list[
                i // abs(self.jump)]
            # data = np.zeros((len(np.ravel(self.u)), 5))
            # res_list = [np.ravel(self.x), np.ravel(self.y), np.ravel(self.u), np.ravel(self.v), np.ravel(self.mask)]
            # for j in range(0, 4):
            #    for k in range(len(res_list[j])):
            #        data[k][j] = res_list[j][k]
            # self.piv.piv_images_list[i][4] = data
            # save_openpiv_vec(self.piv.piv_images_list[i][1].split('.')[0], data, 'pix', 'dt',
            #                  len(data[0]), len(data))
            self.piv.show_plot(i, self.piv.bit, True)

            if i == len(self.frames_list) - 2 and self.jump == 1:
                self.piv.piv_results_list.append(
                    [self.x, self.y, self.u, self.v, self.mask])
                self.piv.piv_images_list[i + 1][3] = self.piv.piv_results_list[i + 1]
                # self.piv.piv_images_list[i + 1][4] = data
                self.piv.show_plot(i + 1, self.piv.bit, True)

            if self.is_to_stop:
                break

        self.piv_finished_signal.emit()
Example #19
# frame_a = img_as_ubyte(resize(frame_a,(1024,256)))
# frame_b = img_as_ubyte(resize(frame_b,(1024,256)))

frame_a = frame_a[:512,-200:]
frame_b = frame_b[:512,-200:]

#frame_a = frame_a[:256,:]
#frame_b = frame_b[:256,:]

plt.figure()
plt.imshow(np.c_[frame_a,frame_b],cmap=plt.cm.gray)
plt.show()


u, v = process.extended_search_area_piv(frame_a.astype(np.int32), 
frame_b.astype(np.int32), window_size=24, overlap=12, search_area_size=32, dt = 1.)

plt.figure()
plt.quiver(u,v)
plt.axis('equal')
plt.show()

u1, v1 = pyprocess.piv(frame_a, frame_b, window_size=32, search_size=48, overlap=24)

# x, y = process.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=0)

plt.figure()
plt.quiver(u1,v1,v1)
plt.axis('equal')
plt.show()
Example #20
def piv():
    """
    Simplest PIV run on the pair of images using default settings

    piv(im1,im2) will create a tmp.vec file with the vector field in pix/dt (dt=1) from
    two images, im1, im2, provided as full-path filenames (TIF is preferable, or whatever imageio can read)

    """

    import imageio
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.animation as animation

    from openpiv import process
    import pkg_resources as pkg

    # if im1 is None and im2 is None:
    im1 = pkg.resource_filename('openpiv', 'examples/test5/frame_a.tif')
    im2 = pkg.resource_filename('openpiv', 'examples/test5/frame_b.tif')

    frame_a = imageio.imread(im1)
    frame_b = imageio.imread(im2)

    frame_a[0:32, 512 - 32:] = 255

    images = []
    images.extend([frame_a, frame_b])

    fig, ax = plt.subplots()

    # ims is a list of lists, each row is a list of artists to draw in the
    # current frame; here we are just animating one artist, the image, in
    # each frame
    ims = []
    for i in range(2):
        im = ax.imshow(images[i % 2], animated=True, cmap=plt.cm.gray)
        ims.append([im])

    _ = animation.ArtistAnimation(fig,
                                  ims,
                                  interval=200,
                                  blit=False,
                                  repeat_delay=0)
    plt.show()

    # import os

    u, v = process.extended_search_area_piv(frame_a.astype(np.int32),
                                            frame_b.astype(np.int32),
                                            window_size=32,
                                            overlap=16)
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=32,
                                   overlap=16)

    fig, ax = plt.subplots(1, 2, figsize=(11, 8))
    ax[0].imshow(frame_a, cmap=plt.get_cmap('gray'), alpha=0.8, origin='upper')
    ax[0].quiver(x, y, u, v, scale=50, color='r')

    ax[1].quiver(x, y, u, v, scale=50, color='b')
    ax[1].set_aspect(1.1)
    ax[1].invert_yaxis()
    plt.show()

    return x, y, u, v
Example #21
from openpiv import tools, process, scaling, validation, filters
import numpy as np

import os

# we can run it from any folder
path = os.path.dirname(os.path.abspath(__file__))

frame_a = tools.imread(os.path.join(path, '../test1/exp1_001_a.bmp'))
frame_b = tools.imread(os.path.join(path, '../test1/exp1_001_b.bmp'))

frame_a = (frame_a * 1024).astype(np.int32)
frame_b = (frame_b * 1024).astype(np.int32)

u, v, sig2noise = process.extended_search_area_piv( frame_a, frame_b, \
    window_size=32, overlap=16, dt=0.02, search_area_size=64, sig2noise_method='peak2peak' )
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=32,
                               overlap=16)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v, mask = validation.global_val(u, v, (-1000, 2000), (-1000, 1000))
u, v = filters.replace_outliers(u,
                                v,
                                method='localmean',
                                max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'test1.vec')
tools.display_vector_field('test1.vec', scale=75, width=0.0035)