def openpiv_default_run(im1, im2):
    """ default settings for OpenPIV analysis using
    extended_search_area_piv algorithm for two images
    
    Inputs:
        im1,im2 : str,str = path of two image
    """
    frame_a = tools.imread(im1)
    frame_b = tools.imread(im2)

    u, v, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=32,
        overlap=8,
        dt=1,
        search_area_size=64,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=32,
                                   overlap=8)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=1)
    tools.save(x, y, u, v, mask, im1 + '.txt')
    fig, ax = tools.display_vector_field(im1 + '.txt',
                                         on_img=True,
                                         image_name=im1,
                                         scaling_factor=1,
                                         ax=None)
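A minimal driver for the function above, assuming the usual imports of these snippets; the image file names are placeholders:

# assumed imports for the snippet above
import numpy as np
from openpiv import tools, process, validation, filters, scaling

# hypothetical frame pair on disk
openpiv_default_run('exp1_001_a.bmp', 'exp1_001_b.bmp')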
Example #2
def PIV(I0, I1, winsize, overlap, dt):
    """ Normal PIV """
    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        I0.astype(np.int32),
        I1.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=winsize,
        sig2noise_method='peak2peak',
    )
    # get x, y
    x, y = pyprocess.get_coordinates(image_size=I0.shape,
                                     search_area_size=winsize,
                                     overlap=overlap,
                                     window_size=winsize)
    u1, v1, mask_s2n = validation.sig2noise_val(
        u0,
        v0,
        sig2noise,
        threshold=1.05,
    )
    # replace_outliers
    u2, v2 = filters.replace_outliers(
        u1,
        v1,
        method='localmean',
        max_iter=3,
        kernel_size=3,
    )
    # median filter smoothing
    u3 = medfilt2d(u2, 3)
    v3 = medfilt2d(v2, 3)
    return x, y, u3, v3
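A minimal driver for PIV(), assuming medfilt2d comes from scipy.signal and the frames are read with OpenPIV's tools; the file names are placeholders:

# assumed imports for the snippet above
import numpy as np
from scipy.signal import medfilt2d
from openpiv import tools, pyprocess, validation, filters

# hypothetical frame pair on disk
I0 = tools.imread('frame_0000.tif')
I1 = tools.imread('frame_0001.tif')
x, y, u, v = PIV(I0, I1, winsize=32, overlap=16, dt=0.02)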
Example #3
    def sig2noise(self):
        '''Filter vectors based on the signal to noise threshold.

        See:
            openpiv.validation.sig2noise_val()
        '''
        result_fnames = []
        for i, f in enumerate(self.p['fnames']):
            data = np.loadtxt(f)
            u, v, mask = piv_vld.sig2noise_val(
                data[:, 2],
                data[:, 3],
                data[:, 5],
                threshold=self.p['sig2noise_threshold'])

            save_fname = create_save_vec_fname(path=f, postfix='_sig2noise')

            save(data[:, 0],
                 data[:, 1],
                 u,
                 v,
                 data[:, 4] + mask,
                 sig2noise=data[:, 5],
                 filename=save_fname,
                 delimiter=delimiter)
            result_fnames.append(save_fname)
        return (result_fnames)
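The column indices used above are not defined in this snippet; a sketch of the layout they appear to assume (x, y, u, v, flag, sig2noise), with a hypothetical file name:

# assumed column layout of a saved vector file:
#   0: x, 1: y, 2: u, 3: v, 4: validation flag, 5: signal-to-noise ratio
import numpy as np
data = np.loadtxt('field_A000_sig2noise.txt')  # hypothetical file name
x, y, u, v, flag, s2n = (data[:, k] for k in range(6))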
Example #4
def process_node(i):
    DeltaFrame = 1
    winsize = 50  # pixels
    searchsize = 50  # pixels
    overlap = 25  # pixels
    dt = DeltaFrame * 1. / fps  # sec, time between the two frames
    frame_a = tools.imread(fileNameList[i])
    frame_b = tools.imread(fileNameList[i + DeltaFrame])
    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize,
                                   overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=5,
                                      kernel_size=5)
    tools.save(x, y, u2, v2, mask,
               '../muscle10fpsbotleft_results/' + str(i) + '.txt')
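process_node() relies on the module-level names fileNameList and fps; a sketch of that assumed setup, with a hypothetical input folder and frame rate:

import glob
fps = 10.0  # hypothetical frame rate of the recording
fileNameList = sorted(glob.glob('../muscle10fpsbotleft/*.tif'))  # hypothetical input folder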
Example #5
def func(args):
    """A function to process each image pair."""

    # this line is REQUIRED for multiprocessing to work
    # always use it in your custom function

    file_a, file_b, counter = args

    #####################
    # Here goes your code
    #####################

    # read images into numpy arrays
    frame_a = tools.imread(os.path.join(path, file_a))
    frame_b = tools.imread(os.path.join(path, file_b))

    frame_a = (frame_a * 1024).astype(np.int32)
    frame_b = (frame_b * 1024).astype(np.int32)

    # process image pair with extended search area piv algorithm.
    u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, \
        window_size=64, overlap=32, dt=0.02, search_area_size=128, sig2noise_method='peak2peak')
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.5)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    # get window centers coordinates
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=128,
                                     overlap=32)
    # save to a file
    tools.save(x, y, u, v, mask, 'test2_%03d.txt' % counter)
    tools.display_vector_field('test2_%03d.txt' % counter)
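Per-pair functions like this are typically handed to OpenPIV's tools.Multiprocesser in the older API these snippets use; a usage sketch under that assumption (directory and file patterns are placeholders):

path = '.'  # image directory assumed by func above
task = tools.Multiprocesser(data_dir=path,
                            pattern_a='2image_*0.tif',
                            pattern_b='2image_*1.tif')
task.run(func=func, n_cpus=4)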
Example #6
def run_piv(
    frame_a,
    frame_b,
):
    winsize = 64  # pixels, interrogation window size in frame A
    searchsize = 68  # pixels, search in image B
    overlap = 32  # pixels, 50% overlap
    dt = 0.0005  # sec, time interval between pulses

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     window_size=winsize,
                                     overlap=overlap)

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)

    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=3)

    x, y, u3, v3 = scaling.uniform(x, y, u2, v2,
                                   scaling_factor=41.22)  # 41.22 microns/pixel

    mean_u = np.mean(u3)
    mean_v = np.mean(v3)

    deficit_u = u3 - mean_u
    deficit_v = v3 - mean_v

    u_prime = np.mean(np.sqrt(0.5 * (deficit_u**2 + deficit_v**2)))
    u_avg = np.mean(np.sqrt(0.5 * (mean_u**2 + mean_v**2)))

    turbulence_intensity = u_prime / u_avg

    #save in the simple ASCII table format
    fname = "./Tables/" + exp_string + ".txt"
    # tools.save(x, y, u3, v3, mask, fname)

    out = np.vstack([m.ravel() for m in [x, y, u3, v3]])
    # print(out)
    # np.savetxt(fname,out.T)

    with open(fname, "ab") as f:
        f.write(b"\n")
        np.savetxt(f, out.T)

    return turbulence_intensity
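A driver sketch for run_piv(): the function expects exp_string at module level (it names ./Tables/<exp_string>.txt) and appends to that file, so the directory has to exist; file names and label are placeholders:

import os
os.makedirs('./Tables', exist_ok=True)
exp_string = 'exp1_001'                    # hypothetical experiment label
frame_a = tools.imread('exp1_001_a.bmp')   # hypothetical frame pair
frame_b = tools.imread('exp1_001_b.bmp')
ti = run_piv(frame_a, frame_b)
print('turbulence intensity: %.3f' % ti)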
Example #7
def two_images(image_1, image_2, search_area_size=64, window_size=32, overlap=16, dt=0.02):
    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))

    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a  = tools.imread( 'image_1.bmp' )
    frame_b  = tools.imread( 'image_2.bmp' )
    frame_a = (frame_a*1024).astype(np.int32)
    frame_b = (frame_b*1024).astype(np.int32)

    if not search_area_size:
        search_area_size = 64
    if not window_size:
        window_size = 32
    if not overlap:
        overlap = 16
    if not dt:
        dt = 0.02

    u, v, sig2noise = process.extended_search_area_piv( frame_a, frame_b, window_size=window_size, 
        overlap=overlap, dt=dt, search_area_size=search_area_size, sig2noise_method='peak2peak' )
    x, y = process.get_coordinates( image_size=frame_a.shape, window_size=window_size, overlap=overlap )
    u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 1.3 )
    u, v, mask = validation.global_val( u, v, (-1000, 2000), (-1000, 1000) )
    u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )

    file_name_text = 'result.txt'
    file_name_png = 'result.png'
    if os.path.isfile(file_name_text):
        os.remove(file_name_text)
    if os.path.isfile(file_name_png):
        os.remove(file_name_png)
    tools.save(x, y, u, v, mask, file_name_text)
    a = np.loadtxt(file_name_text)
    fig = plt.figure()
    invalid = a[:,4].astype('bool')
    fig.canvas.manager.set_window_title('Vector field, '+str(np.count_nonzero(invalid))+' wrong vectors')
    valid = ~invalid
    plt.quiver(a[invalid,0],a[invalid,1],a[invalid,2],a[invalid,3],color='r',scale=100, width=0.0025)
    plt.quiver(a[valid,0],a[valid,1],a[valid,2],a[valid,3],color='b',scale=100, width=0.0025)
    plt.draw()
    plt.savefig(file_name_png, format="png")
 
    with open(file_name_text, "rb") as resultFileText:
        file_reader_text = resultFileText.read()
        text_encode = base64.encodebytes(file_reader_text)
        base64_string_text = str(text_encode, 'utf-8')
    
    with open(file_name_png, "rb") as resultFilePng:
        file_reader_image = resultFilePng.read()
        image_encode = base64.encodebytes(file_reader_image)
        base64_string_image = str(image_encode, 'utf-8')
    
    return base64_string_text, base64_string_image
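A round-trip sketch for two_images(): it takes base64-encoded image data and returns base64-encoded results; the input file names are placeholders:

import base64
with open('exp1_001_a.bmp', 'rb') as fa, open('exp1_001_b.bmp', 'rb') as fb:
    img1 = base64.b64encode(fa.read())
    img2 = base64.b64encode(fb.read())
result_txt_b64, result_png_b64 = two_images(img1, img2)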
Example #8
    def _piv_frame(self, img1, img2, show=False, **kwargs):
        """
        calculate velocity using piv method on two frames
        """
        from openpiv.process import extended_search_area_piv, get_coordinates
        # from openpiv.scaling import uniform

        if self._debug:
            print('... [PIV] window size: {}'.format(self._windowSize))
            print('... [PIV] overlap: {}'.format(self._overlap))
            print('... [PIV] search area size: {}'.format(self._searchArea))
            print('... [PIV] threshold: {}'.format(self._piv_threshold))

        u, v, sig2noise = extended_search_area_piv(
            img1,
            img2,
            window_size=self._windowSize,
            overlap=self._overlap,
            dt=self._exposuretime,
            search_area_size=self._searchArea,
            sig2noise_method='peak2peak')
        self._pivx, self._pivy = get_coordinates(image_size=img1.shape,
                                                 window_size=self._windowSize,
                                                 overlap=self._overlap)
        #self._pivy = np.flipud(self._pivy)
        #self._pivx, self._pivy, u, v = uniform(self._pivx, self._pivy, u, v, scaling_factor=self._mpp)

        if show:
            from openpiv.validation import sig2noise_val
            from openpiv.filters import replace_outliers
            u, v, mask = sig2noise_val(u,
                                       v,
                                       sig2noise,
                                       threshold=self._piv_threshold)
            u, v = replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
            # show quiver plot
            plt.figure(figsize=(12, 6))
            plt.imshow(img1)
            plt.quiver(self._pivx, self._pivy, u, v, color='w', pivot='mid')
            plt.savefig(self._fname[:-4] + '_piv.png', dpi=100)

        if self._debug:
            print(
                "... [PIV] mean velocity [um/sec]: ({:4.2f}, {:4.2f})".format(
                    np.mean(u) * self._mpp,
                    np.mean(v) * self._mpp))
            print("... [PIV] mean velocity [pixel/frame]: ({:4.2f}, {:4.2f})".
                  format(
                      np.mean(u) * self._exposuretime,
                      np.mean(v) * self._exposuretime))

        return (u, v, sig2noise)
Example #9
def test_sig2noise_val():
    u = np.ones((5, 5))
    v = np.ones((5, 5))
    threshold = 1.05
    s2n = np.ones((5, 5)) * threshold
    s2n[2, 2] -= 0.1

    u, v, mask = validation.sig2noise_val(u, v, s2n, w=None, threshold=1.05)

    assert np.isnan(u[2, 2])
    assert np.sum(~np.isnan(u)) == 24
    assert mask[0, 0] == False
    assert mask[2, 2] == True
Example #10
def analyzer(frame_a, frame_b, text, plot, num_scene, pathout, scal, zre, xre,
             dt):

    winsize = 16  # pixels
    searchsize = 32  # pixels, search in image b
    overlap = 8  # pixels

    frame_a = cv2.adaptiveThreshold(frame_a, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    frame_b = cv2.adaptiveThreshold(frame_b, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    #frame_a = cv2.adaptiveThreshold(frame_a,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    #frame_b = cv2.adaptiveThreshold(frame_b,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)

    plt.imshow(np.c_[frame_a, frame_b], cmap='gray')
    plt.savefig(pathout + '/filtered' + str(num_scene) + '.png', dpi=800)

    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize,
                                   overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=2)
    x, y, u3, v3 = scaling.uniform(
        x, y, u2, v2, scaling_factor=scal)  # scaling_factor (pixel per meter)

    u3 = np.flip(u3, axis=0)
    v3 = np.flip(v3, axis=0)

    xre = np.linspace(0, xre / 100, len(x[0, :]))
    zre = np.linspace(0, zre / 100, len(x[:, 0]))

    if plot == 1:
        piv_plotting(xre, zre, u3, v3, num_scene, pathout)

    if text == 0:
        tools.save(x, y, u3, v3, mask,
                   pathout + '/piv' + str(num_scene) + '.txt')
Example #11
def two_images(image_1, image_2):

    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))

    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a = tools.imread('image_1.bmp')
    frame_b = tools.imread('image_2.bmp')

    winsize = 32  # pixels
    searchsize = 64  # pixels, search in image B
    overlap = 12  # pixels
    dt = 0.02  # sec

    u, v, sig2noise = pyprocess.piv(frame_a.astype(np.int32),
                                    frame_b.astype(np.int32),
                                    window_size=winsize,
                                    overlap=overlap,
                                    dt=dt,
                                    search_size=searchsize,
                                    sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=searchsize,
                                     overlap=overlap)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

    file_name = 'result.txt'
    if os.path.isfile(file_name):
        os.remove(file_name)
    tools.save(x, y, u, v, np.zeros_like(u),
               file_name)  # no masking, all values are valid

    with open(file_name, "rb") as resultFile:
        file_reader = resultFile.read()
        image_encode = base64.encodebytes(file_reader)
        base64_string = str(image_encode, 'utf-8')

    return base64_string
Example #12
def PIV(image_0, image_1, winsize, searchsize, overlap, frame_rate,
        scaling_factor):

    frame_0 = image_0
    #     [0:600, :]
    frame_1 = image_1
    #     [0:600, :]

    dt = 1. / frame_rate  # time between frames, derived from the otherwise unused frame_rate argument
    # Processing the images with interrogation area and search area / cross-correlation algorithm
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_0,
        frame_1,
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    # Compute the coordinates of the centers of the interrogation windows
    x, y = pyprocess.get_coordinates(image_size=frame_0.shape,
                                     window_size=winsize,
                                     overlap=overlap)

    # This function sets to NaN all vectors for which the signal-to-noise
    # ratio is below the given threshold (here 1.5).
    # mask is a boolean array marking the vectors that were replaced by NaN.
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.5)

    # Function as described above, removing outliers that deviate by more
    # than twice the standard deviation
    u, v, mask = remove_outliers(u, v, mask)

    # Replacing the outliers with interpolation
    #    u, v = filters.replace_outliers(u,
    #                                    v,
    #                                    method='nan',
    #                                    max_iter=50,
    #                                    kernel_size=3)

    # Apply an uniform scaling to the flow field to get dimensional units
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=scaling_factor)

    return x, y, u, v, mask
Example #13
def update(winsize, overlap, s2n, s2n_method):
    u, v, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=1.0,
        search_area_size=64,
        sig2noise_method=s2n_method)
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize,
                                   overlap=overlap)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=s2n)
    # u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
    # x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 1.0 )
    # tools.save(x, y, u, v, mask, 'tutorial-part3.txt' )
    fig, ax = plt.subplots()
    ax.imshow(tools.imread('20110919 exp3 x10 stream 488570.TIF'),
              cmap=plt.cm.gray,
              origin='upper')
    ax.quiver(x, np.flipud(y), u / 10., v / 10., scale=10, color='r', lw=3)
    plt.show()
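update() reads frame_a and frame_b from the enclosing scope and is clearly meant to be driven interactively; a sketch using ipywidgets.interact, which is an assumption and not part of the original snippet:

from ipywidgets import interact
interact(update,
         winsize=(16, 64, 8),        # slider: interrogation window size
         overlap=(4, 32, 4),         # slider: overlap in pixels
         s2n=(1.0, 2.0, 0.05),       # slider: signal-to-noise threshold
         s2n_method=['peak2peak', 'peak2mean'])  # dropdown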
Example #14
def process(args):
    file_a, file_b, counter = args

    # read images into numpy arrays
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)
    print(counter + 1)

    # process image pair with piv algorithm.
    u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, \
        window_size=32, overlap=16, dt=0.0015, search_area_size=32, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=32,
                                     overlap=16)

    u, v, mask1 = validation.sig2noise_val(u, v, sig2noise, threshold=1.0)
    u, v, mask2 = validation.global_val(u, v, (-2000, 2000), (-2000, 4000))
    u, v, mask3 = validation.local_median_val(u, v, 400, 400, size=2)
    #u, v, mask4 = validation.global_std(u, v, std_threshold=3)
    mask = mask1 | mask2 | mask3
    #u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)

    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, mask, save_file + '.dat')
Example #15
File: image.py  Project: davidbhr/arnold
def piv_analysis(contr,
                 relax,
                 outfolder,
                 scale,
                 winsize_um=10,
                 overlap_um=None,
                 searchsize_um=None,
                 drift_correction=True,
                 threshold=1.2,
                 scale_quiver=None):
    """
    Computes deformations between 2 images by crosscorrelation using openpiv (must be installed - see Readme).
    Saves several quiver plots and the maximal found deformation for later analysis.
    
    contr: Path to active/contracted image file to calculate deformations between contracted and relaxed state
    relax: Path to relaxed image file to calculate deformations between contracted and relaxed state
    scale: resolution of image in µm per pixel
    winsize_um: size of the search window to be applied in µm 
    drift_correction: Applies a drift correction before piv analysis
    threshold: filters displacement
    scale_quiver: can be used to scale the arrows in quiver plot (only for visualization)
                Default is None meaning automatically scaling ,  scale is inverse
  
    """
    from openpiv import tools, process, validation, filters, scaling

    winsize = int(winsize_um / scale)  # for pixel

    # if not specified use defaults
    if not overlap_um:
        overlap = winsize // 2  # integer division; OpenPIV expects an integer overlap
    else:
        overlap = int(overlap_um / scale)

    if not searchsize_um:
        searchsize = winsize
    else:
        searchsize = int(searchsize_um / scale)

    print(winsize, overlap, searchsize)
    # an odd winsize raises problems due to the overlap, therefore decrease it by one pixel and warn
    if not (winsize % 2) == 0:
        print(
            'Odd pixel numbers raise problems due to the overlap: winsize changed to '
            + str((winsize - 1) * scale) + ' um')
        winsize -= 1

    # creates folder if it doesn't exist
    if not os.path.exists(outfolder):
        os.makedirs(outfolder)

    #read in images
    relax = tools.imread(relax)
    contr = tools.imread(contr)

    # convert for openpiv
    relax = relax.astype(np.int32)
    contr = contr.astype(np.int32)

    dt = 1  # sec

    u0, v0, sig2noise = process.extended_search_area_piv(
        relax,
        contr,
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = process.get_coordinates(image_size=relax.shape,
                                   window_size=winsize,
                                   overlap=overlap)

    # drift correction
    if drift_correction:
        u0 -= np.nanmean(u0)
        v0 -= np.nanmean(v0)

    # filtered deformations
    u1, v1, mask = validation.sig2noise_val(u0,
                                            v0,
                                            sig2noise,
                                            threshold=threshold)

    # save deformations + coords
    np.save(outfolder + "/u.npy", u1)
    np.save(outfolder + "/v.npy", v1)
    np.save(outfolder + "/x.npy", x)
    np.save(outfolder + "/y.npy", y)

    # show filtered+unfiltered data
    plt.figure(figsize=(6, 3))
    plt.subplot(121)
    plt.quiver(x,
               y[::-1],
               u0,
               v0,
               alpha=1,
               facecolor='orange',
               scale_units='xy',
               scale=scale_quiver)
    plt.title('unfiltered')
    plt.subplot(122)
    plt.title('filtered')
    plt.quiver(x,
               y[::-1],
               u1,
               v1,
               alpha=1,
               facecolor='b',
               scale_units='xy',
               scale=scale_quiver)
    plt.savefig(outfolder + '/filtered+unfiltered.png',
                bbox_inches='tight',
                pad_inches=0)
    plt.close()
    # save overlay
    # different color channels
    plt.figure()
    overlay = np.zeros((contr.shape[0], contr.shape[1], 3), 'uint8')
    overlay[..., 1] = contr  #1
    overlay[..., 2] = relax  #2
    plt.imshow(overlay, origin='upper'
               )  #extent=[0, contr.shape[0], contr.shape[1], 0])  # turn Y
    plt.quiver(x,
               y,
               u1,
               v1,
               facecolor='orange',
               alpha=1,
               scale_units='xy',
               scale=scale_quiver)
    plt.axis('off')
    plt.savefig(outfolder + '/overlay-filtered.png',
                bbox_inches='tight',
                pad_inches=0)
    # difference image
    plt.figure()
    plt.imshow(np.abs(contr - relax), cmap='viridis', origin='upper')
    plt.quiver(x,
               y,
               u1,
               v1,
               facecolor='orange',
               alpha=1,
               scale_units='xy',
               scale=scale_quiver)
    plt.axis('off')
    plt.savefig(outfolder + '/difference-img-filtered.png',
                bbox_inches='tight',
                pad_inches=0)
    plt.close()

    # save deformation and image data
    deformation_unfiltered = np.sqrt(u0**2 + v0**2)
    deformation_filtered = np.sqrt(u1**2 + v1**2)
    maxdefo_um_unfiltered = np.nanmax(deformation_unfiltered) * scale
    maxdefo_um_filtered = np.nanmax(deformation_filtered) * scale
    np.savetxt(outfolder + '/maxdefo_um_unfiltered.txt',
               [maxdefo_um_unfiltered])
    np.savetxt(outfolder + '/maxdefo_um_filtered.txt', [maxdefo_um_filtered])
    print('Maximal deformation unfiltered: ', maxdefo_um_unfiltered,
          'Maximal deformation filtered: ', maxdefo_um_filtered)
    plt.imsave(outfolder + '/raw_contr.png', contr, cmap='gray')
    plt.imsave(outfolder + '/raw_relax.png', relax, cmap='gray')
    plt.close()

    return
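A usage sketch for piv_analysis() with placeholder file names and an assumed image resolution:

piv_analysis('cell_contracted.tif',  # hypothetical contracted-state image
             'cell_relaxed.tif',     # hypothetical relaxed-state image
             outfolder='piv_out',
             scale=0.33,             # assumed resolution in um per pixel
             winsize_um=10,
             drift_correction=True,
             threshold=1.2)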
Example #16
                                                dt=(1 / du, 1 / dv, 1 / dw),
                                                subpixel_method='gaussian',
                                                sig2noise_method='peak2peak',
                                                width=2)

# %%
# correcting stage drift between the field of views
u -= np.nanmean(u)
v -= np.nanmean(v)
w -= np.nanmean(w)

# %%
# filtering
uf, vf, wf, mask = sig2noise_val(u,
                                 v,
                                 w=w,
                                 sig2noise=sig2noise,
                                 threshold=signoise_filter)
uf, vf, wf = replace_outliers(uf,
                              vf,
                              wf,
                              max_iter=1,
                              tol=100,
                              kernel_size=2,
                              method='disk')


# %%
# plotting
# representation of the image stacks by maximum projections; the red circle marks the position of the cell
def update_plot(i, ims, ax):
Example #17
    def func(args):
        """A function to process each image pair."""

        # this line is REQUIRED for multiprocessing to work
        # always use it in your custom function

        file_a, file_b, counter = args

        # counter2=str(counter2)
        #####################
        # Here goes your code
        #####################

        ' read images into numpy arrays'
        frame_a = tools.imread(os.path.join(settings.filepath_images, file_a))
        frame_b = tools.imread(os.path.join(settings.filepath_images, file_b))

        ## Miguel: I just had a quick look, and I do not understand the reason for this step.
        #  I propose to remove it.
        #frame_a = (frame_a*1024).astype(np.int32)
        #frame_b = (frame_b*1024).astype(np.int32)

        ' crop to ROI'
        if settings.ROI == 'full':
            frame_a = frame_a
            frame_b = frame_b
        else:
            frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
            frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
        if settings.dynamic_masking_method in ('edge', 'intensity'):
            frame_a = preprocess.dynamic_masking(
                frame_a,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)
            frame_b = preprocess.dynamic_masking(
                frame_b,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)
        '''%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'''
        'first pass'
        x, y, u, v, sig2noise_ratio = first_pass(
            frame_a,
            frame_b,
            settings.windowsizes[0],
            settings.overlap[0],
            settings.iterations,
            correlation_method=settings.correlation_method,
            subpixel_method=settings.subpixel_method,
            do_sig2noise=settings.extract_sig2noise,
            sig2noise_method=settings.sig2noise_method,
            sig2noise_mask=settings.sig2noise_mask,
        )

        'validation using global limits and std and local median'
        '''MinMaxU : two elements tuple
            sets the limits of the u displacement component
            Used for validation.

        MinMaxV : two elements tuple
            sets the limits of the v displacement component
            Used for validation.

        std_threshold : float
            sets the  threshold for the std validation

        median_threshold : float
            sets the threshold for the median validation

        filter_method : string
            the method used to replace the non-valid vectors
            Methods:
                'localmean',
                'disk',
                'distance', 

        max_filter_iteration : int
            maximum of filter iterations to replace nans

        filter_kernel_size : int
            size of the kernel used for the filtering'''

        mask = np.full_like(x, False)
        if settings.validation_first_pass == True:
            u, v, mask_g = validation.global_val(u, v, settings.MinMax_U_disp,
                                                 settings.MinMax_V_disp)
            u, v, mask_s = validation.global_std(
                u, v, std_threshold=settings.std_threshold)
            u, v, mask_m = validation.local_median_val(
                u,
                v,
                u_threshold=settings.median_threshold,
                v_threshold=settings.median_threshold,
                size=settings.median_size)
            if settings.extract_sig2noise == True and settings.iterations == 1 and settings.do_sig2noise_validation == True:
                u, v, mask_s2n = validation.sig2noise_val(
                    u,
                    v,
                    sig2noise_ratio,
                    threshold=settings.sig2noise_threshold)
                mask = mask + mask_g + mask_m + mask_s + mask_s2n
            else:
                mask = mask + mask_g + mask_m + mask_s
        'filter to replace the values that were marked by the validation'
        if settings.iterations > 1:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            'adding masks to add the effect of all the validations'
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
        elif settings.iterations == 1 and settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            'adding masks to add the effect of all the validations'
            if settings.smoothn == True:
                u, v = filters.replace_outliers(
                    u,
                    v,
                    method=settings.filter_method,
                    max_iter=settings.max_filter_iteration,
                    kernel_size=settings.filter_kernel_size)
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)

        i = 1
        'all the following passes'
        for i in range(2, settings.iterations + 1):
            x, y, u, v, sig2noise_ratio, mask = multipass_img_deform(
                frame_a,
                frame_b,
                settings.windowsizes[i - 1],
                settings.overlap[i - 1],
                settings.iterations,
                i,
                x,
                y,
                u,
                v,
                correlation_method=settings.correlation_method,
                subpixel_method=settings.subpixel_method,
                do_sig2noise=settings.extract_sig2noise,
                sig2noise_method=settings.sig2noise_method,
                sig2noise_mask=settings.sig2noise_mask,
                MinMaxU=settings.MinMax_U_disp,
                MinMaxV=settings.MinMax_V_disp,
                std_threshold=settings.std_threshold,
                median_threshold=settings.median_threshold,
                median_size=settings.median_size,
                filter_method=settings.filter_method,
                max_filter_iteration=settings.max_filter_iteration,
                filter_kernel_size=settings.filter_kernel_size,
                interpolation_order=settings.interpolation_order)
            # If the smoothing is active, we do it at each pass
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
        '''%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'''
        if settings.extract_sig2noise == True and i == settings.iterations and settings.iterations != 1 and settings.do_sig2noise_validation == True:
            u, v, mask_s2n = validation.sig2noise_val(
                u, v, sig2noise_ratio, threshold=settings.sig2noise_threshold)
            mask = mask + mask_s2n
        if settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
        'pixel/frame->pixel/sec'
        u = u / settings.dt
        v = v / settings.dt
        'scales the results pixel-> meter'
        x, y, u, v = scaling.uniform(x,
                                     y,
                                     u,
                                     v,
                                     scaling_factor=settings.scaling_factor)
        'save to a file'
        save(x,
             y,
             u,
             v,
             sig2noise_ratio,
             mask,
             os.path.join(save_path, 'field_A%03d.txt' % counter),
             delimiter='\t')
        'some messages to check if it is still alive'

        'some other stuff that one might want to use'
        if settings.show_plot == True or settings.save_plot == True:
            plt.close('all')
            plt.ioff()
            Name = os.path.join(save_path, 'Image_A%03d.png' % counter)
            display_vector_field(os.path.join(save_path,
                                              'field_A%03d.txt' % counter),
                                 scale=settings.scale_plot)
            if settings.save_plot == True:
                plt.savefig(Name)
            if settings.show_plot == True:
                plt.show()

        print('Image Pair ' + str(counter + 1))
Example #18
# In[16]:


# let's treat the lowest 5% of the signal-to-noise ratios as problematic.
sig2noise_threshold = np.percentile(sig2noise_ratio[sig2noise_ratio>0],(5))
print(f"S2N threshold is estimated as {sig2noise_threshold:.3f}")

settings.sig2noise_threshold = sig2noise_threshold


# In[17]:


u, v, mask_s2n = validation.sig2noise_val(
            u, v, sig2noise_ratio,
            threshold=settings.sig2noise_threshold
)

status_message(u)


# In[18]:


plt.quiver(x,y,u,v,sig2noise_ratio)
plt.quiver(x[mask_s2n],y[mask_s2n],u0[mask_s2n],v0[mask_s2n],color='r')
plt.gca().invert_yaxis()
plt.colorbar()


# In[19]:
Example #19
    def piv_frames(self, topn=-1, show=True):

        from openpiv.validation import sig2noise_val
        from openpiv.filters import replace_outliers

        frames = self._frames[::2]
        topn = np.min([len(frames), topn])
        frames = frames[:topn]

        (ut, vt, s2nt) = self.piv_frame(frame=0, show=False)
        for i in tqdm.tqdm(frames[1:]):
            (u, v, s2n) = self.piv_frame(frame=i, show=False)
            ut += u
            vt += v
            s2nt += s2n
            #print(np.max(u), np.min(u), u.size)
            #print(np.max(v), np.max(v), v.size)

        ut /= len(frames)
        vt /= len(frames)
        s2nt /= len(frames)

        ut, vt, mask = sig2noise_val(ut,
                                     vt,
                                     s2nt,
                                     threshold=self._piv_threshold)
        ut, vt = replace_outliers(ut,
                                  vt,
                                  method='localmean',
                                  max_iter=10,
                                  kernel_size=2)
        self._pivu = ut
        self._pivv = vt
        self._pivs2n = s2nt
        self.save_piv()

        if show:
            fig = plt.figure(figsize=(12, 6))
            ax1 = fig.add_subplot(121)
            ax1.imshow(self.mean())
            ax1.quiver(self._pivx, self._pivy, ut, vt, pivot='mid', color='w')

            ax2 = fig.add_subplot(122)
            n_u, bins, patches = ax2.hist(ut.flatten() * self._mpp,
                                          bins=20,
                                          density=True,
                                          facecolor='blue',
                                          alpha=0.75,
                                          label='u')
            n_v, bins, patches = ax2.hist(vt.flatten() * self._mpp,
                                          bins=20,
                                          density=True,
                                          facecolor='green',
                                          alpha=0.75,
                                          label='v')
            ax2.annotate(np.mean(ut) * self._mpp,
                         xy=(np.mean(ut) * self._mpp, np.max(n_u)))
            ax2.annotate(np.mean(vt) * self._mpp,
                         xy=(np.mean(vt) * self._mpp, np.max(n_v)))
            plt.legend(loc='best')
            plt.tight_layout()
            plt.savefig(self._fname[:-4] + '_piv_t.png', dpi=150)

        print("... frames: {}, {}".format(frames[0], frames[-1]))
        print("... mean velocity [um/sec]: ({:4.2f}, {:4.2f})".format(
            np.mean(ut) * self._umtopixel,
            np.mean(vt) * self._umtopixel))
        print("... mean velocity [pixel/frame]: ({:4.2f}, {:4.2f})".format(
            np.mean(ut) * self._dt,
            np.mean(vt) * self._dt))
Example #20
    def run(self):
        self.is_to_stop = False
        self.piv.piv_results_list = []

        for i in range(0, len(self.frames_list) - 1, abs(self.jump)):
            if self.piv.xy_zoom[0][0] and self.piv.xy_zoom[1][0]:
                """try:"""
                frame_a = self.frames_list[i][2][
                    int(self.piv.xy_zoom[1][0]):int(self.piv.xy_zoom[1][1]),
                    int(self.piv.xy_zoom[0][0]):int(self.piv.xy_zoom[0][1])]

                frame_b = self.frames_list[i + 1][2][
                    int(self.piv.xy_zoom[1][0]):int(self.piv.xy_zoom[1][1]),
                    int(self.piv.xy_zoom[0][0]):int(self.piv.xy_zoom[0][1])]
                """
                except ValueError:
                    frame_a = self.frames_list[i][2][int(self.frames_list[i][2].shape[1] - self.piv.xy_zoom[1][0]): int(
                        self.frames_list[i][2].shape[1] - self.piv.xy_zoom[1][1]),
                              int(self.piv.xy_zoom[0][0]): int(
                                  self.piv.xy_zoom[0][1])]

                    frame_b = self.frames_list[i + 1][2][
                              int(self.frames_list[i + 1][2].shape[1] - self.piv.xy_zoom[1][0]): int(
                                  self.frames_list[i + 1][2].shape[1] - self.piv.xy_zoom[1][1]),
                              int(self.piv.xy_zoom[0][0]): int(
                                  self.piv.xy_zoom[0][1])]
                """
            else:
                frame_a = self.frames_list[i][2]

                frame_b = self.frames_list[i + 1][2]

            try:
                self.u, self.v, self.sig2noise = extended_search_area_piv(
                    frame_a.astype(np.int32),
                    frame_b.astype(np.int32),
                    window_size=self.winsize,
                    overlap=self.overlap,
                    dt=self.dt,
                    search_area_size=self.searchsize,
                    sig2noise_method='peak2peak')
                self.x, self.y = get_coordinates(image_size=frame_a.shape,
                                                 window_size=self.winsize,
                                                 overlap=self.overlap)
                self.u, self.v, self.mask = sig2noise_val(self.u,
                                                          self.v,
                                                          self.sig2noise,
                                                          threshold=1.0)
                self.u, self.v = replace_outliers(self.u,
                                                  self.v,
                                                  method='localmean',
                                                  max_iter=10,
                                                  kernel_size=2)
                # self.x, self.y, self.u, self.v = uniform(self.x, self.y, self.u, self.v, scaling_factor=5)

                if self.piv.xy_zoom[0][0] and self.piv.xy_zoom[1][0]:
                    self.x += int(self.piv.xy_zoom[0][0])
                    self.y += int(self.piv.xy_zoom[1][0])

                self.y = np.flip(self.y, 0)
                self.x = np.flip(self.x, 0)

            except ValueError:
                if self.searchsize < self.winsize:
                    print("0")
                    self.error_message.setText(
                        "the search size cannot be smaller than the window size"
                    )
                elif self.overlap > self.winsize:
                    print("1")
                    self.error_message.setText(
                        "Overlap has to be smaller than the window_size")
                else:
                    print("2")
                    self.error_message.setText("ROI window to small")
                self.error_message.exec()
                break
            self.piv.piv_results_list.append(
                [self.x, self.y, self.u, self.v, self.mask])
            self.piv.piv_images_list[i][3] = self.piv.piv_results_list[
                i // abs(self.jump)]
            # data = np.zeros((len(np.ravel(self.u)), 5))
            # res_list = [np.ravel(self.x), np.ravel(self.y), np.ravel(self.u), np.ravel(self.v), np.ravel(self.mask)]
            # for j in range(0, 4):
            #    for k in range(len(res_list[j])):
            #        data[k][j] = res_list[j][k]
            # self.piv.piv_images_list[i][4] = data
            # save_openpiv_vec(self.piv.piv_images_list[i][1].split('.')[0], data, 'pix', 'dt',
            #                  len(data[0]), len(data))
            self.piv.show_plot(i, self.piv.bit, True)

            if i == len(self.frames_list) - 2 and self.jump == 1:
                self.piv.piv_results_list.append(
                    [self.x, self.y, self.u, self.v, self.mask])
                self.piv.piv_images_list[i +
                                         1][3] = self.piv.piv_results_list[i +
                                                                           1]
                # self.piv.piv_images_list[i + 1][4] = data
                self.piv.show_plot(i + 1, self.piv.bit, True)

            if self.is_to_stop:
                break

        self.piv_finished_signal.emit()
Example #21
import sys

if 'OpenPIV' not in sys.path:
    sys.path.append('/Users/alex/Documents/OpenPIV/alexlib/openpiv-python')

from openpiv import tools, validation, process, filters, scaling, pyprocess
import numpy as np

frame_a  = tools.imread( 'exp1_001_a.bmp' )
frame_b  = tools.imread( 'exp1_001_b.bmp' )

u, v, sig2noise = process.extended_search_area_piv( frame_a.astype(np.int32), 
frame_b.astype(np.int32), window_size=24, overlap=12, dt=0.02, search_area_size=64, 
sig2noise_method='peak2peak' )
x, y = process.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=12 )
u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 2.5 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
tools.save(x, y, u, v, mask, 'exp1_001.txt' )
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)



u, v, s2n= pyprocess.piv(frame_a, frame_b, corr_method='fft', window_size=24, overlap=12, 
dt=0.02, sig2noise_method='peak2peak' )
x, y = pyprocess.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=12 )
u, v, mask = validation.sig2noise_val( u, v, s2n, threshold = 2.5 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
tools.save(x, y, u, v, mask, 'exp1_002.txt' )
tools.display_vector_field('exp1_002.txt', scale=100, width=0.0025)
Example #22
import sys

if 'OpenPIV' not in sys.path:
    sys.path.append('/Users/alex/Documents/OpenPIV/alexlib/openpiv-python')

from openpiv import tools, validation, process, filters, scaling, pyprocess
import numpy as np

frame_a  = tools.imread( 'exp1_001_a.bmp' )
frame_b  = tools.imread( 'exp1_001_b.bmp' )

u, v, sig2noise = process.extended_search_area_piv( frame_a.astype(np.int32), frame_b.astype(np.int32), window_size=24, overlap=12, dt=0.02, search_area_size=64, sig2noise_method='peak2peak' )

x, y = process.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=12 )

u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 1.3 )

u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)

x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )

tools.save(x, y, u, v, mask, 'exp1_001.txt' )

tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)



u1, v1, sig2noise = pyprocess.piv( frame_a.astype(np.int32), 
                                  frame_b.astype(np.int32), 
window_size=24, overlap=12, dt=0.02, search_size=64, sig2noise_method='peak2peak' )
Example #23
# %%
# Use Cython version: process.pyx

u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=32,
    overlap=8,
    dt=.1,
    sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=32,
                               overlap=8)

u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v = filters.replace_outliers(u,
                                v,
                                method='localmean',
                                max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

tools.save(x, y, u, v, mask, 'Y4-S3_Camera000398_a.txt')

# %%
# Use Python version, pyprocess:

u, v, sig2noise = pyprocess.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
Example #24
def run_piv(
    frame_a,
    frame_b,
    winsize=16,  # pixels, interrogation window size in frame A
    searchsize=20,  # pixels, search in image B
    overlap=8,  # pixels, 50% overlap
    dt=0.0001,  # sec, time interval between pulses
    image_check=False,
    show_vertical_profiles=False,
    figure_export_name='_results.png',
    text_export_name="_results.txt",
    scale_factor=1,
    pixel_density=36.74,
    arrow_width=0.001,
    show_result=True,
    u_bounds=(-100, 100),
    v_bounds=(-100, 100)):

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)

    x, y, u0, v0 = scaling.uniform(
        x, y, u0, v0, scaling_factor=pixel_density)  # no. pixel per distance

    u0, v0, mask = validation.global_val(u0, v0, u_bounds, v_bounds)

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)

    u3, v3 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=3)

    #save in the simple ASCII table format
    if np.std(u3) < 480:
        tools.save(x, y, u3, v3, sig2noise, mask, text_export_name)

    if image_check == True:
        fig, ax = plt.subplots(2, 1, figsize=(24, 12))
        ax[0].imshow(frame_a)
        ax[1].imshow(frame_b)

    io.imwrite(figure_export_name, frame_a)

    if show_result == True:
        fig, ax = plt.subplots(figsize=(24, 12))
        tools.display_vector_field(
            text_export_name,
            ax=ax,
            scaling_factor=pixel_density,
            scale=scale_factor,  # scale defines here the arrow length
            width=arrow_width,  # width is the thickness of the arrow
            on_img=True,  # overlay on the image
            image_name=figure_export_name)
        fig.savefig(figure_export_name)

    if show_vertical_profiles:
        field_shape = pyprocess.get_field_shape(image_size=frame_a.shape,
                                                search_area_size=searchsize,
                                                overlap=overlap)
        vertical_profiles(text_export_name, field_shape)

    print('Std of u3: %.3f' % np.std(u3))
    print('Mean of u3: %.3f' % np.mean(u3))

    return np.std(u3)
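A driver sketch for this run_piv variant; it writes the raw frame with io.imwrite, so imageio is assumed to be imported as io, and the file names are placeholders:

# assumed imports for the snippet above
import numpy as np
import imageio as io
import matplotlib.pyplot as plt
from openpiv import tools, pyprocess, validation, filters, scaling

frame_a = tools.imread('frame_0000.tif')  # hypothetical frame pair
frame_b = tools.imread('frame_0001.tif')
u_std = run_piv(frame_a, frame_b,
                winsize=16, searchsize=20, overlap=8, dt=0.0001,
                figure_export_name='pair_0000.png',
                text_export_name='pair_0000.txt')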
Example #25
    def quick_piv(self, search_dict, index_a=100, index_b=101, folder=None):
        self.show_piv_param()
        ns = Namespace(**self.piv_param)

        if folder == None:
            img_a, img_b = self.read_two_images(search_dict,
                                                index_a=index_a,
                                                index_b=index_b)

            location_path = [
                x['path'] for x in self.piv_dict_list
                if search_dict.items() <= x.items()
            ]
            results_path = os.path.join(self.results_path, *location_path)
            try:
                os.makedirs(results_path)
            except FileExistsError:
                pass
        else:
            try:
                file_a_path = os.path.join(self.path, folder,
                                           'frame_%06d.tiff' % index_a)
                file_b_path = os.path.join(self.path, folder,
                                           'frame_%06d.tiff' % index_b)

                img_a = np.array(Image.open(file_a_path))
                img_b = np.array(Image.open(file_b_path))
            except:
                return None

        # crop
        img_a = img_a[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]
        img_b = img_b[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]

        u0, v0, sig2noise = pyprocess.extended_search_area_piv(
            img_a.astype(np.int32),
            img_b.astype(np.int32),
            window_size=ns.winsize,
            overlap=ns.overlap,
            dt=ns.dt,
            search_area_size=ns.searchsize,
            sig2noise_method='peak2peak')

        x, y = pyprocess.get_coordinates(image_size=img_a.shape,
                                         search_area_size=ns.searchsize,
                                         overlap=ns.overlap)

        x, y, u0, v0 = scaling.uniform(
            x, y, u0, v0,
            scaling_factor=ns.pixel_density)  # no. pixel per distance

        u0, v0, mask = validation.global_val(
            u0, v0, (ns.u_lower_bound, ns.u_upper_bound),
            (ns.v_lower_bound, ns.v_upper_bound))

        u1, v1, mask = validation.sig2noise_val(u0,
                                                v0,
                                                sig2noise,
                                                threshold=1.01)

        u3, v3 = filters.replace_outliers(u1,
                                          v1,
                                          method='localmean',
                                          max_iter=500,
                                          kernel_size=3)

        #save in the simple ASCII table format
        tools.save(x, y, u3, v3, sig2noise, mask,
                   os.path.join(results_path, ns.text_export_name))

        if ns.image_check == True:
            fig, ax = plt.subplots(2, 1, figsize=(24, 12))
            ax[0].imshow(img_a)
            ax[1].imshow(img_b)

        io.imwrite(os.path.join(results_path, ns.figure_export_name), img_a)

        if ns.show_result == True:
            fig, ax = plt.subplots(figsize=(24, 12))
            tools.display_vector_field(
                os.path.join(results_path, ns.text_export_name),
                ax=ax,
                scaling_factor=ns.pixel_density,
                scale=ns.scale_factor,  # scale defines here the arrow length
                width=ns.arrow_width,  # width is the thickness of the arrow
                on_img=True,  # overlay on the image
                image_name=os.path.join(results_path, ns.figure_export_name))
            fig.savefig(os.path.join(results_path, ns.figure_export_name))

        if ns.show_vertical_profiles:
            field_shape = pyprocess.get_field_shape(
                image_size=img_a.shape,
                search_area_size=ns.searchsize,
                overlap=ns.overlap)
            vertical_profiles(ns.text_export_name, field_shape)

        print('Mean of u: %.3f' % np.mean(u3))
        print('Std of u: %.3f' % np.std(u3))
        print('Mean of v: %.3f' % np.mean(v3))
        print('Std of v: %.3f' % np.std(v3))

        output = np.array([np.mean(u3), np.std(u3), np.mean(v3), np.std(v3)])
        # if np.absolute(np.mean(v3)) < 50:
        #     output = self.quick_piv(search_dict,index_a = index_a + 1, index_b = index_b + 1)

        return x, y, u3, v3
Example #26
    def func(args):
        file_a, file_b, counter = args
        # read the images
        frame_a = tools.imread(os.path.join(settings.filepath_images, file_a))
        frame_b = tools.imread(os.path.join(settings.filepath_images, file_b))
        if counter == settings.fall_start:
            settings.ROI[1] = frame_a.shape[0]
        """Here we check if the interface has reached the top of the roi yet
        by comparing it to the index in the observation_periods file. If it has
        not reached the roi yet we skip this part, if it did then we shift the
        roi for each pair after the initial one """
        if counter >= settings.roi_shift_start:
            # set the roi to the image height for the first frame
            # if counter == settings.roi_shift_start :
            #     settings.current_pos = 0
            # shift the roi for each pair (this is not done for the first one)
            settings.ROI[0] = int(settings.current_pos)

        # crop to roi
        if settings.ROI != 'full':
            frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
            frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
        if settings.dynamic_masking_method in ('edge', 'intensity'):
            frame_a = preprocess.dynamic_masking(
                frame_a,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)
            frame_b = preprocess.dynamic_masking(
                frame_b,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)

#%%
        """ Here we do the first pass of the piv interrogation """
        x, y, u, v, sig2noise_ratio = first_pass(
            frame_a,
            frame_b,
            settings.window_width[0],
            settings.window_height[0],
            settings.overlap_width[0],
            settings.overlap_height[0],
            settings.iterations,
            correlation_method=settings.correlation_method,
            subpixel_method=settings.subpixel_method,
            do_sig2noise=settings.extract_sig2noise,
            sig2noise_method=settings.sig2noise_method,
            sig2noise_mask=settings.sig2noise_mask,
        )
        mask = np.full_like(x, False)
        if settings.validation_first_pass == True:
            u, v, mask_g = validation.global_val(u, v, settings.MinMax_U_disp,
                                                 settings.MinMax_V_disp)
            u, v, mask_s = validation.global_std(
                u, v, std_threshold=settings.std_threshold)
            u, v, mask_m = validation.local_median_val(
                u,
                v,
                u_threshold=settings.median_threshold,
                v_threshold=settings.median_threshold,
                size=settings.median_size)
            if (settings.extract_sig2noise == True
                    and settings.iterations == 1
                    and settings.do_sig2noise_validation == True):
                u, v, mask_s2n = validation.sig2noise_val(
                    u,
                    v,
                    sig2noise_ratio,
                    threshold=settings.sig2noise_threshold)
                mask = mask + mask_g + mask_m + mask_s + mask_s2n
            else:
                mask = mask + mask_g + mask_m + mask_s
        # filter to replace the values that were marked by the validation
        if settings.iterations > 1:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            # adding masks to add the effect of all the validations
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
        elif settings.iterations == 1 and settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            # adding masks to add the effect of all the validations
            if settings.smoothn == True:
                u, v = filters.replace_outliers(
                    u,
                    v,
                    method=settings.filter_method,
                    max_iter=settings.max_filter_iteration,
                    kernel_size=settings.filter_kernel_size)
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)

#%%
        i = 1
        """ Do the multipass until the maximum iterations are reached """
        for i in range(2, settings.iterations + 1):
            x, y, u, v, sig2noise_ratio, mask = multipass_img_deform(
                frame_a,
                frame_b,
                settings.window_width[i - 1],
                settings.window_height[i - 1],
                settings.overlap_width[i - 1],
                settings.overlap_height[i - 1],
                settings.iterations,
                i,
                x,
                y,
                u,
                v,
                correlation_method=settings.correlation_method,
                subpixel_method=settings.subpixel_method,
                do_sig2noise=settings.extract_sig2noise,
                sig2noise_method=settings.sig2noise_method,
                sig2noise_mask=settings.sig2noise_mask,
                MinMaxU=settings.MinMax_U_disp,
                MinMaxV=settings.MinMax_V_disp,
                std_threshold=settings.std_threshold,
                median_threshold=settings.median_threshold,
                median_size=settings.median_size,
                filter_method=settings.filter_method,
                max_filter_iteration=settings.max_filter_iteration,
                filter_kernel_size=settings.filter_kernel_size,
                interpolation_order=settings.interpolation_order)
            # smooth on each pass in case this is wanted
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)

        # extract the sig2noise ratio in case it is desired and replace the vectors
        if (settings.extract_sig2noise == True
                and i == settings.iterations
                and settings.iterations != 1
                and settings.do_sig2noise_validation == True):
            u, v, mask_s2n = validation_patch.sig2noise_val(
                u,
                v,
                sig2noise_ratio,
                threshold_low=settings.sig2noise_threshold)
            mask = mask + mask_s2n
        if settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
        if counter >= settings.roi_shift_start:
            settings.current_pos = settings.current_pos - calc_disp(
                x, v, frame_b.shape[1])
            if ((settings.ROI[1] - settings.current_pos) < 300):
                return settings.current_pos, True
        # scale the result timewise and lengthwise
        u = u / settings.dt
        v = v / settings.dt
        x, y, u, v = scaling.uniform(x,
                                     y,
                                     u,
                                     v,
                                     scaling_factor=settings.scaling_factor)
        # save the result
        save(x,
             y,
             u,
             v,
             sig2noise_ratio,
             mask,
             os.path.join(save_path_txts, 'field_%06d.txt' % (counter)),
             delimiter='\t')
        # disable the grid via rcParams
        plt.rcParams['axes.grid'] = False
        # show and save the plot if it is desired
        if settings.show_plot == True or settings.save_plot == True:
            plt.ioff()
            Name = os.path.join(save_path_images, 'Image_%06d.png' % (counter))
            display_vector_field(os.path.join(save_path_txts,
                                              'field_%06d.txt' % (counter)),
                                 scale=settings.scale_plot)
            if settings.save_plot == True:
                plt.savefig(Name, dpi=100)
            if settings.show_plot == True:
                plt.show()
            plt.close('all')
        print('Image Pair ' + str(counter) + ' of ' +
              settings.save_folder_suffix)
        if np.isnan(settings.current_pos):
            return settings.current_pos, True
        return settings.current_pos, False
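
# A minimal driver sketch for the worker above (not from the original source):
# it assumes the same `settings` object used inside `func` (with
# `filepath_images`, `current_pos`, etc.) is already configured, and that the
# A/B image pairs can be listed with a glob pattern; the '*_a.tif' / '*_b.tif'
# pattern and the sequential loop are placeholders for illustration only.
import os
from glob import glob

files_a = sorted(glob(os.path.join(settings.filepath_images, '*_a.tif')))
files_b = sorted(glob(os.path.join(settings.filepath_images, '*_b.tif')))

settings.current_pos = 0  # start with an unshifted ROI
for counter, (file_a, file_b) in enumerate(zip(files_a, files_b)):
    # func expects file names relative to settings.filepath_images
    current_pos, stop = func((os.path.basename(file_a),
                              os.path.basename(file_b),
                              counter))
    if stop:  # ROI exhausted or current_pos became NaN
        break
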
frame_a = tools.imread('exp1_001_a.bmp')
frame_b = tools.imread('exp1_001_b.bmp')

u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=24,
    overlap=12,
    dt=0.02,
    search_area_size=64,
    sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24,
                               overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=2.5)
u, v = filters.replace_outliers(u,
                                v,
                                method='localmean',
                                max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001.txt')
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)

u, v, s2n = pyprocess.piv(frame_a,
                          frame_b,
                          corr_method='fft',
                          window_size=24,
                          overlap=12,
                          dt=0.02)
Example #28
def calc_piv_2_images(frame_a, frame_b, idx, dir_name):
    '''
    Performs Particle Image Velocimetry (PIV) of two images, and saves an image with PIV on it.
    :param frame_a: first image
    :param frame_b: consecutive image
    :param idx: index of the first frame, for saving and ordering the images
    :param dir_name: directory to save the image to
    :return: -
    '''
    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)

    # filter out outliers that are very different from their neighbours
    # (helps to find a reasonable validation limit)
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=3,
                                      kernel_size=3)

    # convert x,y to mm; convert u,v to mm/sec
    x, y, u3, v3 = scaling.uniform(
        x, y, u2, v2, scaling_factor=scaling_factor)  # 96.52 microns/pixel

    # 0,0 shall be bottom left, positive rotation rate is counterclockwise
    x, y, u3, v3 = tools.transform_coordinates(x, y, u3, v3)

    fig, ax = plt.subplots()
    im = np.negative(frame_a)  # plot negative of the image for more clarity
    xmax = np.amax(x) + winsize / (2 * scaling_factor)
    ymax = np.amax(y) + winsize / (2 * scaling_factor)
    ax.imshow(im, cmap="Greys_r", extent=[0.0, xmax, 0.0, ymax])

    invalid = mask.astype("bool")
    valid = ~invalid
    plt.quiver(x[invalid],
               y[invalid],
               u3[invalid],
               v3[invalid],
               color="r",
               width=width)
    plt.quiver(x[valid],
               y[valid],
               u3[valid],
               v3[valid],
               color="b",
               width=width)

    ax.set_aspect(1.)
    plt.title(r'Velocity Vectors Field (Frame #%d) $(\frac{\mu m}{hour})$' %
              idx)
    plt.savefig(dir_name + "/vec_page%d.png" % idx, dpi=200)
    plt.show()
    plt.close()
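
# Usage sketch for calc_piv_2_images (illustrative, not from the original
# source): the function reads winsize, searchsize, overlap, dt, scaling_factor
# and width as module-level globals, so they are given placeholder values
# here; the image file names and the output directory are placeholders too,
# and tools is assumed to be imported as in the other snippets.
winsize = 32            # interrogation window size, pixels
searchsize = 64         # search area size in frame_b, pixels
overlap = 16            # overlap between windows, pixels
dt = 1.0                # time between frames
scaling_factor = 96.52  # pixels per unit length
width = 0.0025          # quiver arrow width

frame_a = tools.imread('frame_0000.tif')
frame_b = tools.imread('frame_0001.tif')
calc_piv_2_images(frame_a, frame_b, idx=0, dir_name='piv_results')  # directory must exist
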
Example #29
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].imshow(frame_a, cmap=plt.cm.gray)
ax[1].imshow(frame_b, cmap=plt.cm.gray)


# %%
winsize = 24 # pixels
searchsize = 64  # pixels, search in image B
overlap = 12 # pixels
dt = 0.02 # sec


u0, v0, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=winsize,
    overlap=overlap,
    dt=dt,
    search_area_size=searchsize,
    sig2noise_method='peak2peak')

# %%
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=winsize,
                               overlap=overlap)

# %%
u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)

# %%
u2, v2 = filters.replace_outliers(u1, v1, method='localmean',
                                  max_iter=10, kernel_size=2)

# %%
x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor=96.52)

# %%
tools.save(x, y, u3, v3, mask, 'exp1_001.txt')

# %%
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)
Example #30
def PIV(frame_0,
        frame_1,
        winsize,
        searchsize,
        overlap,
        frame_rate,
        scaling_factor,
        threshold=1.3,
        output='fil'):
    """
    Particle Image Velocimetry processing for two sequential images.
    
    Input:
    ------
    frame_0 - first frame to indicate potential seeds.
    frame_1 - second frame to trace seed displacements.
    winsize - size of the individual (square) grid cells in pixels.
    searchsize - size of the search area in pixels in which the location with the highest similarity is found.
    overlap - overlap between the grid cells in pixels.
    frame_rate - frame rate of the video in frames per second (fps).
    scaling_factor - number of pixels per metre.
    threshold - minimum signal-to-noise ratio used to filter out dubious vectors (default: 1.3).
    output - after which step the PIV processing is stopped ('raw', 'fil', or 'int'; default: 'fil').
    """

    # determine the timestep between the two sequential frames (1/fps)
    dt = 1. / frame_rate

    # estimation of seed displacements in x and y direction
    # and the corresponding signal-to-noise ratio
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_0,
        frame_1,
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    # xy-coordinates of the centre of each grid cell
    x, y = pyprocess.get_coordinates(image_size=frame_0.shape,
                                     window_size=winsize,
                                     overlap=overlap)

    # if output is 'fil' or 'int':
    # filter out grid cells with a low signal-to-noise ratio
    if output == 'fil' or output == 'int':
        u, v, mask = validation.sig2noise_val(u,
                                              v,
                                              sig2noise,
                                              threshold=threshold)

        # if output is 'int'
        # fill in missing values through interpolation
        if output == 'int':
            u, v = filters.replace_outliers(u,
                                            v,
                                            method='localmean',
                                            max_iter=50,
                                            kernel_size=3)

    # scale the results based on the number of pixels per metre
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=scaling_factor)

    return x, y, u, v, sig2noise
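
# Usage sketch for the PIV helper above (illustrative, not from the original
# source): the file names, frame rate and parameter values are placeholders,
# and tools is assumed to be imported as in the other snippets.
frame_0 = tools.imread('frame_0000.tif')
frame_1 = tools.imread('frame_0001.tif')

x, y, u, v, sig2noise = PIV(frame_0,
                            frame_1,
                            winsize=32,
                            searchsize=64,
                            overlap=16,
                            frame_rate=25,
                            scaling_factor=1000,  # e.g. 1000 pixels per metre
                            threshold=1.3,
                            output='int')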