Example #1
def process_node(i):
    DeltaFrame = 300
    winsize = 12  # pixels
    searchsize = 12  # pixels
    overlap = 6  # pixels
    dt = DeltaFrame * 1. / fps  # sec, time between the two correlated frames
    frame_a = tools.imread(fileNameList[i])
    frame_b = tools.imread(fileNameList[i + DeltaFrame])
    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize,
                                   overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=5,
                                      kernel_size=5)
    u3, v3, mask1 = validation.local_median_val(u2, v2, 3, 3, 1)
    u4, v4 = filters.replace_outliers(u3,
                                      v3,
                                      method='localmean',
                                      max_iter=5,
                                      kernel_size=5)
    tools.save(x, y, u4, v4, mask1, '../testResult/' + str(i) + '.txt')
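process_node relies on module-level names (fileNameList, fps and the openpiv modules) that are outside this snippet; below is a hypothetical driver sketch, assuming an OpenPIV install that still provides openpiv.process, with placeholder paths and frame rate.

# Hypothetical driver for process_node; the glob pattern and fps are placeholders.
import glob
from multiprocessing import Pool

import numpy as np
from openpiv import tools, process, validation, filters

fileNameList = sorted(glob.glob('../testData/*.tif'))  # assumed image sequence
fps = 3000                                             # assumed camera frame rate

if __name__ == '__main__':
    DeltaFrame = 300  # must match the value hard-coded inside process_node
    with Pool() as pool:
        pool.map(process_node, range(len(fileNameList) - DeltaFrame))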
def openpiv_default_run(im1, im2):
    """ default settings for OpenPIV analysis using
    extended_search_area_piv algorithm for two images
    
    Inputs:
        im1, im2 : str, str = paths of the two images
    """
    frame_a = tools.imread(im1)
    frame_b = tools.imread(im2)

    u, v, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=32,
        overlap=8,
        dt=1,
        search_area_size=64,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=32,
                                   overlap=8)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=1)
    tools.save(x, y, u, v, mask, im1 + '.txt')
    fig, ax = tools.display_vector_field(im1 + '.txt',
                                         on_img=True,
                                         image_name=im1,
                                         scaling_factor=1,
                                         ax=None)
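A short, hypothetical batch usage of openpiv_default_run; the glob pattern is a placeholder and the openpiv imports are assumed to match the modules used above.

# Hypothetical usage of openpiv_default_run; the file pattern is a placeholder.
import glob

frames = sorted(glob.glob('./images/frame_*.bmp'))
for im1, im2 in zip(frames[:-1], frames[1:]):
    openpiv_default_run(im1, im2)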
Example #3
def PIV(I0, I1, winsize, overlap, dt):
    """ Normal PIV """
    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        I0.astype(np.int32),
        I1.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=winsize,
        sig2noise_method='peak2peak',
    )
    # get x, y
    x, y = pyprocess.get_coordinates(image_size=I0.shape,
                                     search_area_size=winsize,
                                     overlap=overlap,
                                     window_size=winsize)
    u1, v1, mask_s2n = validation.sig2noise_val(
        u0,
        v0,
        sig2noise,
        threshold=1.05,
    )
    # replace_outliers
    u2, v2 = filters.replace_outliers(
        u1,
        v1,
        method='localmean',
        max_iter=3,
        kernel_size=3,
    )
    # median filter smoothing
    u3 = medfilt2d(u2, 3)
    v3 = medfilt2d(v2, 3)
    return x, y, u3, v3
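A usage sketch for PIV(); the image names, window size, overlap and dt are placeholders, and the imports are assumed to mirror the modules referenced in the snippet (openpiv.pyprocess, openpiv.validation, openpiv.filters, scipy.signal.medfilt2d).

# Hypothetical usage of PIV(); file names, winsize, overlap and dt are placeholders.
from openpiv import tools

I0 = tools.imread('frame_0000.tif')
I1 = tools.imread('frame_0001.tif')

x, y, u, v = PIV(I0, I1, winsize=32, overlap=16, dt=0.01)
print(u.shape)  # one displacement vector per interrogation window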
Example #4
def func(args):
    """A function to process each image pair."""

    # this line is REQUIRED for multiprocessing to work
    # always use it in your custom function

    file_a, file_b, counter = args

    #####################
    # Here goes your code
    #####################

    # read images into numpy arrays
    frame_a = tools.imread(os.path.join(path, file_a))
    frame_b = tools.imread(os.path.join(path, file_b))

    frame_a = (frame_a * 1024).astype(np.int32)
    frame_b = (frame_b * 1024).astype(np.int32)

    # process image pair with extended search area piv algorithm.
    u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, \
        window_size=64, overlap=32, dt=0.02, search_area_size=128, sig2noise_method='peak2peak')
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.5)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    # get window centers coordinates
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=128,
                                     overlap=32)
    # save to a file
    tools.save(x, y, u, v, mask, 'test2_%03d.txt' % counter)
    tools.display_vector_field('test2_%03d.txt' % counter)
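The (file_a, file_b, counter) unpacking suggests this func is meant to be fed by OpenPIV's batch helper; a sketch under the assumption that tools.Multiprocesser with this interface is available in the installed version, with placeholder patterns and CPU count.

# Hypothetical batch driver for func(); data_dir, patterns and n_cpus are placeholders.
from openpiv import tools

task = tools.Multiprocesser(data_dir=path,      # the same `path` used inside func
                            pattern_a='*_a.bmp',
                            pattern_b='*_b.bmp')
task.run(func=func, n_cpus=4)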
Example #5
def test_replace_outliers():
    """ test of replacing outliers """
    v = np.ones((9, 9))
    v[1:-1, 1:-1] = np.nan
    u = v.copy()
    uf, vf = filters.replace_outliers(u, v)
    assert (np.sum(np.isnan(u)) == 7**2)
    assert (np.allclose(np.ones((9, 9)), uf))
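The test above checks that replace_outliers fills NaN holes; a standalone sketch of the same call on a field with a single invalid vector:

# Minimal demonstration of filters.replace_outliers on a single NaN hole.
import numpy as np
from openpiv import filters

u = np.ones((5, 5))
v = np.ones((5, 5))
u[2, 2] = np.nan  # mark one vector as invalid
v[2, 2] = np.nan
uf, vf = filters.replace_outliers(u, v, method='localmean',
                                  max_iter=3, kernel_size=2)
print(np.isnan(uf).any())  # False: the hole was filled from its neighbours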
Example #6
def run_piv(
    frame_a,
    frame_b,
):
    winsize = 64  # pixels, interrogation window size in frame A
    searchsize = 68  # pixels, search in image B
    overlap = 32  # pixels, 50% overlap
    dt = 0.0005  # sec, time interval between pulses

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     window_size=winsize,
                                     overlap=overlap)

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)

    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=3)

    x, y, u3, v3 = scaling.uniform(x, y, u2, v2,
                                   scaling_factor=41.22)  # 41.22 microns/pixel

    mean_u = np.mean(u3)
    mean_v = np.mean(v3)

    deficit_u = u3 - mean_u
    deficit_v = v3 - mean_v

    u_prime = np.mean(np.sqrt(0.5 * (deficit_u**2 + deficit_v**2)))
    u_avg = np.mean(np.sqrt(0.5 * (mean_u**2 + mean_v**2)))

    turbulence_intensity = u_prime / u_avg

    #save in the simple ASCII table format
    fname = "./Tables/" + exp_string + ".txt"
    # tools.save(x, y, u3, v3, mask, fname)

    out = np.vstack([m.ravel() for m in [x, y, u3, v3]])
    # print(out)
    # np.savetxt(fname,out.T)

    with open(fname, "ab") as f:
        f.write(b"\n")
        np.savetxt(f, out.T)

    return turbulence_intensity
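run_piv depends on a module-level exp_string and an existing ./Tables/ directory; a hypothetical call sketch with placeholder image names:

# Hypothetical usage of run_piv(); exp_string and the image names are placeholders.
import os
from openpiv import tools

exp_string = 'run_001'                 # assumed module-level name used by run_piv
os.makedirs('./Tables', exist_ok=True)

frame_a = tools.imread('exp1_001_a.bmp')
frame_b = tools.imread('exp1_001_b.bmp')
print('turbulence intensity:', run_piv(frame_a, frame_b))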
Example #7
def two_images(image_1, image_2, search_area_size=64, window_size=32, overlap=16, dt=0.02):
    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))

    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a  = tools.imread( 'image_1.bmp' )
    frame_b  = tools.imread( 'image_2.bmp' )
    frame_a = (frame_a*1024).astype(np.int32)
    frame_b = (frame_b*1024).astype(np.int32)

    if not search_area_size:
        search_area_size = 64
    if not window_size:
        window_size = 32
    if not overlap:
        overlap = 16
    if not dt:
        dt = 0.02

    u, v, sig2noise = process.extended_search_area_piv( frame_a, frame_b, window_size=window_size, 
        overlap=overlap, dt=dt, search_area_size=search_area_size, sig2noise_method='peak2peak' )
    x, y = process.get_coordinates( image_size=frame_a.shape, window_size=window_size, overlap=overlap )
    u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 1.3 )
    u, v, mask = validation.global_val( u, v, (-1000, 2000), (-1000, 1000) )
    u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )

    file_name_text = 'result.txt'
    file_name_png = 'result.png'
    if os.path.isfile(file_name_text):
        os.remove(file_name_text)
    if os.path.isfile(file_name_png):
        os.remove(file_name_png)
    tools.save(x, y, u, v, mask, file_name_text)
    a = np.loadtxt(file_name_text)
    fig = plt.figure()
    invalid = a[:,4].astype('bool')
    fig.canvas.manager.set_window_title('Vector field, '+str(np.count_nonzero(invalid))+' wrong vectors')
    valid = ~invalid
    plt.quiver(a[invalid,0],a[invalid,1],a[invalid,2],a[invalid,3],color='r',scale=100, width=0.0025)
    plt.quiver(a[valid,0],a[valid,1],a[valid,2],a[valid,3],color='b',scale=100, width=0.0025)
    plt.draw()
    plt.savefig(file_name_png, format="png")
 
    with open(file_name_text, "rb") as resultFileText:
        file_reader_text = resultFileText.read()
        text_encode = base64.encodebytes(file_reader_text)
        base64_string_text = str(text_encode, 'utf-8')
    
    with open(file_name_png, "rb") as resultFilePng:
        file_reader_image = resultFilePng.read()
        image_encode = base64.encodebytes(file_reader_image)
        base64_string_image = str(image_encode, 'utf-8')
    
    return base64_string_text, base64_string_image
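two_images takes base64-encoded image bytes and returns base64-encoded results; a hypothetical round trip with placeholder file names:

# Hypothetical usage of two_images(); input file names are placeholders.
import base64

with open('exp1_001_a.bmp', 'rb') as f:
    img1_b64 = base64.b64encode(f.read())
with open('exp1_001_b.bmp', 'rb') as f:
    img2_b64 = base64.b64encode(f.read())

txt_b64, png_b64 = two_images(img1_b64, img2_b64)

with open('vectors.png', 'wb') as f:  # write the returned quiver plot back to disk
    f.write(base64.b64decode(png_b64))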
Example #8
    def _piv_frame(self, img1, img2, show=False, **kwargs):
        """
        calculate velocity using piv method on two frames
        """
        from openpiv.process import extended_search_area_piv, get_coordinates
        # from openpiv.scaling import uniform

        if self._debug:
            print('... [PIV] window size: {}'.format(self._windowSize))
            print('... [PIV] overlap: {}'.format(self._overlap))
            print('... [PIV] search area size: {}'.format(self._searchArea))
            print('... [PIV] threshold: {}'.format(self._piv_threshold))

        u, v, sig2noise = extended_search_area_piv(
            img1,
            img2,
            window_size=self._windowSize,
            overlap=self._overlap,
            dt=self._exposuretime,
            search_area_size=self._searchArea,
            sig2noise_method='peak2peak')
        self._pivx, self._pivy = get_coordinates(image_size=img1.shape,
                                                 window_size=self._windowSize,
                                                 overlap=self._overlap)
        #self._pivy = np.flipud(self._pivy)
        #self._pivx, self._pivy, u, v = uniform(self._pivx, self._pivy, u, v, scaling_factor=self._mpp)

        if show:
            from openpiv.validation import sig2noise_val
            from openpiv.filters import replace_outliers
            u, v, mask = sig2noise_val(u,
                                       v,
                                       sig2noise,
                                       threshold=self._piv_threshold)
            u, v = replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
            # show quiver plot
            plt.figure(figsize=(12, 6))
            plt.imshow(img1)
            plt.quiver(self._pivx, self._pivy, u, v, color='w', pivot='mid')
            plt.savefig(self._fname[:-4] + '_piv.png', dpi=100)

        if self._debug:
            print(
                "... [PIV] mean velocity [um/sec]: ({:4.2f}, {:4.2f})".format(
                    np.mean(u) * self._mpp,
                    np.mean(v) * self._mpp))
            print("... [PIV] mean velocity [pixel/frame]: ({:4.2f}, {:4.2f})".
                  format(
                      np.mean(u) * self._exposuretime,
                      np.mean(v) * self._exposuretime))

        return (u, v, sig2noise)
Example #9
def test_replace_outliers():
    """ test of replacing outliers """
    v = np.ones((9,9))
    v[1:-1,1:-1] = np.nan
    u = v.copy()
    uf,vf = filters.replace_outliers(u,v)
    assert(np.sum(np.isnan(u))==7**2)
    assert(np.allclose(np.ones((9,9)),uf))

    
Example #10
def analyzer(frame_a, frame_b, text, plot, num_scene, pathout, scal, zre, xre,
             dt):

    winsize = 16  # pixels
    searchsize = 32  # pixels, search in image b
    overlap = 8  # pixels

    frame_a = cv2.adaptiveThreshold(frame_a, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    frame_b = cv2.adaptiveThreshold(frame_b, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    #frame_a = cv2.adaptiveThreshold(frame_a,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    #frame_b = cv2.adaptiveThreshold(frame_b,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)

    plt.imshow(np.c_[frame_a, frame_b], cmap='gray')
    plt.savefig(pathout + '/filtered' + str(num_scene) + '.png', dpi=800)

    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize,
                                   overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=2)
    x, y, u3, v3 = scaling.uniform(
        x, y, u2, v2, scaling_factor=scal)  # scaling_factor (pixel per meter)

    u3 = np.flip(u3, axis=0)
    v3 = np.flip(v3, axis=0)

    xre = np.linspace(0, xre / 100, len(x[0, :]))
    zre = np.linspace(0, zre / 100, len(x[:, 0]))

    if plot == 1:
        piv_plotting(xre, zre, u3, v3, num_scene, pathout)

    if text == 0:
        tools.save(x, y, u3, v3, mask,
                   pathout + '/piv' + str(num_scene) + '.txt')
Example #11
def process(args, bga, bgb, reflection):
    file_a, file_b, counter = args

    # read images into numpy arrays
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)

    # removing background and reflections
    frame_a = frame_a - bga
    frame_b = frame_b - bgb
    frame_a[reflection == 255] = 0
    frame_b[reflection == 255] = 0

    #applying a static mask (taking out the regions where we have walls)
    yp = [580, 435, 0, 0, 580, 580, 0, 0, 435, 580]
    xp = [570, 570, 680, 780, 780, 0, 0, 105, 230, 230]
    pnts = draw.polygon(yp, xp, frame_a.shape)
    frame_a[pnts] = 0
    frame_b[pnts] = 0

    # checking the resulting frame
    #fig, ax = plt.subplots(2,2)
    #ax[0,0].imshow(frame_a_org, cmap='gray')
    #ax[0,1].imshow(frame_a, cmap='gray')
    #ax[1,0].imshow(frame_b_org, cmap='gray')
    #ax[1,1].imshow(frame_b, cmap='gray')
    #plt.tight_layout()
    #plt.show()

    # main piv processing
    u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, \
        window_size=48, overlap=16, dt=0.001094, search_area_size=64, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=48,
                                     overlap=16)
    u, v, mask = validation.local_median_val(u, v, 2000, 2000, size=2)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    u, *_ = smoothn(u, s=1.0)
    v, *_ = smoothn(v, s=1.0)

    # saving the results
    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, mask, save_file + '.dat')
Example #12
def two_images(image_1, image_2):

    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))

    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a = tools.imread('image_1.bmp')
    frame_b = tools.imread('image_2.bmp')

    winsize = 32  # pixels
    searchsize = 64  # pixels, search in image B
    overlap = 12  # pixels
    dt = 0.02  # sec

    u, v, sig2noise = pyprocess.piv(frame_a.astype(np.int32),
                                    frame_b.astype(np.int32),
                                    window_size=winsize,
                                    overlap=overlap,
                                    dt=dt,
                                    search_size=searchsize,
                                    sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=searchsize,
                                     overlap=overlap)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

    file_name = 'result.txt'
    if os.path.isfile(file_name):
        os.remove(file_name)
    tools.save(x, y, u, v, np.zeros_like(u),
               file_name)  # no masking, all values are valid

    with open(file_name, "rb") as resultFile:
        file_reader = resultFile.read()
        image_encode = base64.encodebytes(file_reader)
        base64_string = str(image_encode, 'utf-8')

    return base64_string
Example #13
def ProcessPIV(args, bga, bgb, reflection, stg):
    # read images into numpy arrays
    file_a, file_b, counter = args
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)
    # removing background and reflections
    if bgb is not None:
        frame_a = frame_a - bga
        frame_b = frame_b - bgb
        frame_a[reflection == 255] = 0
        frame_b[reflection == 255] = 0
    #plt.imshow(frame_a, cmap='gray')
    #plt.show()

    # main piv processing
    u, v, s2n = pyprocess.extended_search_area_piv( frame_a, frame_b, \
        window_size=stg['WS'], overlap=stg['OL'], dt=stg['DT'], search_area_size=stg['SA'], sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=stg['WS'],
                                     overlap=stg['OL'])

    if stg['BVR'] == 'on':
        u, v, mask = validation.local_median_val(u,
                                                 v,
                                                 stg['MF'][0],
                                                 stg['MF'][1],
                                                 size=2)
        u, v, mask = validation.global_val(u,
                                           v,
                                           u_thresholds=stg['GF'][0],
                                           v_thresholds=stg['GF'][1])
        u, v = filters.replace_outliers(u,
                                        v,
                                        method='localmean',
                                        max_iter=10,
                                        kernel_size=2)
        u, *_ = smoothn(u, s=0.5)
        v, *_ = smoothn(v, s=0.5)
    x, y, u, v = scaling.uniform(x, y, u, v, stg['SC'])
    # saving the results
    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, s2n, save_file + '.dat')
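ProcessPIV pulls all of its parameters from the stg dictionary; judging from the keys accessed above, a settings sketch could look like this (every value is illustrative, not a library default):

# Illustrative settings dictionary for ProcessPIV(); all values are placeholders.
stg = {
    'WS': 48,                              # window_size [px]
    'OL': 16,                              # overlap [px]
    'DT': 0.001094,                        # dt [s]
    'SA': 64,                              # search_area_size [px]
    'BVR': 'on',                           # 'on' enables validation, replacement, smoothing
    'MF': (2000, 2000),                    # local median validation thresholds (u, v)
    'GF': ((-1000, 1000), (-1000, 1000)),  # global validation limits (u, v)
    'SC': 96.52,                           # scaling factor passed to scaling.uniform
}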
Example #14
def PIV(image_0, image_1, winsize, searchsize, overlap, frame_rate,
        scaling_factor):

    frame_0 = image_0
    #     [0:600, :]
    frame_1 = image_1
    #     [0:600, :]

    # Processing the images with interrogation area and search area / cross-correlation algorithm
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_0,
        frame_1,
        window_size=winsize,
        overlap=overlap,
        dt=1.0 / frame_rate,  # time between frames, derived from the frame_rate argument
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    # Compute the coordinates of the centers of the interrogation windows
    x, y = pyprocess.get_coordinates(image_size=frame_0.shape,
                                     window_size=winsize,
                                     overlap=overlap)

    # This function actually sets to NaN all those vectors for
    # which the signal to noise ratio is below the threshold used here (1.2).
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.2)

    # Function as described above, removing outliers deviating with more
    # than twice the standard deviation
    u, v, mask = remove_outliers(u, v, mask)

    # Replacing the outliers with interpolation
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=50,
                                    kernel_size=3)

    # Apply a uniform scaling to the flow field to get dimensional units
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=scaling_factor)

    return x, y, u, v, mask
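remove_outliers is not defined in this snippet; going only by the comment above (drop vectors deviating by more than twice the standard deviation), a hypothetical implementation might be:

# Hypothetical remove_outliers helper, sketched from the comment above:
# vectors whose magnitude deviates by more than n_std standard deviations
# from the mean are set to NaN so replace_outliers can interpolate them.
import numpy as np

def remove_outliers(u, v, mask, n_std=2.0):
    magnitude = np.sqrt(u ** 2 + v ** 2)
    deviation = np.abs(magnitude - np.nanmean(magnitude))
    outliers = deviation > n_std * np.nanstd(magnitude)
    u = np.where(outliers, np.nan, u)
    v = np.where(outliers, np.nan, v)
    return u, v, mask | outliers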
Example #15
    def repl_outliers(self):
        '''Replace outliers.'''
        result_fnames = []
        for i, f in enumerate(self.p['fnames']):
            data = np.loadtxt(f)
            u, v = piv_flt.replace_outliers(np.array([data[:, 2]]),
                                            np.array([data[:, 3]]),
                                            method=self.p['repl_method'],
                                            max_iter=self.p['repl_iter'],
                                            kernel_size=self.p['repl_kernel'])
            save_fname = create_save_vec_fname(path=f, postfix='_repl')
            save(data[:, 0],
                 data[:, 1],
                 u,
                 v,
                 data[:, 4],
                 data[:, 5],
                 save_fname,
                 delimiter=delimiter)
            result_fnames.append(save_fname)
        return result_fnames
Example #16
import sys

if 'OpenPIV' not in sys.path:
    sys.path.append('/Users/alex/Documents/OpenPIV/alexlib/openpiv-python')

from openpiv import tools, validation, process, filters, scaling, pyprocess
import numpy as np

frame_a  = tools.imread( 'exp1_001_a.bmp' )
frame_b  = tools.imread( 'exp1_001_b.bmp' )

u, v, sig2noise = process.extended_search_area_piv( frame_a.astype(np.int32), 
frame_b.astype(np.int32), window_size=24, overlap=12, dt=0.02, search_area_size=64, 
sig2noise_method='peak2peak' )
x, y = process.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=12 )
u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 2.5 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
tools.save(x, y, u, v, mask, 'exp1_001.txt' )
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)



u, v, s2n= pyprocess.piv(frame_a, frame_b, corr_method='fft', window_size=24, overlap=12, 
dt=0.02, sig2noise_method='peak2peak' )
x, y = pyprocess.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=12 )
u, v, mask = validation.sig2noise_val( u, v, s2n, threshold = 2.5 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2.5)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
tools.save(x, y, u, v, mask, 'exp1_002.txt' )
tools.display_vector_field('exp1_002.txt', scale=100, width=0.0025)
Example #17
def multipass_img_deform(
    frame_a,
    frame_b,
    current_iteration,
    x_old,
    y_old,
    u_old,
    v_old,
    settings,
    mask_coords=[],
):
    # window_size,
    # overlap,
    # iterations,
    # current_iteration,
    # x_old,
    # y_old,
    # u_old,
    # v_old,
    # correlation_method="circular",
    # normalized_correlation=False,
    # subpixel_method="gaussian",
    # deformation_method="symmetric",
    # sig2noise_method="peak2peak",
    # sig2noise_threshold=1.0,
    # sig2noise_mask=2,
    # interpolation_order=1,
    """
    Multi pass of the PIV evaluation.

    This function does the PIV evaluation of the second and other passes.
    It returns the coordinates of the interrogation window centres,
    the displacement u, v for each interrogation window as well as
    the signal to noise ratio array (which is full of NaNs if opted out)


    Parameters
    ----------
    frame_a : 2d np.ndarray
        the first image

    frame_b : 2d np.ndarray
        the second image

    window_size : tuple of ints
         the size of the interrogation window

    overlap : tuple of ints
        the overlap of the interrogation window, e.g. window_size/2

    x_old : 2d np.ndarray
        the x coordinates of the vector field of the previous pass

    y_old : 2d np.ndarray
        the y coordinates of the vector field of the previous pass

    u_old : 2d np.ndarray
        the u displacement of the vector field of the previous pass
        in case of the image mask - u_old and v_old are MaskedArrays

    v_old : 2d np.ndarray
        the v displacement of the vector field of the previous pass

    subpixel_method: string
        the method used for the subpixel interpolation.
        one of the following methods to estimate subpixel location of the peak:
        'centroid' [replaces default if correlation map is negative],
        'gaussian' [default if correlation map is positive],
        'parabolic'

    interpolation_order : int
        the order of the spline interpolation used for the image deformation

    mask_coords : list of x,y coordinates (pixels) of the image mask,
        default is an empty list

    Returns
    -------
    x : 2d np.array
        array containing the x coordinates of the interrogation window centres

    y : 2d np.array
        array containing the y coordinates of the interrogation window centres

    u : 2d np.array
        array containing the horizontal displacement for every interrogation
        window [pixels]

    v : 2d np.array
        array containing the vertical displacement for every interrogation
        window [pixels]

    s2n : 2D np.array of signal to noise ratio values

    """

    if not isinstance(u_old, np.ma.MaskedArray):
        raise ValueError('Expected masked array')

    # calculate the x and y coordinates of the interrogation window centres.
    # The edges must be extracted to provide sufficient input: x_old and y_old
    # are the coordinates of the old grid, x_int and y_int are the coordinates
    # of the new grid

    window_size = settings.windowsizes[current_iteration]
    overlap = settings.overlap[current_iteration]

    x, y = get_coordinates(frame_a.shape, window_size, overlap)

    # The interpolation function doesn't like meshgrids as input.
    # Also, the coordinate system for y is now from top to bottom
    # and RectBivariateSpline wants an increasing set

    y_old = y_old[:, 0]
    # y_old = y_old[::-1]
    x_old = x_old[0, :]

    y_int = y[:, 0]
    # y_int = y_int[::-1]
    x_int = x[0, :]

    # interpolating the displacements from the old grid onto the new grid;
    # y before x because numpy works row-major
    ip = RectBivariateSpline(y_old, x_old, u_old.filled(0.))
    u_pre = ip(y_int, x_int)

    ip2 = RectBivariateSpline(y_old, x_old, v_old.filled(0.))
    v_pre = ip2(y_int, x_int)

    # if settings.show_plot:
    if settings.show_all_plots:
        plt.figure()
        plt.quiver(x_old, y_old, u_old, -1 * v_old, color='b')
        plt.quiver(x_int, y_int, u_pre, -1 * v_pre, color='r', lw=2)
        plt.gca().set_aspect(1.)
        plt.gca().invert_yaxis()
        plt.title('inside deform, invert')
        plt.show()

    # @TKauefer added another method to the window deformation, 'symmetric':
    # it splits the deformation onto both frames, which takes more effort due
    # to the additional interpolation but should deliver better results

    old_frame_a = frame_a.copy()
    old_frame_b = frame_b.copy()

    # Image deformation has to occur in image coordinates
    # therefore we need to convert the results of the
    # previous pass which are stored in the physical units
    # and so y from the get_coordinates

    if settings.deformation_method == "symmetric":
        # this one is doing the image deformation (see above)
        x_new, y_new, ut, vt = create_deformation_field(
            frame_a, x, y, u_pre, v_pre)
        frame_a = scn.map_coordinates(frame_a,
                                      ((y_new - vt / 2, x_new - ut / 2)),
                                      order=settings.interpolation_order,
                                      mode='nearest')
        frame_b = scn.map_coordinates(frame_b,
                                      ((y_new + vt / 2, x_new + ut / 2)),
                                      order=settings.interpolation_order,
                                      mode='nearest')
    elif settings.deformation_method == "second image":
        frame_b = deform_windows(
            frame_b,
            x,
            y,
            u_pre,
            -v_pre,
            interpolation_order=settings.interpolation_order)
    else:
        raise Exception("Deformation method is not valid.")

    # if settings.show_plot:
    if settings.show_all_plots:
        if settings.deformation_method == 'symmetric':
            plt.figure()
            plt.imshow(frame_a - old_frame_a)
            plt.show()

        plt.figure()
        plt.imshow(frame_b - old_frame_b)
        plt.show()

    # if do_sig2noise is True
    #     sig2noise_method = sig2noise_method
    # else:
    #     sig2noise_method = None

    # so we use here default circular not normalized correlation:
    # if we did not want to validate every step, remove the method
    if settings.sig2noise_validate is False:
        settings.sig2noise_method = None

    u, v, s2n = extended_search_area_piv(
        frame_a,
        frame_b,
        window_size=window_size,
        overlap=overlap,
        width=settings.sig2noise_mask,
        subpixel_method=settings.subpixel_method,
        sig2noise_method=settings.sig2noise_method,
        correlation_method=settings.correlation_method,
        normalized_correlation=settings.normalized_correlation,
    )

    shapes = np.array(get_field_shape(frame_a.shape, window_size, overlap))
    u = u.reshape(shapes)
    v = v.reshape(shapes)
    s2n = s2n.reshape(shapes)

    u += u_pre
    v += v_pre

    # reapply the image mask to the new grid
    if settings.image_mask:
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        u = np.ma.masked_array(u, np.ma.nomask)
        v = np.ma.masked_array(v, np.ma.nomask)

    # validate in the multi-pass by default
    u, v, mask = validation.typical_validation(u, v, s2n, settings)

    if np.all(mask):
        raise ValueError("Something happened in the validation")

    if not isinstance(u, np.ma.MaskedArray):
        raise ValueError('not a masked array anymore')

    if settings.show_all_plots:
        plt.figure()
        # use a boolean mask so that ~ (logical not) works for indexing
        invalid = mask.astype(bool)

        plt.quiver(x[~invalid], y[~invalid], u[~invalid], -v[~invalid],
                   color='b')
        plt.quiver(x[invalid], y[invalid], u[invalid], -v[invalid], color='r')
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title('After sig2noise, inverted')
        plt.show()

    # we have to replace outliers
    u, v = filters.replace_outliers(
        u,
        v,
        method=settings.filter_method,
        max_iter=settings.max_filter_iteration,
        kernel_size=settings.filter_kernel_size,
    )

    # reapply the image mask to the new grid
    if settings.image_mask:
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        u = np.ma.masked_array(u, np.ma.nomask)
        v = np.ma.masked_array(v, np.ma.nomask)

    if settings.show_all_plots:
        plt.figure()
        plt.quiver(x, y, u, -v, color='r')
        plt.quiver(x, y, u_pre, -1 * v_pre, color='b')
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title(' after replaced outliers, red, invert')
        plt.show()

    return x, y, u, v, s2n, mask
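multipass_img_deform reads everything it needs from the settings object; the attributes touched above can be mocked with a SimpleNamespace for experimentation (all values illustrative; validation.typical_validation will need further threshold attributes not listed here):

# Illustrative settings object for multipass_img_deform(); values are placeholders.
from types import SimpleNamespace

settings = SimpleNamespace(
    windowsizes=(64, 32),            # per-pass interrogation window sizes
    overlap=(32, 16),                # per-pass overlaps
    deformation_method='symmetric',  # or 'second image'
    interpolation_order=3,
    correlation_method='circular',
    normalized_correlation=False,
    subpixel_method='gaussian',
    sig2noise_method='peak2peak',
    sig2noise_mask=2,
    sig2noise_validate=True,
    filter_method='localmean',
    max_filter_iteration=4,
    filter_kernel_size=2,
    image_mask=False,
    show_all_plots=False,
)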
Example #18
    def func(args):
        """A function to process each image pair."""

        # this line is REQUIRED for multiprocessing to work
        # always use it in your custom function

        file_a, file_b, counter = args

        # counter2=str(counter2)
        #####################
        # Here goes your code
        #####################

        " read images into numpy arrays"
        frame_a = imread(os.path.join(settings.filepath_images, file_a))
        frame_b = imread(os.path.join(settings.filepath_images, file_b))

        # Miguel: I just had a quick look, and I do not understand the reason
        # for this step.
        #  I propose to remove it.
        # frame_a = (frame_a*1024).astype(np.int32)
        # frame_b = (frame_b*1024).astype(np.int32)

        " crop to ROI"
        if settings.ROI == "full":
            frame_a = frame_a
            frame_b = frame_b
        else:
            frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
            frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]

        if settings.invert is True:
            frame_a = invert(frame_a)
            frame_b = invert(frame_b)

        if settings.show_all_plots:
            fig, ax = plt.subplots(1, 1)
            ax.imshow(frame_a, cmap=plt.get_cmap('Reds'))
            ax.imshow(frame_b, cmap=plt.get_cmap('Blues'), alpha=.5)
            plt.show()

        if settings.dynamic_masking_method in ("edge", "intensity"):
            frame_a, mask_a = preprocess.dynamic_masking(
                frame_a,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold,
            )
            frame_b, mask_b = preprocess.dynamic_masking(
                frame_b,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold,
            )

        # "first pass"
        x, y, u, v, s2n = first_pass(frame_a, frame_b, settings)

        if settings.show_all_plots:
            plt.figure()
            plt.quiver(x, y, u, -v, color='b')
            # plt.gca().invert_yaxis()
            # plt.gca().set_aspect(1.)
            # plt.title('after first pass, invert')
            # plt.show()

        # " Image masking "
        if settings.image_mask:
            image_mask = np.logical_and(mask_a, mask_b)
            mask_coords = preprocess.mask_coordinates(image_mask)
            # mark those points on the grid of PIV inside the mask
            grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)

            # mask the velocity
            u = np.ma.masked_array(u, mask=grid_mask)
            v = np.ma.masked_array(v, mask=grid_mask)
        else:
            mask_coords = []
            u = np.ma.masked_array(u, mask=np.ma.nomask)
            v = np.ma.masked_array(v, mask=np.ma.nomask)

        if settings.validation_first_pass:
            u, v, mask = validation.typical_validation(u, v, s2n, settings)

        if settings.show_all_plots:
            # plt.figure()
            plt.quiver(x, y, u, -v, color='r')
            plt.gca().invert_yaxis()
            plt.gca().set_aspect(1.)
            plt.title('after first pass validation new, inverted')
            plt.show()

        # "filter to replace the values that where marked by the validation"
        if settings.num_iterations == 1 and settings.replace_vectors:
            # for multi-pass we cannot have holes in the data
            # after the first pass
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size,
            )
        # don't even check if it's true or false
        elif settings.num_iterations > 1:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size,
            )

            # "adding masks to add the effect of all the validations"
        if settings.smoothn:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

        if settings.image_mask:
            grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
            u = np.ma.masked_array(u, mask=grid_mask)
            v = np.ma.masked_array(v, mask=grid_mask)
        else:
            u = np.ma.masked_array(u, np.ma.nomask)
            v = np.ma.masked_array(v, np.ma.nomask)

        if settings.show_all_plots:
            plt.figure()
            plt.quiver(x, y, u, -v)
            plt.gca().invert_yaxis()
            plt.gca().set_aspect(1.)
            plt.title('before multi pass, inverted')
            plt.show()

        if not isinstance(u, np.ma.MaskedArray):
            raise ValueError("Expected masked array")
        """ Multi pass """

        for i in range(1, settings.num_iterations):

            if not isinstance(u, np.ma.MaskedArray):
                raise ValueError("Expected masked array")

            x, y, u, v, s2n, mask = multipass_img_deform(
                frame_a,
                frame_b,
                i,
                x,
                y,
                u,
                v,
                settings,
                mask_coords=mask_coords)

            # If the smoothing is active, we do it at each pass
            # but not the last one
            if settings.smoothn is True and i < settings.num_iterations - 1:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
            if not isinstance(u, np.ma.MaskedArray):
                raise ValueError('not a masked array anymore')

            if hasattr(settings, 'image_mask') and settings.image_mask:
                grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
                u = np.ma.masked_array(u, mask=grid_mask)
                v = np.ma.masked_array(v, mask=grid_mask)
            else:
                u = np.ma.masked_array(u, np.ma.nomask)
                v = np.ma.masked_array(v, np.ma.nomask)

            if settings.show_all_plots:
                plt.figure()
                plt.quiver(x, y, u, -1 * v, color='r')
                plt.gca().set_aspect(1.)
                plt.gca().invert_yaxis()
                plt.title('end of the multipass, invert')
                plt.show()

        if settings.show_all_plots and settings.num_iterations > 1:
            plt.figure()
            plt.quiver(x, y, u, -v)
            plt.gca().invert_yaxis()
            plt.gca().set_aspect(1.)
            plt.title('after multi pass, before saving, inverted')
            plt.show()

        # we now use only 0s instead of the image
        # masked regions.
        # we could do Nan, not sure what is best
        u = u.filled(0.)
        v = v.filled(0.)

        # "scales the results pixel-> meter"
        x, y, u, v = scaling.uniform(x,
                                     y,
                                     u,
                                     v,
                                     scaling_factor=settings.scaling_factor)

        if settings.image_mask:
            grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
            u = np.ma.masked_array(u, mask=grid_mask)
            v = np.ma.masked_array(v, mask=grid_mask)
        else:
            u = np.ma.masked_array(u, np.ma.nomask)
            v = np.ma.masked_array(v, np.ma.nomask)

        # before saving we convert to the "physically relevant"
        # right-hand coordinate system with 0,0 at the bottom left
        # x to the right, y upwards
        # and so u,v

        x, y, u, v = transform_coordinates(x, y, u, v)
        # import pdb; pdb.set_trace()
        # "save to a file"
        tools.save(x,
                   y,
                   u,
                   v,
                   mask,
                   os.path.join(save_path, "field_A%03d.txt" % counter),
                   delimiter="\t")
        # "some other stuff that one might want to use"
        if settings.show_plot or settings.save_plot:
            Name = os.path.join(save_path, "Image_A%03d.png" % counter)
            fig, _ = display_vector_field(
                os.path.join(save_path, "field_A%03d.txt" % counter),
                scale=settings.scale_plot,
            )
            if settings.save_plot is True:
                fig.savefig(Name)
            if settings.show_plot is True:
                plt.show()

        print(f"Image Pair {counter + 1}")
        print(file_a.rsplit('/')[-1], file_b.rsplit('/')[-1])
Example #19
def multipass_img_deform(frame_a,
                         frame_b,
                         window_size,
                         overlap,
                         iterations,
                         current_iteration,
                         x_old,
                         y_old,
                         u_old,
                         v_old,
                         correlation_method='circular',
                         subpixel_method='gaussian',
                         do_sig2noise=False,
                         sig2noise_method='peak2peak',
                         sig2noise_mask=2,
                         MinMaxU=(-100, 50),
                         MinMaxV=(-50, 50),
                         std_threshold=5,
                         median_threshold=2,
                         median_size=1,
                         filter_method='localmean',
                         max_filter_iteration=10,
                         filter_kernel_size=2,
                         interpolation_order=3):
    """
    Multi pass of the PIV evaluation.

    This function does the PIV evaluation of the second and further passes.
    It returns the coordinates of the interrogation window centres, the
    displacement u and v for each interrogation window as well as the mask
    which indicates whether the displacement vector was interpolated or not.


    Parameters
    ----------
    frame_a : 2d np.ndarray
        the first image

    frame_b : 2d np.ndarray
        the second image

    window_size : tuple of ints
         the size of the interrogation window

    overlap : tuple of ints
        the overlap of the interrogation window, e.g. window_size/2

    x_old : 2d np.ndarray
        the x coordinates of the vector field of the previous pass

    y_old : 2d np.ndarray
        the y coordinates of the vector field of the previous pass

    u_old : 2d np.ndarray
        the u displacement of the vector field of the previous pass

    v_old : 2d np.ndarray
        the v displacement of the vector field of the previous pass

    subpixel_method: string
        the method used for the subpixel interpolation.
        one of the following methods to estimate subpixel location of the peak:
        'centroid' [replaces default if correlation map is negative],
        'gaussian' [default if correlation map is positive],
        'parabolic'

    MinMaxU : two elements tuple
        sets the limits of the u displacement component
        Used for validation.

    MinMaxV : two elements tuple
        sets the limits of the v displacement component
        Used for validation.

    std_threshold : float
        sets the  threshold for the std validation

    median_threshold : float
        sets the threshold for the median validation

    filter_method : string
        the method used to replace the non-valid vectors
        Methods:
            'localmean',
            'disk',
            'distance', 

    max_filter_iteration : int
        maximum of filter iterations to replace nans

    filter_kernel_size : int
        size of the kernel used for the filtering

    interpolation_order : int
        the order of the spline interpolation used for the image deformation

    Returns
    -------
    x : 2d np.array
        array containing the x coordinates of the interrogation window centres

    y : 2d np.array
        array containing the y coordinates of the interrogation window centres

    u : 2d np.array
        array containing the u displacement for every interrogation window

    v : 2d np.array
        array containing the v displacement for every interrogation window

    mask : 2d np.array
        array containing the mask values (bool) which indicate whether
        the vector was filtered

    """

    x, y = get_coordinates(np.shape(frame_a), window_size, overlap)
    'calculate the x and y coordinates of the interrogation window centres'
    y_old = y_old[:, 0]
    # y_old = y_old[::-1]
    x_old = x_old[0, :]
    y_int = y[:, 0]
    # y_int = y_int[::-1]
    x_int = x[0, :]
    '''The interpolation function doesn't like meshgrids as input. Hence, the
    edges must be extracted to provide sufficient input. x_old and y_old are
    the coordinates of the old grid. x_int and y_int are the coordinates
    of the new grid'''

    ip = RectBivariateSpline(y_old, x_old, u_old)
    u_pre = ip(y_int, x_int)
    ip2 = RectBivariateSpline(y_old, x_old, v_old)
    v_pre = ip2(y_int, x_int)
    ''' interpolating the displacements from the old grid onto the new grid;
    y before x because numpy works row-major
    '''

    frame_b_deform = frame_interpolation(
        frame_b, x, y, u_pre, -v_pre, interpolation_order=interpolation_order)
    '''this one is doing the image deformation (see above)'''

    cor_win_1 = pyprocess.moving_window_array(frame_a, window_size, overlap)
    cor_win_2 = pyprocess.moving_window_array(frame_b_deform, window_size,
                                              overlap)
    '''Filling the interrogation windows. The windows are arranged
    in a 3d array of shape number_of_windows * window_size * window_size;
    this way is much faster than using a loop'''

    correlation = correlation_func(cor_win_1,
                                   cor_win_2,
                                   correlation_method=correlation_method,
                                   normalized_correlation=False)
    'do the correlation'
    disp = np.zeros((np.size(correlation, 0), 2))
    for i in range(0, np.size(correlation, 0)):
        ''' determine the displacement at subpixel level '''
        disp[i, :] = find_subpixel_peak_position(
            correlation[i, :, :], subpixel_method=subpixel_method)
    'this loop is doing the displacement evaluation for each window'
    disp = np.array(disp) - np.floor(np.array(correlation[0, :, :].shape) / 2)

    'reshaping the interrogation window to vector field shape'
    shapes = np.array(
        pyprocess.get_field_shape(np.shape(frame_a), window_size, overlap))
    u = disp[:, 1].reshape(shapes)
    v = -disp[:, 0].reshape(shapes)

    'adding the recent displacement onto the displacement of the previous pass'
    u = u + u_pre
    v = v + v_pre
    'validation using global limits and local median'
    u, v, mask_g = validation.global_val(u, v, MinMaxU, MinMaxV)
    u, v, mask_s = validation.global_std(u, v, std_threshold=std_threshold)
    u, v, mask_m = validation.local_median_val(u,
                                               v,
                                               u_threshold=median_threshold,
                                               v_threshold=median_threshold,
                                               size=median_size)
    mask = mask_g + mask_m + mask_s
    'adding masks to add the effect of all the validations'
    #mask=np.zeros_like(u)
    'filter to replace the values that were marked by the validation'
    if current_iteration != iterations:
        u, v = filters.replace_outliers(u,
                                        v,
                                        method=filter_method,
                                        max_iter=max_filter_iteration,
                                        kernel_size=filter_kernel_size)
    if do_sig2noise == True and current_iteration == iterations and iterations != 1:
        sig2noise_ratio = sig2noise_ratio_function(
            correlation,
            sig2noise_method=sig2noise_method,
            width=sig2noise_mask)
        sig2noise_ratio = sig2noise_ratio.reshape(shapes)
    else:
        sig2noise_ratio = np.full_like(u, np.nan)

    return x, y, u, v, sig2noise_ratio, mask
Example #20
    def func(args):
        """A function to process each image pair."""

        # this line is REQUIRED for multiprocessing to work
        # always use it in your custom function

        file_a, file_b, counter = args

        # counter2=str(counter2)
        #####################
        # Here goes your code
        #####################

        ' read images into numpy arrays'
        frame_a = tools.imread(os.path.join(settings.filepath_images, file_a))
        frame_b = tools.imread(os.path.join(settings.filepath_images, file_b))

        ## Miguel: I just had a quick look, and I do not understand the reason for this step.
        #  I propose to remove it.
        #frame_a = (frame_a*1024).astype(np.int32)
        #frame_b = (frame_b*1024).astype(np.int32)

        ' crop to ROI'
        if settings.ROI == 'full':
            frame_a = frame_a
            frame_b = frame_b
        else:
            frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
            frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
        if settings.dynamic_masking_method in ('edge', 'intensity'):
            frame_a = preprocess.dynamic_masking(
                frame_a,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)
            frame_b = preprocess.dynamic_masking(
                frame_b,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)
        '''%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'''
        'first pass'
        x, y, u, v, sig2noise_ratio = first_pass(
            frame_a,
            frame_b,
            settings.windowsizes[0],
            settings.overlap[0],
            settings.iterations,
            correlation_method=settings.correlation_method,
            subpixel_method=settings.subpixel_method,
            do_sig2noise=settings.extract_sig2noise,
            sig2noise_method=settings.sig2noise_method,
            sig2noise_mask=settings.sig2noise_mask,
        )

        'validation using global limits and std and local median'
        '''MinMaxU : two elements tuple
            sets the limits of the u displacement component
            Used for validation.

        MinMaxV : two elements tuple
            sets the limits of the v displacement component
            Used for validation.

        std_threshold : float
            sets the  threshold for the std validation

        median_threshold : float
            sets the threshold for the median validation

        filter_method : string
            the method used to replace the non-valid vectors
            Methods:
                'localmean',
                'disk',
                'distance', 

        max_filter_iteration : int
            maximum of filter iterations to replace nans

        filter_kernel_size : int
            size of the kernel used for the filtering'''

        mask = np.full_like(x, False)
        if settings.validation_first_pass == True:
            u, v, mask_g = validation.global_val(u, v, settings.MinMax_U_disp,
                                                 settings.MinMax_V_disp)
            u, v, mask_s = validation.global_std(
                u, v, std_threshold=settings.std_threshold)
            u, v, mask_m = validation.local_median_val(
                u,
                v,
                u_threshold=settings.median_threshold,
                v_threshold=settings.median_threshold,
                size=settings.median_size)
            if settings.extract_sig2noise == True and settings.iterations == 1 and settings.do_sig2noise_validation == True:
                u, v, mask_s2n = validation.sig2noise_val(
                    u,
                    v,
                    sig2noise_ratio,
                    threshold=settings.sig2noise_threshold)
                mask = mask + mask_g + mask_m + mask_s + mask_s2n
            else:
                mask = mask + mask_g + mask_m + mask_s
        'filter to replace the values that were marked by the validation'
        if settings.iterations > 1:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            'adding masks to add the effect of all the validations'
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
        elif settings.iterations == 1 and settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            'adding masks to add the effect of all the validations'
            if settings.smoothn == True:
                u, v = filters.replace_outliers(
                    u,
                    v,
                    method=settings.filter_method,
                    max_iter=settings.max_filter_iteration,
                    kernel_size=settings.filter_kernel_size)
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)

        i = 1
        'all the following passes'
        for i in range(2, settings.iterations + 1):
            x, y, u, v, sig2noise_ratio, mask = multipass_img_deform(
                frame_a,
                frame_b,
                settings.windowsizes[i - 1],
                settings.overlap[i - 1],
                settings.iterations,
                i,
                x,
                y,
                u,
                v,
                correlation_method=settings.correlation_method,
                subpixel_method=settings.subpixel_method,
                do_sig2noise=settings.extract_sig2noise,
                sig2noise_method=settings.sig2noise_method,
                sig2noise_mask=settings.sig2noise_mask,
                MinMaxU=settings.MinMax_U_disp,
                MinMaxV=settings.MinMax_V_disp,
                std_threshold=settings.std_threshold,
                median_threshold=settings.median_threshold,
                median_size=settings.median_size,
                filter_method=settings.filter_method,
                max_filter_iteration=settings.max_filter_iteration,
                filter_kernel_size=settings.filter_kernel_size,
                interpolation_order=settings.interpolation_order)
            # If the smoothing is active, we do it at each pass
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
        # extract and validate the sig2noise ratio after the last pass, if desired
        if settings.extract_sig2noise == True and i == settings.iterations and settings.iterations != 1 and settings.do_sig2noise_validation == True:
            u, v, mask_s2n = validation.sig2noise_val(
                u, v, sig2noise_ratio, threshold=settings.sig2noise_threshold)
            mask = mask + mask_s2n
        if settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
        # pixel/frame -> pixel/sec
        u = u / settings.dt
        v = v / settings.dt
        # scale the results: pixel -> meter
        x, y, u, v = scaling.uniform(x,
                                     y,
                                     u,
                                     v,
                                     scaling_factor=settings.scaling_factor)
        # save to a file
        save(x,
             y,
             u,
             v,
             sig2noise_ratio,
             mask,
             os.path.join(save_path, 'field_A%03d.txt' % counter),
             delimiter='\t')
        # some messages to check if it is still alive

        # some other stuff that one might want to use
        if settings.show_plot == True or settings.save_plot == True:
            plt.close('all')
            plt.ioff()
            Name = os.path.join(save_path, 'Image_A%03d.png' % counter)
            display_vector_field(os.path.join(save_path,
                                              'field_A%03d.txt' % counter),
                                 scale=settings.scale_plot)
            if settings.save_plot == True:
                plt.savefig(Name)
            if settings.show_plot == True:
                plt.show()

        print('Image Pair ' + str(counter + 1))

status_message(u)


# In[33]:


# "filter to replace the values that where marked by the validation"
# if settings.iterations > 1:


u, v = filters.replace_outliers(
    u,
    v,
    method=settings.filter_method,
    max_iter=settings.max_filter_iteration,
    kernel_size=settings.filter_kernel_size,
)
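
# A hedged follow-up check (not part of the original cell): count the NaNs that
# remain after replace_outliers to see how many flagged vectors could not be
# interpolated. Assumes numpy is imported as np, as elsewhere in this notebook.
print('NaNs remaining in u: {}'.format(int(np.sum(np.isnan(u)))))
print('NaNs remaining in v: {}'.format(int(np.sum(np.isnan(v)))))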


# In[34]:


# mask the velocity maps
tmp = np.zeros_like(x, dtype=bool)
tmp.flat[xymask] = True

u = np.ma.masked_array(u, mask=tmp)
v = np.ma.masked_array(v, mask=tmp)
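
# A hedged follow-up (not in the original notebook): statistics computed on the
# masked arrays ignore the vectors flagged by xymask, which is the usual reason
# for masking the velocity maps here.
print('mean u over unmasked region:', np.ma.mean(u))
print('mean v over unmasked region:', np.ma.mean(v))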
Example No. 22
dt = DeltaFrame * 1. / fps  # seconds between the correlated frames
u0, v0, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=winsize,
    overlap=overlap,
    dt=dt,
    search_area_size=searchsize,
    sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=winsize,
                               overlap=overlap)
u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
u2, v2 = filters.replace_outliers(u1,
                                  v1,
                                  method='localmean',
                                  max_iter=5,
                                  kernel_size=5)
u3, v3, mask1 = validation.local_median_val(u2, v2, 3, 3, 1)
u4, v4 = filters.replace_outliers(u3,
                                  v3,
                                  method='localmean',
                                  max_iter=5,
                                  kernel_size=5)
tools.save(x, y, u4, v4, mask1, '../testResult/test.txt')

tools.display_vector_field('../testResult/test.txt', scale=500, width=0.0025)


#%% define node
def process_node(i):
Example No. 23
    def piv_frames(self, topn=-1, show=True):

        from openpiv.validation import sig2noise_val
        from openpiv.filters import replace_outliers

        frames = self._frames[::2]
        topn = np.min([len(frames), topn])
        frames = frames[:topn]

        (ut, vt, s2nt) = self.piv_frame(frame=0, show=False)
        for i in tqdm.tqdm(frames[1:]):
            (u, v, s2n) = self.piv_frame(frame=i, show=False)
            ut += u
            vt += v
            s2nt += s2n
            #print(np.max(u), np.min(u), u.size)
            #print(np.max(v), np.min(v), v.size)

        ut /= len(frames)
        vt /= len(frames)
        s2nt /= len(frames)

        ut, vt, mask = sig2noise_val(ut,
                                     vt,
                                     s2nt,
                                     threshold=self._piv_threshold)
        ut, vt = replace_outliers(ut,
                                  vt,
                                  method='localmean',
                                  max_iter=10,
                                  kernel_size=2)
        self._pivu = ut
        self._pivv = vt
        self._pivs2n = s2nt
        self.save_piv()

        if show:
            fig = plt.figure(figsize=(12, 6))
            ax1 = fig.add_subplot(121)
            ax1.imshow(self.mean())
            ax1.quiver(self._pivx, self._pivy, ut, vt, pivot='mid', color='w')

            ax2 = fig.add_subplot(122)
            n_u, bins, patches = ax2.hist(ut.flatten() * self._mpp,
                                          bins=20,
                                          density=True,
                                          facecolor='blue',
                                          alpha=0.75,
                                          label='u')
            n_v, bins, patches = ax2.hist(vt.flatten() * self._mpp,
                                          bins=20,
                                          density=True,
                                          facecolor='green',
                                          alpha=0.75,
                                          label='v')
            ax2.annotate(np.mean(ut) * self._mpp,
                         xy=(np.mean(ut) * self._mpp, np.max(n_u)))
            ax2.annotate(np.mean(vt) * self._mpp,
                         xy=(np.mean(vt) * self._mpp, np.max(n_v)))
            plt.legend(loc='best')
            plt.tight_layout()
            plt.savefig(self._fname[:-4] + '_piv_t.png', dpi=150)

        print("... frames: {}, {}".format(frames[0], frames[-1]))
        print("... mean velocity [um/sec]: ({:4.2f}, {:4.2f})".format(
            np.mean(ut) * self._umtopixel,
            np.mean(vt) * self._umtopixel))
        print("... mean velocity [pixel/frame]: ({:4.2f}, {:4.2f})".format(
            np.mean(ut) * self._dt,
            np.mean(vt) * self._dt))
Example No. 24
    def process(self, args):
        """
            Process chain as configured in the GUI.

            Parameters
            ----------
            args : tuple
                Tuple as expected by the inherited run method:
                file_a (str) -- image file a
                file_b (str) -- image file b
                counter (int) -- index pointing to an element of the filename
                                 list
        """
        file_a, file_b, counter = args
        frame_a = piv_tls.imread(file_a)
        frame_b = piv_tls.imread(file_b)

        # Smoothing script borrowed from openpiv.windef
        s = self.p['smoothn_val']

        def smoothn(u, s):
            u, _, _, _ = piv_smt.smoothn(u, s=s, isrobust=self.p['robust'])
            return u

        # delimiters placed here for safety
        delimiter = self.p['separator']
        if delimiter == 'tab':
            delimiter = '\t'
        if delimiter == 'space':
            delimiter = ' '

        # preprocessing
        print('\nPre-processing image pair: {}'.format(counter + 1))
        if self.p['background_subtract'] \
                and self.p['background_type'] == 'minA - minB':
            self.background = gen_background(self.p, frame_a, frame_b)

        frame_a = frame_a.astype(np.int32)
        frame_a = process_images(self,
                                 frame_a,
                                 self.GUI.preprocessing_methods,
                                 background=self.background)
        frame_b = frame_b.astype(np.int32)
        frame_b = process_images(self,
                                 frame_b,
                                 self.GUI.preprocessing_methods,
                                 background=self.background)

        print('Evaluating image pair: {}'.format(counter + 1))

        # evaluation first pass
        start = time.time()
        passes = 1
        # setup custom windowing if selected
        if self.parameter['custom_windowing']:
            corr_window_0 = self.parameter['corr_window_1']
            overlap_0 = self.parameter['overlap_1']
            for i in range(2, 8):
                if self.parameter['pass_%1d' % i]:
                    passes += 1
                else:
                    break

        else:
            passes = self.parameter['coarse_factor']
            if self.parameter['grid_refinement'] == 'all passes' \
                    and self.parameter['coarse_factor'] != 1:
                corr_window_0 = self.parameter['corr_window'] * \
                    2**(self.parameter['coarse_factor'] - 1)
                overlap_0 = self.parameter['overlap'] * \
                    2**(self.parameter['coarse_factor'] - 1)

            # Refine all passes after first when there are more than 1 pass.
            elif self.parameter['grid_refinement'] == '2nd pass on' \
                    and self.parameter['coarse_factor'] != 1:
                corr_window_0 = self.parameter['corr_window'] * \
                    2**(self.parameter['coarse_factor'] - 2)
                overlap_0 = self.parameter['overlap'] * \
                    2**(self.parameter['coarse_factor'] - 2)

            # If >>none<< is selected or something goes wrong, the window
            # size would remain the same.
            else:
                corr_window_0 = self.parameter['corr_window']
                overlap_0 = self.parameter['overlap']
        overlap_percent = overlap_0 / corr_window_0
        sizeX = corr_window_0

        u, v, sig2noise = piv_wdf.extended_search_area_piv(
            frame_a.astype(np.int32),
            frame_b.astype(np.int32),
            window_size=corr_window_0,
            overlap=overlap_0,
            search_area_size=corr_window_0,
            width=self.parameter['s2n_mask'],
            subpixel_method=self.parameter['subpixel_method'],
            sig2noise_method=self.parameter['sig2noise_method'],
            correlation_method=self.parameter['corr_method'],
            normalized_correlation=self.parameter['normalize_correlation'])

        x, y = piv_prc.get_coordinates(frame_a.shape, corr_window_0, overlap_0)

        # validating first pass
        mask = np.full_like(x, 0)
        if self.parameter['fp_vld_global_threshold']:
            u, v, Mask = piv_vld.global_val(
                u,
                v,
                u_thresholds=(self.parameter['fp_MinU'],
                              self.parameter['fp_MaxU']),
                v_thresholds=(self.parameter['fp_MinV'],
                              self.parameter['fp_MaxV']))
            # consolidate effects of mask
            mask += Mask

        if self.parameter['fp_local_med']:
            u, v, Mask = piv_vld.local_median_val(
                u,
                v,
                u_threshold=self.parameter['fp_local_med'],
                v_threshold=self.parameter['fp_local_med'],
                size=self.parameter['fp_local_med_size'])
            mask += Mask

        if self.parameter['adv_repl']:
            u, v = piv_flt.replace_outliers(
                u,
                v,
                method=self.parameter['adv_repl_method'],
                max_iter=self.parameter['adv_repl_iter'],
                kernel_size=self.parameter['adv_repl_kernel'])
        print('Validated first pass result of image pair: {}.'.format(counter +
                                                                      1))

        # smoothing before deformation if 'each pass' is selected
        if self.parameter['smoothn_each_pass']:
            if self.parameter['smoothn_first_more']:
                s *= 2
            u = smoothn(u, s)
            v = smoothn(v, s)
            print('Smoothed pass 1 for image pair: {}.'.format(counter + 1))
            s = self.parameter['smoothn_val1']

        print('Finished pass 1 for image pair: {}.'.format(counter + 1))
        print("window size: " + str(corr_window_0))
        print('overlap: ' + str(overlap_0), '\n')

        # evaluation of all other passes
        if passes != 1:
            iterations = passes - 1
            for i in range(2, passes + 1):
                # setting up the windowing of each pass
                if self.parameter['custom_windowing']:
                    corr_window = self.parameter['corr_window_%1d' % i]
                    overlap = int(corr_window * overlap_percent)

                else:
                    if self.parameter['grid_refinement'] == 'all passes' or \
                            self.parameter['grid_refinement'] == '2nd pass on':
                        corr_window = self.parameter['corr_window'] * \
                            2**(iterations - 1)
                        overlap = self.parameter['overlap'] * \
                            2**(iterations - 1)

                    else:
                        corr_window = self.parameter['corr_window']
                        overlap = self.parameter['overlap']
                sizeX = corr_window

                # translate settings to windef settings object
                piv_wdf_settings = piv_wdf.Settings()
                piv_wdf_settings.correlation_method = \
                    self.parameter['corr_method']
                piv_wdf_settings.normalized_correlation = \
                    self.parameter['normalize_correlation']
                piv_wdf_settings.windowsizes = (corr_window, ) * (passes + 1)
                piv_wdf_settings.overlap = (overlap, ) * (passes + 1)
                piv_wdf_settings.num_iterations = passes
                piv_wdf_settings.subpixel_method = \
                    self.parameter['subpixel_method']
                piv_wdf_settings.deformation_method = \
                    self.parameter['deformation_method']
                piv_wdf_settings.interpolation_order = \
                    self.parameter['interpolation_order']
                piv_wdf_settings.sig2noise_validate = True
                piv_wdf_settings.sig2noise_method = \
                    self.parameter['sig2noise_method']
                piv_wdf_settings.sig2noise_mask = self.parameter['s2n_mask']

                # do the correlation
                x, y, u, v, sig2noise, mask = piv_wdf.multipass_img_deform(
                    frame_a.astype(np.int32),
                    frame_b.astype(np.int32),
                    i,  # current iteration
                    x,
                    y,
                    u,
                    v,
                    piv_wdf_settings)

                # validate other passes
                if self.parameter['sp_vld_global_threshold']:
                    u, v, Mask = piv_vld.global_val(
                        u,
                        v,
                        u_thresholds=(self.parameter['sp_MinU'],
                                      self.parameter['sp_MaxU']),
                        v_thresholds=(self.parameter['sp_MinV'],
                                      self.parameter['sp_MaxV']))
                    mask += Mask  # consolidate effects of mask

                if self.parameter['sp_vld_global_threshold']:
                    u, v, Mask = piv_vld.global_std(
                        u, v, std_threshold=self.parameter['sp_std_threshold'])
                    mask += Mask

                if self.parameter['sp_local_med_validation']:
                    u, v, Mask = piv_vld.local_median_val(
                        u,
                        v,
                        u_threshold=self.parameter['sp_local_med'],
                        v_threshold=self.parameter['sp_local_med'],
                        size=self.parameter['sp_local_med_size'])
                    mask += Mask

                if self.parameter['adv_repl']:
                    u, v = piv_flt.replace_outliers(
                        u,
                        v,
                        method=self.parameter['adv_repl_method'],
                        max_iter=self.parameter['adv_repl_iter'],
                        kernel_size=self.parameter['adv_repl_kernel'])
                print('Validated pass {} of image pair: {}.'.format(
                    i, counter + 1))

                # smoothing each individual pass if 'each pass' is selected
                if self.parameter['smoothn_each_pass']:
                    u = smoothn(u, s)
                    v = smoothn(v, s)
                    print('Smoothed pass {} for image pair: {}.'.format(
                        i, counter + 1))

                print('Finished pass {} for image pair: {}.'.format(
                    i, counter + 1))
                print("window size: " + str(corr_window))
                print('overlap: ' + str(overlap), '\n')
                iterations -= 1

        if self.p['flip_u']:
            u = np.flipud(u)

        if self.p['flip_v']:
            v = np.flipud(v)

        if self.p['invert_u']:
            u *= -1

        if self.p['invert_v']:
            v *= -1

        # scaling
        u = u / self.parameter['dt']
        v = v / self.parameter['dt']
        x, y, u, v = piv_scl.uniform(x,
                                     y,
                                     u,
                                     v,
                                     scaling_factor=self.parameter['scale'])
        end = time.time()

        # save data to file.
        out = np.vstack([m.ravel() for m in [x, y, u, v, mask, sig2noise]])
        np.savetxt(self.save_fnames[counter],
                   out.T,
                   fmt='%8.4f',
                   delimiter=delimiter)
        print('Processed image pair: {}'.format(counter + 1))

        sizeY = sizeX
        sizeX = ((int(frame_a.shape[0] - sizeX) //
                  (sizeX - (sizeX * overlap_percent))) + 1)
        sizeY = ((int(frame_a.shape[1] - sizeY) //
                  (sizeY - (sizeY * overlap_percent))) + 1)

        time_per_vec = _round((((end - start) * 1000) / ((sizeX * sizeY) - 1)),
                              3)

        print('Process time: {} second(s)'.format((_round((end - start), 3))))
        print('Number of vectors: {}'.format(int((sizeX * sizeY) - 1)))
        print('Time per vector: {} millisecond(s)'.format(time_per_vec))
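
        # Hedged aside (not in the original): openpiv's get_field_shape returns
        # the vector-field dimensions directly, which is an alternative to the
        # manual sizeX/sizeY arithmetic above; shown here for the first-pass
        # window as an illustration (piv_prc is assumed to be openpiv.pyprocess).
        field_shape = piv_prc.get_field_shape(image_size=frame_a.shape,
                                              search_area_size=corr_window_0,
                                              overlap=overlap_0)
        print('get_field_shape (first pass): {} x {} vectors'.format(
            field_shape[0], field_shape[1]))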
Example No. 25
# correct stage drift between the fields of view
u -= np.nanmean(u)
v -= np.nanmean(v)
w -= np.nanmean(w)

# %%
# filtering
uf, vf, wf, mask = sig2noise_val(u,
                                 v,
                                 w=w,
                                 sig2noise=sig2noise,
                                 threshold=signoise_filter)
uf, vf, wf = replace_outliers(uf,
                              vf,
                              wf,
                              max_iter=1,
                              tol=100,
                              kernel_size=2,
                              method='disk')


# %%
# plotting
# representation of the image stacks by maximum projections; the red circle marks the position of the cell
def update_plot(i, ims, ax):
    a1 = ax.imshow(ims[i])
    a2 = ax.add_patch(plt.Circle((330, 140), 100, color="red", fill=False))
    return [a1, a2]
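
# Hedged usage sketch (not part of the original script): update_plot is the kind
# of callback that matplotlib.animation.FuncAnimation expects. 'ims' is assumed
# to be the list of 2D maximum-projection arrays mentioned in the comment above.
from matplotlib import animation

fig_anim, ax_anim = plt.subplots()
anim = animation.FuncAnimation(fig_anim, update_plot, frames=len(ims),
                               fargs=(ims, ax_anim), interval=100)
anim.save('max_projections.mp4', fps=10)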


# %%
u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=32,
    overlap=8,
    dt=.1,
    sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=32,
                               overlap=8)

u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v = filters.replace_outliers(u,
                                v,
                                method='localmean',
                                max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

tools.save(x, y, u, v, mask, 'Y4-S3_Camera000398_a.txt')

# %%
# Use Python version, pyprocess:

u, v, sig2noise = pyprocess.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=32,
    overlap=8,
    dt=.1,
    sig2noise_method='peak2peak')
Example No. 27
    def func(args):
        file_a, file_b, counter = args
        # read the images
        frame_a = tools.imread(os.path.join(settings.filepath_images, file_a))
        frame_b = tools.imread(os.path.join(settings.filepath_images, file_b))
        if counter == settings.fall_start:
            settings.ROI[1] = frame_a.shape[0]
        """Here we check if the interface has reached the top of the roi yet
        by comparing it to the index in the observation_periods file. If it has
        not reached the roi yet we skip this part, if it did then we shift the
        roi for each pair after the initial one """
        if counter >= settings.roi_shift_start:
            # set the roi to the image height for the first frame
            # if counter == settings.roi_shift_start :
            #     settings.current_pos = 0
            # shift the roi for each pair (this is not done for the first one)
            settings.ROI[0] = int(settings.current_pos)

        # crop to roi
        if settings.ROI == 'full':
            frame_a = frame_a
            frame_b = frame_b
        else:
            frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
            frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
        if settings.dynamic_masking_method == 'edge' or settings.dynamic_masking_method == 'intensity':
            frame_a = preprocess.dynamic_masking(
                frame_a,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)
            frame_b = preprocess.dynamic_masking(
                frame_b,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)

#%%
        """ Here we do the first pass of the piv interrogation """
        x, y, u, v, sig2noise_ratio = first_pass(
            frame_a,
            frame_b,
            settings.window_width[0],
            settings.window_height[0],
            settings.overlap_width[0],
            settings.overlap_height[0],
            settings.iterations,
            correlation_method=settings.correlation_method,
            subpixel_method=settings.subpixel_method,
            do_sig2noise=settings.extract_sig2noise,
            sig2noise_method=settings.sig2noise_method,
            sig2noise_mask=settings.sig2noise_mask,
        )
        mask = np.full_like(x, False)
        if settings.validation_first_pass == True:
            u, v, mask_g = validation.global_val(u, v, settings.MinMax_U_disp,
                                                 settings.MinMax_V_disp)
            u, v, mask_s = validation.global_std(
                u, v, std_threshold=settings.std_threshold)
            u, v, mask_m = validation.local_median_val(
                u,
                v,
                u_threshold=settings.median_threshold,
                v_threshold=settings.median_threshold,
                size=settings.median_size)
            if settings.extract_sig2noise == True and settings.iterations == 1 and settings.do_sig2noise_validation == True:
                u, v, mask_s2n = validation.sig2noise_val(
                    u,
                    v,
                    sig2noise_ratio,
                    threshold=settings.sig2noise_threshold)
                mask = mask + mask_g + mask_m + mask_s + mask_s2n
            else:
                mask = mask + mask_g + mask_m + mask_s
        # filter to replace the values that were marked by the validation
        if settings.iterations > 1:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            # adding masks to combine the effect of all the validations
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
        elif settings.iterations == 1 and settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            # adding masks to combine the effect of all the validations
            if settings.smoothn == True:
                u, v = filters.replace_outliers(
                    u,
                    v,
                    method=settings.filter_method,
                    max_iter=settings.max_filter_iteration,
                    kernel_size=settings.filter_kernel_size)
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)

#%%
        i = 1
        """ Do the multipass until the maximum iterations are reached """
        for i in range(2, settings.iterations + 1):
            x, y, u, v, sig2noise_ratio, mask = multipass_img_deform(
                frame_a,
                frame_b,
                settings.window_width[i - 1],
                settings.window_height[i - 1],
                settings.overlap_width[i - 1],
                settings.overlap_height[i - 1],
                settings.iterations,
                i,
                x,
                y,
                u,
                v,
                correlation_method=settings.correlation_method,
                subpixel_method=settings.subpixel_method,
                do_sig2noise=settings.extract_sig2noise,
                sig2noise_method=settings.sig2noise_method,
                sig2noise_mask=settings.sig2noise_mask,
                MinMaxU=settings.MinMax_U_disp,
                MinMaxV=settings.MinMax_V_disp,
                std_threshold=settings.std_threshold,
                median_threshold=settings.median_threshold,
                median_size=settings.median_size,
                filter_method=settings.filter_method,
                max_filter_iteration=settings.max_filter_iteration,
                filter_kernel_size=settings.filter_kernel_size,
                interpolation_order=settings.interpolation_order)
            # smooth on each pass in case this is wanted
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)

        # extract the sig2noise ratio in case it is desired and replace the vectors
        if settings.extract_sig2noise == True and i == settings.iterations and settings.iterations != 1 and settings.do_sig2noise_validation == True:
            u, v, mask_s2n = validation_patch.sig2noise_val(
                u,
                v,
                sig2noise_ratio,
                threshold_low=settings.sig2noise_threshold)
            mask = mask + mask_s2n
        if settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
        if counter >= settings.roi_shift_start:
            settings.current_pos = settings.current_pos - calc_disp(
                x, v, frame_b.shape[1])
            if ((settings.ROI[1] - settings.current_pos) < 300):
                return settings.current_pos, True
        # scale the result timewise and lengthwise
        u = u / settings.dt
        v = v / settings.dt
        x, y, u, v = scaling.uniform(x,
                                     y,
                                     u,
                                     v,
                                     scaling_factor=settings.scaling_factor)
        # save the result
        save(x,
             y,
             u,
             v,
             sig2noise_ratio,
             mask,
             os.path.join(save_path_txts, 'field_%06d.txt' % (counter)),
             delimiter='\t')
        # disable the grid in the rcParams file
        plt.rcParams['axes.grid'] = False
        # show and save the plot if it is desired
        if settings.show_plot == True or settings.save_plot == True:
            plt.ioff()
            Name = os.path.join(save_path_images, 'Image_%06d.png' % (counter))
            display_vector_field(os.path.join(save_path_txts,
                                              'field_%06d.txt' % (counter)),
                                 scale=settings.scale_plot)
            if settings.save_plot == True:
                plt.savefig(Name, dpi=100)
            if settings.show_plot == True:
                plt.show()
            plt.close('all')
        print('Image Pair ' + str(counter) + ' of ' +
              settings.save_folder_suffix)
        if np.isnan(settings.current_pos):
            return settings.current_pos, True
        return settings.current_pos, False
Example No. 28
    def quick_piv(self, search_dict, index_a=100, index_b=101, folder=None):
        self.show_piv_param()
        ns = Namespace(**self.piv_param)

        if folder == None:
            img_a, img_b = self.read_two_images(search_dict,
                                                index_a=index_a,
                                                index_b=index_b)

            location_path = [
                x['path'] for x in self.piv_dict_list
                if search_dict.items() <= x.items()
            ]
            results_path = os.path.join(self.results_path, *location_path)
            try:
                os.makedirs(results_path)
            except FileExistsError:
                pass
        else:
            try:
                file_a_path = os.path.join(self.path, folder,
                                           'frame_%06d.tiff' % index_a)
                file_b_path = os.path.join(self.path, folder,
                                           'frame_%06d.tiff' % index_b)

                img_a = np.array(Image.open(file_a_path))
                img_b = np.array(Image.open(file_b_path))
            except Exception:
                return None

        # crop
        img_a = img_a[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]
        img_b = img_b[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]

        u0, v0, sig2noise = pyprocess.extended_search_area_piv(
            img_a.astype(np.int32),
            img_b.astype(np.int32),
            window_size=ns.winsize,
            overlap=ns.overlap,
            dt=ns.dt,
            search_area_size=ns.searchsize,
            sig2noise_method='peak2peak')

        x, y = pyprocess.get_coordinates(image_size=img_a.shape,
                                         search_area_size=ns.searchsize,
                                         overlap=ns.overlap)

        x, y, u0, v0 = scaling.uniform(
            x, y, u0, v0,
            scaling_factor=ns.pixel_density)  # no. pixel per distance

        u0, v0, mask = validation.global_val(
            u0, v0, (ns.u_lower_bound, ns.u_upper_bound),
            (ns.v_lower_bound, ns.v_upper_bound))

        u1, v1, mask = validation.sig2noise_val(u0,
                                                v0,
                                                sig2noise,
                                                threshold=1.01)

        u3, v3 = filters.replace_outliers(u1,
                                          v1,
                                          method='localmean',
                                          max_iter=500,
                                          kernel_size=3)

        # save in the simple ASCII table format
        tools.save(x, y, u3, v3, sig2noise, mask,
                   os.path.join(results_path, ns.text_export_name))

        if ns.image_check == True:
            fig, ax = plt.subplots(2, 1, figsize=(24, 12))
            ax[0].imshow(img_a)
            ax[1].imshow(img_b)

        io.imwrite(os.path.join(results_path, ns.figure_export_name), img_a)

        if ns.show_result == True:
            fig, ax = plt.subplots(figsize=(24, 12))
            tools.display_vector_field(
                os.path.join(results_path, ns.text_export_name),
                ax=ax,
                scaling_factor=ns.pixel_density,
                scale=ns.scale_factor,  # scale defines here the arrow length
                width=ns.arrow_width,  # width is the thickness of the arrow
                on_img=True,  # overlay on the image
                image_name=os.path.join(results_path, ns.figure_export_name))
            fig.savefig(os.path.join(results_path, ns.figure_export_name))

        if ns.show_vertical_profiles:
            field_shape = pyprocess.get_field_shape(
                image_size=img_a.shape,
                search_area_size=ns.searchsize,
                overlap=ns.overlap)
            vertical_profiles(ns.text_export_name, field_shape)

        print('Mean of u: %.3f' % np.mean(u3))
        print('Std of u: %.3f' % np.std(u3))
        print('Mean of v: %.3f' % np.mean(v3))
        print('Std of v: %.3f' % np.std(v3))

        output = np.array([np.mean(u3), np.std(u3), np.mean(v3), np.std(v3)])
        # if np.absolute(np.mean(v3)) < 50:
        #     output = self.quick_piv(search_dict,index_a = index_a + 1, index_b = index_b + 1)

        return x, y, u3, v3
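
    # Hedged usage note (the instance name, search_dict contents and frame
    # indices below are illustrative assumptions, not taken from the original):
    #     x, y, u, v = piv_session.quick_piv({'pos': 'p1'}, index_a=100, index_b=101)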
Example No. 29
def run_piv(
    frame_a,
    frame_b,
    winsize=16,  # pixels, interrogation window size in frame A
    searchsize=20,  # pixels, search in image B
    overlap=8,  # pixels, 50% overlap
    dt=0.0001,  # sec, time interval between pulses
    image_check=False,
    show_vertical_profiles=False,
    figure_export_name='_results.png',
    text_export_name="_results.txt",
    scale_factor=1,
    pixel_density=36.74,
    arrow_width=0.001,
    show_result=True,
    u_bounds=(-100, 100),
    v_bounds=(-100, 100)):

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)

    x, y, u0, v0 = scaling.uniform(
        x, y, u0, v0, scaling_factor=pixel_density)  # no. pixel per distance

    u0, v0, mask = validation.global_val(u0, v0, u_bounds, v_bounds)

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)

    u3, v3 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=3)

    # save in the simple ASCII table format
    if np.std(u3) < 480:
        tools.save(x, y, u3, v3, sig2noise, mask, text_export_name)

    if image_check == True:
        fig, ax = plt.subplots(2, 1, figsize=(24, 12))
        ax[0].imshow(frame_a)
        ax[1].imshow(frame_b)

    io.imwrite(figure_export_name, frame_a)

    if show_result == True:
        fig, ax = plt.subplots(figsize=(24, 12))
        tools.display_vector_field(
            text_export_name,
            ax=ax,
            scaling_factor=pixel_density,
            scale=scale_factor,  # scale defines here the arrow length
            width=arrow_width,  # width is the thickness of the arrow
            on_img=True,  # overlay on the image
            image_name=figure_export_name)
        fig.savefig(figure_export_name)

    if show_vertical_profiles:
        field_shape = pyprocess.get_field_shape(image_size=frame_a.shape,
                                                search_area_size=searchsize,
                                                overlap=overlap)
        vertical_profiles(text_export_name, field_shape)

    print('Std of u3: %.3f' % np.std(u3))
    print('Mean of u3: %.3f' % np.mean(u3))

    return np.std(u3)
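
# Hedged usage sketch (file names below are illustrative assumptions, not taken
# from the original example):
#
#     from openpiv import tools
#     frame_a = tools.imread('frame_0001.tif')
#     frame_b = tools.imread('frame_0002.tif')
#     u_std = run_piv(frame_a, frame_b, winsize=16, searchsize=20,
#                     overlap=8, dt=0.0001, show_result=False)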
Example No. 30
    def run(self):
        self.is_to_stop = False
        self.piv.piv_results_list = []

        for i in range(0, len(self.frames_list) - 1, abs(self.jump)):
            if self.piv.xy_zoom[0][0] and self.piv.xy_zoom[1][0]:
                """try:"""
                frame_a = self.frames_list[i][2][
                    int(self.piv.xy_zoom[1][0]):int(self.piv.xy_zoom[1][1]),
                    int(self.piv.xy_zoom[0][0]):int(self.piv.xy_zoom[0][1])]

                frame_b = self.frames_list[i + 1][2][
                    int(self.piv.xy_zoom[1][0]):int(self.piv.xy_zoom[1][1]),
                    int(self.piv.xy_zoom[0][0]):int(self.piv.xy_zoom[0][1])]
                """
                except ValueError:
                    frame_a = self.frames_list[i][2][int(self.frames_list[i][2].shape[1] - self.piv.xy_zoom[1][0]): int(
                        self.frames_list[i][2].shape[1] - self.piv.xy_zoom[1][1]),
                              int(self.piv.xy_zoom[0][0]): int(
                                  self.piv.xy_zoom[0][1])]

                    frame_b = self.frames_list[i + 1][2][
                              int(self.frames_list[i + 1][2].shape[1] - self.piv.xy_zoom[1][0]): int(
                                  self.frames_list[i + 1][2].shape[1] - self.piv.xy_zoom[1][1]),
                              int(self.piv.xy_zoom[0][0]): int(
                                  self.piv.xy_zoom[0][1])]
                """
            else:
                frame_a = self.frames_list[i][2]

                frame_b = self.frames_list[i + 1][2]

            try:
                self.u, self.v, self.sig2noise = extended_search_area_piv(
                    frame_a.astype(np.int32),
                    frame_b.astype(np.int32),
                    window_size=self.winsize,
                    overlap=self.overlap,
                    dt=self.dt,
                    search_area_size=self.searchsize,
                    sig2noise_method='peak2peak')
                self.x, self.y = get_coordinates(image_size=frame_a.shape,
                                                 window_size=self.winsize,
                                                 overlap=self.overlap)
                self.u, self.v, self.mask = sig2noise_val(self.u,
                                                          self.v,
                                                          self.sig2noise,
                                                          threshold=1.0)
                self.u, self.v = replace_outliers(self.u,
                                                  self.v,
                                                  method='localmean',
                                                  max_iter=10,
                                                  kernel_size=2)
                # self.x, self.y, self.u, self.v = uniform(self.x, self.y, self.u, self.v, scaling_factor=5)

                if self.piv.xy_zoom[0][0] and self.piv.xy_zoom[1][0]:
                    self.x += int(self.piv.xy_zoom[0][0])
                    self.y += int(self.piv.xy_zoom[1][0])

                self.y = np.flip(self.y, 0)
                self.x = np.flip(self.x, 0)

            except ValueError:
                if self.searchsize < self.winsize:
                    print("0")
                    self.error_message.setText(
                        "the search size cannot be smaller than the window size"
                    )
                elif self.overlap > self.winsize:
                    print("1")
                    self.error_message.setText(
                        "Overlap has to be smaller than the window_size")
                else:
                    print("2")
                    self.error_message.setText("ROI window to small")
                self.error_message.exec()
                break
            self.piv.piv_results_list.append(
                [self.x, self.y, self.u, self.v, self.mask])
            self.piv.piv_images_list[i][3] = self.piv.piv_results_list[
                i // abs(self.jump)]
            # data = np.zeros((len(np.ravel(self.u)), 5))
            # res_list = [np.ravel(self.x), np.ravel(self.y), np.ravel(self.u), np.ravel(self.v), np.ravel(self.mask)]
            # for j in range(0, 4):
            #    for k in range(len(res_list[j])):
            #        data[k][j] = res_list[j][k]
            # self.piv.piv_images_list[i][4] = data
            # save_openpiv_vec(self.piv.piv_images_list[i][1].split('.')[0], data, 'pix', 'dt',
            #                  len(data[0]), len(data))
            self.piv.show_plot(i, self.piv.bit, True)

            if i == len(self.frames_list) - 2 and self.jump == 1:
                self.piv.piv_results_list.append(
                    [self.x, self.y, self.u, self.v, self.mask])
                self.piv.piv_images_list[i +
                                         1][3] = self.piv.piv_results_list[i +
                                                                           1]
                # self.piv.piv_images_list[i + 1][4] = data
                self.piv.show_plot(i + 1, self.piv.bit, True)

            if self.is_to_stop:
                break

        self.piv_finished_signal.emit()
Example No. 31
ax[0].imshow(frame_a,cmap=plt.cm.gray)
ax[1].imshow(frame_b,cmap=plt.cm.gray)


# %%
winsize = 24 # pixels
searchsize = 64  # pixels, search in image B
overlap = 12 # pixels
dt = 0.02 # sec


u0, v0, sig2noise = process.extended_search_area_piv(frame_a.astype(np.int32),
                                                     frame_b.astype(np.int32),
                                                     window_size=winsize,
                                                     overlap=overlap,
                                                     dt=dt,
                                                     search_area_size=searchsize,
                                                     sig2noise_method='peak2peak')

# %%
x, y = process.get_coordinates( image_size=frame_a.shape, window_size=winsize, overlap=overlap )

# %%
u1, v1, mask = validation.sig2noise_val( u0, v0, sig2noise, threshold = 1.3 )

# %%
u2, v2 = filters.replace_outliers( u1, v1, method='localmean', max_iter=10, kernel_size=2)

# %%
x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor = 96.52 )

# %%
tools.save(x, y, u3, v3, mask, 'exp1_001.txt' )

# %%
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)
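
# %%
# A hedged extra step (not in the original notebook): the saved ASCII file can
# be reloaded for further processing; the column order is assumed to match the
# tools.save call above (x, y, u, v, mask).
x_r, y_r, u_r, v_r, mask_r = np.loadtxt('exp1_001.txt', unpack=True)
print('reloaded {} vectors'.format(u_r.size))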
Example No. 32
def calc_piv_2_images(frame_a, frame_b, idx, dir_name):
    '''
    Performs Particle Image Velocimetry (PIV) on two images and saves a figure with the velocity vectors overlaid.
    :param frame_a: first image
    :param frame_b: consecutive image
    :param idx: index of the first frame, for saving and ordering the images
    :param dir_name: directory to save the image to
    :return: -
    '''
    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)

    # filter out outliers that are very different from their
    # neighbours to see where a reasonable limit lies
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=3,
                                      kernel_size=3)

    # convert x,y to mm; convert u,v to mm/sec
    x, y, u3, v3 = scaling.uniform(
        x, y, u2, v2, scaling_factor=scaling_factor)  # 96.52 microns/pixel

    # 0,0 shall be bottom left, positive rotation rate is counterclockwise
    x, y, u3, v3 = tools.transform_coordinates(x, y, u3, v3)

    fig, ax = plt.subplots()
    im = np.negative(frame_a)  # plot negative of the image for more clarity
    xmax = np.amax(x) + winsize / (2 * scaling_factor)
    ymax = np.amax(y) + winsize / (2 * scaling_factor)
    ax.imshow(im, cmap="Greys_r", extent=[0.0, xmax, 0.0, ymax])

    invalid = mask.astype("bool")
    valid = ~invalid
    plt.quiver(x[invalid],
               y[invalid],
               u3[invalid],
               v3[invalid],
               color="r",
               width=width)
    plt.quiver(x[valid],
               y[valid],
               u3[valid],
               v3[valid],
               color="b",
               width=width)

    ax.set_aspect(1.)
    plt.title(r'Velocity Vectors Field (Frame #%d) $(\frac{\mu m}{hour})$' %
              idx)
    plt.savefig(dir_name + "/" + "vec_page%d.png" % idx, dpi=200)
    plt.show()
    plt.close()