Example #1
def test_display_vector_field(file_a=_file_a, file_b=_file_b, test_file=_test_file):
    a = imread(file_a)
    b = imread(file_b)

    window_size = 32
    overlap = 16
    search_area_size = 40

    u, v, s2n = extended_search_area_piv(a, b, window_size,
                                         search_area_size=search_area_size,
                                         overlap=overlap,
                                         correlation_method='circular',
                                         normalized_correlation=False)

    x, y = get_coordinates(a.shape, search_area_size=search_area_size, overlap=overlap)

    x, y, u, v = transform_coordinates(x, y, u, v)

    mask = np.zeros_like(x)
    mask[-1,1] = 1 # test of invalid vector plot
    save(x, y, u, v, mask, 'tmp.txt')
    fig, ax = plt.subplots(figsize=(6, 6))
    display_vector_field('tmp.txt', on_img=True, image_name=file_a, ax=ax)
    decorators.remove_ticks_and_titles(fig)
    fig.savefig('./tmp.png')
    res = compare.compare_images('./tmp.png', test_file, 0.001)
    assert res is None
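A minimal sketch of the module-level imports and fixtures this test needs to run (normally placed above the function); the file paths are placeholders, and transform_coordinates is assumed to live in openpiv.tools as in recent OpenPIV releases.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.testing import compare, decorators
from openpiv.tools import imread, save, display_vector_field, transform_coordinates
from openpiv.pyprocess import extended_search_area_piv, get_coordinates

# placeholder fixtures: point these at a real PIV image pair and a reference figure
_file_a = 'exp1_001_a.bmp'
_file_b = 'exp1_001_b.bmp'
_test_file = 'test_display_vector_field_expected.png'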
Example #2
def func(args):
    """A function to process each image pair."""

    # this line is REQUIRED for multiprocessing to work
    # always use it in your custom function

    file_a, file_b, counter = args

    #####################
    # Here goes your code
    #####################

    # read images into numpy arrays
    frame_a = tools.imread(os.path.join(path, file_a))
    frame_b = tools.imread(os.path.join(path, file_b))

    frame_a = (frame_a * 1024).astype(np.int32)
    frame_b = (frame_b * 1024).astype(np.int32)

    # process image pair with extended search area piv algorithm.
    u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, \
        window_size=64, overlap=32, dt=0.02, search_area_size=128, sig2noise_method='peak2peak')
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.5)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    # get window centers coordinates
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=128,
                                     overlap=32)
    # save to a file
    tools.save(x, y, u, v, mask, 'test2_%03d.txt' % counter)
    tools.display_vector_field('test2_%03d.txt' % counter)
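func above has the (file_a, file_b, counter) signature expected by OpenPIV's batch helper; a minimal dispatch sketch, assuming the tools.Multiprocesser class of the OpenPIV versions these examples target (the file patterns and CPU count are placeholders).

# hypothetical driver: hand every (file_a, file_b, counter) triple to func in parallel
task = tools.Multiprocesser(data_dir=path,
                            pattern_a='frame_*_a.tif',   # placeholder patterns
                            pattern_b='frame_*_b.tif')
task.run(func=func, n_cpus=4)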
def openpiv_default_run(im1, im2):
    """ default settings for OpenPIV analysis using
    extended_search_area_piv algorithm for two images
    
    Inputs:
        im1,im2 : str,str = path of two image
    """
    frame_a = tools.imread(im1)
    frame_b = tools.imread(im2)

    u, v, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=32,
        overlap=8,
        dt=1,
        search_area_size=64,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=32,
                                   overlap=8)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=1)
    tools.save(x, y, u, v, mask, list_of_images[0] + '.txt')
    fig, ax = tools.display_vector_field(list_of_images[0] + '.txt',
                                         on_img=True,
                                         image_name=list_of_images[0],
                                         scaling_factor=1,
                                         ax=None)
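openpiv_default_run names its output after a module-level list_of_images, which is not defined in the snippet; a hypothetical call, with the glob pattern as a placeholder.

# hypothetical usage: openpiv_default_run expects list_of_images at module level,
# because it builds the output file name from list_of_images[0]
import glob
list_of_images = sorted(glob.glob('exp1_001_*.bmp'))  # placeholder pattern
openpiv_default_run(list_of_images[0], list_of_images[1])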
Example #4
def run_single(index, scale=1, src_dir=None, save_dir=None):
    frame_a = tools.imread(os.path.join(src_dir, f'{index:06}.tif'))
    frame_b = tools.imread(os.path.join(src_dir, f'{index + 1:06}.tif'))
    # no background removal will be performed so 'mask' is initialized to 1 everywhere
    mask = np.ones(frame_a.shape, dtype=np.int32)

    # main algorithm
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        x, y, u, v, mask = process.WiDIM(frame_a.astype(np.int32), 
                                         frame_b.astype(np.int32),
                                         mask,
                                         min_window_size=MIN_WINDOW_SIZE,
                                         overlap_ratio=0.0,
                                         coarse_factor=2,
                                         dt=DT,
                                         validation_method='mean_velocity', 
                                         trust_1st_iter=1, 
                                         validation_iter=1, 
                                         tolerance=0.4,
                                         nb_iter_max=3,
                                         sig2noise_method='peak2peak')

    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=SCALING_FACTOR)


    tmp_fname = '.tmp_' + ''.join(random.choices(string.ascii_uppercase + string.digits, k=32))
    tools.save(x, y, u, v, mask, filename=tmp_fname)
    tools.display_vector_field(tmp_fname, scale=scale, width=LINE_WIDTH) # scale: vector length ratio; width: line width of vector arrows
    os.remove(tmp_fname)

    # plt.quiver(x, y, u3, v3, color='blue')
    if save_dir is not None:
        save_path = os.path.join(save_dir, f'{index:06}.pdf')
        print(save_path)

        plt.savefig(save_path)
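run_single reads the module-level constants MIN_WINDOW_SIZE, DT, SCALING_FACTOR and LINE_WIDTH; a hypothetical setup for them and a single call, with all values and paths as placeholders.

# hypothetical module-level constants, inferred from the names used in run_single
MIN_WINDOW_SIZE = 16     # px, smallest WiDIM interrogation window
DT = 0.02                # s, time between the two frames
SCALING_FACTOR = 1.0     # px per physical unit
LINE_WIDTH = 0.002       # quiver arrow width

run_single(0, scale=50, src_dir='frames', save_dir='results')  # placeholder paths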
u, v = filters.replace_outliers(u,
                                v,
                                method='localmean',
                                max_iter=3,
                                kernel_size=3)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
# save to a file
tools.save(x,
           y,
           u,
           v,
           mask,
           '../data/test4/test.txt',
           fmt='%9.6f',
           delimiter='\t')
tools.display_vector_field('../data/test4/test.txt', scale=50, width=0.002)

# masking using not optimal choice of the methods or parameters:
masked_a, _ = preprocess.dynamic_masking(frame_a,
                                         method='edges',
                                         filter_size=7,
                                         threshold=0.005)
masked_b, _ = preprocess.dynamic_masking(frame_b,
                                         method='intensity',
                                         filter_size=3,
                                         threshold=0.0)
plt.imshow(np.c_[masked_a, masked_b], cmap='gray')

# masking using optimal (manually tuned) set of parameters and the right method:
masked_a, _ = preprocess.dynamic_masking(frame_a,
                                         method='edges',
Example #6
from openpiv import tools, validation, process, filters, scaling, pyprocess
import numpy as np

frame_a  = tools.imread( 'exp1_001_a.bmp' )
frame_b  = tools.imread( 'exp1_001_b.bmp' )

u, v, sig2noise = process.extended_search_area_piv( frame_a.astype(np.int32), 
frame_b.astype(np.int32), window_size=24, overlap=12, dt=0.02, search_area_size=64, 
sig2noise_method='peak2peak' )
x, y = process.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=12 )
u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 2.5 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
tools.save(x, y, u, v, mask, 'exp1_001.txt' )
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)



u, v, s2n= pyprocess.piv(frame_a, frame_b, corr_method='fft', window_size=24, overlap=12, 
dt=0.02, sig2noise_method='peak2peak' )
x, y = pyprocess.get_coordinates( image_size=frame_a.shape, window_size=24, overlap=12 )
u, v, mask = validation.sig2noise_val( u, v, s2n, threshold = 2.5 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2.5)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
tools.save(x, y, u, v, mask, 'exp1_002.txt' )
tools.display_vector_field('exp1_002.txt', scale=100, width=0.0025)



Example #7
    def func(args):
        """A function to process each image pair."""

        # this line is REQUIRED for multiprocessing to work
        # always use it in your custom function

        file_a, file_b, counter = args

        # counter2=str(counter2)
        #####################
        # Here goes your code
        #####################

        " read images into numpy arrays"
        frame_a = imread(os.path.join(settings.filepath_images, file_a))
        frame_b = imread(os.path.join(settings.filepath_images, file_b))

        # Miguel: I just had a quick look, and I do not understand the reason
        # for this step.
        #  I propose to remove it.
        # frame_a = (frame_a*1024).astype(np.int32)
        # frame_b = (frame_b*1024).astype(np.int32)

        " crop to ROI"
        if settings.ROI == "full":
            frame_a = frame_a
            frame_b = frame_b
        else:
            frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
            frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]

        if settings.invert is True:
            frame_a = invert(frame_a)
            frame_b = invert(frame_b)

        if settings.show_all_plots:
            fig, ax = plt.subplots(1, 1)
            ax.imshow(frame_a, cmap=plt.get_cmap('Reds'))
            ax.imshow(frame_b, cmap=plt.get_cmap('Blues'), alpha=.5)
            plt.show()

        if settings.dynamic_masking_method in ("edge", "intensity"):
            frame_a, mask_a = preprocess.dynamic_masking(
                frame_a,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold,
            )
            frame_b, mask_b = preprocess.dynamic_masking(
                frame_b,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold,
            )

        # "first pass"
        x, y, u, v, s2n = first_pass(frame_a, frame_b, settings)

        if settings.show_all_plots:
            plt.figure()
            plt.quiver(x, y, u, -v, color='b')
            # plt.gca().invert_yaxis()
            # plt.gca().set_aspect(1.)
            # plt.title('after first pass, invert')
            # plt.show()

        # " Image masking "
        if settings.image_mask:
            image_mask = np.logical_and(mask_a, mask_b)
            mask_coords = preprocess.mask_coordinates(image_mask)
            # mark those points on the grid of PIV inside the mask
            grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)

            # mask the velocity
            u = np.ma.masked_array(u, mask=grid_mask)
            v = np.ma.masked_array(v, mask=grid_mask)
        else:
            mask_coords = []
            u = np.ma.masked_array(u, mask=np.ma.nomask)
            v = np.ma.masked_array(v, mask=np.ma.nomask)

        if settings.validation_first_pass:
            u, v, mask = validation.typical_validation(u, v, s2n, settings)

        if settings.show_all_plots:
            # plt.figure()
            plt.quiver(x, y, u, -v, color='r')
            plt.gca().invert_yaxis()
            plt.gca().set_aspect(1.)
            plt.title('after first pass validation new, inverted')
            plt.show()

        # "filter to replace the values that where marked by the validation"
        if settings.num_iterations == 1 and settings.replace_vectors:
            # for multi-pass we cannot have holes in the data
            # after the first pass
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size,
            )
        # don't even check if it's true or false
        elif settings.num_iterations > 1:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size,
            )

            # "adding masks to add the effect of all the validations"
        if settings.smoothn:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

        if settings.image_mask:
            grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
            u = np.ma.masked_array(u, mask=grid_mask)
            v = np.ma.masked_array(v, mask=grid_mask)
        else:
            u = np.ma.masked_array(u, np.ma.nomask)
            v = np.ma.masked_array(v, np.ma.nomask)

        if settings.show_all_plots:
            plt.figure()
            plt.quiver(x, y, u, -v)
            plt.gca().invert_yaxis()
            plt.gca().set_aspect(1.)
            plt.title('before multi pass, inverted')
            plt.show()

        if not isinstance(u, np.ma.MaskedArray):
            raise ValueError("Expected masked array")
        """ Multi pass """

        for i in range(1, settings.num_iterations):

            if not isinstance(u, np.ma.MaskedArray):
                raise ValueError("Expected masked array")

            x, y, u, v, s2n, mask = multipass_img_deform(
                frame_a,
                frame_b,
                i,
                x,
                y,
                u,
                v,
                settings,
                mask_coords=mask_coords)

            # If the smoothing is active, we do it at each pass
            # but not the last one
            if settings.smoothn is True and i < settings.num_iterations - 1:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
            if not isinstance(u, np.ma.MaskedArray):
                raise ValueError('not a masked array anymore')

            if hasattr(settings, 'image_mask') and settings.image_mask:
                grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
                u = np.ma.masked_array(u, mask=grid_mask)
                v = np.ma.masked_array(v, mask=grid_mask)
            else:
                u = np.ma.masked_array(u, np.ma.nomask)
                v = np.ma.masked_array(v, np.ma.nomask)

            if settings.show_all_plots:
                plt.figure()
                plt.quiver(x, y, u, -1 * v, color='r')
                plt.gca().set_aspect(1.)
                plt.gca().invert_yaxis()
                plt.title('end of the multipass, invert')
                plt.show()

        if settings.show_all_plots and settings.num_iterations > 1:
            plt.figure()
            plt.quiver(x, y, u, -v)
            plt.gca().invert_yaxis()
            plt.gca().set_aspect(1.)
            plt.title('after multi pass, before saving, inverted')
            plt.show()

        # fill the image-masked regions with zeros
        # (NaN would be an alternative; it is not obvious which is better)
        u = u.filled(0.)
        v = v.filled(0.)

        # "scales the results pixel-> meter"
        x, y, u, v = scaling.uniform(x,
                                     y,
                                     u,
                                     v,
                                     scaling_factor=settings.scaling_factor)

        if settings.image_mask:
            grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
            u = np.ma.masked_array(u, mask=grid_mask)
            v = np.ma.masked_array(v, mask=grid_mask)
        else:
            u = np.ma.masked_array(u, np.ma.nomask)
            v = np.ma.masked_array(v, np.ma.nomask)

        # before saving we convert to the "physically relevant"
        # right-hand coordinate system with 0,0 at the bottom left,
        # x to the right, y upwards, and u, v transformed accordingly

        x, y, u, v = transform_coordinates(x, y, u, v)
        # import pdb; pdb.set_trace()
        # "save to a file"
        tools.save(x,
                   y,
                   u,
                   v,
                   mask,
                   os.path.join(save_path, "field_A%03d.txt" % counter),
                   delimiter="\t")
        # "some other stuff that one might want to use"
        if settings.show_plot or settings.save_plot:
            Name = os.path.join(save_path, "Image_A%03d.png" % counter)
            fig, _ = display_vector_field(
                os.path.join(save_path, "field_A%03d.txt" % counter),
                scale=settings.scale_plot,
            )
            if settings.save_plot is True:
                fig.savefig(Name)
            if settings.show_plot is True:
                plt.show()

        print(f"Image Pair {counter + 1}")
        print(file_a.rsplit('/')[-1], file_b.rsplit('/')[-1])
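This worker follows the openpiv.windef processing chain; a sketch of the imports and free names it relies on (the invert helper is assumed to be skimage's, and settings plus save_path are expected to come from the enclosing scope).

import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.util import invert                      # assumption: image inversion helper
from openpiv import filters, preprocess, smoothn, tools, validation
from openpiv.tools import imread, display_vector_field, transform_coordinates
from openpiv.windef import first_pass, multipass_img_deform
# settings (a windef settings object) and save_path are assumed to be defined
# in the scope that creates func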
Example #8
    def func(args):
        """A function to process each image pair."""

        # this line is REQUIRED for multiprocessing to work
        # always use it in your custom function

        file_a, file_b, counter = args

        # counter2=str(counter2)
        #####################
        # Here goes your code
        #####################

        ' read images into numpy arrays'
        frame_a = tools.imread(os.path.join(settings.filepath_images, file_a))
        frame_b = tools.imread(os.path.join(settings.filepath_images, file_b))

        ## Miguel: I just had a quick look, and I do not understand the reason for this step.
        #  I propose to remove it.
        #frame_a = (frame_a*1024).astype(np.int32)
        #frame_b = (frame_b*1024).astype(np.int32)

        ' crop to ROI'
        if settings.ROI == 'full':
            frame_a = frame_a
            frame_b = frame_b
        else:
            frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
            frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                              settings.ROI[2]:settings.ROI[3]]
        if settings.dynamic_masking_method in ('edge', 'intensity'):
            frame_a = preprocess.dynamic_masking(
                frame_a,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)
            frame_b = preprocess.dynamic_masking(
                frame_b,
                method=settings.dynamic_masking_method,
                filter_size=settings.dynamic_masking_filter_size,
                threshold=settings.dynamic_masking_threshold)
        '''%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'''
        'first pass'
        x, y, u, v, sig2noise_ratio = first_pass(
            frame_a,
            frame_b,
            settings.windowsizes[0],
            settings.overlap[0],
            settings.iterations,
            correlation_method=settings.correlation_method,
            subpixel_method=settings.subpixel_method,
            do_sig2noise=settings.extract_sig2noise,
            sig2noise_method=settings.sig2noise_method,
            sig2noise_mask=settings.sig2noise_mask,
        )

        'validation using global limits, std and local median'
        '''MinMaxU : two elements tuple
            sets the limits of the u displacement component
            Used for validation.

        MinMaxV : two elements tuple
            sets the limits of the v displacement component
            Used for validation.

        std_threshold : float
            sets the threshold for the std validation

        median_threshold : float
            sets the threshold for the median validation

        filter_method : string
            the method used to replace the non-valid vectors
            Methods:
                'localmean',
                'disk',
                'distance', 

        max_filter_iteration : int
            maximum of filter iterations to replace nans

        filter_kernel_size : int
            size of the kernel used for the filtering'''

        mask = np.full_like(x, False)
        if settings.validation_first_pass == True:
            u, v, mask_g = validation.global_val(u, v, settings.MinMax_U_disp,
                                                 settings.MinMax_V_disp)
            u, v, mask_s = validation.global_std(
                u, v, std_threshold=settings.std_threshold)
            u, v, mask_m = validation.local_median_val(
                u,
                v,
                u_threshold=settings.median_threshold,
                v_threshold=settings.median_threshold,
                size=settings.median_size)
            if settings.extract_sig2noise == True and settings.iterations == 1 and settings.do_sig2noise_validation == True:
                u, v, mask_s2n = validation.sig2noise_val(
                    u,
                    v,
                    sig2noise_ratio,
                    threshold=settings.sig2noise_threshold)
                mask = mask + mask_g + mask_m + mask_s + mask_s2n
            else:
                mask = mask + mask_g + mask_m + mask_s
        'filter to replace the values that were marked by the validation'
        if settings.iterations > 1:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            'adding masks to add the effect of all the validations'
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
        elif settings.iterations == 1 and settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            'adding masks to add the effect of all the validations'
            if settings.smoothn == True:
                u, v = filters.replace_outliers(
                    u,
                    v,
                    method=settings.filter_method,
                    max_iter=settings.max_filter_iteration,
                    kernel_size=settings.filter_kernel_size)
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)

        i = 1
        'all the following passes'
        for i in range(2, settings.iterations + 1):
            x, y, u, v, sig2noise_ratio, mask = multipass_img_deform(
                frame_a,
                frame_b,
                settings.windowsizes[i - 1],
                settings.overlap[i - 1],
                settings.iterations,
                i,
                x,
                y,
                u,
                v,
                correlation_method=settings.correlation_method,
                subpixel_method=settings.subpixel_method,
                do_sig2noise=settings.extract_sig2noise,
                sig2noise_method=settings.sig2noise_method,
                sig2noise_mask=settings.sig2noise_mask,
                MinMaxU=settings.MinMax_U_disp,
                MinMaxV=settings.MinMax_V_disp,
                std_threshold=settings.std_threshold,
                median_threshold=settings.median_threshold,
                median_size=settings.median_size,
                filter_method=settings.filter_method,
                max_filter_iteration=settings.max_filter_iteration,
                filter_kernel_size=settings.filter_kernel_size,
                interpolation_order=settings.interpolation_order)
            # If the smoothing is active, we do it at each pass
            if settings.smoothn == True:
                u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                    u, s=settings.smoothn_p)
                v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                    v, s=settings.smoothn_p)
        '''%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'''
        if settings.extract_sig2noise == True and i == settings.iterations and settings.iterations != 1 and settings.do_sig2noise_validation == True:
            u, v, mask_s2n = validation.sig2noise_val(
                u, v, sig2noise_ratio, threshold=settings.sig2noise_threshold)
            mask = mask + mask_s2n
        if settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
        'pixel/frame->pixel/sec'
        u = u / settings.dt
        v = v / settings.dt
        'scales the results pixel-> meter'
        x, y, u, v = scaling.uniform(x,
                                     y,
                                     u,
                                     v,
                                     scaling_factor=settings.scaling_factor)
        'save to a file'
        save(x,
             y,
             u,
             v,
             sig2noise_ratio,
             mask,
             os.path.join(save_path, 'field_A%03d.txt' % counter),
             delimiter='\t')
        'some messages to check if it is still alive'

        'some other stuff that one might want to use'
        if settings.show_plot == True or settings.save_plot == True:
            plt.close('all')
            plt.ioff()
            Name = os.path.join(save_path, 'Image_A%03d.png' % counter)
            display_vector_field(os.path.join(save_path,
                                              'field_A%03d.txt' % counter),
                                 scale=settings.scale_plot)
            if settings.save_plot == True:
                plt.savefig(Name)
            if settings.show_plot == True:
                plt.show()

        print('Image Pair ' + str(counter + 1))
Example #9

scaling_factor = 100

# we can run it from any folder
path = os.path.dirname(os.path.abspath(__file__))


frame_a  = tools.imread( os.path.join(path,'../test2/2image_00.tif'))
frame_b  = tools.imread( os.path.join(path,'../test2/2image_01.tif'))

# no background removal will be performed so 'mark' is initialized to 1 everywhere
mark = np.ones(frame_a.shape, dtype=np.int32)

#main algorithm
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    x, y, u, v, mask = process.WiDIM(frame_a.astype(np.int32),
                                     frame_b.astype(np.int32),
                                     mark,
                                     min_window_size=16,
                                     overlap_ratio=0.0,
                                     coarse_factor=2,
                                     dt=0.02,
                                     validation_method='mean_velocity',
                                     trust_1st_iter=1,
                                     validation_iter=1,
                                     tolerance=0.7,
                                     nb_iter_max=3,
                                     sig2noise_method='peak2peak')

#display results
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = scaling_factor )

tools.save(x, y, u, v, mask, '2image_00.txt' )

tools.display_vector_field('2image_00.txt',on_img=True, image_name=os.path.join(path,'../test2/2image_00.tif'), window_size=16, scaling_factor=scaling_factor, scale=200, width=0.001)

#further validation can be performed to eliminate the few remaining wrong vectors
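One way to act on the comment above, as a sketch: a local-median test followed by local-mean replacement; the thresholds are placeholders and `from openpiv import validation, filters` is assumed.

# sketch of the extra validation step suggested above; thresholds are placeholders
u, v, mask2 = validation.local_median_val(u, v, u_threshold=3, v_threshold=3, size=1)
u, v = filters.replace_outliers(u, v, method='localmean', max_iter=5, kernel_size=2)
tools.save(x, y, u, v, mask2, '2image_00_filtered.txt')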
Example #10
                               overlap=overlap)
u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
u2, v2 = filters.replace_outliers(u1,
                                  v1,
                                  method='localmean',
                                  max_iter=5,
                                  kernel_size=5)
u3, v3, mask1 = validation.local_median_val(u2, v2, 3, 3, 1)
u4, v4 = filters.replace_outliers(u3,
                                  v3,
                                  method='localmean',
                                  max_iter=5,
                                  kernel_size=5)
tools.save(x, y, u4, v4, mask1, '../testResult/test.txt')

tools.display_vector_field('../testResult/test.txt', scale=500, width=0.0025)


#%% define node
def process_node(i):
    DeltaFrame = 300
    winsize = 12  # pixels
    searchsize = 12  #pixels
    overlap = 6  # pixels
    dt = DeltaFrame * 1. / fps  # sec, time between the two correlated frames
    frame_a = tools.imread(fileNameList[i])
    frame_b = tools.imread(fileNameList[i + DeltaFrame])
    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
#u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, window_size=32, overlap=8, dt=.1, sig2noise_method='peak2peak' )
x, y = pyprocess.get_coordinates( image_size=frame_a.shape, window_size=32, overlap=8 )
x, y = process.get_coordinates( image_size=frame_a.shape, window_size=32, overlap=8 )

# %%
u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 1.3 )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )

# %%
plt.figure()
plt.quiver(x,y,u,v)

# %%
tools.save(x, y, u, v, mask, 'Y4-S3_Camera000398.txt' )
tools.display_vector_field('Y4-S3_Camera000398.txt', scale=3, width=0.0125)
# frame_vectors  = io.imshow(vectors)

# %%
x,y,u,v, mask = process.WiDIM(frame_a.astype(np.int32), frame_b.astype(np.int32), ones_like(frame_a).astype(np.int32), min_window_size=32, overlap_ratio=0.25, coarse_factor=0, dt=0.1, validation_method='mean_velocity', trust_1st_iter=0, validation_iter=0, tolerance=0.7, nb_iter_max=1, sig2noise_method='peak2peak')

# %%
tools.save(x, y, u, v, zeros_like(u), 'Y4-S3_Camera000398.txt' )
tools.display_vector_field('Y4-S3_Camera000398.txt', scale=300, width=0.005)

# %%
x,y,u,v, mask = process.WiDIM(frame_a.astype(np.int32), frame_b.astype(np.int32), ones_like(frame_a).astype(np.int32), min_window_size=16, overlap_ratio=0.25, coarse_factor=2, dt=0.1, validation_method='mean_velocity', trust_1st_iter=1, validation_iter=2, tolerance=0.7, nb_iter_max=4, sig2noise_method='peak2peak')

# %%
tools.save(x, y, u, v, zeros_like(u), 'Y4-S3_Camera000398.txt' )
tools.display_vector_field('Y4-S3_Camera000398.txt', scale=300, width=0.005)
settings.save_plot = False
# Choose whether you want to see the vector field or not: True or False
settings.show_plot = False
settings.scale_plot = 50  # select a value to scale the quiver plot of the vector field
# run the script with the given settings

# %%
windef.piv(settings)

# %%
for i, f in enumerate(res_list):
    fig, ax = plt.subplots(figsize=(12, 12))
    tools.display_vector_field(f,
                               on_img=True,
                               image_name=image_path / file_list[i],
                               scaling_factor=1,
                               scale=50,
                               width=0.002,
                               ax=ax,
                               widim=True)
    fig.savefig(f'{str(f)[:-4]}.png', dpi=150)

# settings.frame_pattern_a = f1
# settings.frame_pattern_b = f2
# windef.piv(settings);

# for f1,f2 in zip(file_list[0:-1:2],file_list[1::2]):
#     settings.frame_pattern_a = f1
#     settings.frame_pattern_b = f2
#     windef.piv(settings);

#     fig,ax = plt.subplots(figsize=(12,6))
Example #13
def run_piv(
    frame_a,
    frame_b,
    winsize=16,  # pixels, interrogation window size in frame A
    searchsize=20,  # pixels, search in image B
    overlap=8,  # pixels, 50% overlap
    dt=0.0001,  # sec, time interval between pulses
    image_check=False,
    show_vertical_profiles=False,
    figure_export_name='_results.png',
    text_export_name="_results.txt",
    scale_factor=1,
    pixel_density=36.74,
    arrow_width=0.001,
    show_result=True,
    u_bounds=(-100, 100),
    v_bounds=(-100, 100)):

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)

    x, y, u0, v0 = scaling.uniform(
        x, y, u0, v0, scaling_factor=pixel_density)  # no. pixel per distance

    u0, v0, mask = validation.global_val(u0, v0, u_bounds, v_bounds)

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)

    u3, v3 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=3)

    #save in the simple ASCII table format
    if np.std(u3) < 480:
        tools.save(x, y, u3, v3, sig2noise, mask, text_export_name)

    if image_check == True:
        fig, ax = plt.subplots(2, 1, figsize=(24, 12))
        ax[0].imshow(frame_a)
        ax[1].imshow(frame_b)

    io.imwrite(figure_export_name, frame_a)

    if show_result == True:
        fig, ax = plt.subplots(figsize=(24, 12))
        tools.display_vector_field(
            text_export_name,
            ax=ax,
            scaling_factor=pixel_density,
            scale=scale_factor,  # scale defines here the arrow length
            width=arrow_width,  # width is the thickness of the arrow
            on_img=True,  # overlay on the image
            image_name=figure_export_name)
        fig.savefig(figure_export_name)

    if show_vertical_profiles:
        field_shape = pyprocess.get_field_shape(image_size=frame_a.shape,
                                                search_area_size=searchsize,
                                                overlap=overlap)
        vertical_profiles(text_export_name, field_shape)

    print('Std of u3: %.3f' % np.std(u3))
    print('Mean of u3: %.3f' % np.mean(u3))

    return np.std(u3)
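A hypothetical call of run_piv on one image pair; the file names are placeholders, and the helper assumes openpiv's tools, pyprocess, validation, filters, scaling plus an imageio-style io and matplotlib are already imported.

# hypothetical usage of the helper defined above; file names are placeholders
frame_a = tools.imread('exp1_001_a.bmp')
frame_b = tools.imread('exp1_001_b.bmp')
u3_std = run_piv(frame_a, frame_b,
                 winsize=16, searchsize=20, overlap=8,
                 figure_export_name='pair_0001.png',
                 text_export_name='pair_0001.txt')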
Example #14
u2, v2 = filters.replace_outliers(u1,
                                  v1,
                                  method='localmean',
                                  max_iter=10,
                                  kernel_size=2)

# %%
x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor=1.)

# %%
tools.save(x, y, u3, v3, mask, 'exp1_001.txt')

# %%
# tools.display_vector_field('exp1_001.txt', scaling_factor=100., width=0.0025)

# %%
# If you need a larger view:
fig, ax = plt.subplots(figsize=(12, 12))
tools.display_vector_field(
    'exp1_001.txt',
    ax=ax,
    scaling_factor=1.0,
    scale=3500,
    width=0.0045,
    on_img=True,
    image_name='~/Downloads/bridgepile_wake/frame0001.tif')

# %%

# %%
Example #15
from openpiv import tools, pyprocess, scaling, validation, filters
import numpy as np

import os

# we can run it from any folder
path = os.path.dirname(os.path.abspath(__file__))


frame_a  = tools.imread( os.path.join(path,'../data/test1/exp1_001_a.bmp'))
frame_b  = tools.imread( os.path.join(path,'../data/test1/exp1_001_b.bmp'))

frame_a = (frame_a*1024).astype(np.int32)
frame_b = (frame_b*1024).astype(np.int32)

u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, \
    window_size=32, overlap=16, dt=0.02, search_area_size=64, sig2noise_method='peak2peak' )

print(u,v,sig2noise)

x, y = pyprocess.get_coordinates( image_size=frame_a.shape, search_area_size=64, overlap=16 )
u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 1.3 )
u, v, mask = validation.global_val( u, v, (-1000, 2000), (-1000, 1000) )
u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )
tools.save(x, y, u, v, mask, '../data/test1/test_data.vec' )
tools.display_vector_field('../data/test1/test_data.vec', scale=75, width=0.0035)
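The file written by tools.save above is a plain ASCII table, so it can be reloaded with numpy for further analysis; a sketch assuming the column order x, y, u, v, mask of the save call.

# optional check: reload the saved field as flat columns (x, y, u, v, mask)
data = np.loadtxt('../data/test1/test_data.vec')
x_r, y_r, u_r, v_r, mask_r = data.T
print(x_r.shape, u_r.mean(), v_r.mean())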

Example #16
    overlap=12,
    dt=0.02,
    search_area_size=64,
    sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24,
                               overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=2.5)
u, v = filters.replace_outliers(u,
                                v,
                                method='localmean',
                                max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001_extended.txt')
tools.display_vector_field('exp1_001_extended.txt', scale=100, width=0.0025)

# %%
# %%time
u, v, sig2noise = pyprocess.extended_search_area_piv(
    frame_a,
    frame_b,
    corr_method='fft',
    window_size=24,
    overlap=12,
    dt=0.02,
    sig2noise_method='peak2peak')
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=24,
                                 overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=2.5)
Example #17
import os

# we can run it from any folder
path = os.path.dirname(os.path.abspath(__file__))

frame_a = tools.imread(os.path.join(path, '../test1/exp1_001_a.bmp'))
frame_b = tools.imread(os.path.join(path, '../test1/exp1_001_b.bmp'))

frame_a = (frame_a * 1024).astype(np.int32)
frame_b = (frame_b * 1024).astype(np.int32)

u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, \
    window_size=32, overlap=16, dt=0.02, search_area_size=64, sig2noise_method='peak2peak' )

print(u, v, sig2noise)

x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 search_area_size=64,
                                 overlap=16)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v, mask = validation.global_val(u, v, (-1000, 2000), (-1000, 1000))
u, v = filters.replace_outliers(u,
                                v,
                                method='localmean',
                                max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'test1.vec')
tools.display_vector_field('test1.vec', scale=75, width=0.0035)
Example #18
os.chdir(inputFilePath)
data = np.loadtxt('0.txt')
m, n = data.shape
Result = data
Result[:, 2] = 0.
Result[:, 3] = 0.
Result[:, 4] = 0.
for i in range(390):
    txtName = str(i) + '.txt'
    data = np.loadtxt(txtName)
    m, n = data.shape
    for j in range(m):
        if (data[j][4] == 0):
            Result[j][4] += 1.
            Result[j][2] += data[j][2]
            Result[j][3] += data[j][3]
for i in range(m):
    if (Result[i][4] != 0.):
        Result[i][2] /= Result[i][4]
        Result[i][3] /= Result[i][4]

#%% save the Result data as txt for tecplot
CalibrateData = 1  # micrometers per pixel
Result[:, 0:5] *= CalibrateData
#tools.save(Result[:,0], Result[:,1], Result[:,2], Result[:,3], 'Result.txt')
np.savetxt('result.txt', Result, delimiter=',', fmt='%8f')
tools.display_vector_field('389.txt', scale=1000, width=0.0025)

#%% Check data size
print(data.shape)
    sig2noise_ratio,
    outliers_mask,
    os.path.join(save_path, "field_A%03d.txt" % counter),
    delimiter="\t",
)

# "some other stuff that one might want to use"
settings.show_plot = True
settings.save_plot = True

if settings.show_plot is True or settings.save_plot is True:
    plt.close("all")
    plt.ioff()
    filename = os.path.join(save_path, "Image_A%03d.png" % counter)
    tools.display_vector_field(
        os.path.join(save_path, "field_A%03d.txt" % counter),
        scale=settings.scale_plot,
    )
    if settings.save_plot is True:
        plt.savefig(filename)
    if settings.show_plot is True:
        plt.show()

print("Image Pair " + str(counter+1))


# In[40]:


attrs = vars(settings)
print(', '.join("%s: %s \n" % item for item in attrs.items()))
Example #20
    def quick_piv(self, search_dict, index_a=100, index_b=101, folder=None):
        self.show_piv_param()
        ns = Namespace(**self.piv_param)

        if folder is None:
            img_a, img_b = self.read_two_images(search_dict,
                                                index_a=index_a,
                                                index_b=index_b)

            location_path = [
                x['path'] for x in self.piv_dict_list
                if search_dict.items() <= x.items()
            ]
            results_path = os.path.join(self.results_path, *location_path)
            try:
                os.makedirs(results_path)
            except FileExistsError:
                pass
        else:
            try:
                file_a_path = os.path.join(self.path, folder,
                                           'frame_%06d.tiff' % index_a)
                file_b_path = os.path.join(self.path, folder,
                                           'frame_%06d.tiff' % index_b)

                img_a = np.array(Image.open(file_a_path))
                img_b = np.array(Image.open(file_b_path))
            except Exception:  # image pair missing or unreadable
                return None

        # crop
        img_a = img_a[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]
        img_b = img_b[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]

        u0, v0, sig2noise = pyprocess.extended_search_area_piv(
            img_a.astype(np.int32),
            img_b.astype(np.int32),
            window_size=ns.winsize,
            overlap=ns.overlap,
            dt=ns.dt,
            search_area_size=ns.searchsize,
            sig2noise_method='peak2peak')

        x, y = pyprocess.get_coordinates(image_size=img_a.shape,
                                         search_area_size=ns.searchsize,
                                         overlap=ns.overlap)

        x, y, u0, v0 = scaling.uniform(
            x, y, u0, v0,
            scaling_factor=ns.pixel_density)  # no. pixel per distance

        u0, v0, mask = validation.global_val(
            u0, v0, (ns.u_lower_bound, ns.u_upper_bound),
            (ns.v_lower_bound, ns.v_upper_bound))

        u1, v1, mask = validation.sig2noise_val(u0,
                                                v0,
                                                sig2noise,
                                                threshold=1.01)

        u3, v3 = filters.replace_outliers(u1,
                                          v1,
                                          method='localmean',
                                          max_iter=500,
                                          kernel_size=3)

        #save in the simple ASCII table format
        tools.save(x, y, u3, v3, sig2noise, mask,
                   os.path.join(results_path, ns.text_export_name))

        if ns.image_check == True:
            fig, ax = plt.subplots(2, 1, figsize=(24, 12))
            ax[0].imshow(img_a)
            ax[1].imshow(img_b)

        io.imwrite(os.path.join(results_path, ns.figure_export_name), img_a)

        if ns.show_result == True:
            fig, ax = plt.subplots(figsize=(24, 12))
            tools.display_vector_field(
                os.path.join(results_path, ns.text_export_name),
                ax=ax,
                scaling_factor=ns.pixel_density,
                scale=ns.scale_factor,  # scale defines here the arrow length
                width=ns.arrow_width,  # width is the thickness of the arrow
                on_img=True,  # overlay on the image
                image_name=os.path.join(results_path, ns.figure_export_name))
            fig.savefig(os.path.join(results_path, ns.figure_export_name))

        if ns.show_vertical_profiles:
            field_shape = pyprocess.get_field_shape(
                image_size=img_a.shape,
                search_area_size=ns.searchsize,
                overlap=ns.overlap)
            vertical_profiles(ns.text_export_name, field_shape)

        print('Mean of u: %.3f' % np.mean(u3))
        print('Std of u: %.3f' % np.std(u3))
        print('Mean of v: %.3f' % np.mean(v3))
        print('Std of v: %.3f' % np.std(v3))

        output = np.array([np.mean(u3), np.std(u3), np.mean(v3), np.std(v3)])
        # if np.absolute(np.mean(v3)) < 50:
        #     output = self.quick_piv(search_dict,index_a = index_a + 1, index_b = index_b + 1)

        return x, y, u3, v3
# %%
u2, v2 = filters.replace_outliers(u1,
                                  v1,
                                  method='localmean',
                                  max_iter=10,
                                  kernel_size=2)

# %%
x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor=96.52)

# %%
tools.save(x, y, u3, v3, mask, 'exp1_001.txt')

# %%
tools.display_vector_field('exp1_001.txt', scale=50, width=0.0025)

# %%
# If you need a larger view:

fig, ax = plt.subplots(figsize=(12, 12))
tools.display_vector_field('exp1_001.txt',
                           ax=ax,
                           scaling_factor=96.52,
                           scale=50,
                           width=0.0025,
                           on_img=True,
                           image_name='../test1/exp1_001_a.bmp')

# %%
tools.save(x, y, u, v, mask, 'Y4-S3_Camera000398_b.txt')

# %%
# "natural" view without image
fig, ax = plt.subplots(2, 1, figsize=(6, 12))
ax[0].invert_yaxis()
ax[0].quiver(x, y, u, v)
ax[0].set_title(' Sort of natural view ')

ax[1].quiver(x, y, u, -v)
ax[1].set_title('Quiver with 0,0 origin needs `negative` v for display')
# plt.quiver(x,y,u,v)

# %%
tools.display_vector_field('Y4-S3_Camera000398_a.txt',
                           on_img=True,
                           image_name='../test3/Y4-S3_Camera000398.tif',
                           scaling_factor=96.52)

# %%
tools.display_vector_field('Y4-S3_Camera000398_a.txt')

# %%
tools.display_vector_field('Y4-S3_Camera000398_b.txt')

# %%
x, y, u, v, mask = process.WiDIM(frame_a.astype(np.int32),
                                 frame_b.astype(np.int32),
                                 ones_like(frame_a).astype(np.int32),
                                 min_window_size=32,
                                 overlap_ratio=0.25,
                                 coarse_factor=0,
Example #23
tools.save(
    values_u, u_values, u_avg, v_avg, mask,
    'C:/Users/SSchurer/Documents/TU_Delft/Thesis/LSPIV/Edited/After rain/OSGGC/amount_of_values.txt'
)

# save the average vectors in a .txt file
tools.save(
    x, y, u_avg, v_avg, mask_avg,
    'C:/Users/SSchurer/Documents/TU_Delft/Thesis/LSPIV/Edited/After rain/OSGGC/average.txt'
)

# display the vector field that is saved (blue field)
tools.display_vector_field(
    'C:/Users/SSchurer/Documents/TU_Delft/Thesis/LSPIV/Edited/After rain/OSGGC/average.txt',
    scale=6,
    width=0.0035,
)
plt.savefig(
    'C:/Users/SSchurer/Documents/TU_Delft/Thesis/LSPIV/Edited/After rain/OSGGC/average.jpg'
)

plt.axis('scaled')

plt.plot(u_values, v_values, 'ko', ms=1)
plt.axis('scaled')
plt.xlabel('u')
plt.ylabel('v')
plt.savefig(
    'C:/Users/SSchurer/Documents/TU_Delft/Thesis/LSPIV/Edited/After rain/OSGGC/deviation_U_V_adjusted'
)
Example #24
ax[0].imshow(frame_a,cmap=plt.cm.gray)
ax[1].imshow(frame_b,cmap=plt.cm.gray)


# %%
winsize = 24 # pixels
searchsize = 64  # pixels, search in image B
overlap = 12 # pixels
dt = 0.02 # sec


u0, v0, sig2noise = process.extended_search_area_piv( frame_a.astype(np.int32), frame_b.astype(np.int32), window_size=winsize, overlap=overlap, dt=dt, search_area_size=searchsize, sig2noise_method='peak2peak' )

# %%
x, y = process.get_coordinates( image_size=frame_a.shape, window_size=winsize, overlap=overlap )

# %%
u1, v1, mask = validation.sig2noise_val( u0, v0, sig2noise, threshold = 1.3 )

# %%
u2, v2 = filters.replace_outliers( u1, v1, method='localmean', max_iter=10, kernel_size=2)

# %%
x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor = 96.52 )

# %%
tools.save(x, y, u3, v3, mask, 'exp1_001.txt' )

# %%
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)
Example #25
u_avg = u_sum / N
v_avg = v_sum / N
mask_avg = np.zeros_like(mask)

# save the average vectors in a .txt file
tools.save(
    x * winsize, y * winsize, u_avg, v_avg, mask_avg,
    'C:/Users/User/Documents/Universiteit STUDIE/TU Delft/MSC Thesis/PythonPIV/Dommel_Images_KH/average.txt'
)

# display the vector field that is saved (blue field)
tools.display_vector_field(
    'C:/Users/User/Documents/Universiteit STUDIE/TU Delft/MSC Thesis/PythonPIV/Dommel_Images_KH/average.txt',
    #                            on_img=True,
    #                            image_name='Brenta/'+str(images[0]),
    #                            window_size=winsize,
    #                            scaling_factor=scaling_factor,
    scale=6,
    width=0.0035,
)
plt.savefig('average.jpg')

plt.axis('scaled')

plt.plot(u, v, 'ko', ms=1)
plt.axis('scaled')
plt.xlabel('u')
plt.ylabel('v')
plt.savefig('deviation_U_V')

fig = plt.figure(figsize=(9, 4))