def func(args):
    """A function to process each image pair."""
    # this line is REQUIRED for multiprocessing to work
    # always use it in your custom function
    file_a, file_b, counter = args

    #####################
    # Here goes your code
    #####################

    # read images into numpy arrays
    frame_a = tools.imread(os.path.join(path, file_a))
    frame_b = tools.imread(os.path.join(path, file_b))

    frame_a = (frame_a * 1024).astype(np.int32)
    frame_b = (frame_b * 1024).astype(np.int32)

    # process image pair with the extended search area PIV algorithm
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a, frame_b,
        window_size=64, overlap=32, dt=0.02,
        search_area_size=128, sig2noise_method='peak2peak')
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.5)
    u, v = filters.replace_outliers(u, v, method='localmean',
                                    max_iter=10, kernel_size=2)

    # get window centers coordinates
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=128, overlap=32)

    # save to a file
    tools.save(x, y, u, v, mask, 'test2_%03d.txt' % counter)
    tools.display_vector_field('test2_%03d.txt' % counter)
def openpiv_default_run(im1, im2):
    """Default settings for OpenPIV analysis using the
    extended_search_area_piv algorithm for two images.

    Inputs:
        im1, im2 : str, str = paths of the two images
    """
    frame_a = tools.imread(im1)
    frame_b = tools.imread(im2)

    u, v, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=32, overlap=8, dt=1,
        search_area_size=64, sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=32, overlap=8)

    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u, v, method='localmean',
                                    max_iter=10, kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=1)

    tools.save(x, y, u, v, mask, im1 + '.txt')
    fig, ax = tools.display_vector_field(im1 + '.txt', on_img=True,
                                         image_name=im1,
                                         scaling_factor=1, ax=None)
def process_node(i):
    DeltaFrame = 1
    winsize = 50      # pixels
    searchsize = 50   # pixels
    overlap = 25      # pixels
    dt = DeltaFrame * 1. / fps  # sec

    frame_a = tools.imread(fileNameList[i])
    frame_b = tools.imread(fileNameList[i + DeltaFrame])

    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=winsize, overlap=overlap, dt=dt,
        search_area_size=searchsize, sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize, overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1, v1, method='localmean',
                                      max_iter=5, kernel_size=5)
    tools.save(x, y, u2, v2, mask,
               '../muscle10fpsbotleft_results/' + str(i) + '.txt')
def test_display_vector_field(file_a=_file_a, file_b=_file_b, test_file=_test_file):
    a = imread(file_a)
    b = imread(file_b)

    window_size = 32
    overlap = 16
    search_area_size = 40

    u, v, s2n = extended_search_area_piv(a, b, window_size,
                                         search_area_size=search_area_size,
                                         overlap=overlap,
                                         correlation_method='circular',
                                         normalized_correlation=False)
    x, y = get_coordinates(a.shape, search_area_size=search_area_size,
                           overlap=overlap)
    x, y, u, v = transform_coordinates(x, y, u, v)

    mask = np.zeros_like(x)
    mask[-1, 1] = 1  # test of invalid vector plot
    save(x, y, u, v, mask, 'tmp.txt')

    fig, ax = plt.subplots(figsize=(6, 6))
    display_vector_field('tmp.txt', on_img=True, image_name=file_a, ax=ax)
    decorators.remove_ticks_and_titles(fig)
    fig.savefig('./tmp.png')
    res = compare.compare_images('./tmp.png', test_file, 0.001)
    assert res is None
def two_images(image_1, image_2, search_area_size=64, window_size=32,
               overlap=16, dt=0.02):
    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))
    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a = tools.imread('image_1.bmp')
    frame_b = tools.imread('image_2.bmp')

    frame_a = (frame_a * 1024).astype(np.int32)
    frame_b = (frame_b * 1024).astype(np.int32)

    if not search_area_size:
        search_area_size = 64
    if not window_size:
        window_size = 32
    if not overlap:
        overlap = 16
    if not dt:
        dt = 0.02

    u, v, sig2noise = process.extended_search_area_piv(
        frame_a, frame_b, window_size=window_size, overlap=overlap, dt=dt,
        search_area_size=search_area_size, sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=window_size, overlap=overlap)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v, mask = validation.global_val(u, v, (-1000, 2000), (-1000, 1000))
    u, v = filters.replace_outliers(u, v, method='localmean',
                                    max_iter=10, kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

    file_name_text = 'result.txt'
    file_name_png = 'result.png'
    if os.path.isfile(file_name_text):
        os.remove(file_name_text)
    if os.path.isfile(file_name_png):
        os.remove(file_name_png)
    tools.save(x, y, u, v, mask, file_name_text)

    a = np.loadtxt(file_name_text)
    fig = plt.figure()
    invalid = a[:, 4].astype('bool')
    fig.canvas.set_window_title(
        'Vector field, ' + str(np.count_nonzero(invalid)) + ' wrong vectors')
    valid = ~invalid
    plt.quiver(a[invalid, 0], a[invalid, 1], a[invalid, 2], a[invalid, 3],
               color='r', scale=100, width=0.0025)
    plt.quiver(a[valid, 0], a[valid, 1], a[valid, 2], a[valid, 3],
               color='b', scale=100, width=0.0025)
    plt.draw()
    plt.savefig(file_name_png, format="png")

    # encodebytes replaces the deprecated base64.encodestring
    with open(file_name_text, "rb") as resultFileText:
        file_reader_text = resultFileText.read()
        text_encode = base64.encodebytes(file_reader_text)
        base64_string_text = str(text_encode, 'utf-8')
    with open(file_name_png, "rb") as resultFilePng:
        file_reader_image = resultFilePng.read()
        image_encode = base64.encodebytes(file_reader_image)
        base64_string_image = str(image_encode, 'utf-8')

    return base64_string_text, base64_string_image
def analyzer(frame_a, frame_b, text, plot, num_scene, pathout, scal, zre, xre, dt):
    winsize = 16     # pixels
    searchsize = 32  # pixels, search in image b
    overlap = 8      # pixels

    frame_a = cv2.adaptiveThreshold(frame_a, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    frame_b = cv2.adaptiveThreshold(frame_b, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    # frame_a = cv2.adaptiveThreshold(frame_a, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
    #                                 cv2.THRESH_BINARY, 11, 2)
    # frame_b = cv2.adaptiveThreshold(frame_b, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
    #                                 cv2.THRESH_BINARY, 11, 2)

    plt.imshow(np.c_[frame_a, frame_b], cmap='gray')
    plt.savefig(pathout + '/filtered' + str(num_scene) + '.png', dpi=800)

    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=winsize, overlap=overlap, dt=dt,
        search_area_size=searchsize, sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize, overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1, v1, method='localmean',
                                      max_iter=10, kernel_size=2)
    # scaling_factor is in pixels per meter
    x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor=scal)

    u3 = np.flip(u3, axis=0)
    v3 = np.flip(v3, axis=0)
    xre = np.linspace(0, xre / 100, len(x[0, :]))
    zre = np.linspace(0, zre / 100, len(x[:, 0]))

    if plot == 1:
        piv_plotting(xre, zre, u3, v3, num_scene, pathout)
    if text == 0:
        tools.save(x, y, u3, v3, mask,
                   pathout + '/piv' + str(num_scene) + '.txt')
def process(args, bga, bgb, reflection):
    file_a, file_b, counter = args

    # read images into numpy arrays
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)

    # removing background and reflections
    frame_a = frame_a - bga
    frame_b = frame_b - bgb
    frame_a[reflection == 255] = 0
    frame_b[reflection == 255] = 0

    # applying a static mask (taking out the regions where we have walls)
    yp = [580, 435, 0, 0, 580, 580, 0, 0, 435, 580]
    xp = [570, 570, 680, 780, 780, 0, 0, 105, 230, 230]
    pnts = draw.polygon(yp, xp, frame_a.shape)
    frame_a[pnts] = 0
    frame_b[pnts] = 0

    # checking the resulting frame
    # fig, ax = plt.subplots(2, 2)
    # ax[0, 0].imshow(frame_a_org, cmap='gray')
    # ax[0, 1].imshow(frame_a, cmap='gray')
    # ax[1, 0].imshow(frame_b_org, cmap='gray')
    # ax[1, 1].imshow(frame_b, cmap='gray')
    # plt.tight_layout()
    # plt.show()

    # main piv processing
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a, frame_b,
        window_size=48, overlap=16, dt=0.001094,
        search_area_size=64, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=48, overlap=16)
    u, v, mask = validation.local_median_val(u, v, 2000, 2000, size=2)
    u, v = filters.replace_outliers(u, v, method='localmean',
                                    max_iter=10, kernel_size=2)
    u, *_ = smoothn(u, s=1.0)
    v, *_ = smoothn(v, s=1.0)

    # saving the results
    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, mask, save_file + '.dat')
def two_images(image_1, image_2):
    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))
    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a = tools.imread('image_1.bmp')
    frame_b = tools.imread('image_2.bmp')

    winsize = 32     # pixels
    searchsize = 64  # pixels, search in image B
    overlap = 12     # pixels
    dt = 0.02        # sec

    u, v, sig2noise = pyprocess.piv(frame_a.astype(np.int32),
                                    frame_b.astype(np.int32),
                                    window_size=winsize, overlap=overlap,
                                    dt=dt, search_size=searchsize,
                                    sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=searchsize, overlap=overlap)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u, v, method='localmean',
                                    max_iter=10, kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

    file_name = 'result.txt'
    if os.path.isfile(file_name):
        os.remove(file_name)
    # no masking, all values are valid
    tools.save(x, y, u, v, np.zeros_like(u), file_name)

    # encodebytes replaces the deprecated base64.encodestring
    with open(file_name, "rb") as resultFile:
        file_reader = resultFile.read()
        image_encode = base64.encodebytes(file_reader)
        base64_string = str(image_encode, 'utf-8')
    return base64_string
def ProcessPIV(args, bga, bgb, reflection, stg):
    # read images into numpy arrays
    file_a, file_b, counter = args
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)

    # removing background and reflections
    if bgb is not None:
        frame_a = frame_a - bga
        frame_b = frame_b - bgb
        frame_a[reflection == 255] = 0
        frame_b[reflection == 255] = 0
    # plt.imshow(frame_a, cmap='gray')
    # plt.show()

    # main piv processing
    u, v, s2n = pyprocess.extended_search_area_piv(
        frame_a, frame_b,
        window_size=stg['WS'], overlap=stg['OL'], dt=stg['DT'],
        search_area_size=stg['SA'], sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=stg['WS'], overlap=stg['OL'])

    if stg['BVR'] == 'on':
        u, v, mask = validation.local_median_val(u, v, stg['MF'][0],
                                                 stg['MF'][1], size=2)
        u, v, mask = validation.global_val(u, v, u_thresholds=stg['GF'][0],
                                           v_thresholds=stg['GF'][1])
        u, v = filters.replace_outliers(u, v, method='localmean',
                                        max_iter=10, kernel_size=2)
        u, *_ = smoothn(u, s=0.5)
        v, *_ = smoothn(v, s=0.5)
    x, y, u, v = scaling.uniform(x, y, u, v, stg['SC'])

    # saving the results
    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, s2n, save_file + '.dat')
def two_images(image_1, image_2):
    local_dir = os.path.dirname(os.path.realpath(__file__))

    newFile_1 = open('teting1.bmp', 'w+b')
    newFileByteArray = bytes(image_1)
    newFile_1.write(newFileByteArray)
    newFile_1.close()

    frame_a = tools.imread(local_dir + '/exp1_001_a.bmp')
    frame_b = tools.imread(local_dir + '/exp1_001_b.bmp')

    fig, ax = plt.subplots(1, 2, figsize=(10, 8))
    ax[0].imshow(frame_a, cmap=plt.cm.gray)
    ax[1].imshow(frame_b, cmap=plt.cm.gray)

    winsize = 32     # pixels
    searchsize = 64  # pixels, search in image B
    overlap = 12     # pixels
    dt = 0.02        # sec

    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=winsize, overlap=overlap, dt=dt,
        search_area_size=searchsize, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=searchsize, overlap=overlap)

    file_name = 'result.txt'
    # tools.save(x, y, u, v, np.zeros_like(u), 'exp1_001.txt')  # no masking, all values are valid
    tools.save(x, y, u, v, np.zeros_like(u), file_name)  # no masking, all values are valid

    with open(file_name, 'r') as result_file:
        data = result_file.read().replace('\n', '').replace('\t', ' ')
    return data
def run_single(index, scale=1, src_dir=None, save_dir=None):
    frame_a = tools.imread(os.path.join(src_dir, f'{index:06}.tif'))
    frame_b = tools.imread(os.path.join(src_dir, f'{index + 1:06}.tif'))

    # no background removal will be performed so 'mask' is initialized to 1 everywhere
    mask = np.ones(frame_a.shape, dtype=np.int32)

    # main algorithm
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        x, y, u, v, mask = process.WiDIM(frame_a.astype(np.int32),
                                         frame_b.astype(np.int32), mask,
                                         min_window_size=MIN_WINDOW_SIZE,
                                         overlap_ratio=0.0, coarse_factor=2,
                                         dt=DT,
                                         validation_method='mean_velocity',
                                         trust_1st_iter=1, validation_iter=1,
                                         tolerance=0.4, nb_iter_max=3,
                                         sig2noise_method='peak2peak')

    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=SCALING_FACTOR)

    tmp_fname = '.tmp_' + ''.join(
        random.choices(string.ascii_uppercase + string.digits, k=32))
    tools.save(x, y, u, v, mask, filename=tmp_fname)
    # scale: vector length ratio; width: line width of vector arrows
    tools.display_vector_field(tmp_fname, scale=scale, width=LINE_WIDTH)
    os.remove(tmp_fname)

    # plt.quiver(x, y, u3, v3, color='blue')
    if save_dir is not None:
        save_path = os.path.join(save_dir, f'{index:06}.pdf')
        print(save_path)
        plt.savefig(save_path)
def process(args):
    file_a, file_b, counter = args

    # read images into numpy arrays
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)
    print(counter + 1)

    # process image pair with the piv algorithm
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a, frame_b,
        window_size=32, overlap=16, dt=0.0015,
        search_area_size=32, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=32, overlap=16)

    u, v, mask1 = validation.sig2noise_val(u, v, sig2noise, threshold=1.0)
    u, v, mask2 = validation.global_val(u, v, (-2000, 2000), (-2000, 4000))
    u, v, mask3 = validation.local_median_val(u, v, 400, 400, size=2)
    # u, v, mask4 = validation.global_std(u, v, std_threshold=3)
    mask = mask1 | mask2 | mask3
    # u, v = filters.replace_outliers(u, v, method='localmean', max_iter=10, kernel_size=2)

    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, mask, save_file + '.dat')
def func(args):
    """A function to process each image pair."""
    # this line is REQUIRED for multiprocessing to work
    # always use it in your custom function
    file_a, file_b, counter = args
    # counter2 = str(counter2)

    #####################
    # Here goes your code
    #####################

    " read images into numpy arrays"
    frame_a = imread(os.path.join(settings.filepath_images, file_a))
    frame_b = imread(os.path.join(settings.filepath_images, file_b))

    # Miguel: I just had a quick look, and I do not understand the reason
    # for this step.
    # I propose to remove it.
    # frame_a = (frame_a*1024).astype(np.int32)
    # frame_b = (frame_b*1024).astype(np.int32)

    " crop to ROI"
    if settings.ROI == "full":
        frame_a = frame_a
        frame_b = frame_b
    else:
        frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]
        frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]

    if settings.invert is True:
        frame_a = invert(frame_a)
        frame_b = invert(frame_b)

    if settings.show_all_plots:
        fig, ax = plt.subplots(1, 1)
        ax.imshow(frame_a, cmap=plt.get_cmap('Reds'))
        ax.imshow(frame_b, cmap=plt.get_cmap('Blues'), alpha=.5)
        plt.show()

    if settings.dynamic_masking_method in ("edge", "intensity"):
        frame_a, mask_a = preprocess.dynamic_masking(
            frame_a,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold,
        )
        frame_b, mask_b = preprocess.dynamic_masking(
            frame_b,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold,
        )

    # "first pass"
    x, y, u, v, s2n = first_pass(frame_a, frame_b, settings)

    if settings.show_all_plots:
        plt.figure()
        plt.quiver(x, y, u, -v, color='b')
        # plt.gca().invert_yaxis()
        # plt.gca().set_aspect(1.)
        # plt.title('after first pass, invert')
        # plt.show()

    # " Image masking "
    if settings.image_mask:
        image_mask = np.logical_and(mask_a, mask_b)
        mask_coords = preprocess.mask_coordinates(image_mask)
        # mark those points on the grid of PIV inside the mask
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
        # mask the velocity
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        mask_coords = []
        u = np.ma.masked_array(u, mask=np.ma.nomask)
        v = np.ma.masked_array(v, mask=np.ma.nomask)

    if settings.validation_first_pass:
        u, v, mask = validation.typical_validation(u, v, s2n, settings)

    if settings.show_all_plots:
        # plt.figure()
        plt.quiver(x, y, u, -v, color='r')
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title('after first pass validation new, inverted')
        plt.show()

    # "filter to replace the values that were marked by the validation"
    if settings.num_iterations == 1 and settings.replace_vectors:
        # for multi-pass we cannot have holes in the data
        # after the first pass
        u, v = filters.replace_outliers(
            u, v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size,
        )
    # don't even check if it's true or false
    elif settings.num_iterations > 1:
        u, v = filters.replace_outliers(
            u, v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size,
        )

    # "adding masks to add the effect of all the validations"
    if settings.smoothn:
        u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
            u, s=settings.smoothn_p)
        v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
            v, s=settings.smoothn_p)

    if settings.image_mask:
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        u = np.ma.masked_array(u, np.ma.nomask)
        v = np.ma.masked_array(v, np.ma.nomask)

    if settings.show_all_plots:
        plt.figure()
        plt.quiver(x, y, u, -v)
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title('before multi pass, inverted')
        plt.show()

    if not isinstance(u, np.ma.MaskedArray):
        raise ValueError("Expected masked array")

    """ Multi pass """
    for i in range(1, settings.num_iterations):
        if not isinstance(u, np.ma.MaskedArray):
            raise ValueError("Expected masked array")

        x, y, u, v, s2n, mask = multipass_img_deform(
            frame_a, frame_b, i, x, y, u, v, settings,
            mask_coords=mask_coords)

        # If the smoothing is active, we do it at each pass
        # but not the last one
        if settings.smoothn is True and i < settings.num_iterations - 1:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

        if not isinstance(u, np.ma.MaskedArray):
            raise ValueError('not a masked array anymore')

        if hasattr(settings, 'image_mask') and settings.image_mask:
            grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
            u = np.ma.masked_array(u, mask=grid_mask)
            v = np.ma.masked_array(v, mask=grid_mask)
        else:
            u = np.ma.masked_array(u, np.ma.nomask)
            v = np.ma.masked_array(v, np.ma.nomask)

        if settings.show_all_plots:
            plt.figure()
            plt.quiver(x, y, u, -1 * v, color='r')
            plt.gca().set_aspect(1.)
            plt.gca().invert_yaxis()
            plt.title('end of the multipass, invert')
            plt.show()

    if settings.show_all_plots and settings.num_iterations > 1:
        plt.figure()
        plt.quiver(x, y, u, -v)
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title('after multi pass, before saving, inverted')
        plt.show()

    # we now use only 0s instead of the image
    # masked regions.
    # we could do Nan, not sure what is best
    u = u.filled(0.)
    v = v.filled(0.)
# "scales the results pixel-> meter" x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=settings.scaling_factor) if settings.image_mask: grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords) u = np.ma.masked_array(u, mask=grid_mask) v = np.ma.masked_array(v, mask=grid_mask) else: u = np.ma.masked_array(u, np.ma.nomask) v = np.ma.masked_array(v, np.ma.nomask) # before saving we conver to the "physically relevant" # right-hand coordinate system with 0,0 at the bottom left # x to the right, y upwards # and so u,v x, y, u, v = transform_coordinates(x, y, u, v) # import pdb; pdb.set_trace() # "save to a file" tools.save(x, y, u, v, mask, os.path.join(save_path, "field_A%03d.txt" % counter), delimiter="\t") # "some other stuff that one might want to use" if settings.show_plot or settings.save_plot: Name = os.path.join(save_path, "Image_A%03d.png" % counter) fig, _ = display_vector_field( os.path.join(save_path, "field_A%03d.txt" % counter), scale=settings.scale_plot, ) if settings.save_plot is True: fig.savefig(Name) if settings.show_plot is True: plt.show() print(f"Image Pair {counter + 1}") print(file_a.rsplit('/')[-1], file_b.rsplit('/')[-1])
def func(args):
    """A function to process each image pair."""
    # this line is REQUIRED for multiprocessing to work
    # always use it in your custom function
    file_a, file_b, counter = args
    # counter2 = str(counter2)

    #####################
    # Here goes your code
    #####################

    ' read images into numpy arrays'
    frame_a = tools.imread(os.path.join(settings.filepath_images, file_a))
    frame_b = tools.imread(os.path.join(settings.filepath_images, file_b))

    ## Miguel: I just had a quick look, and I do not understand the reason for this step.
    # I propose to remove it.
    # frame_a = (frame_a*1024).astype(np.int32)
    # frame_b = (frame_b*1024).astype(np.int32)

    ' crop to ROI'
    if settings.ROI == 'full':
        frame_a = frame_a
        frame_b = frame_b
    else:
        frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]
        frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]

    if settings.dynamic_masking_method in ('edge', 'intensity'):
        frame_a = preprocess.dynamic_masking(
            frame_a,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold)
        frame_b = preprocess.dynamic_masking(
            frame_b,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold)

    '''%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'''

    'first pass'
    x, y, u, v, sig2noise_ratio = first_pass(
        frame_a, frame_b,
        settings.windowsizes[0],
        settings.overlap[0],
        settings.iterations,
        correlation_method=settings.correlation_method,
        subpixel_method=settings.subpixel_method,
        do_sig2noise=settings.extract_sig2noise,
        sig2noise_method=settings.sig2noise_method,
        sig2noise_mask=settings.sig2noise_mask,
    )

    'validation using global limits and std and local median'
    '''MinMaxU : two elements tuple
        sets the limits of the u displacement component
        Used for validation.
    MinMaxV : two elements tuple
        sets the limits of the v displacement component
        Used for validation.
    std_threshold : float
        sets the threshold for the std validation
    median_threshold : float
        sets the threshold for the median validation
    filter_method : string
        the method used to replace the non-valid vectors
        Methods:
            'localmean',
            'disk',
            'distance',
    max_filter_iteration : int
        maximum of filter iterations to replace nans
    filter_kernel_size : int
        size of the kernel used for the filtering'''

    mask = np.full_like(x, False)
    if settings.validation_first_pass == True:
        u, v, mask_g = validation.global_val(u, v, settings.MinMax_U_disp,
                                             settings.MinMax_V_disp)
        u, v, mask_s = validation.global_std(
            u, v, std_threshold=settings.std_threshold)
        u, v, mask_m = validation.local_median_val(
            u, v,
            u_threshold=settings.median_threshold,
            v_threshold=settings.median_threshold,
            size=settings.median_size)
        if (settings.extract_sig2noise == True and settings.iterations == 1
                and settings.do_sig2noise_validation == True):
            u, v, mask_s2n = validation.sig2noise_val(
                u, v, sig2noise_ratio,
                threshold=settings.sig2noise_threshold)
            mask = mask + mask_g + mask_m + mask_s + mask_s2n
        else:
            mask = mask + mask_g + mask_m + mask_s

    'filter to replace the values that were marked by the validation'
    if settings.iterations > 1:
        u, v = filters.replace_outliers(
            u, v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size)
        'adding masks to add the effect of all the validations'
        if settings.smoothn == True:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)
    elif settings.iterations == 1 and settings.replace_vectors == True:
        u, v = filters.replace_outliers(
            u, v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size)
        'adding masks to add the effect of all the validations'
        if settings.smoothn == True:
            u, v = filters.replace_outliers(
                u, v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

    i = 1
    'all the following passes'
    for i in range(2, settings.iterations + 1):
        x, y, u, v, sig2noise_ratio, mask = multipass_img_deform(
            frame_a, frame_b,
            settings.windowsizes[i - 1], settings.overlap[i - 1],
            settings.iterations, i, x, y, u, v,
            correlation_method=settings.correlation_method,
            subpixel_method=settings.subpixel_method,
            do_sig2noise=settings.extract_sig2noise,
            sig2noise_method=settings.sig2noise_method,
            sig2noise_mask=settings.sig2noise_mask,
            MinMaxU=settings.MinMax_U_disp,
            MinMaxV=settings.MinMax_V_disp,
            std_threshold=settings.std_threshold,
            median_threshold=settings.median_threshold,
            median_size=settings.median_size,
            filter_method=settings.filter_method,
            max_filter_iteration=settings.max_filter_iteration,
            filter_kernel_size=settings.filter_kernel_size,
            interpolation_order=settings.interpolation_order)
        # If the smoothing is active, we do it at each pass
        if settings.smoothn == True:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

    '''%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'''
    if (settings.extract_sig2noise == True and i == settings.iterations
            and settings.iterations != 1
            and settings.do_sig2noise_validation == True):
        u, v, mask_s2n = validation.sig2noise_val(
            u, v, sig2noise_ratio,
            threshold=settings.sig2noise_threshold)
        mask = mask + mask_s2n

    if settings.replace_vectors == True:
        u, v = filters.replace_outliers(
            u, v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size)

    'pixel/frame->pixel/sec'
    u = u / settings.dt
    v = v / settings.dt

    'scales the results pixel-> meter'
    x, y, u, v = scaling.uniform(x, y, u, v,
                                 scaling_factor=settings.scaling_factor)

    'save to a file'
    save(x, y, u, v, sig2noise_ratio, mask,
         os.path.join(save_path, 'field_A%03d.txt' % counter),
         delimiter='\t')

    'some messages to check if it is still alive'
    'some other stuff that one might want to use'
    if settings.show_plot == True or settings.save_plot == True:
        plt.close('all')
        plt.ioff()
        Name = os.path.join(save_path, 'Image_A%03d.png' % counter)
        display_vector_field(os.path.join(save_path,
                                          'field_A%03d.txt' % counter),
                             scale=settings.scale_plot)
        if settings.save_plot == True:
            plt.savefig(Name)
        if settings.show_plot == True:
            plt.show()

    print('Image Pair ' + str(counter + 1))
from openpiv import tools, pyprocess, scaling, validation, filters
import numpy as np
import os

# we can run it from any folder
path = os.path.dirname(os.path.abspath(__file__))

frame_a = tools.imread(os.path.join(path, '../data/test1/exp1_001_a.bmp'))
frame_b = tools.imread(os.path.join(path, '../data/test1/exp1_001_b.bmp'))

frame_a = (frame_a * 1024).astype(np.int32)
frame_b = (frame_b * 1024).astype(np.int32)

u, v, sig2noise = pyprocess.extended_search_area_piv(
    frame_a, frame_b,
    window_size=32, overlap=16, dt=0.02,
    search_area_size=64, sig2noise_method='peak2peak')
print(u, v, sig2noise)

x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 search_area_size=64, overlap=16)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v, mask = validation.global_val(u, v, (-1000, 2000), (-1000, 1000))
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

tools.save(x, y, u, v, mask, '../data/test1/test_data.vec')
tools.display_vector_field('../data/test1/test_data.vec',
                           scale=75, width=0.0035)
                                   overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1, v1, method='localmean',
                                      max_iter=5, kernel_size=5)
    tools.save(x, y, u2, v2, mask,
               '../muscle10fpsbotleft_results/' + str(i) + '.txt')

#%%
element_information = Parallel(n_jobs=6)(
    delayed(process_node)(node) for node in range(N - DeltaFrame))

#%% processing parameter
'''
winsize = 50     # pixels
searchsize = 50  # pixels
overlap = 25     # pixels
dt = DeltaFrame * 1. / fps  # sec

u0, v0, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=winsize, overlap=overlap, dt=dt,
    search_area_size=searchsize, sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=winsize, overlap=overlap)
u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
u2, v2 = filters.replace_outliers(u1, v1, method='localmean',
                                  max_iter=5, kernel_size=10)
# x, y, u, v = scaling.uniform(x, y, u2, v2, scaling_factor=96.52)
tools.save(x, y, u2, v2, mask, '../muscle10fpsbotleft_results/test.txt')
tools.display_vector_field('../muscle10fpsbotleft_results/test.txt',
                           scale=10000, width=0.0025)
'''

#%%
'''
fileNameList = fileNameList[:1991]
import os

# we can run it from any folder
path = os.path.dirname(os.path.abspath(__file__))

frame_a = tools.imread(os.path.join(path, '../test1/exp1_001_a.bmp'))
frame_b = tools.imread(os.path.join(path, '../test1/exp1_001_b.bmp'))

frame_a = (frame_a * 1024).astype(np.int32)
frame_b = (frame_b * 1024).astype(np.int32)

u, v, sig2noise = pyprocess.extended_search_area_piv(
    frame_a, frame_b,
    window_size=32, overlap=16, dt=0.02,
    search_area_size=64, sig2noise_method='peak2peak')
print(u, v, sig2noise)

x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 search_area_size=64, overlap=16)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v, mask = validation.global_val(u, v, (-1000, 2000), (-1000, 1000))
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

tools.save(x, y, u, v, mask, 'test1.vec')
tools.display_vector_field('test1.vec', scale=75, width=0.0035)
    window_size=24, overlap=12, dt=0.02,
    search_area_size=64, sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24, overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=2.5)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001.txt')
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)

u, v, s2n = pyprocess.piv(frame_a, frame_b, corr_method='fft',
                          window_size=24, overlap=12, dt=0.02,
                          sig2noise_method='peak2peak')
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=24, overlap=12)
u, v, mask = validation.sig2noise_val(u, v, s2n, threshold=2.5)
u, v = filters.replace_outliers(u, v,
def quick_piv(self, search_dict, index_a=100, index_b=101, folder=None):
    self.show_piv_param()
    ns = Namespace(**self.piv_param)

    if folder is None:
        img_a, img_b = self.read_two_images(search_dict,
                                            index_a=index_a,
                                            index_b=index_b)
        location_path = [x['path'] for x in self.piv_dict_list
                         if search_dict.items() <= x.items()]
        results_path = os.path.join(self.results_path, *location_path)
        try:
            os.makedirs(results_path)
        except FileExistsError:
            pass
    else:
        try:
            file_a_path = os.path.join(self.path, folder,
                                       'frame_%06d.tiff' % index_a)
            file_b_path = os.path.join(self.path, folder,
                                       'frame_%06d.tiff' % index_b)
            img_a = np.array(Image.open(file_a_path))
            img_b = np.array(Image.open(file_b_path))
        except:
            return None

    # crop
    img_a = img_a[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]
    img_b = img_b[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        img_a.astype(np.int32), img_b.astype(np.int32),
        window_size=ns.winsize, overlap=ns.overlap, dt=ns.dt,
        search_area_size=ns.searchsize, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=img_a.shape,
                                     search_area_size=ns.searchsize,
                                     overlap=ns.overlap)
    # scaling_factor: number of pixels per unit distance
    x, y, u0, v0 = scaling.uniform(x, y, u0, v0,
                                   scaling_factor=ns.pixel_density)

    u0, v0, mask = validation.global_val(
        u0, v0,
        (ns.u_lower_bound, ns.u_upper_bound),
        (ns.v_lower_bound, ns.v_upper_bound))
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.01)
    u3, v3 = filters.replace_outliers(u1, v1, method='localmean',
                                      max_iter=500, kernel_size=3)

    # save in the simple ASCII table format
    tools.save(x, y, u3, v3, sig2noise, mask,
               os.path.join(results_path, ns.text_export_name))

    if ns.image_check == True:
        fig, ax = plt.subplots(2, 1, figsize=(24, 12))
        ax[0].imshow(img_a)
        ax[1].imshow(img_b)

    io.imwrite(os.path.join(results_path, ns.figure_export_name), img_a)

    if ns.show_result == True:
        fig, ax = plt.subplots(figsize=(24, 12))
        tools.display_vector_field(
            os.path.join(results_path, ns.text_export_name),
            ax=ax,
            scaling_factor=ns.pixel_density,
            scale=ns.scale_factor,   # scale defines here the arrow length
            width=ns.arrow_width,    # width is the thickness of the arrow
            on_img=True,             # overlay on the image
            image_name=os.path.join(results_path, ns.figure_export_name))
        fig.savefig(os.path.join(results_path, ns.figure_export_name))

    if ns.show_vertical_profiles:
        field_shape = pyprocess.get_field_shape(
            image_size=img_a.shape,
            search_area_size=ns.searchsize,
            overlap=ns.overlap)
        vertical_profiles(ns.text_export_name, field_shape)

    print('Mean of u: %.3f' % np.mean(u3))
    print('Std of u: %.3f' % np.std(u3))
    print('Mean of v: %.3f' % np.mean(v3))
    print('Std of v: %.3f' % np.std(v3))

    output = np.array([np.mean(u3), np.std(u3), np.mean(v3), np.std(v3)])
    # if np.absolute(np.mean(v3)) < 50:
    #     output = self.quick_piv(search_dict, index_a=index_a + 1, index_b=index_b + 1)

    return x, y, u3, v3
import numpy as np

frame_a = tools.imread('exp1_001_a.bmp')
frame_b = tools.imread('exp1_001_b.bmp')

u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=24, overlap=12, dt=0.02,
    search_area_size=64, sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24, overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001.txt')
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)

u1, v1, sig2noise = pyprocess.piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=24, overlap=12, dt=0.02,
    search_size=64, sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24, overlap=12)
u1, v1, mask = validation.sig2noise_val(u1, v1, sig2noise, threshold=1.3)
u1, v1 = filters.replace_outliers(u1, v1, method='localmean',
                                  max_iter=10, kernel_size=2)
import openpiv.gpu_process
from importlib import reload  # needed for reload() in Python 3
reload(openpiv.gpu_process)

frame_a = tools.imread('exp1_001_a.bmp')
frame_b = tools.imread('exp1_001_b.bmp')

fig, ax = plt.subplots(1, 2, figsize=(10, 8))
ax[0].imshow(frame_a, cmap=plt.cm.gray)
ax[1].imshow(frame_b, cmap=plt.cm.gray)

# gpu code parameters
min_window_size = 32
overlap_ratio = 0.5
coarse_factor = 1
nb_iter_max = 2

# The first run is slow as the GPU modules need to compile.
# Once they are compiled, they stay compiled, so every run
# after the first one will be fast.
x, y, u, v, mask = openpiv.gpu_process.WiDIM(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    np.ones(frame_a.shape, dtype=np.int32),
    min_window_size, overlap_ratio, coarse_factor, dt,
    nb_iter_max=nb_iter_max)

tools.save(x, y, u, v, np.zeros_like(u), 'exp1_001_gpu.txt')
save_path = '.'
counter = 0

# "pixel/frame->pixel/sec"
u = u / settings.dt
v = v / settings.dt

# "scales the results pixel-> meter"
x, y, u, v = scaling.uniform(x, y, u, v,
                             scaling_factor=settings.scaling_factor)

# "save to a file"
tools.save(
    x, y, u, v, sig2noise_ratio, outliers_mask,
    os.path.join(save_path, "field_A%03d.txt" % counter),
    delimiter="\t",
)

# "some other stuff that one might want to use"
settings.show_plot = True
settings.save_plot = True

if settings.show_plot is True or settings.save_plot is True:
    plt.close("all")
    plt.ioff()
    filename = os.path.join(save_path, "Image_A%03d.png" % counter)
    tools.display_vector_field(
        os.path.join(save_path, "field_A%03d.txt" % counter),
    window_size=24, overlap=12, dt=0.02,
    search_area_size=64, sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24, overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=2.5)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001_extended.txt')
tools.display_vector_field('exp1_001_extended.txt', scale=100, width=0.0025)

# %%
# %%time
u, v, sig2noise = pyprocess.extended_search_area_piv(
    frame_a, frame_b, corr_method='fft',
    window_size=24, overlap=12, dt=0.02,
    sig2noise_method='peak2peak')
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=24, overlap=12)
sys.path.append('/Users/alex/Documents/OpenPIV/alexlib/openpiv-python')

from openpiv import tools, validation, process, filters, scaling, pyprocess
import numpy as np

frame_a = tools.imread('exp1_001_a.bmp')
frame_b = tools.imread('exp1_001_b.bmp')

u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=24, overlap=12, dt=0.02,
    search_area_size=64, sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24, overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=2.5)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001.txt')
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)

u, v, s2n = pyprocess.piv(frame_a, frame_b, corr_method='fft',
                          window_size=24, overlap=12, dt=0.02,
                          sig2noise_method='peak2peak')
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=24, overlap=12)
u, v, mask = validation.sig2noise_val(u, v, s2n, threshold=2.5)
# kernel_size must be an integer; the original 2.5 would fail in replace_nans
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_002.txt')
tools.display_vector_field('exp1_002.txt', scale=100, width=0.0025)
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 search_area_size=search_area_size,
                                 overlap=overlap)
u, v, mask = validation.global_val(u, v, (-300., 300.), (-300., 300.))
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.1)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=3, kernel_size=3)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

# save to a file
tools.save(x, y, u, v, mask, '../data/test4/test.txt',
           fmt='%9.6f', delimiter='\t')
tools.display_vector_field('../data/test4/test.txt', scale=50, width=0.002)

# masking using a suboptimal choice of methods or parameters:
masked_a, _ = preprocess.dynamic_masking(frame_a, method='edges',
                                         filter_size=7, threshold=0.005)
masked_b, _ = preprocess.dynamic_masking(frame_b, method='intensity',
                                         filter_size=3, threshold=0.0)
plt.imshow(np.c_[masked_a, masked_b], cmap='gray')
# Read the images from the file and run them through the PIV function.
# Save the vectors in a .txt file.
with tqdm(total=N) as pbar:  # use the progress bar
    for n in range(N):  # loop through all images
        image_0 = tools.imread(
            'C:/Users/SSchurer/Documents/TU_Delft/Thesis/LSPIV/Edited/After rain/Images_OSGGC_10sec/'
            + str(images[n]))
        image_1 = tools.imread(
            'C:/Users/SSchurer/Documents/TU_Delft/Thesis/LSPIV/Edited/After rain/Images_OSGGC_10sec/'
            + str(images[n + 1]))

        x, y, u, v, mask = PIV(image_0, image_1, winsize, searchsize,
                               overlap, frame_rate, scaling_factor)

        tools.save(
            x, y, u, v, mask,
            'C:/Users/SSchurer/Documents/TU_Delft/Thesis/LSPIV/Edited/After rain/OSGGC/Output'
            + str(n) + '.txt')
        tools.save(
            x, y, u, u, v,
            'C:/Users/SSchurer/Documents/TU_Delft/Thesis/LSPIV/Edited/After rain/OSGGC/STDV/stdv'
            + str(n) + '.txt')

        # Turn the NaN cells into 0 cells for the summation.
        # Otherwise cells with one missing value end up as NaN.
        num_u = np.nan_to_num(u)
        num_v = np.nan_to_num(v)

        # sum the vector values of all images
        u_sum += num_u
        v_sum += num_v

        # returns a grid with 0 and 1. 1 for when the cell has a number
def run_piv(
        frame_a,
        frame_b,
        winsize=16,         # pixels, interrogation window size in frame A
        searchsize=20,      # pixels, search in image B
        overlap=8,          # pixels, 50% overlap
        dt=0.0001,          # sec, time interval between pulses
        image_check=False,
        show_vertical_profiles=False,
        figure_export_name='_results.png',
        text_export_name="_results.txt",
        scale_factor=1,
        pixel_density=36.74,
        arrow_width=0.001,
        show_result=True,
        u_bounds=(-100, 100),
        v_bounds=(-100, 100)):

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=winsize, overlap=overlap, dt=dt,
        search_area_size=searchsize, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)
    # scaling_factor: number of pixels per unit distance
    x, y, u0, v0 = scaling.uniform(x, y, u0, v0,
                                   scaling_factor=pixel_density)

    u0, v0, mask = validation.global_val(u0, v0, u_bounds, v_bounds)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)
    u3, v3 = filters.replace_outliers(u1, v1, method='localmean',
                                      max_iter=10, kernel_size=3)

    # save in the simple ASCII table format
    if np.std(u3) < 480:
        tools.save(x, y, u3, v3, sig2noise, mask, text_export_name)

    if image_check == True:
        fig, ax = plt.subplots(2, 1, figsize=(24, 12))
        ax[0].imshow(frame_a)
        ax[1].imshow(frame_b)

    io.imwrite(figure_export_name, frame_a)

    if show_result == True:
        fig, ax = plt.subplots(figsize=(24, 12))
        tools.display_vector_field(
            text_export_name,
            ax=ax,
            scaling_factor=pixel_density,
            scale=scale_factor,   # scale defines here the arrow length
            width=arrow_width,    # width is the thickness of the arrow
            on_img=True,          # overlay on the image
            image_name=figure_export_name)
        fig.savefig(figure_export_name)

    if show_vertical_profiles:
        field_shape = pyprocess.get_field_shape(image_size=frame_a.shape,
                                                search_area_size=searchsize,
                                                overlap=overlap)
        vertical_profiles(text_export_name, field_shape)

    print('Std of u3: %.3f' % np.std(u3))
    print('Mean of u3: %.3f' % np.mean(u3))

    return np.std(u3)
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=winsize, overlap=overlap)
u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
u2, v2 = filters.replace_outliers(u1, v1, method='localmean',
                                  max_iter=5, kernel_size=5)
u3, v3, mask1 = validation.local_median_val(u2, v2, 3, 3, 1)
u4, v4 = filters.replace_outliers(u3, v3, method='localmean',
                                  max_iter=5, kernel_size=5)
tools.save(x, y, u4, v4, mask1, '../testResult/test.txt')
tools.display_vector_field('../testResult/test.txt', scale=500, width=0.0025)

#%% define node
def process_node(i):
    DeltaFrame = 300
    winsize = 12      # pixels
    searchsize = 12   # pixels
    overlap = 6       # pixels
    dt = DeltaFrame * 1. / fps  # sec

    frame_a = tools.imread(fileNameList[i])
    frame_b = tools.imread(fileNameList[i + DeltaFrame])

    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
    overlap=8, dt=.1, sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=32, overlap=8)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'Y4-S3_Camera000398_a.txt')

# %%
# Use Python version, pyprocess:
u, v, sig2noise = pyprocess.extended_search_area_piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=32, overlap=8, dt=.1,
    sig2noise_method='peak2peak')
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=32, overlap=8)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
scaling_factor = 100

# we can run it from any folder
path = os.path.dirname(os.path.abspath(__file__))

frame_a = tools.imread(os.path.join(path, '../test2/2image_00.tif'))
frame_b = tools.imread(os.path.join(path, '../test2/2image_01.tif'))

# no background removal will be performed so 'mark' is initialized to 1 everywhere
mark = np.zeros(frame_a.shape, dtype=np.int32)
for I in range(mark.shape[0]):
    for J in range(mark.shape[1]):
        mark[I, J] = 1

# main algorithm
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    x, y, u, v, mask = process.WiDIM(frame_a.astype(np.int32),
                                     frame_b.astype(np.int32), mark,
                                     min_window_size=16,
                                     overlap_ratio=0.0,
                                     coarse_factor=2,
                                     dt=0.02,
                                     validation_method='mean_velocity',
                                     trust_1st_iter=1,
                                     validation_iter=1,
                                     tolerance=0.7,
                                     nb_iter_max=3,
                                     sig2noise_method='peak2peak')

# display results
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=scaling_factor)
tools.save(x, y, u, v, mask, '2image_00.txt')
tools.display_vector_field('2image_00.txt', on_img=True,
                           image_name=os.path.join(path, '../test2/2image_00.tif'),
                           window_size=16, scaling_factor=scaling_factor,
                           scale=200, width=0.001)
# further validation can be performed to eliminate the few remaining wrong vectors
ax[0].imshow(frame_a, cmap=plt.cm.gray)
ax[1].imshow(frame_b, cmap=plt.cm.gray)

# %%
winsize = 24     # pixels
searchsize = 64  # pixels, search in image B
overlap = 12     # pixels
dt = 0.02        # sec

u0, v0, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=winsize, overlap=overlap, dt=dt,
    search_area_size=searchsize, sig2noise_method='peak2peak')

# %%
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=winsize, overlap=overlap)

# %%
u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)

# %%
u2, v2 = filters.replace_outliers(u1, v1, method='localmean',
                                  max_iter=10, kernel_size=2)

# %%
x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor=96.52)

# %%
tools.save(x, y, u3, v3, mask, 'exp1_001.txt')

# %%
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24, overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001.txt')
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)

u1, v1, sig2noise = pyprocess.extended_search_area_piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=24, overlap=12, dt=0.02,
    search_area_size=64, sig2noise_method='peak2peak')
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=24, overlap=12)