def test_display_vector_field(file_a=_file_a, file_b=_file_b, test_file=_test_file):
    """Regression test: run PIV on a pair of frames, plot the vector field
    over the first image, and compare the saved figure against a stored
    reference PNG."""
    frame_a = imread(file_a)
    frame_b = imread(file_b)

    window_size = 32
    overlap = 16
    search_area_size = 40

    u, v, s2n = extended_search_area_piv(
        frame_a, frame_b, window_size,
        search_area_size=search_area_size,
        overlap=overlap,
        correlation_method='circular',
        normalized_correlation=False,
    )
    x, y = get_coordinates(frame_a.shape,
                           search_area_size=search_area_size,
                           overlap=overlap)
    x, y, u, v = transform_coordinates(x, y, u, v)

    # flag one vector as invalid so the invalid-vector plotting path is exercised
    mask = np.zeros_like(x)
    mask[-1, 1] = 1  # test of invalid vector plot
    save(x, y, u, v, mask, 'tmp.txt')

    fig, ax = plt.subplots(figsize=(6, 6))
    display_vector_field('tmp.txt', on_img=True, image_name=file_a, ax=ax)
    decorators.remove_ticks_and_titles(fig)
    fig.savefig('./tmp.png')

    # compare_images returns None when the images match within tolerance
    res = compare.compare_images('./tmp.png', test_file, 0.001)
    assert res is None
def PIV1(I0, I1, winsize, overlap, dt, smooth=True):
    """Single-pass PIV of an image pair, returned as a tidy DataFrame.

    Parameters
    ----------
    I0, I1 : 2d array-like
        The two frames; cast to int32 before correlation.
    winsize : int
        Interrogation window size (also used as search area size) in pixels.
    overlap : int
        Window overlap in pixels.
    dt : float
        Time between frames.
    smooth : bool, optional
        When True (default), smooth the displacement fields with ``smoothn``.

    Returns
    -------
    pd.DataFrame
        Columns ``x, y, u, v`` with one row per interrogation window.
    """
    u0, v0 = pyprocess.extended_search_area_piv(
        I0.astype(np.int32), I1.astype(np.int32),
        window_size=winsize, overlap=overlap, dt=dt,
        search_area_size=winsize)
    x, y = pyprocess.get_coordinates(image_size=I0.shape,
                                     search_area_size=winsize,
                                     window_size=winsize,
                                     overlap=overlap)
    # fix: `if smooth == True` replaced by truthiness test; the two
    # near-identical DataFrame constructions are merged into one.
    if smooth:
        u, v = smoothn(u0)[0], smoothn(v0)[0]
    else:
        u, v = u0, v0
    return pd.DataFrame(
        data=np.array([x.flatten(), y.flatten(), u.flatten(), v.flatten()]).T,
        columns=['x', 'y', 'u', 'v'])
def func(args):
    """Process one image pair with the extended-search-area PIV algorithm.

    ``args`` must be a ``(file_a, file_b, counter)`` tuple -- this packed
    single-argument form is REQUIRED for multiprocessing dispatch; always
    use it in your custom function.
    """
    file_a, file_b, counter = args

    # read the pair and scale intensities to integer counts
    frame_a = (tools.imread(os.path.join(path, file_a)) * 1024).astype(np.int32)
    frame_b = (tools.imread(os.path.join(path, file_b)) * 1024).astype(np.int32)

    # cross-correlate with an extended search area
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a, frame_b,
        window_size=64, overlap=32, dt=0.02,
        search_area_size=128, sig2noise_method='peak2peak')

    # drop low-confidence vectors, then fill the resulting gaps
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.5)
    u, v = filters.replace_outliers(u, v, method='localmean',
                                    max_iter=10, kernel_size=2)

    # coordinates of the interrogation window centres
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=128, overlap=32)

    # persist and display the result
    out_name = 'test2_%03d.txt' % counter
    tools.save(x, y, u, v, mask, out_name)
    tools.display_vector_field(out_name)
def PIV(I0, I1, winsize, overlap, dt):
    """ Normal PIV: correlate, validate by s2n, fill outliers, median-smooth.

    Returns the window-centre coordinates and the filtered displacements.
    """
    # cross-correlation on integer-cast frames
    u_raw, v_raw, s2n = pyprocess.extended_search_area_piv(
        I0.astype(np.int32), I1.astype(np.int32),
        window_size=winsize, overlap=overlap, dt=dt,
        search_area_size=winsize, sig2noise_method='peak2peak')

    # get x, y
    x, y = pyprocess.get_coordinates(image_size=I0.shape,
                                     search_area_size=winsize,
                                     overlap=overlap,
                                     window_size=winsize)

    # reject vectors whose signal-to-noise ratio is too low
    u_val, v_val, mask_s2n = validation.sig2noise_val(
        u_raw, v_raw, s2n, threshold=1.05)

    # replace_outliers: fill the holes left by the validation step
    u_fill, v_fill = filters.replace_outliers(
        u_val, v_val, method='localmean', max_iter=3, kernel_size=3)

    # median filter smoothing of the final field
    return x, y, medfilt2d(u_fill, 3), medfilt2d(v_fill, 3)
def run_piv(frame_a, frame_b,):
    """Run one PIV pass and append the scaled field to a per-experiment table.

    Returns the turbulence intensity of the resulting velocity field.
    Relies on the module-level ``exp_string`` for the output file name.
    """
    win_px = 64       # pixels, interrogation window size in frame A
    search_px = 68    # pixels, search in image B
    overlap_px = 32   # pixels, 50% overlap
    pulse_dt = 0.0005  # sec, time interval between pulses

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=win_px, overlap=overlap_px, dt=pulse_dt,
        search_area_size=search_px, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=search_px,
                                     window_size=win_px,
                                     overlap=overlap_px)

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)
    u2, v2 = filters.replace_outliers(u1, v1, method='localmean',
                                      max_iter=10, kernel_size=3)
    # 41.22 microns/pixel
    x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor=41.22)

    # turbulence intensity = rms of the fluctuations / magnitude of the mean
    mean_u, mean_v = np.mean(u3), np.mean(v3)
    fluct_u, fluct_v = u3 - mean_u, v3 - mean_v
    u_prime = np.mean(np.sqrt(0.5 * (fluct_u**2 + fluct_v**2)))
    u_avg = np.mean(np.sqrt(0.5 * (mean_u**2 + mean_v**2)))
    turbulence_intensity = u_prime / u_avg

    # append the field in the simple ASCII table format
    fname = "./Tables/" + exp_string + ".txt"
    out = np.vstack([m.ravel() for m in [x, y, u3, v3]])
    with open(fname, "ab") as f:
        f.write(b"\n")
        np.savetxt(f, out.T)

    return turbulence_intensity
def piv_example():
    """
    PIV example uses examples/test5 vortex PIV data to show the main
    principles.

    piv(im1,im2) will create a tmp.vec file with the vector filed in pix/dt
    (dt=1) from two images, im1,im2 provided as full path filenames
    (TIF is preferable)
    """
    im1 = pkg.resource_filename("openpiv", "examples/test5/frame_a.tif")
    im2 = pkg.resource_filename("openpiv", "examples/test5/frame_b.tif")

    frame_a = tools.imread(im1)
    frame_b = tools.imread(im2)
    # blank out a corner strip of frame A (synthetic marker)
    frame_a[0:32, 512 - 32:] = 255

    # flip between the two frames as a short animation
    images = [frame_a, frame_b]
    fig, ax = plt.subplots()
    ims = []
    for i in range(2):
        artist = ax.imshow(images[i % 2], animated=True, cmap=plt.cm.gray)
        ims.append([artist])
    _ = animation.ArtistAnimation(fig, ims, interval=500, blit=False,
                                  repeat_delay=0)
    plt.show()

    vel = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=32, search_area_size=64, overlap=8)
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=64, overlap=8)

    # left: vectors over the image; right: vectors on a flipped y grid
    fig, ax = plt.subplots(1, 2, figsize=(11, 8))
    ax[0].imshow(frame_a, cmap=plt.get_cmap("gray"), alpha=0.8)
    ax[0].quiver(x, y, vel[0], -vel[1], scale=50, color="r")
    ax[1].quiver(x, y[::-1, :], vel[0], -vel[1], scale=50, color="b")
    ax[1].set_aspect(1)
    plt.show()

    return x, y, vel[0], vel[1]
def process(args, bga, bgb, reflection):
    """Run masked PIV on one image pair and save the smoothed field.

    ``args`` is a ``(file_a, file_b, counter)`` tuple; ``bga``/``bgb`` are
    background images subtracted from each frame; ``reflection`` marks
    (value 255) pixels to be zeroed in both frames.
    """
    file_a, file_b, counter = args

    # read images into numpy arrays
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)

    # remove background and reflections
    frame_a = frame_a - bga
    frame_b = frame_b - bgb
    frame_a[reflection == 255] = 0
    frame_b[reflection == 255] = 0

    # static mask: zero out the wall regions via a polygon
    yp = [580, 435, 0, 0, 580, 580, 0, 0, 435, 580]
    xp = [570, 570, 680, 780, 780, 0, 0, 105, 230, 230]
    pnts = draw.polygon(yp, xp, frame_a.shape)
    frame_a[pnts] = 0
    frame_b[pnts] = 0

    # main piv processing
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a, frame_b,
        window_size=48, overlap=16, dt=0.001094,
        search_area_size=64, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=48, overlap=16)

    # validate, fill, then smooth the field
    u, v, mask = validation.local_median_val(u, v, 2000, 2000, size=2)
    u, v = filters.replace_outliers(u, v, method='localmean',
                                    max_iter=10, kernel_size=2)
    u, *_ = smoothn(u, s=1.0)
    v, *_ = smoothn(v, s=1.0)

    # save the results next to the input
    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, mask, save_file + '.dat')
def two_images(image_1, image_2):
    """Decode two base64-encoded BMP images, run PIV, and return the result
    table base64-encoded as a UTF-8 string.

    Parameters
    ----------
    image_1, image_2 : base64-encoded bytes/str
        The two frames, written to ``image_1.bmp`` / ``image_2.bmp``.

    Returns
    -------
    str
        Base64 (newline-wrapped) encoding of the saved result table.
    """
    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))
    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a = tools.imread('image_1.bmp')
    frame_b = tools.imread('image_2.bmp')

    winsize = 32  # pixels
    searchsize = 64  # pixels, search in image B
    overlap = 12  # pixels
    dt = 0.02  # sec

    # fix: the original called pyprocess.piv(..., search_size=...), which is
    # not part of the pyprocess API; extended_search_area_piv with
    # search_area_size is the supported entry point (as used elsewhere here).
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=winsize, overlap=overlap, dt=dt,
        search_area_size=searchsize, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=searchsize, overlap=overlap)

    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u, v, method='localmean',
                                    max_iter=10, kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

    file_name = 'result.txt'
    if os.path.isfile(file_name):
        os.remove(file_name)
    tools.save(x, y, u, v, np.zeros_like(u),
               file_name)  # no masking, all values are valid

    with open(file_name, "rb") as resultFile:
        file_reader = resultFile.read()
    # fix: base64.encodestring was removed in Python 3.9; encodebytes is the
    # drop-in replacement (same newline-wrapped output).
    image_encode = base64.encodebytes(file_reader)
    base64_string = str(image_encode, 'utf-8')
    return base64_string
def PIV(image_0, image_1, winsize, searchsize, overlap, frame_rate, scaling_factor):
    """Run a single PIV pass on two frames and return the scaled field.

    Parameters
    ----------
    image_0, image_1 : 2d np.ndarray
        The two frames.
    winsize, searchsize, overlap : int
        Interrogation window, search area, and overlap sizes (pixels).
    frame_rate : float
        Acquisition frame rate [fps]; the inter-frame time is its inverse.
    scaling_factor : float
        Pixels per unit length, passed to ``scaling.uniform``.

    Returns
    -------
    x, y, u, v, mask
        Coordinates, displacements, and the invalid-vector mask.
    """
    frame_0 = image_0  # [0:600, :]
    frame_1 = image_1  # [0:600, :]

    # fix: `dt` was previously an undefined (module-global) name here even
    # though frame_rate was passed in unused -- derive it explicitly.
    dt = 1.0 / frame_rate

    # Processing the images with interrogation area and search area /
    # cross correlation algorithm
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_0, frame_1,
        window_size=winsize, overlap=overlap, dt=dt,
        search_area_size=searchsize, sig2noise_method='peak2peak')

    # Compute the coordinates of the centers of the interrogation windows
    x, y = pyprocess.get_coordinates(image_size=frame_0.shape,
                                     window_size=winsize, overlap=overlap)

    # Set to NaN all vectors for which the signal to noise ratio is below
    # threshold; mask flags the invalidated vectors.
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.5)

    # Remove outliers deviating by more than twice the standard deviation
    u, v, mask = remove_outliers(u, v, mask)

    # Apply a uniform scaling to the flow field to get dimensional units
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=scaling_factor)

    return x, y, u, v, mask
def ProcessPIV(args, bga, bgb, reflection, stg):
    """Run PIV on one image pair using the settings dict ``stg``.

    ``args`` is ``(file_a, file_b, counter)``. ``stg`` keys used here:
    'WS' window size, 'OL' overlap, 'DT' dt, 'SA' search area,
    'BVR' bad-vector removal flag, 'MF' median-filter thresholds,
    'GF' global-filter thresholds, 'SC' scaling factor.
    """
    # read images into numpy arrays
    file_a, file_b, counter = args
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)

    # remove background and reflections when a background pair is supplied
    if bgb is not None:
        frame_a = frame_a - bga
        frame_b = frame_b - bgb
        frame_a[reflection == 255] = 0
        frame_b[reflection == 255] = 0

    # main piv processing
    u, v, s2n = pyprocess.extended_search_area_piv(
        frame_a, frame_b,
        window_size=stg['WS'], overlap=stg['OL'], dt=stg['DT'],
        search_area_size=stg['SA'], sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=stg['WS'],
                                     overlap=stg['OL'])

    # optional bad-vector removal, filling and smoothing
    if stg['BVR'] == 'on':
        u, v, mask = validation.local_median_val(
            u, v, stg['MF'][0], stg['MF'][1], size=2)
        u, v, mask = validation.global_val(
            u, v, u_thresholds=stg['GF'][0], v_thresholds=stg['GF'][1])
        u, v = filters.replace_outliers(u, v, method='localmean',
                                        max_iter=10, kernel_size=2)
        u, *_ = smoothn(u, s=0.5)
        v, *_ = smoothn(v, s=0.5)

    x, y, u, v = scaling.uniform(x, y, u, v, stg['SC'])

    # save the results next to the input
    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, s2n, save_file + '.dat')
def two_images(image_1, image_2):
    """Write ``image_1`` to disk, run PIV on a fixed demo image pair, and
    return the flattened result table as a whitespace-separated string.

    NOTE(review): ``image_2`` is never used and the frames actually
    processed are the hard-coded ``exp1_001_{a,b}.bmp`` files -- confirm
    this is intentional. The output filename ``'teting1.bmp'`` looks like a
    typo but is kept byte-identical since it is runtime behavior.
    """
    local_dir = os.path.dirname(os.path.realpath(__file__))

    # fix: use a context manager so the handle is closed even on error
    # (was manual open/write/close)
    with open('teting1.bmp', 'w+b') as newFile_1:
        newFile_1.write(bytes(image_1))

    frame_a = tools.imread(local_dir + '/exp1_001_a.bmp')
    frame_b = tools.imread(local_dir + '/exp1_001_b.bmp')

    fig, ax = plt.subplots(1, 2, figsize=(10, 8))
    ax[0].imshow(frame_a, cmap=plt.cm.gray)
    ax[1].imshow(frame_b, cmap=plt.cm.gray)

    winsize = 32  # pixels
    searchsize = 64  # pixels, search in image B
    overlap = 12  # pixels
    dt = 0.02  # sec

    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=winsize, overlap=overlap, dt=dt,
        search_area_size=searchsize, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=searchsize, overlap=overlap)

    file_name = 'result.txt'
    tools.save(x, y, u, v, np.zeros_like(u),
               file_name)  # no masking, all values are valid

    with open(file_name, 'r') as result_file:
        data = result_file.read().replace('\n', '').replace('\t', ' ')
    return data
def doPIV(frame_a, frame_b, dT=1.0, win_size=64, overlap=32, searchArea=64,
          apply_clahe=False):
    """Run a single PIV pass, optionally equalizing contrast with CLAHE.

    Parameters
    ----------
    frame_a, frame_b : np.ndarray
        Input frames; color frames (3rd channel present) are converted
        to grayscale first.
    dT : float
        Time between frames.
    win_size, overlap, searchArea : int
        Interrogation window, overlap, and search area sizes (pixels).
    apply_clahe : bool
        When True, apply CLAHE contrast equalization to both frames.

    Returns
    -------
    x, y, u, v, sig2noise
    """
    # Convert to grayscale if the frames are color. Unpacking np.shape of a
    # 2-D (already grayscale) array raises ValueError; that is the expected
    # fall-through path.
    try:
        imH, imW, channels = np.shape(frame_a)
        if channels > 1:
            frame_a = cv2.cvtColor(frame_a, cv2.COLOR_BGR2GRAY)
            frame_b = cv2.cvtColor(frame_b, cv2.COLOR_BGR2GRAY)
    except ValueError:
        # fix: was a bare `except:` that also swallowed unrelated errors
        pass

    if apply_clahe is True:
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(12, 12))
        frame_a = clahe.apply(frame_a)
        frame_b = clahe.apply(frame_b)

    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=win_size, overlap=overlap, dt=dT,
        search_area_size=searchArea,
        sig2noise_method='peak2mean', normalized_correlation=True)
    x, y = pyprocess.get_coordinates(frame_a.shape, win_size, overlap)

    return x, y, u, v, sig2noise
def simple_piv(im1, im2, plot=True):
    """
    Simplest PIV run on the pair of images using default settings.

    piv(im1,im2) will create a tmp.vec file with the vector filed in pix/dt
    (dt=1) from two images, im1,im2 provided as full path filenames
    (TIF is preferable, whatever imageio can read)
    """
    # accept either arrays or filenames
    if isinstance(im1, str):
        im1 = tools.imread(im1)
        im2 = tools.imread(im2)

    u, v, s2n = pyprocess.extended_search_area_piv(
        im1.astype(np.int32), im2.astype(np.int32),
        window_size=32, overlap=16, search_area_size=32)
    x, y = pyprocess.get_coordinates(image_size=im1.shape,
                                     search_area_size=32, overlap=16)

    # keep only vectors above the 5th s2n percentile
    valid = s2n > np.percentile(s2n, 5)

    if plot:
        _, ax = plt.subplots(figsize=(6, 6))
        ax.imshow(im1, cmap=plt.get_cmap("gray"), alpha=0.5, origin="upper")
        ax.quiver(x[valid], y[valid], u[valid], -v[valid],
                  scale=70, color='r', width=.005)
        plt.show()

    return x, y, u, v
def process(args):
    """Run PIV on one image pair and save the field with a combined
    validation mask (s2n OR global OR local-median)."""
    file_a, file_b, counter = args

    # read images into numpy arrays
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)
    print(counter + 1)

    # process image pair with piv algorithm
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a, frame_b,
        window_size=32, overlap=16, dt=0.0015,
        search_area_size=32, sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=32, overlap=16)

    # three independent validations, OR-ed into one mask
    u, v, mask1 = validation.sig2noise_val(u, v, sig2noise, threshold=1.0)
    u, v, mask2 = validation.global_val(u, v, (-2000, 2000), (-2000, 4000))
    u, v, mask3 = validation.local_median_val(u, v, 400, 400, size=2)
    #u, v, mask4 = validation.global_std(u, v, std_threshold=3)
    mask = mask1 | mask2 | mask3
    #u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)

    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, mask, save_file + '.dat')
def quick_piv(self, search_dict, index_a=100, index_b=101, folder=None):
    """Run a single PIV pass on one image pair and report field statistics.

    Reads the pair either via ``self.read_two_images(search_dict, ...)``
    (when ``folder`` is None) or from ``frame_%06d.tiff`` files in
    ``folder``. PIV parameters come from ``self.piv_param``.
    Returns ``(x, y, u3, v3)`` or None if the files cannot be read.
    """
    self.show_piv_param()
    # expose the parameter dict entries as attributes
    ns = Namespace(**self.piv_param)

    if folder == None:
        img_a, img_b = self.read_two_images(search_dict,
                                            index_a=index_a,
                                            index_b=index_b)

        # results directory mirrors the location of the matched piv dict
        location_path = [
            x['path'] for x in self.piv_dict_list
            if search_dict.items() <= x.items()
        ]
        results_path = os.path.join(self.results_path, *location_path)
        try:
            os.makedirs(results_path)
        except FileExistsError:
            pass
    else:
        try:
            file_a_path = os.path.join(self.path, folder,
                                       'frame_%06d.tiff' % index_a)
            file_b_path = os.path.join(self.path, folder,
                                       'frame_%06d.tiff' % index_b)
            img_a = np.array(Image.open(file_a_path))
            img_b = np.array(Image.open(file_b_path))
        except:
            # NOTE(review): bare except silently maps any read failure to
            # None -- consider narrowing to (OSError, FileNotFoundError).
            return None

    # crop both frames by the configured margins
    img_a = img_a[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]
    img_b = img_b[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        img_a.astype(np.int32),
        img_b.astype(np.int32),
        window_size=ns.winsize,
        overlap=ns.overlap,
        dt=ns.dt,
        search_area_size=ns.searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=img_a.shape,
                                     search_area_size=ns.searchsize,
                                     overlap=ns.overlap)

    # no. pixel per distance
    x, y, u0, v0 = scaling.uniform(x, y, u0, v0,
                                   scaling_factor=ns.pixel_density)

    # validate: global bounds first, then signal-to-noise
    u0, v0, mask = validation.global_val(
        u0, v0, (ns.u_lower_bound, ns.u_upper_bound),
        (ns.v_lower_bound, ns.v_upper_bound))

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise,
                                            threshold=1.01)

    u3, v3 = filters.replace_outliers(u1, v1, method='localmean',
                                      max_iter=500, kernel_size=3)

    # save in the simple ASCII table format
    # NOTE(review): tools.save is called with 7 positional args here
    # (x, y, u, v, sig2noise, mask, filename) -- confirm this matches the
    # installed openpiv version's signature.
    tools.save(x, y, u3, v3, sig2noise, mask,
               os.path.join(results_path, ns.text_export_name))

    if ns.image_check == True:
        fig, ax = plt.subplots(2, 1, figsize=(24, 12))
        ax[0].imshow(img_a)
        ax[1].imshow(img_b)

    io.imwrite(os.path.join(results_path, ns.figure_export_name), img_a)

    if ns.show_result == True:
        fig, ax = plt.subplots(figsize=(24, 12))
        tools.display_vector_field(
            os.path.join(results_path, ns.text_export_name),
            ax=ax,
            scaling_factor=ns.pixel_density,
            scale=ns.scale_factor,  # scale defines here the arrow length
            width=ns.arrow_width,  # width is the thickness of the arrow
            on_img=True,  # overlay on the image
            image_name=os.path.join(results_path, ns.figure_export_name))
        fig.savefig(os.path.join(results_path, ns.figure_export_name))

    if ns.show_vertical_profiles:
        field_shape = pyprocess.get_field_shape(
            image_size=img_a.shape,
            search_area_size=ns.searchsize,
            overlap=ns.overlap)
        vertical_profiles(ns.text_export_name, field_shape)

    print('Mean of u: %.3f' % np.mean(u3))
    print('Std of u: %.3f' % np.std(u3))
    print('Mean of v: %.3f' % np.mean(v3))
    print('Std of v: %.3f' % np.std(v3))

    output = np.array([np.mean(u3), np.std(u3), np.mean(v3), np.std(v3)])
    # if np.absolute(np.mean(v3)) < 50:
    #     output = self.quick_piv(search_dict,index_a = index_a + 1, index_b = index_b + 1)

    return x, y, u3, v3
def process(self, args):
    """
    Process chain as configured in the GUI.

    Parameters
    ----------
    args : tuple
        Tuple as expected by the inherited run method:
        file_a (str) -- image file a
        file_b (str) -- image file b
        counter (int) -- index pointing to an element of the filename
        list
    """
    # NOTE(review): this method reads settings from both self.p and
    # self.parameter -- confirm both dicts are populated consistently.
    file_a, file_b, counter = args
    frame_a = piv_tls.imread(file_a)
    frame_b = piv_tls.imread(file_b)

    # Smoothning script borrowed from openpiv.windef
    s = self.p['smoothn_val']

    def smoothn(u, s):
        s = s
        u, _, _, _ = piv_smt.smoothn(u, s=s, isrobust=self.p['robust'])
        return (u)

    # delimiters placed here for safety
    delimiter = self.p['separator']
    if delimiter == 'tab':
        delimiter = '\t'
    if delimiter == 'space':
        delimiter = ' '

    # preprocessing
    print('\nPre-pocessing image pair: {}'.format(counter + 1))
    if self.p['background_subtract'] \
            and self.p['background_type'] == 'minA - minB':
        self.background = gen_background(self.p, frame_a, frame_b)

    frame_a = frame_a.astype(np.int32)
    frame_a = process_images(self, frame_a,
                             self.GUI.preprocessing_methods,
                             background=self.background)
    frame_b = frame_b.astype(np.int32)
    frame_b = process_images(self, frame_b,
                             self.GUI.preprocessing_methods,
                             background=self.background)

    print('Evaluating image pair: {}'.format(counter + 1))
    # evaluation first pass
    start = time.time()
    passes = 1
    # setup custom windowing if selected
    if self.parameter['custom_windowing']:
        corr_window_0 = self.parameter['corr_window_1']
        overlap_0 = self.parameter['overlap_1']
        # count how many consecutive passes are enabled
        for i in range(2, 8):
            if self.parameter['pass_%1d' % i]:
                passes += 1
            else:
                break
    else:
        passes = self.parameter['coarse_factor']
        if self.parameter['grid_refinement'] == 'all passes' \
                and self.parameter['coarse_factor'] != 1:
            corr_window_0 = self.parameter['corr_window'] * \
                2**(self.parameter['coarse_factor'] - 1)
            overlap_0 = self.parameter['overlap'] * \
                2**(self.parameter['coarse_factor'] - 1)
        # Refine all passes after first when there are more than 1 pass.
        elif self.parameter['grid_refinement'] == '2nd pass on' \
                and self.parameter['coarse_factor'] != 1:
            corr_window_0 = self.parameter['corr_window'] * \
                2**(self.parameter['coarse_factor'] - 2)
            overlap_0 = self.parameter['overlap'] * \
                2**(self.parameter['coarse_factor'] - 2)
        # If >>none<< is selected or something goes wrong, the window
        # size would remain the same.
        else:
            corr_window_0 = self.parameter['corr_window']
            overlap_0 = self.parameter['overlap']
    overlap_percent = overlap_0 / corr_window_0
    sizeX = corr_window_0

    u, v, sig2noise = piv_wdf.extended_search_area_piv(
        frame_a.astype(np.int32), frame_b.astype(np.int32),
        window_size=corr_window_0,
        overlap=overlap_0,
        search_area_size=corr_window_0,
        width=self.parameter['s2n_mask'],
        subpixel_method=self.parameter['subpixel_method'],
        sig2noise_method=self.parameter['sig2noise_method'],
        correlation_method=self.parameter['corr_method'],
        normalized_correlation=self.parameter['normalize_correlation'])

    x, y = piv_prc.get_coordinates(frame_a.shape, corr_window_0, overlap_0)

    # validating first pass
    mask = np.full_like(x, 0)
    if self.parameter['fp_vld_global_threshold']:
        u, v, Mask = piv_vld.global_val(
            u, v,
            u_thresholds=(self.parameter['fp_MinU'],
                          self.parameter['fp_MaxU']),
            v_thresholds=(self.parameter['fp_MinV'],
                          self.parameter['fp_MaxV']))
        # consolidate effects of mask
        mask += Mask
    if self.parameter['fp_local_med']:
        u, v, Mask = piv_vld.local_median_val(
            u, v,
            u_threshold=self.parameter['fp_local_med'],
            v_threshold=self.parameter['fp_local_med'],
            size=self.parameter['fp_local_med_size'])
        mask += Mask
    if self.parameter['adv_repl']:
        u, v = piv_flt.replace_outliers(
            u, v,
            method=self.parameter['adv_repl_method'],
            max_iter=self.parameter['adv_repl_iter'],
            kernel_size=self.parameter['adv_repl_kernel'])
    print('Validated first pass result of image pair: {}.'.format(counter + 1))

    # smoothning before deformation if 'each pass' is selected
    if self.parameter['smoothn_each_pass']:
        if self.parameter['smoothn_first_more']:
            s *= 2
        u = smoothn(u, s)
        v = smoothn(v, s)
        print('Smoothned pass 1 for image pair: {}.'.format(counter + 1))
        s = self.parameter['smoothn_val1']

    print('Finished pass 1 for image pair: {}.'.format(counter + 1))
    print("window size: " + str(corr_window_0))
    print('overlap: ' + str(overlap_0), '\n')

    # evaluation of all other passes
    if passes != 1:
        iterations = passes - 1
        for i in range(2, passes + 1):
            # setting up the windowing of each pass
            if self.parameter['custom_windowing']:
                corr_window = self.parameter['corr_window_%1d' % i]
                overlap = int(corr_window * overlap_percent)
            else:
                if self.parameter['grid_refinement'] == 'all passes' or \
                        self.parameter['grid_refinement'] == '2nd pass on':
                    corr_window = self.parameter['corr_window'] * \
                        2**(iterations - 1)
                    overlap = self.parameter['overlap'] * \
                        2**(iterations - 1)
                else:
                    corr_window = self.parameter['corr_window']
                    overlap = self.parameter['overlap']
            sizeX = corr_window

            # translate settings to windef settings object
            piv_wdf_settings = piv_wdf.Settings()
            piv_wdf_settings.correlation_method = \
                self.parameter['corr_method']
            piv_wdf_settings.normalized_correlation = \
                self.parameter['normalize_correlation']
            piv_wdf_settings.windowsizes = (corr_window, ) * (passes + 1)
            piv_wdf_settings.overlap = (overlap, ) * (passes + 1)
            piv_wdf_settings.num_iterations = passes
            piv_wdf_settings.subpixel_method = \
                self.parameter['subpixel_method']
            piv_wdf_settings.deformation_method = \
                self.parameter['deformation_method']
            piv_wdf_settings.interpolation_order = \
                self.parameter['interpolation_order']
            piv_wdf_settings.sig2noise_validate = True,
            piv_wdf_settings.sig2noise_method = \
                self.parameter['sig2noise_method']
            piv_wdf_settings.sig2noise_mask = self.parameter['s2n_mask']

            # do the correlation
            x, y, u, v, sig2noise, mask = piv_wdf.multipass_img_deform(
                frame_a.astype(np.int32),
                frame_b.astype(np.int32),
                i,  # current iteration
                x, y, u, v,
                piv_wdf_settings)

            # validate other passes
            if self.parameter['sp_vld_global_threshold']:
                u, v, Mask = piv_vld.global_val(
                    u, v,
                    u_thresholds=(self.parameter['sp_MinU'],
                                  self.parameter['sp_MaxU']),
                    v_thresholds=(self.parameter['sp_MinV'],
                                  self.parameter['sp_MaxV']))
                mask += Mask  # consolidate effects of mask
            if self.parameter['sp_vld_global_threshold']:
                u, v, Mask = piv_vld.global_std(
                    u, v, std_threshold=self.parameter['sp_std_threshold'])
                mask += Mask
            if self.parameter['sp_local_med_validation']:
                u, v, Mask = piv_vld.local_median_val(
                    u, v,
                    u_threshold=self.parameter['sp_local_med'],
                    v_threshold=self.parameter['sp_local_med'],
                    size=self.parameter['sp_local_med_size'])
                mask += Mask
            if self.parameter['adv_repl']:
                u, v = piv_flt.replace_outliers(
                    u, v,
                    method=self.parameter['adv_repl_method'],
                    max_iter=self.parameter['adv_repl_iter'],
                    kernel_size=self.parameter['adv_repl_kernel'])
            print('Validated pass {} of image pair: {}.'.format(
                i, counter + 1))

            # smoothning each individual pass if 'each pass' is selected
            if self.parameter['smoothn_each_pass']:
                u = smoothn(u, s)
                v = smoothn(v, s)
                print('Smoothned pass {} for image pair: {}.'.format(
                    i, counter + 1))

            print('Finished pass {} for image pair: {}.'.format(
                i, counter + 1))
            print("window size: " + str(corr_window))
            print('overlap: ' + str(overlap), '\n')
            iterations -= 1

    # optional flips / sign inversions of the final field
    if self.p['flip_u']:
        u = np.flipud(u)
    if self.p['flip_v']:
        v = np.flipud(v)
    if self.p['invert_u']:
        u *= -1
    if self.p['invert_v']:
        v *= -1

    # scaling
    u = u / self.parameter['dt']
    v = v / self.parameter['dt']
    x, y, u, v = piv_scl.uniform(x, y, u, v,
                                 scaling_factor=self.parameter['scale'])
    end = time.time()

    # save data to file.
    out = np.vstack([m.ravel() for m in [x, y, u, v, mask, sig2noise]])
    np.savetxt(self.save_fnames[counter], out.T, fmt='%8.4f',
               delimiter=delimiter)
    print('Processed image pair: {}'.format(counter + 1))

    # timing statistics based on the last pass' grid size
    sizeY = sizeX
    sizeX = ((int(frame_a.shape[0] - sizeX) //
              (sizeX - (sizeX * overlap_percent))) + 1)
    sizeY = ((int(frame_a.shape[1] - sizeY) //
              (sizeY - (sizeY * overlap_percent))) + 1)
    time_per_vec = _round((((end - start) * 1000) /
                           ((sizeX * sizeY) - 1)), 3)

    print('Process time: {} second(s)'.format((_round((end - start), 3))))
    print('Number of vectors: {}'.format(int((sizeX * sizeY) - 1)))
    print('Time per vector: {} millisecond(s)'.format(time_per_vec))
window_size = 32 #pixels overlap = 16 # pixels search_area_size = 64 # pixels frame_rate = 40 # fps # process again with the masked images, for comparison# process once with the original images u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a.astype(np.int32), frame_b.astype(np.int32), window_size=window_size, overlap=overlap, dt=1. / frame_rate, search_area_size=search_area_size, sig2noise_method='peak2peak') x, y = pyprocess.get_coordinates(image_size=frame_a.shape, search_area_size=search_area_size, overlap=overlap) u, v, mask = validation.global_val(u, v, (-300., 300.), (-300., 300.)) u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.1) u, v = filters.replace_outliers(u, v, method='localmean', max_iter=3, kernel_size=3) x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52) # save to a file tools.save(x, y, u, v, mask,
# Script fragment: the same image pair evaluated three ways for comparison.

# --- variant 1: extended search area PIV (process module) ---
u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=24, overlap=12, dt=0.02,
    search_area_size=64, sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24, overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=2.5)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001.txt')
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)

# --- variant 2: pyprocess.piv with FFT correlation ---
u, v, s2n = pyprocess.piv(frame_a, frame_b, corr_method='fft',
                          window_size=24, overlap=12, dt=0.02,
                          sig2noise_method='peak2peak')
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=24, overlap=12)
u, v, mask = validation.sig2noise_val(u, v, s2n, threshold=2.5)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2.5)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_002.txt')
tools.display_vector_field('exp1_002.txt', scale=100, width=0.0025)

# --- variant 3: pyprocess.piv with direct correlation ---
u, v, s2n = pyprocess.piv(frame_a, frame_b, corr_method='direct',
                          window_size=24, overlap=12, dt=0.02,
                          sig2noise_method='peak2peak')
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=24, overlap=12)
# fix: this variant previously validated against the stale `sig2noise`
# array left over from variant 1; it must use its own `s2n`.
u, v, mask = validation.sig2noise_val(u, v, s2n, threshold=2.5)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2.5)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
def multipass_img_deform(
    frame_a,
    frame_b,
    current_iteration,
    x_old,
    y_old,
    u_old,
    v_old,
    settings,
    mask_coords=None,
):
    """
    Multi pass of the PIV evaluation.

    This function does the PIV evaluation of the second and other passes.
    It returns the coordinates of the interrogation window centres,
    the displacement u, v for each interrogation window as well as
    the signal to noise ratio array (which is full of NaNs if opted out)

    Parameters
    ----------
    frame_a : 2d np.ndarray
        the first image

    frame_b : 2d np.ndarray
        the second image

    current_iteration : int
        index of the current pass; selects window size and overlap from
        ``settings.windowsizes`` / ``settings.overlap``

    x_old : 2d np.ndarray
        the x coordinates of the vector field of the previous pass

    y_old : 2d np.ndarray
        the y coordinates of the vector field of the previous pass

    u_old : 2d np.ndarray
        the u displacement of the vector field of the previous pass
        in case of the image mask - u_old and v_old are MaskedArrays

    v_old : 2d np.ndarray
        the v displacement of the vector field of the previous pass

    settings : object
        windowing, correlation, deformation, validation and plotting options;
        ``settings.subpixel_method`` is one of 'centroid' [replaces default
        if correlation map is negative], 'gaussian' [default if correlation
        map is positive], 'parabolic'; ``settings.interpolation_order`` is
        the order of the spline interpolation used for the image deformation

    mask_coords : list, optional
        x,y coordinates (pixels) of the image mask,
        default is an empty list

    Returns
    -------
    x : 2d np.array
        array containg the x coordinates of the interrogation window centres

    y : 2d np.array
        array containg the y coordinates of the interrogation window centres

    u : 2d np.array
        array containing the horizontal displacement for every interrogation
        window [pixels]

    v : 2d np.array
        array containing the vertical displacement for every interrogation
        window [pixels]

    s2n : 2D np.array
        of signal to noise ratio values

    mask : 2d np.array
        boolean array of vectors flagged invalid by the validation
    """
    # fix: mutable default argument ([]) replaced by a None sentinel
    if mask_coords is None:
        mask_coords = []

    if not isinstance(u_old, np.ma.MaskedArray):
        raise ValueError('Expected masked array')

    # calculate the y and y coordinates of the interrogation window centres.
    # Hence, the edges must be extracted to provide the sufficient input.
    # x_old and y_old are the coordinates of the old grid.
    # x_int and y_int are the coordinates of the new grid.
    window_size = settings.windowsizes[current_iteration]
    overlap = settings.overlap[current_iteration]
    x, y = get_coordinates(frame_a.shape, window_size, overlap)

    # The interpolation function dont like meshgrids as input.
    # plus the coordinate system for y is now from top to bottom
    # and RectBivariateSpline wants an increasing set
    y_old = y_old[:, 0]
    x_old = x_old[0, :]
    y_int = y[:, 0]
    x_int = x[0, :]

    # interpolating the displacements from the old grid onto the new grid
    # y befor x because of numpy works row major
    ip = RectBivariateSpline(y_old, x_old, u_old.filled(0.))
    u_pre = ip(y_int, x_int)
    ip2 = RectBivariateSpline(y_old, x_old, v_old.filled(0.))
    v_pre = ip2(y_int, x_int)

    if settings.show_all_plots:
        plt.figure()
        plt.quiver(x_old, y_old, u_old, -1 * v_old, color='b')
        plt.quiver(x_int, y_int, u_pre, -1 * v_pre, color='r', lw=2)
        plt.gca().set_aspect(1.)
        plt.gca().invert_yaxis()
        plt.title('inside deform, invert')
        plt.show()

    # @TKauefer added another method to the windowdeformation, 'symmetric'
    # splits the onto both frames, takes more effort due to additional
    # interpolation however should deliver better results
    old_frame_a = frame_a.copy()
    old_frame_b = frame_b.copy()

    # Image deformation has to occur in image coordinates
    # therefore we need to convert the results of the
    # previous pass which are stored in the physical units
    # and so y from the get_coordinates
    if settings.deformation_method == "symmetric":
        # this one is doing the image deformation (see above)
        x_new, y_new, ut, vt = create_deformation_field(
            frame_a, x, y, u_pre, v_pre)
        frame_a = scn.map_coordinates(
            frame_a, ((y_new - vt / 2, x_new - ut / 2)),
            order=settings.interpolation_order, mode='nearest')
        frame_b = scn.map_coordinates(
            frame_b, ((y_new + vt / 2, x_new + ut / 2)),
            order=settings.interpolation_order, mode='nearest')
    elif settings.deformation_method == "second image":
        frame_b = deform_windows(
            frame_b, x, y, u_pre, -v_pre,
            interpolation_order=settings.interpolation_order)
    else:
        raise Exception("Deformation method is not valid.")

    if settings.show_all_plots:
        if settings.deformation_method == 'symmetric':
            plt.figure()
            plt.imshow(frame_a - old_frame_a)
            plt.show()
            plt.figure()
            plt.imshow(frame_b - old_frame_b)
            plt.show()

    # if we did not want to validate every step, remove the method
    if settings.sig2noise_validate is False:
        settings.sig2noise_method = None

    # so we use here default circular not normalized correlation:
    u, v, s2n = extended_search_area_piv(
        frame_a,
        frame_b,
        window_size=window_size,
        overlap=overlap,
        width=settings.sig2noise_mask,
        subpixel_method=settings.subpixel_method,
        sig2noise_method=settings.sig2noise_method,
        correlation_method=settings.correlation_method,
        normalized_correlation=settings.normalized_correlation,
    )
    shapes = np.array(get_field_shape(frame_a.shape, window_size, overlap))
    u = u.reshape(shapes)
    v = v.reshape(shapes)
    s2n = s2n.reshape(shapes)

    # add the displacement interpolated from the previous pass
    u += u_pre
    v += v_pre

    # reapply the image mask to the new grid
    if settings.image_mask:
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        u = np.ma.masked_array(u, np.ma.nomask)
        v = np.ma.masked_array(v, np.ma.nomask)

    # validate in the multi-pass by default
    u, v, mask = validation.typical_validation(u, v, s2n, settings)

    if np.all(mask):
        raise ValueError("Something happened in the validation")

    if not isinstance(u, np.ma.MaskedArray):
        raise ValueError('not a masked array anymore')

    if settings.show_all_plots:
        plt.figure()
        nans = np.nonzero(mask)
        # NOTE(review): np.nonzero returns a tuple of index arrays, so
        # `~nans` on that tuple looks suspect -- confirm this debug-plot
        # path actually runs.
        plt.quiver(x[~nans], y[~nans], u[~nans], -v[~nans], color='b')
        plt.quiver(x[nans], y[nans], u[nans], -v[nans], color='r')
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title('After sig2noise, inverted')
        plt.show()

    # we have to replace outliers
    u, v = filters.replace_outliers(
        u,
        v,
        method=settings.filter_method,
        max_iter=settings.max_filter_iteration,
        kernel_size=settings.filter_kernel_size,
    )

    # reapply the image mask to the new grid
    if settings.image_mask:
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        u = np.ma.masked_array(u, np.ma.nomask)
        v = np.ma.masked_array(v, np.ma.nomask)

    if settings.show_all_plots:
        plt.figure()
        plt.quiver(x, y, u, -v, color='r')
        plt.quiver(x, y, u_pre, -1 * v_pre, color='b')
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title(' after replaced outliers, red, invert')
        plt.show()

    return x, y, u, v, s2n, mask
def first_pass(frame_a, frame_b, settings):
    """First pass of the PIV evaluation.

    Runs a single extended-search-area PIV evaluation of the image pair
    using the first (coarsest) window size and overlap taken from
    ``settings``, and returns the result reshaped onto the vector-field
    grid together with the window-centre coordinates.

    Parameters
    ----------
    frame_a : 2d np.ndarray
        the first image

    frame_b : 2d np.ndarray
        the second image

    settings : object
        evaluation settings; the attributes read here are
        ``windowsizes``, ``overlap``, ``sig2noise_mask``,
        ``subpixel_method``, ``sig2noise_method``,
        ``correlation_method`` and ``normalized_correlation``

    Returns
    -------
    x : 2d np.array
        array containing the x coordinates of the interrogation window
        centres

    y : 2d np.array
        array containing the y coordinates of the interrogation window
        centres

    u : 2d np.array
        array containing the horizontal displacement for every
        interrogation window [pixels]

    v : 2d np.array
        array containing the vertical displacement for every
        interrogation window [pixels]

    s2n : 2d np.array
        signal-to-noise ratio for every interrogation window
    """
    window_size = settings.windowsizes[0]
    overlap = settings.overlap[0]

    # First pass: the search area equals the window size, i.e. no
    # extended search is performed on the coarsest grid.
    u, v, s2n = extended_search_area_piv(
        frame_a,
        frame_b,
        window_size=window_size,
        overlap=overlap,
        search_area_size=window_size,
        width=settings.sig2noise_mask,
        subpixel_method=settings.subpixel_method,
        sig2noise_method=settings.sig2noise_method,
        correlation_method=settings.correlation_method,
        normalized_correlation=settings.normalized_correlation)

    # Reshape the flat per-window results onto the 2D vector-field grid.
    shapes = np.array(get_field_shape(frame_a.shape, window_size, overlap))
    u = u.reshape(shapes)
    v = v.reshape(shapes)
    s2n = s2n.reshape(shapes)

    x, y = get_coordinates(frame_a.shape, window_size, overlap)

    return x, y, u, v, s2n
def multipass_img_deform(frame_a, frame_b, window_size, overlap, iterations,
                         current_iteration, x_old, y_old, u_old, v_old,
                         correlation_method='circular',
                         subpixel_method='gaussian', do_sig2noise=False,
                         sig2noise_method='peak2peak', sig2noise_mask=2,
                         MinMaxU=(-100, 50), MinMaxV=(-50, 50),
                         std_threshold=5, median_threshold=2, median_size=1,
                         filter_method='localmean', max_filter_iteration=10,
                         filter_kernel_size=2, interpolation_order=3):
    """
    Intermediate pass of the multi-grid PIV evaluation with window
    deformation.

    This function interpolates the displacement field of the previous
    pass onto the new (finer) grid, deforms the second frame
    accordingly, correlates the interrogation windows, validates the
    resulting field, and returns the accumulated displacement.

    Parameters
    ----------
    frame_a : 2d np.ndarray
        the first image

    frame_b : 2d np.ndarray
        the second image

    window_size : tuple of ints
        the size of the interrogation window

    overlap : tuple of ints
        the overlap of the interrogation window, e.g. window_size/2

    iterations : int
        total number of evaluation passes

    current_iteration : int
        index of the pass currently being evaluated

    x_old : 2d np.ndarray
        the x coordinates of the vector field of the previous pass

    y_old : 2d np.ndarray
        the y coordinates of the vector field of the previous pass

    u_old : 2d np.ndarray
        the u displacement of the vector field of the previous pass

    v_old : 2d np.ndarray
        the v displacement of the vector field of the previous pass

    subpixel_method: string
        the method used for the subpixel interpolation.
        one of the following methods to estimate subpixel location of
        the peak: 'centroid' [replaces default if correlation map is
        negative], 'gaussian' [default if correlation map is positive],
        'parabolic'

    MinMaxU : two elements tuple
        sets the limits of the u displacement component
        Used for validation.

    MinMaxV : two elements tuple
        sets the limits of the v displacement component
        Used for validation.

    std_threshold : float
        sets the threshold for the std validation

    median_threshold : float
        sets the threshold for the median validation

    filter_method : string
        the method used to replace the non-valid vectors
        Methods: 'localmean', 'disk', 'distance'

    max_filter_iteration : int
        maximum of filter iterations to replace nans

    filter_kernel_size : int
        size of the kernel used for the filtering

    interpolation_order : int
        the order of the spline interpolation used for the image
        deformation

    Returns
    -------
    x : 2d np.array
        array containing the x coordinates of the interrogation window
        centres

    y : 2d np.array
        array containing the y coordinates of the interrogation window
        centres

    u : 2d np.array
        array containing the u displacement for every interrogation
        window

    v : 2d np.array
        array containing the v displacement for every interrogation
        window

    sig2noise_ratio : 2d np.array
        signal-to-noise ratio (NaN-filled unless computed on the final
        pass)

    mask : 2d np.array
        array containing the mask values (bool) which indicate whether
        the vector was filtered
    """
    # Calculate the x and y coordinates of the interrogation window
    # centres on the new (finer) grid.
    x, y = get_coordinates(np.shape(frame_a), window_size, overlap)

    # The interpolation function does not like meshgrids as input, so
    # the edges must be extracted: x_old/y_old are the coordinates of
    # the old grid, x_int/y_int the coordinates of the new grid.
    y_old = y_old[:, 0]
    # y_old = y_old[::-1]
    x_old = x_old[0, :]

    y_int = y[:, 0]
    # y_int = y_int[::-1]
    x_int = x[0, :]

    # Interpolate the displacements from the old grid onto the new
    # grid; y before x because numpy works row-major.
    ip = RectBivariateSpline(y_old, x_old, u_old)
    u_pre = ip(y_int, x_int)
    ip2 = RectBivariateSpline(y_old, x_old, v_old)
    v_pre = ip2(y_int, x_int)

    # Deform the second frame with the interpolated predictor field.
    frame_b_deform = frame_interpolation(
        frame_b, x, y, u_pre, -v_pre,
        interpolation_order=interpolation_order)

    # Fill the interrogation windows. The windows are arranged in a 3d
    # array of shape (n_windows, window_size, window_size); this is
    # much faster than using a loop.
    cor_win_1 = pyprocess.moving_window_array(frame_a, window_size, overlap)
    cor_win_2 = pyprocess.moving_window_array(frame_b_deform, window_size,
                                              overlap)

    # Correlate all window pairs at once.
    correlation = correlation_func(cor_win_1, cor_win_2,
                                   correlation_method=correlation_method,
                                   normalized_correlation=False)

    # Determine the displacement on subpixel level for each window.
    disp = np.zeros((np.size(correlation, 0), 2))
    for i in range(0, np.size(correlation, 0)):
        disp[i, :] = find_subpixel_peak_position(
            correlation[i, :, :], subpixel_method=subpixel_method)

    # Shift peak positions so they are measured relative to the centre
    # of the correlation plane.
    disp = np.array(disp) - np.floor(np.array(correlation[0, :, :].shape) / 2)

    # Reshape the per-window displacements to vector-field shape.
    shapes = np.array(
        pyprocess.get_field_shape(np.shape(frame_a), window_size, overlap))
    u = disp[:, 1].reshape(shapes)
    v = -disp[:, 0].reshape(shapes)

    # Add the correction of this pass onto the displacement predictor
    # of the previous pass.
    u = u + u_pre
    v = v + v_pre

    # Validation using global limits, global std and local median.
    u, v, mask_g = validation.global_val(u, v, MinMaxU, MinMaxV)
    u, v, mask_s = validation.global_std(u, v, std_threshold=std_threshold)
    u, v, mask_m = validation.local_median_val(u, v,
                                               u_threshold=median_threshold,
                                               v_threshold=median_threshold,
                                               size=median_size)
    # Combine the masks so the effect of all validations is recorded.
    mask = mask_g + mask_m + mask_s

    # Replace the values that were marked by the validation, except on
    # the final pass (where outliers are left for the caller to judge).
    if current_iteration != iterations:
        u, v = filters.replace_outliers(u, v, method=filter_method,
                                        max_iter=max_filter_iteration,
                                        kernel_size=filter_kernel_size)

    # Compute the signal-to-noise ratio only on the last of several
    # passes; otherwise return a NaN-filled placeholder.
    if do_sig2noise and current_iteration == iterations and iterations != 1:
        sig2noise_ratio = sig2noise_ratio_function(
            correlation, sig2noise_method=sig2noise_method,
            width=sig2noise_mask)
        sig2noise_ratio = sig2noise_ratio.reshape(shapes)
    else:
        sig2noise_ratio = np.full_like(u, np.nan)

    return x, y, u, v, sig2noise_ratio, mask
def first_pass(frame_a, frame_b, window_size, overlap, iterations,
               correlation_method='circular', subpixel_method='gaussian',
               do_sig2noise=False, sig2noise_method='peak2peak',
               sig2noise_mask=2):
    """
    First pass of the PIV evaluation.

    This function does the PIV evaluation of the first pass. It returns
    the coordinates of the interrogation window centres and the
    displacement u and v for each interrogation window.

    Parameters
    ----------
    frame_a : 2d np.ndarray
        the first image

    frame_b : 2d np.ndarray
        the second image

    window_size : int
        the size of the interrogation window

    overlap : int
        the overlap of the interrogation window, e.g. window_size/2

    iterations : int
        total number of evaluation passes; the signal-to-noise ratio is
        only computed here when this is the only pass

    subpixel_method: string
        the method used for the subpixel interpolation.
        one of the following methods to estimate subpixel location of
        the peak: 'centroid' [replaces default if correlation map is
        negative], 'gaussian' [default if correlation map is positive],
        'parabolic'

    Returns
    -------
    x : 2d np.array
        array containing the x coordinates of the interrogation window
        centres

    y : 2d np.array
        array containing the y coordinates of the interrogation window
        centres

    u : 2d np.array
        array containing the u displacement for every interrogation
        window

    v : 2d np.array
        array containing the v displacement for every interrogation
        window

    sig2noise_ratio : 2d np.array
        signal-to-noise ratio (NaN-filled unless iterations == 1 and
        do_sig2noise is True)
    """
    # Fill the interrogation windows. The windows are arranged in a 3d
    # array of shape (n_windows, window_size, window_size); this is
    # much faster than using a loop.
    cor_win_1 = pyprocess.moving_window_array(frame_a, window_size, overlap)
    cor_win_2 = pyprocess.moving_window_array(frame_b, window_size, overlap)

    # Correlate all window pairs at once.
    correlation = correlation_func(cor_win_1, cor_win_2,
                                   correlation_method=correlation_method,
                                   normalized_correlation=False)

    # Determine the displacement on subpixel level for each window.
    disp = np.zeros((np.size(correlation, 0), 2))  # dummy for the loop to fill
    for i in range(0, np.size(correlation, 0)):
        disp[i, :] = find_subpixel_peak_position(
            correlation[i, :, :], subpixel_method=subpixel_method)

    # Shift peak positions so they are measured relative to the centre
    # of the correlation plane.
    disp = np.array(disp) - np.floor(np.array(correlation[0, :, :].shape) / 2)

    # Reshape the per-window displacements to vector-field shape.
    shapes = np.array(
        pyprocess.get_field_shape(frame_a.shape, window_size, overlap))
    u = disp[:, 1].reshape(shapes)
    v = -disp[:, 0].reshape(shapes)

    # Get the coordinates onto which the displacement is mapped.
    x, y = get_coordinates(frame_a.shape, window_size, overlap)

    # Compute the signal-to-noise ratio only when this is the single
    # and final pass; otherwise return a NaN-filled placeholder.
    if do_sig2noise and iterations == 1:
        sig2noise_ratio = sig2noise_ratio_function(
            correlation, sig2noise_method=sig2noise_method,
            width=sig2noise_mask)
        sig2noise_ratio = sig2noise_ratio.reshape(shapes)
    else:
        sig2noise_ratio = np.full_like(u, np.nan)

    return x, y, u, v, sig2noise_ratio
def PIV(frame_0, frame_1,
        winsize, searchsize, overlap,
        frame_rate, scaling_factor,
        threshold=1.3,
        output='fil'):
    """
    Particle Image Velocimetry processing for two sequential images.

    Input:
    ------
    frame_0 - first frame to indicate potential seeds.
    frame_1 - second frame to trace seed displacements.
    winsize - size of the individual (square) grid cells in pixels.
    searchsize - size of the search area in pixels in which the
        location with the highest similarity is found.
    overlap - overlap over the grid cells in pixels.
    frame_rate - frame rate of the video in frames per second (fps).
    scaling_factor - amount of pixels per meter.
    threshold - signal-to-noise threshold below which vectors are
        marked invalid (default: 1.3).
    output - after which step the PIV processing is stopped
        ('raw', 'fil', or 'int'; default: 'fil')
    """
    # determine the timestep between the two sequential frames (1/fps)
    dt = 1. / frame_rate

    # estimation of seed displacements in x and y direction
    # and the corresponding signal-to-noise ratio
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_0, frame_1, window_size=winsize, overlap=overlap,
        dt=dt, search_area_size=searchsize, sig2noise_method='peak2peak')

    # xy-coordinates of the centre of each grid cell
    x, y = pyprocess.get_coordinates(image_size=frame_0.shape,
                                     window_size=winsize, overlap=overlap)

    # if output is 'fil' or 'int':
    # filter out grid cells with a low signal-to-noise ratio
    if output in ('fil', 'int'):
        u, v, mask = validation.sig2noise_val(u, v, sig2noise,
                                              threshold=threshold)

        # if output is 'int':
        # fill in missing values through interpolation
        if output == 'int':
            u, v = filters.replace_outliers(u, v, method='localmean',
                                            max_iter=50, kernel_size=3)

    # scale results based on the pixels per metres
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=scaling_factor)

    return x, y, u, v, sig2noise
def run_piv(
        frame_a,
        frame_b,
        winsize=16,  # pixels, interrogation window size in frame A
        searchsize=20,  # pixels, search in image B
        overlap=8,  # pixels, 50% overlap
        dt=0.0001,  # sec, time interval between pulses
        image_check=False,
        show_vertical_profiles=False,
        figure_export_name='_results.png',
        text_export_name="_results.txt",
        scale_factor=1,
        pixel_density=36.74,
        arrow_width=0.001,
        show_result=True,
        u_bounds=(-100, 100),
        v_bounds=(-100, 100)):
    """Run a single-pass PIV evaluation of one image pair.

    Performs extended-search-area PIV, scales the field by
    ``pixel_density``, validates it against ``u_bounds``/``v_bounds``
    and a signal-to-noise threshold of 1.05, replaces outliers, and
    optionally saves/plots the result.  Returns the standard deviation
    of the filtered u component.
    """
    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)

    # convert pixel units to physical units (no. pixel per distance)
    x, y, u0, v0 = scaling.uniform(
        x, y, u0, v0, scaling_factor=pixel_density)

    # validate against global displacement bounds, then s2n threshold
    u0, v0, mask = validation.global_val(u0, v0, u_bounds, v_bounds)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise,
                                            threshold=1.05)
    u3, v3 = filters.replace_outliers(u1, v1, method='localmean',
                                      max_iter=10, kernel_size=3)

    # save in the simple ASCII table format, but only when the field is
    # not wildly scattered (std(u3) < 480 acts as a sanity gate)
    # NOTE(review): this save call passes both sig2noise and mask —
    # confirm it matches the tools.save signature of the pinned version.
    if np.std(u3) < 480:
        tools.save(x, y, u3, v3, sig2noise, mask, text_export_name)

    if image_check == True:
        fig, ax = plt.subplots(2, 1, figsize=(24, 12))
        ax[0].imshow(frame_a)
        ax[1].imshow(frame_b)

    # NOTE(review): this writes the raw frame under the figure name;
    # the quiver figure below then overwrites the same file — confirm
    # this ordering is intentional.
    io.imwrite(figure_export_name, frame_a)

    if show_result == True:
        fig, ax = plt.subplots(figsize=(24, 12))
        tools.display_vector_field(
            text_export_name,
            ax=ax,
            scaling_factor=pixel_density,
            scale=scale_factor,  # scale defines here the arrow length
            width=arrow_width,  # width is the thickness of the arrow
            on_img=True,  # overlay on the image
            image_name=figure_export_name)
        fig.savefig(figure_export_name)

    if show_vertical_profiles:
        field_shape = pyprocess.get_field_shape(image_size=frame_a.shape,
                                                search_area_size=searchsize,
                                                overlap=overlap)
        vertical_profiles(text_export_name, field_shape)

    print('Std of u3: %.3f' % np.std(u3))
    print('Mean of u3: %.3f' % np.mean(u3))

    return np.std(u3)
import os # we can run it from any folder path = os.path.dirname(os.path.abspath(__file__)) frame_a = tools.imread(os.path.join(path, '../test1/exp1_001_a.bmp')) frame_b = tools.imread(os.path.join(path, '../test1/exp1_001_b.bmp')) frame_a = (frame_a * 1024).astype(np.int32) frame_b = (frame_b * 1024).astype(np.int32) u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, \ window_size=32, overlap=16, dt=0.02, search_area_size=64, sig2noise_method='peak2peak' ) print(u, v, sig2noise) x, y = pyprocess.get_coordinates(image_size=frame_a.shape, search_area_size=64, overlap=16) u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3) u, v, mask = validation.global_val(u, v, (-1000, 2000), (-1000, 1000)) u, v = filters.replace_outliers(u, v, method='localmean', max_iter=10, kernel_size=2) x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52) tools.save(x, y, u, v, mask, 'test1.vec') tools.display_vector_field('test1.vec', scale=75, width=0.0035)
# scale the field from the previous processing step to physical units
# (x, y, u, v, mask, frame_a, frame_b are defined earlier in the script)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'Y4-S3_Camera000398_a.txt')

# %%
# Use Python version, pyprocess:
u, v, sig2noise = pyprocess.extended_search_area_piv(
    frame_a.astype(np.int32), frame_b.astype(np.int32),
    window_size=32, overlap=8, dt=.1, sig2noise_method='peak2peak')

# window-centre coordinates matching the settings above
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=32, overlap=8)

# validate by signal-to-noise ratio and replace the rejected vectors
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v = filters.replace_outliers(u, v, method='localmean',
                                max_iter=10, kernel_size=2)

# convert from pixels to physical units and save
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'Y4-S3_Camera000398_b.txt')

# %%
# "natural" view without image
fig, ax = plt.subplots(2, 1, figsize=(6, 12))
ax[0].invert_yaxis()
def calc_piv_2_images(frame_a, frame_b, idx, dir_name):
    '''
    Performs Particle Image Velocimetry (PIV) of two images, and saves
    an image with PIV on it.
    :param frame_a: first image
    :param frame_b: consecutive image
    :param idx: index of the first frame, for saving and ordering the
        images
    :param dir_name: directory to save the image to
    :return: -

    NOTE(review): relies on module-level globals `winsize`,
    `searchsize`, `overlap`, `dt`, `scaling_factor` and `width` being
    defined elsewhere in this file — confirm before reuse.
    '''
    # single-pass extended-search-area PIV with peak-to-peak s2n
    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    # window-centre coordinates of the vector field
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)

    u1, v1, mask = validation.sig2noise_val(u0, v0,
                                            sig2noise,
                                            threshold=1.05)

    # to see where is a reasonable limit filter out
    # outliers that are very different from the neighbours
    u2, v2 = filters.replace_outliers(u1, v1,
                                      method='localmean',
                                      max_iter=3,
                                      kernel_size=3)

    # convert x,y to mm; convert u,v to mm/sec
    x, y, u3, v3 = scaling.uniform(
        x, y, u2, v2, scaling_factor=scaling_factor)  # 96.52 microns/pixel

    # 0,0 shall be bottom left, positive rotation rate is
    # counterclockwise
    x, y, u3, v3 = tools.transform_coordinates(x, y, u3, v3)

    fig, ax = plt.subplots()
    im = np.negative(frame_a)  # plot negative of the image for more clarity

    # image extent: pad by half a window (in scaled units) so vectors
    # sit at the window centres
    xmax = np.amax(x) + winsize / (2 * scaling_factor)
    ymax = np.amax(y) + winsize / (2 * scaling_factor)
    ax.imshow(im, cmap="Greys_r", extent=[0.0, xmax, 0.0, ymax])

    # draw invalid vectors in red, valid ones in blue
    invalid = mask.astype("bool")
    valid = ~invalid
    plt.quiver(x[invalid], y[invalid], u3[invalid], v3[invalid],
               color="r", width=width)
    plt.quiver(x[valid], y[valid], u3[valid], v3[valid],
               color="b", width=width)

    ax.set_aspect(1.)
    plt.title(r'Velocity Vectors Field (Frame #%d) '
              r'$(\frac{\mu m}{hour})$' % idx)
    plt.savefig(dir_name + "/" + "vec_page%d.png" % idx, dpi=200)
    plt.show()
    plt.close()