def openpiv_default_run(im1, im2):
    """Default settings for an OpenPIV analysis of two images using the
    extended_search_area_piv algorithm.

    Inputs:
        im1, im2 : str, str = paths of the two images
    """
    frame_a = tools.imread(im1)
    frame_b = tools.imread(im2)
    u, v, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=32,
        overlap=8,
        dt=1,
        search_area_size=64,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=32,
                                   overlap=8)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=1)
    # save and display the result using the first image path as the base name
    tools.save(x, y, u, v, mask, im1 + '.txt')
    fig, ax = tools.display_vector_field(im1 + '.txt',
                                         on_img=True,
                                         image_name=im1,
                                         scaling_factor=1,
                                         ax=None)
def run_piv(
    frame_a,
    frame_b,
):
    winsize = 64  # pixels, interrogation window size in frame A
    searchsize = 68  # pixels, search area size in frame B
    overlap = 32  # pixels, 50% overlap
    dt = 0.0005  # sec, time interval between pulses

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     window_size=winsize,
                                     overlap=overlap)

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)

    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=3)

    x, y, u3, v3 = scaling.uniform(x, y, u2, v2,
                                   scaling_factor=41.22)  # 41.22 microns/pixel

    mean_u = np.mean(u3)
    mean_v = np.mean(v3)

    deficit_u = u3 - mean_u
    deficit_v = v3 - mean_v

    u_prime = np.mean(np.sqrt(0.5 * (deficit_u**2 + deficit_v**2)))
    u_avg = np.mean(np.sqrt(0.5 * (mean_u**2 + mean_v**2)))

    turbulence_intensity = u_prime / u_avg

    # save in the simple ASCII table format
    fname = "./Tables/" + exp_string + ".txt"
    # tools.save(x, y, u3, v3, mask, fname)
    out = np.vstack([m.ravel() for m in [x, y, u3, v3]])
    # print(out)
    # np.savetxt(fname,out.T)
    with open(fname, "ab") as f:
        f.write(b"\n")
        np.savetxt(f, out.T)

    return turbulence_intensity
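# A minimal usage sketch for run_piv() above, assuming openpiv.tools is
# imported for reading the frames; the tif file names are hypothetical, and
# the function itself expects a module-level exp_string and an existing
# ./Tables directory before it writes its ASCII table.
frame_a = tools.imread('frame_0001.tif')
frame_b = tools.imread('frame_0002.tif')
ti = run_piv(frame_a, frame_b)
print('turbulence intensity: %.3f' % ti)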
def two_images(image_1,
               image_2,
               search_area_size=64,
               window_size=32,
               overlap=16,
               dt=0.02):
    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))
    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a = tools.imread('image_1.bmp')
    frame_b = tools.imread('image_2.bmp')
    frame_a = (frame_a * 1024).astype(np.int32)
    frame_b = (frame_b * 1024).astype(np.int32)

    if not search_area_size:
        search_area_size = 64
    if not window_size:
        window_size = 32
    if not overlap:
        overlap = 16
    if not dt:
        dt = 0.02

    u, v, sig2noise = process.extended_search_area_piv(
        frame_a,
        frame_b,
        window_size=window_size,
        overlap=overlap,
        dt=dt,
        search_area_size=search_area_size,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=window_size,
                                   overlap=overlap)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v, mask = validation.global_val(u, v, (-1000, 2000), (-1000, 1000))
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

    file_name_text = 'result.txt'
    file_name_png = 'result.png'
    if os.path.isfile(file_name_text):
        os.remove(file_name_text)
    if os.path.isfile(file_name_png):
        os.remove(file_name_png)

    tools.save(x, y, u, v, mask, file_name_text)

    a = np.loadtxt(file_name_text)
    fig = plt.figure()
    invalid = a[:, 4].astype('bool')
    fig.canvas.set_window_title('Vector field, ' +
                                str(np.count_nonzero(invalid)) +
                                ' wrong vectors')
    valid = ~invalid
    plt.quiver(a[invalid, 0], a[invalid, 1], a[invalid, 2], a[invalid, 3],
               color='r', scale=100, width=0.0025)
    plt.quiver(a[valid, 0], a[valid, 1], a[valid, 2], a[valid, 3],
               color='b', scale=100, width=0.0025)
    plt.draw()
    plt.savefig(file_name_png, format="png")

    with open(file_name_text, "rb") as resultFileText:
        file_reader_text = resultFileText.read()
        # base64.encodestring was removed in Python 3.9; encodebytes is the
        # drop-in replacement
        text_encode = base64.encodebytes(file_reader_text)
        base64_string_text = str(text_encode, 'utf-8')

    with open(file_name_png, "rb") as resultFilePng:
        file_reader_image = resultFilePng.read()
        image_encode = base64.encodebytes(file_reader_image)
        base64_string_image = str(image_encode, 'utf-8')

    return base64_string_text, base64_string_image
def analyzer(frame_a, frame_b, text, plot, num_scene, pathout, scal, zre, xre,
             dt):
    winsize = 16  # pixels
    searchsize = 32  # pixels, search in image b
    overlap = 8  # pixels

    frame_a = cv2.adaptiveThreshold(frame_a, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    frame_b = cv2.adaptiveThreshold(frame_b, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 5, 5)
    #frame_a = cv2.adaptiveThreshold(frame_a,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
    #frame_b = cv2.adaptiveThreshold(frame_b,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)

    plt.imshow(np.c_[frame_a, frame_b], cmap='gray')
    plt.savefig(pathout + '/filtered' + str(num_scene) + '.png', dpi=800)

    u0, v0, sig2noise = process.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = process.get_coordinates(image_size=frame_a.shape,
                                   window_size=winsize,
                                   overlap=overlap)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=2)
    x, y, u3, v3 = scaling.uniform(
        x, y, u2, v2, scaling_factor=scal)  # scaling_factor (pixel per meter)

    u3 = np.flip(u3, axis=0)
    v3 = np.flip(v3, axis=0)

    xre = np.linspace(0, xre / 100, len(x[0, :]))
    zre = np.linspace(0, zre / 100, len(x[:, 0]))

    if plot == 1:
        piv_plotting(xre, zre, u3, v3, num_scene, pathout)

    if text == 0:
        tools.save(x, y, u3, v3, mask,
                   pathout + '/piv' + str(num_scene) + '.txt')
def two_images(image_1, image_2):
    with open("image_1.bmp", "wb") as fh1:
        fh1.write(base64.b64decode(image_1))
    with open("image_2.bmp", "wb") as fh2:
        fh2.write(base64.b64decode(image_2))

    frame_a = tools.imread('image_1.bmp')
    frame_b = tools.imread('image_2.bmp')

    winsize = 32  # pixels
    searchsize = 64  # pixels, search in image B
    overlap = 12  # pixels
    dt = 0.02  # sec

    # use pyprocess.extended_search_area_piv with an explicit search_area_size
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=searchsize,
                                     overlap=overlap)
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
    u, v = filters.replace_outliers(u,
                                    v,
                                    method='localmean',
                                    max_iter=10,
                                    kernel_size=2)
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)

    file_name = 'result.txt'
    if os.path.isfile(file_name):
        os.remove(file_name)

    tools.save(x, y, u, v, np.zeros_like(u),
               file_name)  # no masking, all values are valid

    with open(file_name, "rb") as resultFile:
        file_reader = resultFile.read()
        # encodebytes replaces the removed base64.encodestring
        image_encode = base64.encodebytes(file_reader)
        base64_string = str(image_encode, 'utf-8')
    return base64_string
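# A minimal round-trip sketch for two_images() above: encode a local image
# pair to base64, run the analysis and decode the returned result table.
# The bitmap file names are hypothetical placeholders.
with open('exp1_001_a.bmp', 'rb') as f:
    img1_b64 = base64.b64encode(f.read())
with open('exp1_001_b.bmp', 'rb') as f:
    img2_b64 = base64.b64encode(f.read())
result_b64 = two_images(img1_b64, img2_b64)
result_txt = base64.b64decode(result_b64).decode('utf-8')
print(result_txt.splitlines()[0])  # first row of the saved vector field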
def PIV(image_0, image_1, winsize, searchsize, overlap, frame_rate,
        scaling_factor):
    frame_0 = image_0  # [0:600, :]
    frame_1 = image_1  # [0:600, :]

    # time interval between the two frames (1 / frame rate)
    dt = 1. / frame_rate

    # Processing the images with interrogation area and search area /
    # cross-correlation algorithm
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_0,
        frame_1,
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    # Compute the coordinates of the centers of the interrogation windows
    x, y = pyprocess.get_coordinates(image_size=frame_0.shape,
                                     window_size=winsize,
                                     overlap=overlap)

    # This function sets to NaN all vectors for which the signal-to-noise
    # ratio is below the threshold. mask is a True/False array marking the
    # elements that were replaced by NaN.
    u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.5)

    # Remove outliers deviating by more than twice the standard deviation
    # (remove_outliers is a helper defined elsewhere in this project)
    u, v, mask = remove_outliers(u, v, mask)

    # Replacing the outliers with interpolation
    # u, v = filters.replace_outliers(u,
    #                                 v,
    #                                 method='nan',
    #                                 max_iter=50,
    #                                 kernel_size=3)

    # Apply a uniform scaling to the flow field to get dimensional units
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=scaling_factor)

    return x, y, u, v, mask
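# A minimal usage sketch for PIV() above, assuming openpiv.tools and numpy
# (as np) are imported and that the remove_outliers helper used inside PIV()
# is available; the frame file names are hypothetical placeholders.
frame_0 = tools.imread('frame_000.tif').astype(np.int32)
frame_1 = tools.imread('frame_001.tif').astype(np.int32)
x, y, u, v, mask = PIV(frame_0, frame_1,
                       winsize=32, searchsize=64, overlap=16,
                       frame_rate=25, scaling_factor=96.52)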
def ProcessPIV(args, bga, bgb, reflection, stg):
    # read images into numpy arrays
    file_a, file_b, counter = args
    frame_a = tools.imread(file_a)
    frame_b = tools.imread(file_b)

    # removing background and reflections
    if bgb is not None:
        frame_a = frame_a - bga
        frame_b = frame_b - bgb
        frame_a[reflection == 255] = 0
        frame_b[reflection == 255] = 0
    #plt.imshow(frame_a, cmap='gray')
    #plt.show()

    # main piv processing
    u, v, s2n = pyprocess.extended_search_area_piv(
        frame_a,
        frame_b,
        window_size=stg['WS'],
        overlap=stg['OL'],
        dt=stg['DT'],
        search_area_size=stg['SA'],
        sig2noise_method='peak2peak')
    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     window_size=stg['WS'],
                                     overlap=stg['OL'])

    if stg['BVR'] == 'on':
        u, v, mask = validation.local_median_val(u,
                                                 v,
                                                 stg['MF'][0],
                                                 stg['MF'][1],
                                                 size=2)
        u, v, mask = validation.global_val(u,
                                           v,
                                           u_thresholds=stg['GF'][0],
                                           v_thresholds=stg['GF'][1])
        u, v = filters.replace_outliers(u,
                                        v,
                                        method='localmean',
                                        max_iter=10,
                                        kernel_size=2)
        u, *_ = smoothn(u, s=0.5)
        v, *_ = smoothn(v, s=0.5)
    x, y, u, v = scaling.uniform(x, y, u, v, stg['SC'])

    # saving the results
    save_file = tools.create_path(file_a, 'Analysis')
    tools.save(x, y, u, v, s2n, save_file + '.dat')
def run_single(index, scale=1, src_dir=None, save_dir=None):
    frame_a = tools.imread(os.path.join(src_dir, f'{index:06}.tif'))
    frame_b = tools.imread(os.path.join(src_dir, f'{index + 1:06}.tif'))

    # no background removal will be performed so 'mask' is initialized to 1 everywhere
    mask = np.ones(frame_a.shape, dtype=np.int32)

    # main algorithm
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        x, y, u, v, mask = process.WiDIM(frame_a.astype(np.int32),
                                         frame_b.astype(np.int32),
                                         mask,
                                         min_window_size=MIN_WINDOW_SIZE,
                                         overlap_ratio=0.0,
                                         coarse_factor=2,
                                         dt=DT,
                                         validation_method='mean_velocity',
                                         trust_1st_iter=1,
                                         validation_iter=1,
                                         tolerance=0.4,
                                         nb_iter_max=3,
                                         sig2noise_method='peak2peak')

    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=SCALING_FACTOR)

    tmp_fname = '.tmp_' + ''.join(
        random.choices(string.ascii_uppercase + string.digits, k=32))
    tools.save(x, y, u, v, mask, filename=tmp_fname)
    # scale: vector length ratio; width: line width of vector arrows
    tools.display_vector_field(tmp_fname, scale=scale, width=LINE_WIDTH)
    os.remove(tmp_fname)

    # plt.quiver(x, y, u3, v3, color='blue')
    if save_dir is not None:
        save_path = os.path.join(save_dir, f'{index:06}.pdf')
        print(save_path)
        plt.savefig(save_path)
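# A minimal batch sketch for run_single() above; the directory names and the
# index range are hypothetical, and MIN_WINDOW_SIZE, DT, SCALING_FACTOR and
# LINE_WIDTH are assumed to be defined at module level.
for idx in range(10):
    run_single(idx, scale=2, src_dir='raw_frames', save_dir='piv_results')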
ax[0].imshow(frame_a, cmap=plt.cm.gray)
ax[1].imshow(frame_b, cmap=plt.cm.gray)

# %%
winsize = 24  # pixels
searchsize = 64  # pixels, search in image B
overlap = 12  # pixels
dt = 0.02  # sec

u0, v0, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=winsize,
    overlap=overlap,
    dt=dt,
    search_area_size=searchsize,
    sig2noise_method='peak2peak')

# %%
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=winsize,
                               overlap=overlap)

# %%
u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.3)

# %%
u2, v2 = filters.replace_outliers(u1, v1, method='localmean', max_iter=10,
                                  kernel_size=2)

# %%
x, y, u3, v3 = scaling.uniform(x, y, u2, v2, scaling_factor=96.52)

# %%
tools.save(x, y, u3, v3, mask, 'exp1_001.txt')

# %%
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)
def run_piv(
        frame_a,
        frame_b,
        winsize=16,  # pixels, interrogation window size in frame A
        searchsize=20,  # pixels, search in image B
        overlap=8,  # pixels, 50% overlap
        dt=0.0001,  # sec, time interval between pulses
        image_check=False,
        show_vertical_profiles=False,
        figure_export_name='_results.png',
        text_export_name="_results.txt",
        scale_factor=1,
        pixel_density=36.74,
        arrow_width=0.001,
        show_result=True,
        u_bounds=(-100, 100),
        v_bounds=(-100, 100)):

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)

    x, y, u0, v0 = scaling.uniform(
        x, y, u0, v0, scaling_factor=pixel_density)  # no. pixel per distance

    u0, v0, mask = validation.global_val(u0, v0, u_bounds, v_bounds)
    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)
    u3, v3 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=10,
                                      kernel_size=3)

    # save in the simple ASCII table format
    if np.std(u3) < 480:
        tools.save(x, y, u3, v3, sig2noise, mask, text_export_name)

    if image_check == True:
        fig, ax = plt.subplots(2, 1, figsize=(24, 12))
        ax[0].imshow(frame_a)
        ax[1].imshow(frame_b)

    io.imwrite(figure_export_name, frame_a)

    if show_result == True:
        fig, ax = plt.subplots(figsize=(24, 12))
        tools.display_vector_field(
            text_export_name,
            ax=ax,
            scaling_factor=pixel_density,
            scale=scale_factor,  # scale defines here the arrow length
            width=arrow_width,  # width is the thickness of the arrow
            on_img=True,  # overlay on the image
            image_name=figure_export_name)
        fig.savefig(figure_export_name)

    if show_vertical_profiles:
        field_shape = pyprocess.get_field_shape(image_size=frame_a.shape,
                                                search_area_size=searchsize,
                                                overlap=overlap)
        vertical_profiles(text_export_name, field_shape)

    print('Std of u3: %.3f' % np.std(u3))
    print('Mean of u3: %.3f' % np.mean(u3))

    return np.std(u3)
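# A minimal usage sketch for run_piv() above; the image file names are
# hypothetical placeholders, and openpiv.tools is assumed to be imported for
# reading the frames.
frame_a = tools.imread('pulse_a.tif')
frame_b = tools.imread('pulse_b.tif')
u_std = run_piv(frame_a, frame_b,
                text_export_name='pulse_results.txt',
                figure_export_name='pulse_results.png',
                show_result=False)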
    frame_b.astype(np.int32),
    window_size=32,
    overlap=8,
    dt=.1,
    sig2noise_method='peak2peak')

x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=32,
                               overlap=8)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=1.3)
u, v = filters.replace_outliers(u, v, method='localmean', max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'Y4-S3_Camera000398_a.txt')

# %%
# Use Python version, pyprocess:
u, v, sig2noise = pyprocess.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=32,
    overlap=8,
    dt=.1,
    sig2noise_method='peak2peak')

x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=32,
if 'OpenPIV' not in sys.path:
    sys.path.append('/Users/alex/Documents/OpenPIV/alexlib/openpiv-python')

from openpiv import tools, validation, process, filters, scaling, pyprocess
import numpy as np

frame_a = tools.imread('exp1_001_a.bmp')
frame_b = tools.imread('exp1_001_b.bmp')

u, v, sig2noise = process.extended_search_area_piv(
    frame_a.astype(np.int32),
    frame_b.astype(np.int32),
    window_size=24,
    overlap=12,
    dt=0.02,
    search_area_size=64,
    sig2noise_method='peak2peak')
x, y = process.get_coordinates(image_size=frame_a.shape,
                               window_size=24,
                               overlap=12)
u, v, mask = validation.sig2noise_val(u, v, sig2noise, threshold=2.5)
u, v = filters.replace_outliers(u, v, method='localmean', max_iter=10,
                                kernel_size=2)
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_001.txt')
tools.display_vector_field('exp1_001.txt', scale=100, width=0.0025)

u, v, s2n = pyprocess.piv(frame_a,
                          frame_b,
                          corr_method='fft',
                          window_size=24,
                          overlap=12,
                          dt=0.02,
                          sig2noise_method='peak2peak')
x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                 window_size=24,
                                 overlap=12)
u, v, mask = validation.sig2noise_val(u, v, s2n, threshold=2.5)
u, v = filters.replace_outliers(u, v, method='localmean', max_iter=10,
                                kernel_size=2)  # kernel_size must be an integer
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=96.52)
tools.save(x, y, u, v, mask, 'exp1_002.txt')
tools.display_vector_field('exp1_002.txt', scale=100, width=0.0025)
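# A short post-processing sketch: read back the ASCII field saved above and
# compute the mean displacement magnitude. The column order (x, y, u, v, mask)
# follows the tools.save call used to write 'exp1_001.txt'.
data = np.loadtxt('exp1_001.txt')
speed = np.sqrt(data[:, 2]**2 + data[:, 3]**2)
print('mean velocity magnitude: %.3f' % np.mean(speed))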
def quick_piv(self, search_dict, index_a=100, index_b=101, folder=None):
    self.show_piv_param()
    ns = Namespace(**self.piv_param)

    if folder == None:
        img_a, img_b = self.read_two_images(search_dict,
                                            index_a=index_a,
                                            index_b=index_b)

        location_path = [
            x['path'] for x in self.piv_dict_list
            if search_dict.items() <= x.items()
        ]
        results_path = os.path.join(self.results_path, *location_path)
        try:
            os.makedirs(results_path)
        except FileExistsError:
            pass
    else:
        try:
            file_a_path = os.path.join(self.path, folder,
                                       'frame_%06d.tiff' % index_a)
            file_b_path = os.path.join(self.path, folder,
                                       'frame_%06d.tiff' % index_b)

            img_a = np.array(Image.open(file_a_path))
            img_b = np.array(Image.open(file_b_path))
        except:
            return None

    # crop
    img_a = img_a[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]
    img_b = img_b[ns.crop[0]:-ns.crop[1] - 1, ns.crop[2]:-ns.crop[3] - 1]

    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        img_a.astype(np.int32),
        img_b.astype(np.int32),
        window_size=ns.winsize,
        overlap=ns.overlap,
        dt=ns.dt,
        search_area_size=ns.searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=img_a.shape,
                                     search_area_size=ns.searchsize,
                                     overlap=ns.overlap)

    x, y, u0, v0 = scaling.uniform(
        x, y, u0, v0,
        scaling_factor=ns.pixel_density)  # no. pixel per distance

    u0, v0, mask = validation.global_val(
        u0, v0, (ns.u_lower_bound, ns.u_upper_bound),
        (ns.v_lower_bound, ns.v_upper_bound))

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.01)

    u3, v3 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=500,
                                      kernel_size=3)

    # save in the simple ASCII table format
    tools.save(x, y, u3, v3, sig2noise, mask,
               os.path.join(results_path, ns.text_export_name))

    if ns.image_check == True:
        fig, ax = plt.subplots(2, 1, figsize=(24, 12))
        ax[0].imshow(img_a)
        ax[1].imshow(img_b)

    io.imwrite(os.path.join(results_path, ns.figure_export_name), img_a)

    if ns.show_result == True:
        fig, ax = plt.subplots(figsize=(24, 12))
        tools.display_vector_field(
            os.path.join(results_path, ns.text_export_name),
            ax=ax,
            scaling_factor=ns.pixel_density,
            scale=ns.scale_factor,  # scale defines here the arrow length
            width=ns.arrow_width,  # width is the thickness of the arrow
            on_img=True,  # overlay on the image
            image_name=os.path.join(results_path, ns.figure_export_name))
        fig.savefig(os.path.join(results_path, ns.figure_export_name))

    if ns.show_vertical_profiles:
        field_shape = pyprocess.get_field_shape(
            image_size=img_a.shape,
            search_area_size=ns.searchsize,
            overlap=ns.overlap)
        vertical_profiles(ns.text_export_name, field_shape)

    print('Mean of u: %.3f' % np.mean(u3))
    print('Std of u: %.3f' % np.std(u3))
    print('Mean of v: %.3f' % np.mean(v3))
    print('Std of v: %.3f' % np.std(v3))

    output = np.array([np.mean(u3), np.std(u3), np.mean(v3), np.std(v3)])
    # if np.absolute(np.mean(v3)) < 50:
    #     output = self.quick_piv(search_dict, index_a=index_a + 1,
    #                             index_b=index_b + 1)

    return x, y, u3, v3
def func(args):
    """A function to process each image pair."""
    # this line is REQUIRED for multiprocessing to work
    # always use it in your custom function
    file_a, file_b, counter = args
    # counter2=str(counter2)
    #####################
    # Here goes your code
    #####################

    ' read images into numpy arrays'
    frame_a = tools.imread(os.path.join(settings.filepath_images, file_a))
    frame_b = tools.imread(os.path.join(settings.filepath_images, file_b))

    ## Miguel: I just had a quick look, and I do not understand the reason for this step.
    # I propose to remove it.
    #frame_a = (frame_a*1024).astype(np.int32)
    #frame_b = (frame_b*1024).astype(np.int32)

    ' crop to ROI'
    if settings.ROI == 'full':
        frame_a = frame_a
        frame_b = frame_b
    else:
        frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]
        frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]

    if settings.dynamic_masking_method in ('edge', 'intensity'):
        frame_a = preprocess.dynamic_masking(
            frame_a,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold)
        frame_b = preprocess.dynamic_masking(
            frame_b,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold)

    '''%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'''
    'first pass'
    x, y, u, v, sig2noise_ratio = first_pass(
        frame_a,
        frame_b,
        settings.windowsizes[0],
        settings.overlap[0],
        settings.iterations,
        correlation_method=settings.correlation_method,
        subpixel_method=settings.subpixel_method,
        do_sig2noise=settings.extract_sig2noise,
        sig2noise_method=settings.sig2noise_method,
        sig2noise_mask=settings.sig2noise_mask,
    )

    'validation using global limits and std and local median'
    '''MinMaxU : two elements tuple
        sets the limits of the u displacement component
        Used for validation.
    MinMaxV : two elements tuple
        sets the limits of the v displacement component
        Used for validation.
    std_threshold : float
        sets the threshold for the std validation
    median_threshold : float
        sets the threshold for the median validation
    filter_method : string
        the method used to replace the non-valid vectors
        Methods: 'localmean', 'disk', 'distance',
    max_filter_iteration : int
        maximum of filter iterations to replace nans
    filter_kernel_size : int
        size of the kernel used for the filtering'''

    mask = np.full_like(x, False)
    if settings.validation_first_pass == True:
        u, v, mask_g = validation.global_val(u, v, settings.MinMax_U_disp,
                                             settings.MinMax_V_disp)
        u, v, mask_s = validation.global_std(
            u, v, std_threshold=settings.std_threshold)
        u, v, mask_m = validation.local_median_val(
            u,
            v,
            u_threshold=settings.median_threshold,
            v_threshold=settings.median_threshold,
            size=settings.median_size)
        if settings.extract_sig2noise == True and settings.iterations == 1 and settings.do_sig2noise_validation == True:
            u, v, mask_s2n = validation.sig2noise_val(
                u, v, sig2noise_ratio, threshold=settings.sig2noise_threshold)
            mask = mask + mask_g + mask_m + mask_s + mask_s2n
        else:
            mask = mask + mask_g + mask_m + mask_s

    'filter to replace the values that were marked by the validation'
    if settings.iterations > 1:
        u, v = filters.replace_outliers(
            u,
            v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size)
        'adding masks to add the effect of all the validations'
        if settings.smoothn == True:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)
    elif settings.iterations == 1 and settings.replace_vectors == True:
        u, v = filters.replace_outliers(
            u,
            v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size)
        'adding masks to add the effect of all the validations'
        if settings.smoothn == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

    i = 1
    'all the following passes'
    for i in range(2, settings.iterations + 1):
        x, y, u, v, sig2noise_ratio, mask = multipass_img_deform(
            frame_a,
            frame_b,
            settings.windowsizes[i - 1],
            settings.overlap[i - 1],
            settings.iterations,
            i,
            x,
            y,
            u,
            v,
            correlation_method=settings.correlation_method,
            subpixel_method=settings.subpixel_method,
            do_sig2noise=settings.extract_sig2noise,
            sig2noise_method=settings.sig2noise_method,
            sig2noise_mask=settings.sig2noise_mask,
            MinMaxU=settings.MinMax_U_disp,
            MinMaxV=settings.MinMax_V_disp,
            std_threshold=settings.std_threshold,
            median_threshold=settings.median_threshold,
            median_size=settings.median_size,
            filter_method=settings.filter_method,
            max_filter_iteration=settings.max_filter_iteration,
            filter_kernel_size=settings.filter_kernel_size,
            interpolation_order=settings.interpolation_order)
        # If the smoothing is active, we do it at each pass
        if settings.smoothn == True:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

        '''%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'''
        if settings.extract_sig2noise == True and i == settings.iterations and settings.iterations != 1 and settings.do_sig2noise_validation == True:
            u, v, mask_s2n = validation.sig2noise_val(
                u, v, sig2noise_ratio, threshold=settings.sig2noise_threshold)
            mask = mask + mask_s2n
        if settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)

    'pixel/frame->pixel/sec'
    u = u / settings.dt
    v = v / settings.dt
    'scales the results pixel-> meter'
    x, y, u, v = scaling.uniform(x, y, u, v,
                                 scaling_factor=settings.scaling_factor)
    'save to a file'
    save(x, y, u, v, sig2noise_ratio, mask,
         os.path.join(save_path, 'field_A%03d.txt' % counter),
         delimiter='\t')
    'some messages to check if it is still alive'

    'some other stuff that one might want to use'
    if settings.show_plot == True or settings.save_plot == True:
        plt.close('all')
        plt.ioff()
        Name = os.path.join(save_path, 'Image_A%03d.png' % counter)
        display_vector_field(os.path.join(save_path,
                                          'field_A%03d.txt' % counter),
                             scale=settings.scale_plot)
        if settings.save_plot == True:
            plt.savefig(Name)
        if settings.show_plot == True:
            plt.show()

    print('Image Pair ' + str(counter + 1))
def func(args):
    """A function to process each image pair."""
    # this line is REQUIRED for multiprocessing to work
    # always use it in your custom function
    file_a, file_b, counter = args
    # counter2=str(counter2)
    #####################
    # Here goes your code
    #####################

    " read images into numpy arrays"
    frame_a = imread(os.path.join(settings.filepath_images, file_a))
    frame_b = imread(os.path.join(settings.filepath_images, file_b))

    # Miguel: I just had a quick look, and I do not understand the reason
    # for this step.
    # I propose to remove it.
    # frame_a = (frame_a*1024).astype(np.int32)
    # frame_b = (frame_b*1024).astype(np.int32)

    " crop to ROI"
    if settings.ROI == "full":
        frame_a = frame_a
        frame_b = frame_b
    else:
        frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]
        frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]

    if settings.invert is True:
        frame_a = invert(frame_a)
        frame_b = invert(frame_b)

    if settings.show_all_plots:
        fig, ax = plt.subplots(1, 1)
        ax.imshow(frame_a, cmap=plt.get_cmap('Reds'))
        ax.imshow(frame_b, cmap=plt.get_cmap('Blues'), alpha=.5)
        plt.show()

    if settings.dynamic_masking_method in ("edge", "intensity"):
        frame_a, mask_a = preprocess.dynamic_masking(
            frame_a,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold,
        )
        frame_b, mask_b = preprocess.dynamic_masking(
            frame_b,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold,
        )

    # "first pass"
    x, y, u, v, s2n = first_pass(frame_a, frame_b, settings)

    if settings.show_all_plots:
        plt.figure()
        plt.quiver(x, y, u, -v, color='b')
        # plt.gca().invert_yaxis()
        # plt.gca().set_aspect(1.)
        # plt.title('after first pass, invert')
        # plt.show()

    # " Image masking "
    if settings.image_mask:
        image_mask = np.logical_and(mask_a, mask_b)
        mask_coords = preprocess.mask_coordinates(image_mask)
        # mark those points on the grid of PIV inside the mask
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)

        # mask the velocity
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        mask_coords = []
        u = np.ma.masked_array(u, mask=np.ma.nomask)
        v = np.ma.masked_array(v, mask=np.ma.nomask)

    if settings.validation_first_pass:
        u, v, mask = validation.typical_validation(u, v, s2n, settings)

    if settings.show_all_plots:
        # plt.figure()
        plt.quiver(x, y, u, -v, color='r')
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title('after first pass validation new, inverted')
        plt.show()

    # "filter to replace the values that were marked by the validation"
    if settings.num_iterations == 1 and settings.replace_vectors:
        # for multi-pass we cannot have holes in the data
        # after the first pass
        u, v = filters.replace_outliers(
            u,
            v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size,
        )
    # don't even check if it's true or false
    elif settings.num_iterations > 1:
        u, v = filters.replace_outliers(
            u,
            v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size,
        )

    # "adding masks to add the effect of all the validations"
    if settings.smoothn:
        u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
            u, s=settings.smoothn_p)
        v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
            v, s=settings.smoothn_p)

    if settings.image_mask:
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        u = np.ma.masked_array(u, np.ma.nomask)
        v = np.ma.masked_array(v, np.ma.nomask)

    if settings.show_all_plots:
        plt.figure()
        plt.quiver(x, y, u, -v)
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title('before multi pass, inverted')
        plt.show()

    if not isinstance(u, np.ma.MaskedArray):
        raise ValueError("Expected masked array")

    """ Multi pass """
    for i in range(1, settings.num_iterations):
        if not isinstance(u, np.ma.MaskedArray):
            raise ValueError("Expected masked array")

        x, y, u, v, s2n, mask = multipass_img_deform(
            frame_a,
            frame_b,
            i,
            x,
            y,
            u,
            v,
            settings,
            mask_coords=mask_coords)

        # If the smoothing is active, we do it at each pass
        # but not the last one
        if settings.smoothn is True and i < settings.num_iterations - 1:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

        if not isinstance(u, np.ma.MaskedArray):
            raise ValueError('not a masked array anymore')

        if hasattr(settings, 'image_mask') and settings.image_mask:
            grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
            u = np.ma.masked_array(u, mask=grid_mask)
            v = np.ma.masked_array(v, mask=grid_mask)
        else:
            u = np.ma.masked_array(u, np.ma.nomask)
            v = np.ma.masked_array(v, np.ma.nomask)

        if settings.show_all_plots:
            plt.figure()
            plt.quiver(x, y, u, -1 * v, color='r')
            plt.gca().set_aspect(1.)
            plt.gca().invert_yaxis()
            plt.title('end of the multipass, invert')
            plt.show()

    if settings.show_all_plots and settings.num_iterations > 1:
        plt.figure()
        plt.quiver(x, y, u, -v)
        plt.gca().invert_yaxis()
        plt.gca().set_aspect(1.)
        plt.title('after multi pass, before saving, inverted')
        plt.show()

    # we now use only 0s instead of the image
    # masked regions.
    # we could do Nan, not sure what is best
    u = u.filled(0.)
    v = v.filled(0.)

    # "scales the results pixel-> meter"
    x, y, u, v = scaling.uniform(x, y, u, v,
                                 scaling_factor=settings.scaling_factor)

    if settings.image_mask:
        grid_mask = preprocess.prepare_mask_on_grid(x, y, mask_coords)
        u = np.ma.masked_array(u, mask=grid_mask)
        v = np.ma.masked_array(v, mask=grid_mask)
    else:
        u = np.ma.masked_array(u, np.ma.nomask)
        v = np.ma.masked_array(v, np.ma.nomask)

    # before saving we convert to the "physically relevant"
    # right-hand coordinate system with 0,0 at the bottom left
    # x to the right, y upwards
    # and so u,v
    x, y, u, v = transform_coordinates(x, y, u, v)
    # import pdb; pdb.set_trace()
    # "save to a file"
    tools.save(x, y, u, v, mask,
               os.path.join(save_path, "field_A%03d.txt" % counter),
               delimiter="\t")
    # "some other stuff that one might want to use"
    if settings.show_plot or settings.save_plot:
        Name = os.path.join(save_path, "Image_A%03d.png" % counter)
        fig, _ = display_vector_field(
            os.path.join(save_path, "field_A%03d.txt" % counter),
            scale=settings.scale_plot,
        )
        if settings.save_plot is True:
            fig.savefig(Name)
        if settings.show_plot is True:
            plt.show()

    print(f"Image Pair {counter + 1}")
    print(file_a.rsplit('/')[-1], file_b.rsplit('/')[-1])
scaling_factor = 100

# we can run it from any folder
path = os.path.dirname(os.path.abspath(__file__))
frame_a = tools.imread(os.path.join(path, '../test2/2image_00.tif'))
frame_b = tools.imread(os.path.join(path, '../test2/2image_01.tif'))

# no background removal will be performed so 'mark' is initialized to 1 everywhere
mark = np.ones(frame_a.shape, dtype=np.int32)

# main algorithm
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    x, y, u, v, mask = process.WiDIM(frame_a.astype(np.int32),
                                     frame_b.astype(np.int32),
                                     mark,
                                     min_window_size=16,
                                     overlap_ratio=0.0,
                                     coarse_factor=2,
                                     dt=0.02,
                                     validation_method='mean_velocity',
                                     trust_1st_iter=1,
                                     validation_iter=1,
                                     tolerance=0.7,
                                     nb_iter_max=3,
                                     sig2noise_method='peak2peak')

# display results
x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=scaling_factor)
tools.save(x, y, u, v, mask, '2image_00.txt')
tools.display_vector_field('2image_00.txt',
                           on_img=True,
                           image_name=os.path.join(path,
                                                   '../test2/2image_00.tif'),
                           window_size=16,
                           scaling_factor=scaling_factor,
                           scale=200,
                           width=0.001)

# further validation can be performed to eliminate the few remaining wrong vectors
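# As noted above, further validation can be applied to remove the few
# remaining wrong vectors. A minimal sketch using a global threshold,
# assuming openpiv.validation and openpiv.filters are imported; the velocity
# bounds are purely illustrative values.
u, v, wrong = validation.global_val(u, v, (-10., 10.), (-10., 10.))
u, v = filters.replace_outliers(u, v, method='localmean', max_iter=10,
                                kernel_size=2)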
def process(self, args):
    """
    Process chain as configured in the GUI.

    Parameters
    ----------
    args : tuple
        Tuple as expected by the inherited run method:
        file_a (str) -- image file a
        file_b (str) -- image file b
        counter (int) -- index pointing to an element of the filename list
    """
    file_a, file_b, counter = args
    frame_a = piv_tls.imread(file_a)
    frame_b = piv_tls.imread(file_b)

    # Smoothing script borrowed from openpiv.windef
    s = self.p['smoothn_val']

    def smoothn(u, s):
        s = s
        u, _, _, _ = piv_smt.smoothn(u, s=s, isrobust=self.p['robust'])
        return (u)

    # delimiters placed here for safety
    delimiter = self.p['separator']
    if delimiter == 'tab':
        delimiter = '\t'
    if delimiter == 'space':
        delimiter = ' '

    # preprocessing
    print('\nPre-processing image pair: {}'.format(counter + 1))
    if self.p['background_subtract'] \
            and self.p['background_type'] == 'minA - minB':
        self.background = gen_background(self.p, frame_a, frame_b)

    frame_a = frame_a.astype(np.int32)
    frame_a = process_images(self,
                             frame_a,
                             self.GUI.preprocessing_methods,
                             background=self.background)
    frame_b = frame_b.astype(np.int32)
    frame_b = process_images(self,
                             frame_b,
                             self.GUI.preprocessing_methods,
                             background=self.background)

    print('Evaluating image pair: {}'.format(counter + 1))

    # evaluation first pass
    start = time.time()
    passes = 1
    # setup custom windowing if selected
    if self.parameter['custom_windowing']:
        corr_window_0 = self.parameter['corr_window_1']
        overlap_0 = self.parameter['overlap_1']
        for i in range(2, 8):
            if self.parameter['pass_%1d' % i]:
                passes += 1
            else:
                break
    else:
        passes = self.parameter['coarse_factor']
        if self.parameter['grid_refinement'] == 'all passes' \
                and self.parameter['coarse_factor'] != 1:
            corr_window_0 = self.parameter['corr_window'] * \
                2**(self.parameter['coarse_factor'] - 1)
            overlap_0 = self.parameter['overlap'] * \
                2**(self.parameter['coarse_factor'] - 1)
        # Refine all passes after first when there are more than 1 pass.
        elif self.parameter['grid_refinement'] == '2nd pass on' \
                and self.parameter['coarse_factor'] != 1:
            corr_window_0 = self.parameter['corr_window'] * \
                2**(self.parameter['coarse_factor'] - 2)
            overlap_0 = self.parameter['overlap'] * \
                2**(self.parameter['coarse_factor'] - 2)
        # If >>none<< is selected or something goes wrong, the window
        # size would remain the same.
        else:
            corr_window_0 = self.parameter['corr_window']
            overlap_0 = self.parameter['overlap']

    overlap_percent = overlap_0 / corr_window_0
    sizeX = corr_window_0

    u, v, sig2noise = piv_wdf.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=corr_window_0,
        overlap=overlap_0,
        search_area_size=corr_window_0,
        width=self.parameter['s2n_mask'],
        subpixel_method=self.parameter['subpixel_method'],
        sig2noise_method=self.parameter['sig2noise_method'],
        correlation_method=self.parameter['corr_method'],
        normalized_correlation=self.parameter['normalize_correlation'])

    x, y = piv_prc.get_coordinates(frame_a.shape, corr_window_0, overlap_0)

    # validating first pass
    mask = np.full_like(x, 0)
    if self.parameter['fp_vld_global_threshold']:
        u, v, Mask = piv_vld.global_val(
            u,
            v,
            u_thresholds=(self.parameter['fp_MinU'],
                          self.parameter['fp_MaxU']),
            v_thresholds=(self.parameter['fp_MinV'],
                          self.parameter['fp_MaxV']))
        # consolidate effects of mask
        mask += Mask

    if self.parameter['fp_local_med']:
        u, v, Mask = piv_vld.local_median_val(
            u,
            v,
            u_threshold=self.parameter['fp_local_med'],
            v_threshold=self.parameter['fp_local_med'],
            size=self.parameter['fp_local_med_size'])
        mask += Mask

    if self.parameter['adv_repl']:
        u, v = piv_flt.replace_outliers(
            u,
            v,
            method=self.parameter['adv_repl_method'],
            max_iter=self.parameter['adv_repl_iter'],
            kernel_size=self.parameter['adv_repl_kernel'])
    print('Validated first pass result of image pair: {}.'.format(counter + 1))

    # smoothing before deformation if 'each pass' is selected
    if self.parameter['smoothn_each_pass']:
        if self.parameter['smoothn_first_more']:
            s *= 2
        u = smoothn(u, s)
        v = smoothn(v, s)
        print('Smoothed pass 1 for image pair: {}.'.format(counter + 1))
        s = self.parameter['smoothn_val1']

    print('Finished pass 1 for image pair: {}.'.format(counter + 1))
    print("window size: " + str(corr_window_0))
    print('overlap: ' + str(overlap_0), '\n')

    # evaluation of all other passes
    if passes != 1:
        iterations = passes - 1
        for i in range(2, passes + 1):
            # setting up the windowing of each pass
            if self.parameter['custom_windowing']:
                corr_window = self.parameter['corr_window_%1d' % i]
                overlap = int(corr_window * overlap_percent)
            else:
                if self.parameter['grid_refinement'] == 'all passes' or \
                        self.parameter['grid_refinement'] == '2nd pass on':
                    corr_window = self.parameter['corr_window'] * \
                        2**(iterations - 1)
                    overlap = self.parameter['overlap'] * \
                        2**(iterations - 1)
                else:
                    corr_window = self.parameter['corr_window']
                    overlap = self.parameter['overlap']
            sizeX = corr_window

            # translate settings to windef settings object
            piv_wdf_settings = piv_wdf.Settings()
            piv_wdf_settings.correlation_method = \
                self.parameter['corr_method']
            piv_wdf_settings.normalized_correlation = \
                self.parameter['normalize_correlation']
            piv_wdf_settings.windowsizes = (corr_window, ) * (passes + 1)
            piv_wdf_settings.overlap = (overlap, ) * (passes + 1)
            piv_wdf_settings.num_iterations = passes
            piv_wdf_settings.subpixel_method = \
                self.parameter['subpixel_method']
            piv_wdf_settings.deformation_method = \
                self.parameter['deformation_method']
            piv_wdf_settings.interpolation_order = \
                self.parameter['interpolation_order']
            piv_wdf_settings.sig2noise_validate = True,
            piv_wdf_settings.sig2noise_method = \
                self.parameter['sig2noise_method']
            piv_wdf_settings.sig2noise_mask = self.parameter['s2n_mask']

            # do the correlation
            x, y, u, v, sig2noise, mask = piv_wdf.multipass_img_deform(
                frame_a.astype(np.int32),
                frame_b.astype(np.int32),
                i,  # current iteration
                x,
                y,
                u,
                v,
                piv_wdf_settings)

            # validate other passes
            if self.parameter['sp_vld_global_threshold']:
                u, v, Mask = piv_vld.global_val(
                    u,
                    v,
                    u_thresholds=(self.parameter['sp_MinU'],
                                  self.parameter['sp_MaxU']),
                    v_thresholds=(self.parameter['sp_MinV'],
                                  self.parameter['sp_MaxV']))
                mask += Mask  # consolidate effects of mask

            if self.parameter['sp_vld_global_threshold']:
                u, v, Mask = piv_vld.global_std(
                    u, v, std_threshold=self.parameter['sp_std_threshold'])
                mask += Mask

            if self.parameter['sp_local_med_validation']:
                u, v, Mask = piv_vld.local_median_val(
                    u,
                    v,
                    u_threshold=self.parameter['sp_local_med'],
                    v_threshold=self.parameter['sp_local_med'],
                    size=self.parameter['sp_local_med_size'])
                mask += Mask

            if self.parameter['adv_repl']:
                u, v = piv_flt.replace_outliers(
                    u,
                    v,
                    method=self.parameter['adv_repl_method'],
                    max_iter=self.parameter['adv_repl_iter'],
                    kernel_size=self.parameter['adv_repl_kernel'])
            print('Validated pass {} of image pair: {}.'.format(i, counter + 1))

            # smoothing each individual pass if 'each pass' is selected
            if self.parameter['smoothn_each_pass']:
                u = smoothn(u, s)
                v = smoothn(v, s)
                print('Smoothed pass {} for image pair: {}.'.format(
                    i, counter + 1))

            print('Finished pass {} for image pair: {}.'.format(i, counter + 1))
            print("window size: " + str(corr_window))
            print('overlap: ' + str(overlap), '\n')
            iterations -= 1

    if self.p['flip_u']:
        u = np.flipud(u)
    if self.p['flip_v']:
        v = np.flipud(v)
    if self.p['invert_u']:
        u *= -1
    if self.p['invert_v']:
        v *= -1

    # scaling
    u = u / self.parameter['dt']
    v = v / self.parameter['dt']
    x, y, u, v = piv_scl.uniform(x, y, u, v,
                                 scaling_factor=self.parameter['scale'])
    end = time.time()

    # save data to file.
    out = np.vstack([m.ravel() for m in [x, y, u, v, mask, sig2noise]])
    np.savetxt(self.save_fnames[counter],
               out.T,
               fmt='%8.4f',
               delimiter=delimiter)
    print('Processed image pair: {}'.format(counter + 1))

    sizeY = sizeX
    sizeX = ((int(frame_a.shape[0] - sizeX) //
              (sizeX - (sizeX * overlap_percent))) + 1)
    sizeY = ((int(frame_a.shape[1] - sizeY) //
              (sizeY - (sizeY * overlap_percent))) + 1)
    time_per_vec = _round((((end - start) * 1000) / ((sizeX * sizeY) - 1)), 3)

    print('Process time: {} second(s)'.format((_round((end - start), 3))))
    print('Number of vectors: {}'.format(int((sizeX * sizeY) - 1)))
    print('Time per vector: {} millisecond(s)'.format(time_per_vec))
def PIV(frame_0,
        frame_1,
        winsize,
        searchsize,
        overlap,
        frame_rate,
        scaling_factor,
        threshold=1.3,
        output='fil'):
    """
    Particle Image Velocimetry processing for two sequential images.

    Input:
    ------
    frame_0 - first frame to indicate potential seeds.
    frame_1 - second frame to trace seed displacements.
    winsize - size of the individual (square) grid cells in pixels.
    searchsize - size of the search area in pixels in which the location
        with the highest similarity is found.
    overlap - overlap over the grid cells in pixels.
    frame_rate - frame rate of the video in frames per second (fps).
    scaling_factor - amount of pixels per meter.
    output - after which step the PIV processing is stopped
        ('raw', 'fil', or 'int'; default: 'fil')
    """

    # determine the timestep between the two sequential frames (1/fps)
    dt = 1. / frame_rate

    # estimation of seed displacements in x and y direction
    # and the corresponding signal-to-noise ratio
    u, v, sig2noise = pyprocess.extended_search_area_piv(
        frame_0,
        frame_1,
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    # xy-coordinates of the centre of each grid cell
    x, y = pyprocess.get_coordinates(image_size=frame_0.shape,
                                     window_size=winsize,
                                     overlap=overlap)

    # if output is 'fil' or 'int':
    # filter out grid cells with a low signal-to-noise ratio
    if output == 'fil' or output == 'int':
        u, v, mask = validation.sig2noise_val(u, v, sig2noise,
                                              threshold=threshold)

    # if output is 'int':
    # fill in missing values through interpolation
    if output == 'int':
        u, v = filters.replace_outliers(u,
                                        v,
                                        method='localmean',
                                        max_iter=50,
                                        kernel_size=3)

    # scale results based on the pixels per metre
    x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor=scaling_factor)

    return x, y, u, v, sig2noise
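# A minimal usage sketch for PIV() above with the interpolated output option,
# assuming openpiv.tools and numpy (as np) are imported; the frame file names
# and parameter values are hypothetical placeholders.
frame_0 = tools.imread('seeds_000.tif')
frame_1 = tools.imread('seeds_001.tif')
x, y, u, v, s2n = PIV(frame_0, frame_1,
                      winsize=32, searchsize=64, overlap=16,
                      frame_rate=25, scaling_factor=100., output='int')
print('mean u: %.3f, mean v: %.3f' % (np.mean(u), np.mean(v)))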
def calc_piv_2_images(frame_a, frame_b, idx, dir_name):
    '''
    Performs Particle Image Velocimetry (PIV) of two images, and saves an
    image with the PIV result drawn on it.

    :param frame_a: first image
    :param frame_b: consecutive image
    :param idx: index of the first frame, for saving and ordering the images
    :param dir_name: directory to save the image to
    :return: -
    '''
    u0, v0, sig2noise = pyprocess.extended_search_area_piv(
        frame_a.astype(np.int32),
        frame_b.astype(np.int32),
        window_size=winsize,
        overlap=overlap,
        dt=dt,
        search_area_size=searchsize,
        sig2noise_method='peak2peak')

    x, y = pyprocess.get_coordinates(image_size=frame_a.shape,
                                     search_area_size=searchsize,
                                     overlap=overlap)

    u1, v1, mask = validation.sig2noise_val(u0, v0, sig2noise, threshold=1.05)

    # to see where is a reasonable limit, filter out
    # outliers that are very different from the neighbours
    u2, v2 = filters.replace_outliers(u1,
                                      v1,
                                      method='localmean',
                                      max_iter=3,
                                      kernel_size=3)

    # convert x,y to mm; convert u,v to mm/sec
    x, y, u3, v3 = scaling.uniform(
        x, y, u2, v2, scaling_factor=scaling_factor)  # 96.52 microns/pixel

    # 0,0 shall be bottom left, positive rotation rate is counterclockwise
    x, y, u3, v3 = tools.transform_coordinates(x, y, u3, v3)

    fig, ax = plt.subplots()
    im = np.negative(frame_a)  # plot negative of the image for more clarity
    xmax = np.amax(x) + winsize / (2 * scaling_factor)
    ymax = np.amax(y) + winsize / (2 * scaling_factor)
    ax.imshow(im, cmap="Greys_r", extent=[0.0, xmax, 0.0, ymax])

    invalid = mask.astype("bool")
    valid = ~invalid
    plt.quiver(x[invalid], y[invalid], u3[invalid], v3[invalid],
               color="r", width=width)
    plt.quiver(x[valid], y[valid], u3[valid], v3[valid],
               color="b", width=width)

    ax.set_aspect(1.)
    plt.title(r'Velocity Vectors Field (Frame #%d) $(\frac{\mu m}{hour})$' %
              idx)
    plt.savefig(dir_name + "/" + "vec_page%d.png" % idx, dpi=200)
    plt.show()
    plt.close()
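# A minimal driver sketch for calc_piv_2_images() above, assuming the
# module-level parameters it relies on (winsize, searchsize, overlap, dt,
# scaling_factor, width) are defined; the file and directory names are
# hypothetical placeholders.
frame_a = tools.imread('cells_0000.tif')
frame_b = tools.imread('cells_0001.tif')
calc_piv_2_images(frame_a, frame_b, idx=0, dir_name='piv_figures')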
def func(args):
    file_a, file_b, counter = args

    # read the images
    frame_a = tools.imread(os.path.join(settings.filepath_images, file_a))
    frame_b = tools.imread(os.path.join(settings.filepath_images, file_b))

    if counter == settings.fall_start:
        settings.ROI[1] = frame_a.shape[0]

    """Here we check if the interface has reached the top of the roi yet
    by comparing it to the index in the observation_periods file. If it has
    not reached the roi yet we skip this part, if it did then we shift the
    roi for each pair after the initial one
    """
    if counter >= settings.roi_shift_start:
        # set the roi to the image height for the first frame
        # if counter == settings.roi_shift_start :
        #     settings.current_pos = 0
        # shift the roi for each pair (this is not done for the first one)
        settings.ROI[0] = int(settings.current_pos)

    # crop to roi
    if settings.ROI == 'full':
        frame_a = frame_a
        frame_b = frame_b
    else:
        frame_a = frame_a[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]
        frame_b = frame_b[settings.ROI[0]:settings.ROI[1],
                          settings.ROI[2]:settings.ROI[3]]

    if settings.dynamic_masking_method == 'edge' or settings.dynamic_masking_method == 'intensity':
        frame_a = preprocess.dynamic_masking(
            frame_a,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold)
        frame_b = preprocess.dynamic_masking(
            frame_b,
            method=settings.dynamic_masking_method,
            filter_size=settings.dynamic_masking_filter_size,
            threshold=settings.dynamic_masking_threshold)

    #%%
    """ Here we do the first pass of the piv interrogation """
    x, y, u, v, sig2noise_ratio = first_pass(
        frame_a,
        frame_b,
        settings.window_width[0],
        settings.window_height[0],
        settings.overlap_width[0],
        settings.overlap_height[0],
        settings.iterations,
        correlation_method=settings.correlation_method,
        subpixel_method=settings.subpixel_method,
        do_sig2noise=settings.extract_sig2noise,
        sig2noise_method=settings.sig2noise_method,
        sig2noise_mask=settings.sig2noise_mask,
    )

    mask = np.full_like(x, False)
    if settings.validation_first_pass == True:
        u, v, mask_g = validation.global_val(u, v, settings.MinMax_U_disp,
                                             settings.MinMax_V_disp)
        u, v, mask_s = validation.global_std(
            u, v, std_threshold=settings.std_threshold)
        u, v, mask_m = validation.local_median_val(
            u,
            v,
            u_threshold=settings.median_threshold,
            v_threshold=settings.median_threshold,
            size=settings.median_size)
        if settings.extract_sig2noise == True and settings.iterations == 1 and settings.do_sig2noise_validation == True:
            u, v, mask_s2n = validation.sig2noise_val(
                u, v, sig2noise_ratio, threshold=settings.sig2noise_threshold)
            mask = mask + mask_g + mask_m + mask_s + mask_s2n
        else:
            mask = mask + mask_g + mask_m + mask_s

    'filter to replace the values that were marked by the validation'
    if settings.iterations > 1:
        u, v = filters.replace_outliers(
            u,
            v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size)
        'adding masks to add the effect of all the validations'
        if settings.smoothn == True:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)
    elif settings.iterations == 1 and settings.replace_vectors == True:
        u, v = filters.replace_outliers(
            u,
            v,
            method=settings.filter_method,
            max_iter=settings.max_filter_iteration,
            kernel_size=settings.filter_kernel_size)
        'adding masks to add the effect of all the validations'
        if settings.smoothn == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

    #%%
    i = 1
    """ Do the multipass until the maximum iterations are reached """
    for i in range(2, settings.iterations + 1):
        x, y, u, v, sig2noise_ratio, mask = multipass_img_deform(
            frame_a,
            frame_b,
            settings.window_width[i - 1],
            settings.window_height[i - 1],
            settings.overlap_width[i - 1],
            settings.overlap_height[i - 1],
            settings.iterations,
            i,
            x,
            y,
            u,
            v,
            correlation_method=settings.correlation_method,
            subpixel_method=settings.subpixel_method,
            do_sig2noise=settings.extract_sig2noise,
            sig2noise_method=settings.sig2noise_method,
            sig2noise_mask=settings.sig2noise_mask,
            MinMaxU=settings.MinMax_U_disp,
            MinMaxV=settings.MinMax_V_disp,
            std_threshold=settings.std_threshold,
            median_threshold=settings.median_threshold,
            median_size=settings.median_size,
            filter_method=settings.filter_method,
            max_filter_iteration=settings.max_filter_iteration,
            filter_kernel_size=settings.filter_kernel_size,
            interpolation_order=settings.interpolation_order)

        # smooth on each pass in case this is wanted
        if settings.smoothn == True:
            u, dummy_u1, dummy_u2, dummy_u3 = smoothn.smoothn(
                u, s=settings.smoothn_p)
            v, dummy_v1, dummy_v2, dummy_v3 = smoothn.smoothn(
                v, s=settings.smoothn_p)

        # extract the sig2noise ratio in case it is desired and replace the vectors
        if settings.extract_sig2noise == True and i == settings.iterations and settings.iterations != 1 and settings.do_sig2noise_validation == True:
            u, v, mask_s2n = validation_patch.sig2noise_val(
                u, v, sig2noise_ratio,
                threshold_low=settings.sig2noise_threshold)
            mask = mask + mask_s2n
        if settings.replace_vectors == True:
            u, v = filters.replace_outliers(
                u,
                v,
                method=settings.filter_method,
                max_iter=settings.max_filter_iteration,
                kernel_size=settings.filter_kernel_size)

    if counter >= settings.roi_shift_start:
        settings.current_pos = settings.current_pos - calc_disp(
            x, v, frame_b.shape[1])
        if ((settings.ROI[1] - settings.current_pos) < 300):
            return settings.current_pos, True

    # scale the result timewise and lengthwise
    u = u / settings.dt
    v = v / settings.dt
    x, y, u, v = scaling.uniform(x, y, u, v,
                                 scaling_factor=settings.scaling_factor)

    # save the result
    save(x, y, u, v, sig2noise_ratio, mask,
         os.path.join(save_path_txts, 'field_%06d.txt' % (counter)),
         delimiter='\t')

    # disable the grid in the rcParams file
    plt.rcParams['axes.grid'] = False

    # show and save the plot if it is desired
    if settings.show_plot == True or settings.save_plot == True:
        plt.ioff()
        Name = os.path.join(save_path_images, 'Image_%06d.png' % (counter))
        display_vector_field(os.path.join(save_path_txts,
                                          'field_%06d.txt' % (counter)),
                             scale=settings.scale_plot)
        if settings.save_plot == True:
            plt.savefig(Name, dpi=100)
        if settings.show_plot == True:
            plt.show()
        plt.close('all')

    print('Image Pair ' + str(counter) + ' of ' + settings.save_folder_suffix)

    # a NaN in current_pos means the interface could not be tracked anymore
    if np.isnan(settings.current_pos):
        return settings.current_pos, True

    return settings.current_pos, False