def align(im1, im2, wma, wmo, cr, red, i):
    sz = im1.shape
    # Alternative (disabled): run ECC on a central crop of the full-resolution images
    # instead of the downscaled copies below.
    im1r = img_as_ubyte(skimage.transform.rescale(im1, 1 / red))
    im2r = img_as_ubyte(skimage.transform.rescale(im2, 1 / red))
    (cc, warp_matrix) = cv2.findTransformECC(im2r, im1r, wma, wmo, cr)
    # If findTransformECC fails to find the transform, use matchTemplate to help
    if cc < 0.85:
        print('cannot align images', i + 1, 'and', i + 2, ': correlation too low --', cc)
        print('trying with match template')
        cormap = cv2.matchTemplate(im2r,
                                   im1r[int(2 * sz[0] / 5 / red):int(3 * sz[0] / 5 / red),
                                        int(2 * sz[1] / 5 / red):int(3 * sz[1] / 5 / red)],
                                   cv2.TM_CCOEFF_NORMED)
        # Find the pixel where the correlation map is maximal
        cormax = np.unravel_index(cormap.argmax(), cormap.shape)
        xsh = cormap.shape[1] / 2 - cormax[1]
        ysh = cormap.shape[0] / 2 - cormax[0]
        TrMatComp = np.array([[1., 0., xsh], [0., 1., ysh]])
        im1rtmp = cv2.warpAffine(np.copy(im1r), TrMatComp, (im1r.shape[1], im1r.shape[0]),
                                 flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
        (cc2, warp_matrix2) = cv2.findTransformECC(im2r, im1rtmp, wma, wmo, cr)
        print('calculated approximate shift: x', xsh, ' and y', ysh, ' pixels')
        print('new correlation:', cc2, 'with a warp matrix of')
        print(warp_matrix2)
        warp_matrix[0][2] = warp_matrix2[0][2] + xsh
        warp_matrix[1][2] = warp_matrix2[1][2] + ysh
        cc = cc2
    # Scale the translation back up to full resolution
    warp_matrix[0][2] = red * warp_matrix[0][2]
    warp_matrix[1][2] = red * warp_matrix[1][2]
    return np.copy(warp_matrix), np.copy(cc)
def align(self, blob):
    """Aligns the positions of active and inactive tracks depending on camera motion."""
    if self.im_index > 0:
        im1 = np.transpose(self.last_image.cpu().numpy(), (1, 2, 0))
        im2 = np.transpose(blob['img'][0].cpu().numpy(), (1, 2, 0))
        im1_gray = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)
        im2_gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
        warp_matrix = np.eye(2, 3, dtype=np.float32)
        criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                    self.number_of_iterations, self.termination_eps)
        try:
            cc, warp_matrix = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix,
                                                   self.warp_mode, criteria)
        except Exception:
            # Newer OpenCV builds require the inputMask and gaussFiltSize arguments
            cc, warp_matrix = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix,
                                                   self.warp_mode, criteria, None, 5)
        warp_matrix = torch.from_numpy(warp_matrix)

        for t in self.tracks:
            t.pos = warp_pos(t.pos, warp_matrix)
            # t.pos = clip_boxes(Variable(pos), blob['im_info'][0][:2]).data

        if self.do_reid:
            for t in self.inactive_tracks:
                t.pos = warp_pos(t.pos, warp_matrix)

        if self.motion_model_cfg['enabled']:
            for t in self.tracks:
                for i in range(len(t.last_pos)):
                    t.last_pos[i] = warp_pos(t.last_pos[i], warp_matrix)
def find_best_homography(self, physical_object_image, cropped_image, best_pe): pe_result = self.compute_homography(physical_object_image, cropped_image) pe_result.error = self.compute_error(pe_result.homography, physical_object_image, cropped_image) # Keep track of the best pe_result. If the newly computed one is better, replace the best with it. # See compute_quality_score(). I think a better approach would be compute the quality of a pe_result using reprojection error. if best_pe is not None: best_pe.error = self.compute_error(best_pe.homography, physical_object_image, cropped_image, "_best") if self.recompute_homography_using_ECC: try: better_pe = best_pe if best_pe is not None and best_pe.error < pe_result.error else pe_result if self.recompute_homography_using_ECC_threshold_min < better_pe.error < self.recompute_homography_using_ECC_threshold_max: logger.debug("Re-computing homography using ECC.") criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 1000, 1e-6) # pylint: disable=unbalanced-tuple-unpacking physical_object_image_gray, cropped_image_gray = self.convert_to_gray_scale( physical_object_image, cropped_image) better_homography_float32 = better_pe.homography.astype( np.float32) if isar.PLATFORM == "Darwin": (_, h_ECC) = cv2.findTransformECC( physical_object_image_gray, cropped_image_gray, better_homography_float32, cv2.MOTION_AFFINE, criteria) else: (_, h_ECC) = cv2.findTransformECC( physical_object_image_gray, cropped_image_gray, better_homography_float32, cv2.MOTION_AFFINE, criteria, inputMask=None, gaussFiltSize=5) error_ecc = self.compute_error(h_ECC, physical_object_image, cropped_image, "_ecc") if h_ECC is not None and error_ecc < better_pe.error: pe_result.homography = h_ECC pe_result.error = error_ecc except Exception as exp: logger.error("Error recomputing homography using ECC") logger.error(exp) traceback.print_tb(exp.__traceback__) epsilon = 5e0 if best_pe is None or pe_result.error < best_pe.error: return pe_result elif np.abs(pe_result.error - best_pe.error) < epsilon: return random.choice((pe_result, best_pe)) else: return best_pe
def align_ecc(image_set, images_gray_set, ref_ind, thre=0.05): img_num = len(image_set) ref_gray_image = images_gray_set[ref_ind] r, c = image_set[0].shape[0:2] warp_mode = cv2.MOTION_AFFINE # cv2.MOTION_HOMOGRAPHY # cv2.MOTION_AFFINE # cv2.MOTION_TRANSLATION # cv2.MOTION_EUCLIDEAN # Define 2x3 or 3x3 matrices and initialize the matrix to identity if warp_mode == cv2.MOTION_HOMOGRAPHY: print("Using homography model for alignment") identity_transform = np.eye(3, 3, dtype=np.float32) warp_matrix = np.eye(3, 3, dtype=np.float32) tform_set_init = [np.eye(3, 3, dtype=np.float32)] * img_num else: identity_transform = np.eye(2, 3, dtype=np.float32) warp_matrix = np.eye(2, 3, dtype=np.float32) tform_set_init = [np.eye(2, 3, dtype=np.float32)] * img_num number_of_iterations = 500 termination_eps = 1e-6 criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # Run the ECC algorithm. The results are stored in warp_matrix. tform_set = np.zeros_like(tform_set_init) tform_inv_set = np.zeros_like(tform_set_init) valid_id = [] motion_thre = thre * min(r, c) for i in range(ref_ind - 1, -1, -1): _, warp_matrix = cv2.findTransformECC(ref_gray_image, images_gray_set[i], warp_matrix, warp_mode, criteria) tform_set[i] = warp_matrix tform_inv_set[i] = cv2.invertAffineTransform(warp_matrix) motion_val = abs(warp_matrix - identity_transform).sum() if motion_val < motion_thre: valid_id.append(i) else: continue if warp_mode == cv2.MOTION_HOMOGRAPHY: warp_matrix = np.eye(3, 3, dtype=np.float32) else: warp_matrix = np.eye(2, 3, dtype=np.float32) for i in range(ref_ind, img_num, 1): _, warp_matrix = cv2.findTransformECC(ref_gray_image, images_gray_set[i], warp_matrix, warp_mode, criteria) tform_set[i] = warp_matrix tform_inv_set[i] = cv2.invertAffineTransform(warp_matrix) motion_val = abs(warp_matrix - identity_transform).sum() if motion_val < motion_thre: valid_id.append(i) else: continue return tform_set, tform_inv_set, valid_id
def alignment(imgA, imgB): # takes two images as input, aligns them, cropps the larger one to the size of the # smaller one, then returns the two final images, image A and image B # color channels for image B imgBRed = imgB[:,:,0] imgBGreen = imgB[:,:,1] imgBBlue = imgB[:,:,2] # color channels for image A imgARed = imgA[:,:,0] imgAGreen = imgA[:,:,1] imgABlue = imgA[:,:,2] # convert images to grayscale (for correct sizing without color channels) imgA_gray = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY) # find size of imgA; used when to ensure imgB will be same size as image 1 when aligned to it size = imgA_gray.shape # define Affine motion model for alignment align_mode = cv2.MOTION_AFFINE # define and initialize 2x3 matrices align_matrix = np.eye(2, 3, dtype=np.float32) # specify the number of iterations number_of_iterations = 1000 # specify the threshold of the increment in the correlation coefficient between two iterations termination_eps = 1e-10; # define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # run the ECC algorithm for each color channel, results are stored in align_matrix. (ccR, align_matrixR) = cv2.findTransformECC(imgA_gray,imgBRed,align_matrix, align_mode, criteria) (ccG, align_matrixG) = cv2.findTransformECC(imgA_gray,imgBGreen,align_matrix, align_mode, criteria) (ccB, align_matrixB) = cv2.findTransformECC(imgA_gray,imgBBlue,align_matrix, align_mode, criteria) # use warpAffine for Affine transformation imgB_alignedRed = cv2.warpAffine(imgBRed, align_matrixR, (size[1],size[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) imgB_alignedGreen = cv2.warpAffine(imgBGreen, align_matrixG, (size[1],size[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) imgB_alignedBlue = cv2.warpAffine(imgBBlue, align_matrixB, (size[1],size[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) # final outputs imgB_final = cv2.merge((imgB_alignedBlue, imgB_alignedGreen, imgB_alignedRed)) imgA_final = cv2.merge((imgABlue, imgAGreen, imgARed)) # necessary for showing "real colors" image cv2.imwrite("Final_Algined.jpg", imgB_final) imgB_final = cv2.imread("Final_Algined.jpg") # and now to crop them, then return them imgA_croppedFinal, imgB_croppedFinal = crop(imgA_final, imgB_final) return(imgA_croppedFinal, imgB_croppedFinal)
def example_03_find_translation_with_ECC(warp_mode=cv2.MOTION_TRANSLATION): # warp_mode = cv2.MOTION_TRANSLATION # warp_mode = cv2.MOTION_EUCLIDEAN # warp_mode = cv2.MOTION_AFFINE # warp_mode = cv2.MOTION_HOMOGRAPHY folder_input = 'images/ex_homography_affine/' im1 = cv2.imread(folder_input + 'first.jpg') im2 = cv2.imread(folder_input + 'scond.jpg') folder_output = 'images/output/' output_filename_L1 = folder_output + 'transf1_first.jpg' output_filename_R1 = folder_output + 'transf1_scond.jpg' output_filename_L2 = folder_output + 'transf2_first.jpg' output_filename_R2 = folder_output + 'transf2_scond.jpg' if not os.path.exists(folder_output): os.makedirs(folder_output) else: tools_IO.remove_files(folder_output) im1_gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY) im2_gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY) criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000, 1e-10) if warp_mode != cv2.MOTION_HOMOGRAPHY: (cc, transform1) = cv2.findTransformECC( im1_gray, im2_gray, numpy.eye(2, 3, dtype=numpy.float32), warp_mode, criteria) (cc, transform2) = cv2.findTransformECC( im2_gray, im1_gray, numpy.eye(2, 3, dtype=numpy.float32), warp_mode, criteria) result_image11, result_image12 = tools_calibrate.get_stitched_images_using_translation( im2, im1, transform1) result_image22, result_image21 = tools_calibrate.get_stitched_images_using_translation( im1, im2, transform2) else: (cc, transform1) = cv2.findTransformECC( im1_gray, im2_gray, numpy.eye(3, 3, dtype=numpy.float32), warp_mode, criteria) (cc, transform2) = cv2.findTransformECC( im2_gray, im1_gray, numpy.eye(3, 3, dtype=numpy.float32), warp_mode, criteria) result_image11, result_image12 = tools_calibrate.get_stitched_images_using_homography( im2, im1, transform1) result_image22, result_image21 = tools_calibrate.get_stitched_images_using_homography( im1, im2, transform2) cv2.imwrite(output_filename_L1, result_image11) cv2.imwrite(output_filename_R1, result_image12) cv2.imwrite(output_filename_L2, result_image21) cv2.imwrite(output_filename_R2, result_image22) return
def image_transformation_from(self, otherdata): """ Return the translation and the rotation based on the two radar images """ translation, rotation = None, None if not (otherdata.id is None or self.id is None): cv2_transformations = shelve.open("cv2_transformations", flag="r") if cv2_transformations['use_dataset'] in cv2_transformations: if str(self.id)+"-"+str(otherdata.id) in cv2_transformations[cv2_transformations['use_dataset']]: translation, rotation = cv2_transformations[cv2_transformations['use_dataset']][str(self.id)+"-"+str(otherdata.id)] cv2_transformations.close() if translation is None or rotation is None: if not otherdata.id is None: print("Calculating transformation: "+ str(self.id)+"-"+str(otherdata.id)) else: print("Calculating transformation: "+ str(self.id)) try: # Restrict to predicted overlap self_img, otherdata_img = self.image_overlap(otherdata) # ECC warp_mode = cv2.MOTION_EUCLIDEAN number_of_iterations = 500; termination_eps = 1e-7; criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) warp_matrix = np.eye(2, 3, dtype=np.float32) (cc, warp_matrix) = cv2.findTransformECC (otherdata_img, self_img, warp_matrix, warp_mode, criteria, None, 1) rot_matrix = np.array([[warp_matrix[0,0], warp_matrix[1,0], 0], [warp_matrix[0,1], warp_matrix[1,1], 0], [0,0,1]]) translation = -self.precision*np.array([warp_matrix[0,2], warp_matrix[1,2], 0]) rotation = rot.from_dcm(rot_matrix) except: try: # Without restrictind to image overlap (cc, warp_matrix) = cv2.findTransformECC (otherdata.img, self.img, warp_matrix, warp_mode, criteria, None, 1) rot_matrix = np.array([[warp_matrix[0,0], warp_matrix[1,0], 0], [warp_matrix[0,1], warp_matrix[1,1], 0], [0,0,1]]) translation = -self.precision*np.array([warp_matrix[0,2], warp_matrix[1,2], 0]) rotation = rot.from_dcm(rot_matrix) except: print("CV2 calculation failed") translation = np.nan rotation = np.nan if not (otherdata.id is None or self.id is None) and not np.any(np.isnan(translation)): cv2_transformations = shelve.open("cv2_transformations", writeback=True) if not cv2_transformations['use_dataset'] in cv2_transformations: cv2_transformations[cv2_transformations['use_dataset']] = dict() cv2_transformations[cv2_transformations['use_dataset']][str(self.id)+"-"+str(otherdata.id)] = (translation, rotation) cv2_transformations.close() # just for test and vizualisation, could be removed() # check_transform(self, rotation, translation, 'radar1_1.png') return translation, rotation
def align(ref_img, match_img, warp_mode=cv2.MOTION_AFFINE, max_iterations=300, epsilon_threshold=1e-10, pyramid_levels=2, is_video=False): if pyramid_levels is None: w = ref_img.shape[1] nol = int(w / (1280 / 3)) - 1 else: nol = pyramid_levels # Initialize the matrix to identity if warp_mode == cv2.MOTION_HOMOGRAPHY: # warp_matrix = np.eye(3, 3, dtype=np.float32) warp_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32) else: # warp_matrix = np.eye(2, 3, dtype=np.float32) warp_matrix = np.array([[1, 0, 0], [0, 1, 0]], dtype=np.float32) ref_img = cv2.fastNlMeansDenoising(ref_img, None, 5, 21) match_img = cv2.fastNlMeansDenoising(match_img, None, 5, 21) kernel = np.ones((11, 11), np.uint8) ref_img = cv2.morphologyEx(ref_img, cv2.MORPH_OPEN, kernel) match_img = cv2.morphologyEx(match_img, cv2.MORPH_OPEN, kernel) ref_img = gradient(ref_img) match_img = gradient(match_img) ref_img_pyr = [ref_img] match_img_pyr = [match_img] for level in range(nol): ref_img_pyr[0] = normalize(ref_img_pyr[0]) ref_img_pyr.insert(0, cv2.resize(ref_img_pyr[0], None, fx=1/2, fy=1/2, interpolation=cv2.INTER_LINEAR)) match_img_pyr[0] = normalize(match_img_pyr[0]) match_img_pyr.insert(0, cv2.resize(match_img_pyr[0], None, fx=1/2, fy=1/2, interpolation=cv2.INTER_LINEAR)) # Terminate the optimizer if either the max iterations or the threshold are reached criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, max_iterations, epsilon_threshold) # run pyramid ECC for level in range(nol): ref_img_grad = ref_img_pyr[level] match_img_grad = match_img_pyr[level] try: cc, warp_matrix = cv2.findTransformECC(ref_img_grad, match_img_grad, warp_matrix, warp_mode, criteria, inputMask=None, gaussFiltSize=1) except TypeError: cc, warp_matrix = cv2.findTransformECC(ref_img_grad, match_img_grad, warp_matrix, warp_mode, criteria) if level != nol: # scale up only the offset by a factor of 2 for the next (larger image) pyramid level if warp_mode == cv2.MOTION_HOMOGRAPHY: warp_matrix = warp_matrix * np.array([[1, 1, 2], [1, 1, 2], [0.5, 0.5, 1]], dtype=np.float32) else: warp_matrix = warp_matrix * np.array([[1, 1, 2], [1, 1, 2]], dtype=np.float32) # return warp_matrix return warp_matrix
def eccAlign(image1, image2):
    # Convert images to grayscale
    im1_gray = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    im2_gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)

    # Find size of image1
    sz = image1.shape

    # Define the motion model
    warp_mode = cv2.MOTION_AFFINE

    # Define 2x3 or 3x3 matrices and initialize the matrix to identity
    if warp_mode == cv2.MOTION_HOMOGRAPHY:
        warp_matrix = np.eye(3, 3, dtype=np.float32)
    else:
        warp_matrix = np.eye(2, 3, dtype=np.float32)

    # Iteration count and convergence threshold (previously undefined globals; typical values assumed)
    number_of_iterations = 5000
    termination_eps = 1e-10

    # Define termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)

    # Run the ECC algorithm. The results are stored in warp_matrix.
    (cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria)

    if warp_mode == cv2.MOTION_HOMOGRAPHY:
        # Use warpPerspective for Homography
        im2_aligned = cv2.warpPerspective(image2, warp_matrix, (sz[1], sz[0]),
                                          flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    else:
        # Use warpAffine for Translation, Euclidean and Affine
        im2_aligned = cv2.warpAffine(image2, warp_matrix, (sz[1], sz[0]),
                                     flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

    return im2_aligned, warp_matrix
def _align_cv2(im, ref, **kargs): """Return the translation vector to shirt im to align to ref using the cv2 method.""" im1_gray = ref.convert("uint8", force_copy=True) im2_gray = im.convert("uint8", force_copy=True) # Define the motion model warp_mode = cv2.MOTION_TRANSLATION warp_matrix = np.eye(2, 3, dtype=np.float32) # Specify the number of iterations. number_of_iterations = 5000 # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-10 # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # Run the ECC algorithm. The results are stored in warp_matrix. (_, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria, inputMask=None, gaussFiltSize=1) return -warp_matrix[1::-1, 2], {}
def align_images(im1, im2):
    # Convert images to grayscale
    im1_gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2_gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Find size of image1
    sz = im1.shape

    # Define the motion model
    warp_mode = cv2.MOTION_HOMOGRAPHY

    # Define the warp matrix
    warp_matrix = np.eye(3, 3, dtype=np.float32)

    # Define the number of iterations
    number_of_iterations = askinteger("Iterations", "Enter a number between 5 and 5000",
                                      initialvalue=500, minvalue=5, maxvalue=5000)

    # Define the correlation coefficient threshold:
    # the increment in the correlation coefficient between two iterations
    termination_eps = askfloat("Threshold", "Enter a number between 1e-50 and 1e-10",
                               initialvalue=1e-10, minvalue=1e-50, maxvalue=1e-10)

    # Define termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)

    # Run the ECC algorithm. The results are stored in warp_matrix.
    (cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria)

    # Use warpPerspective for Homography
    im_aligned = cv2.warpPerspective(im2, warp_matrix, (sz[1], sz[0]),
                                     flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

    save1 = asksaveasfilename(defaultextension=".jpg", title="Save aligned image")
    cv2.imwrite(save1, im_aligned)
def align_with_registration(next_rois, previous_rois, half_size): sx, sy = previous_rois.shape # define the motion model warp_mode = cv2.MOTION_TRANSLATION warp_matrix = np.eye(2, 3, dtype=np.float32) number_of_iterations = 5000 termination_eps = 1e-10 criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # run the ECC algorithm (cc, warp_matrix) = cv2.findTransformECC(previous_rois, next_rois, warp_matrix, warp_mode, criteria, None, 1) next_rois_aligned = cv2.warpAffine(next_rois, warp_matrix, (sy, sx), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) # #obtain subrois # original_rois = previous_rois[sy//2-half_size:sy//2+half_size, # sx//2-half_size:sx//2+half_size ] # aligned_rois = next_rois_aligned[sy//2-half_size:sy//2+half_size, # sx//2-half_size:sx//2+half_size ] # return aligned_rois, original_rois print(warp_matrix) return next_rois_aligned
def _get_alignment(im_ref, im_to_align, key): if key is not None: cached_path = Path('./data/satellite_data/align_cache').joinpath( '{}.alignment'.format(key)) if cached_path.exists(): with cached_path.open('rb') as f: return pickle.load(f) logger.info('Getting alignment for {}'.format(key)) warp_mode = cv2.MOTION_TRANSLATION warp_matrix = np.eye(2, 3, dtype=np.float32) criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000, 1e-8) cc, warp_matrix = cv2.findTransformECC(im_ref, im_to_align, warp_matrix, warp_mode, criteria, inputMask=None, gaussFiltSize=1) if key is not None: with cached_path.open('wb') as f: pickle.dump((cc, warp_matrix), f) logger.info('Got alignment for {} with cc {:.3f}: {}'.format( key, cc, str(warp_matrix).replace('\n', ''))) return cc, warp_matrix
def _get_homography(image_1, image_2):
    warp_matrix = np.eye(3, 3, dtype=np.float32)
    # iterations and epsilon are expected to be module-level settings
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, iterations, epsilon)
    # _, homography = cv2.findTransformECC(image_1, image_2, warp_matrix,
    #                                      cv2.MOTION_HOMOGRAPHY, criteria)  # OK with OpenCV 4.0.1
    # Changes needed for OpenCV >= 4.2 below (inputMask and gaussFiltSize arguments)
    _, homography = cv2.findTransformECC(image_1, image_2, warp_matrix,
                                         cv2.MOTION_HOMOGRAPHY, criteria, None, 5)
    return homography
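# The version-dependent call above can also be handled at call time. A minimal sketch,
# assuming grayscale inputs; the name find_homography_any_version is hypothetical and
# not part of the original code. It tries the newer keyword arguments first and falls
# back to the older five-argument form if this OpenCV build rejects them.
import cv2
import numpy as np

def find_homography_any_version(image_1, image_2, iterations=1000, epsilon=1e-6):
    warp_matrix = np.eye(3, 3, dtype=np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, iterations, epsilon)
    try:
        # OpenCV >= 4.1 accepts (and may require) inputMask and gaussFiltSize
        _, homography = cv2.findTransformECC(image_1, image_2, warp_matrix,
                                             cv2.MOTION_HOMOGRAPHY, criteria,
                                             inputMask=None, gaussFiltSize=5)
    except TypeError:
        # Older builds reject the extra keyword arguments
        _, homography = cv2.findTransformECC(image_1, image_2, warp_matrix,
                                             cv2.MOTION_HOMOGRAPHY, criteria)
    return homography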
def updateTracker(self, qimg, shape):
    assert (shape and shape.label == self.shape.label), "Invalid tracker state!"
    status = False
    result = shape
    if not self.isRunning or qimg.isNull():
        return result, status

    mimg = ocvutil.qtImg2CvMat(qimg)
    srect = self.getRectForTracker(mimg, self.shape)
    mimg = mimg[srect[1]:srect[1] + srect[3], srect[0]:srect[0] + srect[2]]
    mimg = cv2.resize(mimg, (0, 0), fx=0.5, fy=0.5)
    mimg = cv2.cvtColor(mimg, cv2.COLOR_BGR2GRAY)

    warp_matrix = np.eye(2, 3, dtype=np.float32)
    cc = False
    try:
        (cc, T) = cv2.findTransformECC(self.ref_img, mimg, warp_matrix, cv2.MOTION_AFFINE,
                                       self.criteria, None, 1)
    except cv2.error:
        cc = False

    if cc:
        result = copy.deepcopy(shape)
        result.transform(T)
        status = True
    else:
        print("Tracker failed")

    return result, status
def ecc(self, files_list): # Align and stack images with ECC method # Slower but more accurate warp_matrix = np.eye(3, 3, dtype=np.float32) _iter = 1000 eps = 1e-10 criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, _iter, eps) for file in files_list: image = cv2.imread(file, 1).astype(np.float32) / 255 print(file) if self.first_image is None: # convert to gray scale floating point image self.first_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) self.image_stacked = image else: # Estimate perspective transform (s, warp_matrix) = cv2.findTransformECC(cv2.cvtColor( image, cv2.COLOR_BGR2GRAY), self.first_image, warp_matrix, cv2.MOTION_HOMOGRAPHY, criteria, inputMask=None, gaussFiltSize=1) w, h, _ = image.shape # Align image to first image image = cv2.warpPerspective(image, warp_matrix, (h, w)) self.image_stacked += image self.image_stacked /= len(files_list) self.image_stacked = (self.image_stacked * 255).astype(np.uint8) return self.image_stacked
def homography_alignment(target, im, number_of_iterations = 10): # Convert images to grayscale im1_gray = cv2.cvtColor(target, cv2.COLOR_RGB2GRAY) im2_gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY) # Find size of image1 sz = im.shape # Define the motion model warp_mode = cv2.MOTION_HOMOGRAPHY # cv2.MOTION_TRANSLATION # Define 3x3 matrices and initialize the matrix to identity warp_matrix = np.eye(3, 3, dtype=np.float32) # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-10 # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # Run the ECC algorithm. The results are stored in warp_matrix. (cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria, None, 1) # Use warpPerspective for Homography im2_aligned = cv2.warpPerspective(im, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) return im2_aligned
def _align_two_rasters(img1, img2, two_dimensions_only=False): try: if two_dimensions_only: p1 = img1[10:400, 200:500, 1].astype(np.float32) p2 = img2[10:400, 200:500].astype(np.float32) else: p1 = img1[10:400, 200:500, 1].astype(np.float32) p2 = img2[10:400, 200:500, 4].astype(np.float32) except: print( "_align_two_rasters: can't extract patch, falling back to whole image" ) # lp1 = cv2.Laplacian(p1,cv2.CV_32F,ksize=5) # lp2 = cv2.Laplacian(p2,cv2.CV_32F,ksize=5) warp_mode = cv2.MOTION_EUCLIDEAN warp_matrix = np.eye(2, 3, dtype=np.float32) criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 1000, 1e-7) (cc, warp_matrix) = cv2.findTransformECC(p1, p2, warp_matrix, warp_mode, criteria) print("_align_two_rasters: cc:{}".format(cc)) img3 = cv2.warpAffine(img2, warp_matrix, (img1.shape[1], img1.shape[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) img3[img3 == 0] = np.average(img3) return img3
def get_shifts_image_series(imlist, filter_func, number_of_iterations=10000, termination_eps=1e-20): criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) #Align warp_mode = cv2.MOTION_TRANSLATION wms = [] for i, _ in enumerate(imlist[:-1]): im1 = imlist[i] im2 = imlist[i + 1] im1_to_align = filter_func(im1) im2_to_align = filter_func(im2) warp_matrix = np.eye(2, 3, dtype=np.float32) (cc, warp_matrix) = cv2.findTransformECC(im1_to_align, im2_to_align, warp_matrix, warp_mode, criteria) wms.append(warp_matrix) wms = np.dstack(wms) wms_summed = np.zeros(wms.shape) for i in range(wms.shape[-1] - 1): wms_summed[:, -1, i] = wms[:, -1, :i + 1].sum(-1) wms_summed[:, -1, 0] = wms[:, -1, 0] wms_summed[:, :-1, :] = wms[:, :-1, :] return wms_summed
def stackImagesECC(file_list): M = np.eye(3, 3, dtype=np.float32) first_image = None stacked_image = None for file in file_list: image = cv2.imread(file, 1).astype(np.float32) / 255 print(file) if first_image is None: # convert to gray scale floating point image first_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) stacked_image = image else: # Estimate perspective transform s, M = cv2.findTransformECC( cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), first_image, M, cv2.MOTION_HOMOGRAPHY) w, h, _ = image.shape # Align image to first image image = cv2.warpPerspective(image, M, (h, w)) stacked_image += image stacked_image /= len(file_list) stacked_image = (stacked_image * 255).astype(np.uint8) return stacked_image
def register_image2(self, file_in, file_out): ds = gdal.Open(file_in) img = self.read_stretch_min_max(ds, 1) # Define motion model warp_mode = cv2.MOTION_TRANSLATION # Set the warp matrix to identity. if warp_mode == cv2.MOTION_HOMOGRAPHY: warp_matrix = np.eye(3, 3, dtype=np.float32) else: warp_matrix = np.eye(2, 3, dtype=np.float32) # Set the stopping criteria for the algorithm. criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000, 1e-10) # Warp the blue and green channels to the red channel # (cc, warp_matrix) = cv2.findTransformECC(self.get_gradient(self.reference_band.astype(np.float32)), # self.get_gradient(img.astype(np.float32)), # warp_matrix, warp_mode, criteria) # Warp the blue and green channels to the red channel (cc, warp_matrix) = cv2.findTransformECC(self.reference_band_f, img.astype(np.float32), warp_matrix, warp_mode, criteria) logging.info("CV2 {} translated by {}".format(file_in, warp_matrix)) tvec = [-warp_matrix[1, 2], -warp_matrix[0, 2]] # y,x self.trans_geotiff_tile(file_in, file_out, tvec)
def estimate_speed(prev, cur): global yoff # Convert images to grayscale s = (64, 48) im1_gray = cv2.resize(cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY), s) im2_gray = cv2.resize(cv2.cvtColor(cur, cv2.COLOR_BGR2GRAY), s) # Find size of image1 sz = im1_gray.shape # Define the motion model #warp_mode = cv2.MOTION_AFFINE warp_mode = cv2.MOTION_TRANSLATION warp_matrix = np.eye(2, 3, dtype=np.float32) number_of_iterations = 2 criteria = (cv2.TERM_CRITERIA_COUNT, number_of_iterations, 0) try: (cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria) except cv2.error: return s = math.sqrt( math.pow(warp_matrix[0][2], 2) + math.pow(warp_matrix[1][2], 2)) lpf = 0.8 yoff = lpf * yoff + (1 - lpf) * s mqttc.publish("%s/telemetry/vision/speed" % DEVICE_ID, yoff)
def cust_findTransformECC(im1, im2, warp_mode=cv2.MOTION_TRANSLATION):
    # Define 2x3 or 3x3 matrices and initialize the matrix
    if warp_mode == cv2.MOTION_HOMOGRAPHY:
        warp_matrix = np.eye(3, 3, dtype=np.float32)
    else:
        warp_matrix = np.eye(2, 3, dtype=np.float32)

    # Specify the number of iterations
    number_of_iterations = 500

    # Specify the threshold of the increment
    # in the correlation coefficient between two iterations
    termination_eps = 1e-10

    # Define termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)

    # Run the ECC algorithm.
    try:
        (cc, warp_matrix) = cv2.findTransformECC(im1, im2, warp_matrix, warp_mode, criteria)
    except cv2.error:
        cc = None
        warp_matrix = None

    return cc, warp_matrix
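# A minimal usage sketch for cust_findTransformECC above (file names are hypothetical;
# inputs are assumed to be single-channel images): check for the None result before
# applying the warp, since the helper swallows cv2.error.
import cv2

ref = cv2.imread("ref.png", cv2.IMREAD_GRAYSCALE)
moving = cv2.imread("moving.png", cv2.IMREAD_GRAYSCALE)
cc, warp_matrix = cust_findTransformECC(ref, moving)
if warp_matrix is not None:
    # Translation, Euclidean and Affine warps are applied with warpAffine
    aligned = cv2.warpAffine(moving, warp_matrix, (ref.shape[1], ref.shape[0]),
                             flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
else:
    aligned = moving  # fall back to the unaligned image if ECC did not converge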
def images_alignement(one, two, outpath): _h, _w = one.shape # Define 2x3 and initialize the matrix to identity warp_matrix = np.eye(2, 3, dtype=np.float32) # Specify the number of iterations number_of_iterations = 5000 # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-10 # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # Run the ECC algorithm. The results are stored in warp_matrix. (cc, warp_matrix) = cv2.findTransformECC(one, two, warp_matrix, cv2.MOTION_TRANSLATION, criteria) # Use warpAffine for Translation, Euclidean and Affine three = cv2.warpAffine(two, warp_matrix, (_w, _h), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) # Save image cv2.imwrite(outpath, three.astype('uint16')) return outpath
def update_matrix(orig, second, factor, matrix): """Update the warp matrix "matrix" at scale "factor", return a new warp matrix""" orig_gray = cv2.resize(orig, None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC) second_gray = cv2.resize(second, None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC) warp_mode = cv2.MOTION_HOMOGRAPHY number_of_iterations = 500 # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-5 # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # Run the ECC algorithm. The results are stored in warp_matrix. (cc, warp_matrix) = cv2.findTransformECC(orig_gray, second_gray, matrix, warp_mode, criteria) return warp_matrix
def _align_two_rasters(img1, img2): try: p1 = img1[300:1900, 300:2200, 1].astype(np.float32) p2 = img2[300:1900, 300:2200, 3].astype(np.float32) except: print( "_align_two_rasters: can't extract patch, falling back to whole image" ) p1 = img1[:, :, 1] p2 = img2[:, :, 3] # lp1 = cv2.Laplacian(p1,cv2.CV_32F,ksize=5) # lp2 = cv2.Laplacian(p2,cv2.CV_32F,ksize=5) warp_mode = cv2.MOTION_EUCLIDEAN warp_matrix = np.eye(2, 3, dtype=np.float32) criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 1000, 1e-3) print('before findTransformECC()') (cc, warp_matrix) = cv2.findTransformECC(p1, p2, warp_matrix, warp_mode, criteria) print('after findTransformECC()') print("_align_two_rasters: cc:{}".format(cc)) img3 = cv2.warpAffine(img2, warp_matrix, (img1.shape[1], img1.shape[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) img3[img3 == 0] = np.average(img3) return img3
def align_two_images_ECC(im1, im2, mode=cv2.MOTION_AFFINE): if len(im1.shape) == 2: im1_gray = im1.copy() im2_gray = im2.copy() else: im1_gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY) im2_gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY) #mode = cv2.MOTION_TRANSLATION #mode = cv2.MOTION_AFFINE try: (cc, warp_matrix) = cv2.findTransformECC( im1_gray, im2_gray, numpy.eye(2, 3, dtype=numpy.float32), mode) except: return im1, im2 if len(im1.shape) == 2: aligned = cv2.warpAffine(im2_gray, warp_matrix, (im2_gray.shape[1], im2_gray.shape[0]), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) return im1_gray, aligned else: aligned = cv2.warpAffine(im2, warp_matrix, (im2_gray.shape[1], im2_gray.shape[0]), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) return im1, aligned
def get_warp_matrix(prev_img, img0, warp_mode, resize_factor=1): size = (prev_img.shape[1] // resize_factor), (prev_img.shape[0] // resize_factor) resize_prev_img = cv2.resize(prev_img, size) if resize_factor != 1 else prev_img resize_img0 = cv2.resize(img0, size) if resize_factor != 1 else img0 number_of_iterations = 25 termination_eps = 1e-10 if warp_mode == cv2.MOTION_HOMOGRAPHY: warp_matrix = np.eye(3, 3, dtype=np.float32) else: warp_matrix = np.eye(2, 3, dtype=np.float32) criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) _, warp_matrix = cv2.findTransformECC( cv2.cvtColor(resize_prev_img, cv2.COLOR_BGR2GRAY), cv2.cvtColor(resize_img0, cv2.COLOR_BGR2GRAY), warp_matrix, warp_mode, criteria, inputMask=None, gaussFiltSize=1) if warp_mode == cv2.MOTION_HOMOGRAPHY: warp_matrix[0][2] = warp_matrix[0][2] * resize_factor warp_matrix[1][2] = warp_matrix[1][2] * resize_factor warp_matrix[2][0] = warp_matrix[2][0] / resize_factor warp_matrix[2][1] = warp_matrix[2][1] / resize_factor else: warp_matrix[0][2] = warp_matrix[0][2] * resize_factor warp_matrix[1][2] = warp_matrix[1][2] * resize_factor return warp_matrix
def align_post_image(pre, post): warpMatrix = np.zeros((3, 3), dtype=np.float32) warpMatrix[0, 0] = warpMatrix[1, 1] = warpMatrix[2, 2] = 1.0 stop_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, 0.0001) retval = False post_warped: np.ndarray = None try: retval, warpMatrix = cv2.findTransformECC( cv2.cvtColor(pre, cv2.COLOR_RGB2GRAY), cv2.cvtColor(post, cv2.COLOR_RGB2GRAY), warpMatrix, cv2.MOTION_HOMOGRAPHY, stop_criteria, None, 5, ) post_warped = cv2.warpPerspective(post, warpMatrix, dsize=(1024, 1024), flags=cv2.WARP_INVERSE_MAP) except: retval = False post_warped = post.copy() return post_warped
def define_matrix(image, number_of_iterations = 5000, termination_eps = 1e-10, warp = 'Affine'): warp_mode_dct = { 'Translation' : cv2.MOTION_TRANSLATION, 'Affine' : cv2.MOTION_AFFINE, 'Euclidean' : cv2.MOTION_EUCLIDEAN, 'Homography' : cv2.MOTION_HOMOGRAPHY } img = oib.image_reorder(image) color1 = img_as_ubyte(img[0,0,:,:,0]) color2 = img_as_ubyte(img[0,0,:,:,1]) warp_mode = warp_mode_dct.pop('%s' % warp) if warp_mode == cv2.MOTION_HOMOGRAPHY : warp_matrix = np.eye(3, 3, dtype=np.float32) else : warp_matrix = np.eye(2, 3, dtype=np.float32) number_of_iterations = number_of_iterations termination_eps = termination_eps criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) (cc, warp_matrix) = cv2.findTransformECC (color1,color2,warp_matrix, warp_mode, criteria) return warp_matrix
def register(self, src, trg, trg_mask=None, src_mask=None): """ Implementation of pair-wise registration and warping using Enhanced Correlation Coefficient This function estimates an Euclidean transformation (x,y translation + rotation) using the intensities of the pair of images to be registered. The similarity metric is a modification of the cross-correlation metric, which is invariant to distortions in contrast and brightness. :param src: 2D single channel source moving image :param trg: 2D single channel target reference image :param trg_mask: Mask of target image. Not used in this method. :param src_mask: Mask of source image. Not used in this method. :return: Estimated 2D transformation matrix of shape 2x3 """ # Parameters of registration warp_mode = cv2.MOTION_EUCLIDEAN # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-10 # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, self.params['MaxIters'], termination_eps) # Initialise warp matrix warp_matrix = np.eye(2, 3, dtype=np.float32) # Run the ECC algorithm. The results are stored in warp_matrix. _, warp_matrix = cv2.findTransformECC(src.astype(np.float32), trg.astype(np.float32), warp_matrix, warp_mode, criteria) return warp_matrix
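# A minimal sketch of applying the 2x3 matrix that register() returns (assumed names:
# `reg` is an instance of the class above, `src`/`trg` are the same float32 grayscale
# arrays passed to register). With WARP_INVERSE_MAP, cv2.warpAffine resamples the
# second image given to findTransformECC (here `trg`) onto the grid of the first
# (here `src`).
import cv2

warp_matrix = reg.register(src, trg)
trg_aligned = cv2.warpAffine(trg, warp_matrix, (src.shape[1], src.shape[0]),
                             flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)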
def calculate_translation_values(self, image, warp_matrix): source_file = os.path.join(self.INPUT_DIR, image) if self.RESET_MATRIX_EVERY_LOOP: warp_matrix = self._create_warp_matrix() # reset im2 = self._read_image_and_crop(source_file, read_as_8bit=True) # proceed with downsized version if self.DOWNSIZE: im2_downsized = cv2.resize(im2, (0,0), fx=1.0/self.DOWNSIZE_FACTOR, fy=1.0/self.DOWNSIZE_FACTOR) else: im2_downsized = im2 im2_gray = cv2.cvtColor(im2_downsized, cv2.COLOR_BGR2GRAY) if self.USE_SOBEL: im2_gray = self._get_gradient(im2_gray) if self.ALGORITHM == "ECC": try: (cc, warp_matrix) = cv2.findTransformECC(self.reference_image_gray, im2_gray, warp_matrix, self.WARP_MODE, self.CRITERIA) except Exception as e: raise e if self.ALGORITHM == "ORB": orb = cv2.ORB_create(self.MAX_FEATURES) im2_gray = np.uint8(im2_gray) keypoints2, descriptors2 = orb.detectAndCompute(im2_gray, None) # Match features. matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING) matches = matcher.match(self.orb_descriptors1, descriptors2, None) # Sort matches by score matches.sort(key=lambda x: x.distance, reverse=False) # Remove not so good matches numGoodMatches = int(len(matches) * self.GOOD_MATCH_PERCENT) matches = matches[:numGoodMatches] # Draw top matches imMatches = cv2.drawMatches(self.reference_image_gray, self.orb_keypoints1, im2_gray, keypoints2, matches, None) cv2.imwrite(image + "_match.jpg", imMatches) # Extract location of good matches points1 = np.zeros((len(matches), 2), dtype=np.float32) points2 = np.zeros((len(matches), 2), dtype=np.float32) for i, match in enumerate(matches): points1[i, :] = self.orb_keypoints1[match.queryIdx].pt points2[i, :] = keypoints2[match.trainIdx].pt # Find homography h, mask = cv2.findHomography(points1, points2, cv2.RANSAC) warp_matrix = h return (im2, warp_matrix)
def align_images(images): if not isinstance(images, list) or len(images) < 2: print("Input has to be a list of at least two images") return None size = images[0].shape for i in range(len(images)): if not images[i].shape == size: print("Input images have to be of the same size") return None # Convert images to grayscale gray_images = [] for image in images: gray_images.append(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)) model_image = gray_images[0] # Find size of images sz = model_image.shape # Define the motion model warp_mode = cv2.MOTION_TRANSLATION # Define 2x3 or 3x3 matrices and initialize the matrix to identity if warp_mode == cv2.MOTION_HOMOGRAPHY: warp_matrix = np.eye(3, 3, dtype=np.float32) else: warp_matrix = np.eye(2, 3, dtype=np.float32) # Specify the number of iterations. number_of_iterations = 5000 # Specify the threshold of the increment in the correlation coefficient between two iterations termination_eps = 1e-10 # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # Run the ECC algorithm. The results are stored in warp_matrix. aligned_images = [images[0]] for i in range(1, len(images)): (cc, warp_matrix) = cv2.findTransformECC(model_image, gray_images[i], warp_matrix, warp_mode, criteria) if warp_mode == cv2.MOTION_HOMOGRAPHY : # Use warpPerspective for Homography aligned_image = cv2.warpPerspective (images[i], warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) else: # Use warpAffine for Translation, Euclidean and Affine aligned_image = cv2.warpAffine(images[i], warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) aligned_images.append(aligned_image) return aligned_images
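# A small usage sketch for align_images above (file names are hypothetical; the frames
# are assumed to be same-size BGR images readable by cv2.imread):
import cv2

frames = [cv2.imread(p) for p in ("frame0.jpg", "frame1.jpg", "frame2.jpg")]
aligned = align_images(frames)
if aligned is not None:
    for idx, img in enumerate(aligned):
        cv2.imwrite("aligned_%d.jpg" % idx, img)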
def processMovie(filename, start, frames, outputfilename):
    reduceFPS = 4
    cap = cv2.VideoCapture(filename)
    cap.set(cv2.CAP_PROP_POS_FRAMES, start)
    S = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    out = cv2.VideoWriter(outputfilename, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                          cap.get(cv2.CAP_PROP_FPS) / reduceFPS, S, True)
    warp_mode = cv2.MOTION_EUCLIDEAN
    warp_matrix = np.eye(2, 3, dtype=np.float32)
    number_of_iterations = 200
    # Specify the threshold of the increment
    # in the correlation coefficient between two iterations
    termination_eps = 1e-4
    # Define termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
    im1_gray = np.array([])
    first = np.array([])
    for tt in range(frames):
        # Capture frame-by-frame
        _, frame = cap.read()
        if not im1_gray.size:
            im1_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            first = frame.copy()
        im2_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        try:
            (cc, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria)
        except cv2.error:
            im1_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            first = frame.copy()
            print("missed frame")
        im2_aligned = np.empty_like(frame)
        np.copyto(im2_aligned, first)
        # Transform the frame; the 5-pixel margins cut out the border left by the video conversion
        im2_aligned = cv2.warpAffine(frame[5:-5, 5:-5, :], warp_matrix, (S[0], S[1]),
                                     dst=im2_aligned,
                                     flags=cv2.INTER_NEAREST + cv2.WARP_INVERSE_MAP,
                                     borderMode=cv2.BORDER_TRANSPARENT)
        out.write(im2_aligned)
    cap.release()
    out.release()
def align_image(template, image, warp_matrix=None): # Clean sky, apply clipping to enhance contrast, and convert to a dtype that cv2 will accept clipped_template = prettify(template) clipped_image = prettify(image) if warp_matrix is None: warp_matrix = np.eye(2, 3, dtype=np.float32) # findTransformECC requires 32-bit float warp_mode = cv2.MOTION_TRANSLATION # No rotation iterations = 100 termination_eps = 1e-3 criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, iterations, termination_eps) cc, warp_matrix = cv2.findTransformECC(clipped_template, clipped_image, warp_matrix, warp_mode, criteria) warp_matrix = np.around(warp_matrix) # Round the warp matrix to remove interpolation and conserve flux aligned = cv2.warpAffine(image, warp_matrix, image.shape[::-1], flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) return aligned, warp_matrix
def align(im, ref, method=None, **kargs): """Use one of a variety of algroithms to align two images. Args: im (ndarray) image to align ref (ndarray) reference array Keyword Args: method (str or None): If given specifies which module to try and use. Options: 'scharr', 'chi2_shift', 'imreg_dft', 'cv2' **kargs (various): All other keyword arguments are passed to the specific algorithm. Returns (ImageArray or ndarray) aligned image Notes: Currently three algorithms are supported: - image_registration module's chi^2 shift: This uses a dft with an automatic up-sampling of the fourier transform for sub-pixel alignment. The metadata key *chi2_shift* contains the translation vector and errors. - imreg_dft module's similarity function. This implements a full scale, rotation, translation algorithm (by default cosntrained for just translation). It's unclear how much sub-pixel translation is accomodated. - cv2 module based affine transform on a gray scale image. from: http://www.learnopencv.com/image-alignment-ecc-in-opencv-c-python/ """ # To be consistent with x-y co-ordinate systems if all([m is None for m in [imreg_dft, chi2_shift, cv2]]): raise ImportError("align requires one of imreg_dft, chi2_shift or cv2 modules to be available.") if method == "scharr" and imreg_dft is not None: im = im.T ref = ref.T scale = np.ceil(np.max(im.shape) / 500.0) ref1 = ref.gaussian_filter(sigma=scale, mode="wrap").scharr() im1 = im.gaussian_filter(sigma=scale, mode="wrap").scharr() im1 = im1.align(ref1, method="imreg_dft") tvec = np.array(im1["tvec"]) new_im = im.shift(tvec) new_im["tvec"] = tuple(-tvec) new_im = new_im.T elif (method is None and chi2_shift is not None) or method == "chi2_shift": kargs["zeromean"] = kargs.get("zeromean", True) result = np.array(chi2_shift(ref, im, **kargs)) new_im = im.__class__(fft_tools.shiftnd(im, -result[0:2])) new_im.metadata.update(im.metadata) new_im.metadata["chi2_shift"] = result elif (method is None and imreg_dft is not None) or method == "imreg_dft": constraints = kargs.pop("constraints", {"angle": [0.0, 0.0], "scale": [1.0, 0.0]}) cls = im.__class__ with warnings.catch_warnings(): # This causes a warning due to the masking warnings.simplefilter("ignore") result = imreg_dft.similarity(ref, im, constraints=constraints) new_im = (result.pop("timg")).view(type=cls) new_im.metadata.update(im.metadata) new_im.metadata.update(result) elif (method is None and cv2 is not None) or method == "cv2": im1_gray = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY) im2_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) # Find size of image1 sz = im.shape # Define the motion model warp_mode = cv2.MOTION_TRANSLATION warp_matrix = np.eye(2, 3, dtype=np.float32) # Specify the number of iterations. number_of_iterations = 5000 # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-10 # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # Run the ECC algorithm. The results are stored in warp_matrix. (_, warp_matrix) = cv2.findTransformECC(im1_gray, im2_gray, warp_matrix, warp_mode, criteria) # Use warpAffine for Translation, Euclidean and Affine new_im = cv2.warpAffine(im, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) else: # No cv2 available so don't do anything. raise RuntimeError("Couldn't find an image alignment algorithm to use") return new_im.T
def find2dAlignmentMatrix(im1, im2, warp_mode = cv2.MOTION_TRANSLATION, center_crop_resolution=0, brightness_scale=1.0): ''' # Define the motion model warp_mode = cv2.MOTION_TRANSLATION warp_mode = cv2.MOTION_EUCLIDEAN warp_mode = cv2.MOTION_AFFINE warp_mode = cv2.MOTION_HOMOGRAPHY ''' #width, height, channels, warpModeToText = { 0 : 'Translation', 1 : 'Euclidean', 2 : 'Affine', 3 : 'Homography' } scale_factor = 1.0 # Convert to from OIIO to OpenCV-friendly format res1 = OpenCVImageBufferFromOIIOImageBuffer(im1) res2 = OpenCVImageBufferFromOIIOImageBuffer(im2) (height, width, channels) = res1.shape if center_crop_resolution >= 0: if center_crop_resolution == 0: center_crop_resolution = min(width, height)/2 if width > center_crop_resolution: print( width, height ) ws = width/2 - center_crop_resolution/2 we = width/2 + center_crop_resolution/2 hs = height/2 - center_crop_resolution/2 he = height/2 + center_crop_resolution/2 print( "Center crop : %d-%d, %d-%d" % (ws, we, hs, he) ) print( "Cropping image 1") res1 = res1[hs:he, ws:we] print( "Cropping image 2") res2 = res2[hs:he, ws:we] scale_factor = 1.0 #print( res1.shape ) #print( res2.shape ) # Scale image1 - Not entirely necessary res1 = cv2.multiply(res1, np.array([brightness_scale])) # Convert images to grayscale #if len(im1shape) > 2: if channels > 1: im1_gray = cv2.cvtColor(res1,cv2.COLOR_BGR2GRAY) im2_gray = cv2.cvtColor(res2,cv2.COLOR_BGR2GRAY) else: im1_gray = res1 im2_gray = res2 # Define 2x3 or 3x3 matrices and initialize the matrix to identity if warp_mode == cv2.MOTION_HOMOGRAPHY : warp_matrix = np.eye(3, 3, dtype=np.float32) else : warp_matrix = np.eye(2, 3, dtype=np.float32) # Specify the number of iterations. number_of_iterations = 5000; # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-10; # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) print( "Aligning - Mode : %s, %d" % (warpModeToText[warp_mode], warp_mode) ) #cv2.imwrite("im1_gray.exr", im1_gray) #cv2.imwrite("im2_gray.exr", im2_gray) # Run the ECC algorithm. The results are stored in warp_matrix. try: (cc, warp_matrix) = cv2.findTransformECC (im1_gray, im2_gray, warp_matrix, warp_mode, criteria) except Exception, e: print( "Exception in findTransformECC : %s" % repr(e)) warp_matrix = [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
def register_bands(image, template_band=1, ECC_criterion=True): """ Fix chromatic abberation in images by calculating and applying an affine transformation. Chromatic abberation is a result of uneven refraction of light of different wavelengths. It shows up as systematic blue and red edges in high-contrast areas. This should be done before other processing to minimize artifacts. Parameters ---------- image : ndarray 3- or 4-channel image, probably RGB. template : int Band to which to align the other bands. Usually G in RGB. ECC_criterion : bool Use ECC criterion to find optimal warp? Improves results, but increases processing time 5x. Returns ------- An ndarray of `image.size` Notes ----- Uses `skimage.filters.scharr` to find edges in each band, then finds and applies an affine transformation to register the images using `cv2.estimateRigidTransform` and `cv2.warpAffine`. If `ECC_criterion=True`, the matrix from `estimateRigidTransform` is updated using `cv2.findTransformECC`. """ #find dimensions height, width, depth = image.shape #define bands to analyze analyze = [] for i in range(depth): if i != template_band: analyze.append(i) # Extract bands, find edges bands = img_split(image) edges = [np.ones_like(i) for i in bands] for i in range(len(bands)): sigma_val = 0.25 edge_val = 1 while edge_val > 0.1 and sigma_val < 10: temp = filters.gaussian(bands[i], sigma=sigma_val) scharr = filters.scharr(temp) temp = scharr > filters.threshold_otsu(scharr) edge_val = np.sum(temp) / np.sum(np.ones_like(temp)) sigma_val = 2*sigma_val edges[i] = img_as_ubyte(scharr * temp) #make output image out = np.zeros((height, width, depth), dtype=np.uint8) out[:, :, template_band] = bands[template_band] try: for i in analyze: # Estimate transformation warp_matrix = np.array(cv2.estimateRigidTransform(edges[template_band], edges[i], fullAffine=False), dtype=np.float32) if ECC_criterion == True: # Optimize using ECC criterion and default settings warp_matrix = cv2.findTransformECC(edges[template_band], edges[i], warpMatrix=warp_matrix)[1] # transform aligned = cv2.warpAffine(bands[i], warp_matrix, (width, height), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP, # otherwise the transformation goes the wrong way borderMode=cv2.BORDER_CONSTANT) # add to color image out[:, :, i] = aligned except: # Probably few objects, so no smoothing and no thresholding to have as much info as possible edges = [img_as_ubyte(filters.scharr(i)) for i in edges] for i in analyze: # Estimate transformation warp_matrix = np.array(cv2.estimateRigidTransform(edges[template_band], edges[i], fullAffine=False), dtype=np.float32) if ECC_criterion == True: # Optimize using ECC criterion and default settings warp_matrix = cv2.findTransformECC(edges[template_band], edges[i], warpMatrix=warp_matrix)[1] # transform aligned = cv2.warpAffine(bands[i], warp_matrix, (width, height), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP, # otherwise the transformation goes the wrong way borderMode=cv2.BORDER_CONSTANT) # add to color image out[:, :, i] = aligned return(img_as_ubyte(out))
warp_matrix = np.eye(3, 3, dtype=np.float32) else : warp_matrix = np.eye(2, 3, dtype=np.float32) # Specify the number of iterations. number_of_iterations = 5000 # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-10 # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # Run the ECC algorithm. The results are stored in warp_matrix. (cc, warp_matrix) = cv2.findTransformECC (groundtruth,slammap_unaligned,warp_matrix, warp_mode, criteria) if warp_mode == cv2.MOTION_HOMOGRAPHY : # Use warpPerspective for Homography slammap = cv2.warpPerspective (slammap_unaligned, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) else : # Use warpAffine for Translation, Euclidean and Affine slammap = cv2.warpAffine(slammap_unaligned, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) # Show the images cv2.imshow("Groundtruth", groundtruth) cv2.imshow("SLAM", slammap_unaligned) cv2.imshow("Aligned slam", slammap) #cv2.waitKey(0) else: slammap = slammap_unaligned
def alignImages(self, im0, im1):
    # MOTION_EUCLIDEAN expects a 2x3 float32 warp matrix, not a 3x3 float64 identity
    A0 = np.eye(2, 3, dtype=np.float32)
    # findTransformECC returns (correlation, warp_matrix); keep only the matrix
    cc, A = cv2.findTransformECC(im0, im1, A0, cv2.MOTION_EUCLIDEAN)
    return A
# So copy the red channel im_aligned[:,:,2] = im_color[:,:,2] # Define motion model warp_mode = cv2.MOTION_HOMOGRAPHY # Set the warp matrix to identity. if warp_mode == cv2.MOTION_HOMOGRAPHY : warp_matrix = np.eye(3, 3, dtype=np.float32) else : warp_matrix = np.eye(2, 3, dtype=np.float32) # Set the stopping criteria for the algorithm. criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000, 1e-10) # Warp the blue and green channels to the red channel for i in xrange(0,2) : (cc, warp_matrix) = cv2.findTransformECC (get_gradient(im_color[:,:,2]), get_gradient(im_color[:,:,i]),warp_matrix, warp_mode, criteria) if warp_mode == cv2.MOTION_HOMOGRAPHY : # Use Perspective warp when the transformation is a Homography im_aligned[:,:,i] = cv2.warpPerspective (im_color[:,:,i], warp_matrix, (width,height), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) else : # Use Affine warp when the transformation is not a Homography im_aligned[:,:,i] = cv2.warpAffine(im_color[:,:,i], warp_matrix, (width, height), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP); print warp_matrix # Show final output #cv2.imwrite("Color_Image.jpg", im_color) cv2.imwrite("Aligned_Image.jpg", im_aligned) #cv2.waitKey(0)
def alignImages(imgA, imgB): if cv2.__version__ != '3.1.0': print 'ERROR: This script requires OpenCV 3.1.0, but cv2.__version__ ==', cv2.__version__ sys.exit(1) # Read 8-bit color image. # This is an image in which the three channels are # concatenated vertically. # im = cv2.imread("images/emir.jpg", cv2.IMREAD_GRAYSCALE); # # # Find the width and height of the color image # sz = im.shape # print sz # height = int(sz[0] / 3); # width = sz[1] # # # Extract the three channels from the gray scale image # # and merge the three channels into one color image # im_color = np.zeros((height,width,3), dtype=np.uint8 ) # for i in xrange(0,3): # im_color[:,:,i] = im[ i * height:(i+1) * height,:] # # # Allocate space for aligned image # im_aligned = np.zeros((height,width,3), dtype=np.uint8 ) # The blue and green channels will be aligned to the red channel. # So copy the red channel # im_aligned[:,:,2] = imgA # Define motion model warp_mode = cv2.MOTION_TRANSLATION # warp_mode = cv2.MOTION_HOMOGRAPHY # Set the warp matrix to identity. if warp_mode == cv2.MOTION_HOMOGRAPHY: warp_matrix = np.eye(3, 3, dtype=np.float32) else: warp_matrix = np.eye(2, 3, dtype=np.float32) # Set the stopping criteria for the algorithm. # criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 50000, 0.0001) criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5000, 1e-10) # Get greyscale images: greyA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY) greyB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY) # Warp the blue and green channels to the red channel # Warp imgA to match imgB: (cc, warp_matrix) = cv2.findTransformECC( get_gradient(greyA), get_gradient(greyB), warp_matrix, warp_mode , criteria) width, height, _unused = imgA.shape imgAligned = None if warp_mode == cv2.MOTION_HOMOGRAPHY: # Use Perspective warp when the transformation is a Homography imgAligned = cv2.warpPerspective(imgB, warp_matrix, (width,height), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) else: # Use Affine warp when the transformation is not a Homography imgAligned = cv2.warpAffine(imgB, warp_matrix, (width, height), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP); print 'warp_matrix:', warp_matrix return imgAligned
def restore(images,iterations=50): num_of_images = len(images) ref_idx = num_of_images/2 # Equalize all images for i in images: cv2.xphoto.balanceWhite(i, i, cv2.xphoto.WHITE_BALANCE_SIMPLE) # At least 3 images are needed, otherwise we just return the first one if num_of_images < 3: return images[0]; # Register imags w.r.t. the central one fixed = cv2.cvtColor(images[ref_idx],cv2.COLOR_BGR2GRAY) # Find size sz = fixed.shape # Define the motion model warp_mode = cv2.MOTION_HOMOGRAPHY # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-8; # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, iterations, termination_eps) alignedImages=[] alignedImagesIntensity=[] i = 0 for moving in images: if i == ref_idx: aligned=images[ref_idx] aligned_intensity = fixed else: try: # convert to intensity moving = cv2.cvtColor(moving, cv2.COLOR_BGR2GRAY) # Define 3x3 matrices and initialize the matrix to identity warp_matrix = np.eye(3, 3, dtype=np.float32) # Run the ECC algorithm. The results are stored in warp_matrix. (cc, warp_matrix) = cv2.findTransformECC(fixed, moving, warp_matrix, warp_mode, criteria) aligned = cv2.warpPerspective(images[i], warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) except: aligned = None if aligned is not None: aligned_intensity = cv2.cvtColor(aligned,cv2.COLOR_BGR2GRAY) alignedImages.append(aligned) alignedImagesIntensity.append(aligned_intensity) i += 1 nx = sz[0] ny = sz[1] nz = num_of_images retr = np.empty((nx,ny)) retg = np.empty((nx,ny)) retb = np.empty((nx,ny)) for i in xrange(nx): for j in xrange(ny): ii = None values = [x[i,j] for x in alignedImagesIntensity ] values.sort() center = values[nz//2] for k in xrange(nz): if alignedImagesIntensity[k][i,j] == center: ii = k break assert ii is not None retb[i,j] = alignedImages[ii][i,j,0] retg[i,j] = alignedImages[ii][i,j,1] retr[i,j] = alignedImages[ii][i,j,2] retb = np.uint8(retb) retg = np.uint8(retg) retr = np.uint8(retr) restored = cv2.merge((retb,retg,retr)) return restored
warp_matrix = np.eye(3, 3, dtype=np.float32) else : warp_matrix = np.eye(2, 3, dtype=np.float32) # Specify the number of iterations. number_of_iterations = 5000; # Specify the threshold of the increment # in the correlation coefficient between two iterations termination_eps = 1e-10; # Define termination criteria criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) # Run the ECC algorithm. The results are stored in warp_matrix. (cc, warp_matrix) = cv2.findTransformECC (im1,im2,warp_matrix, warp_mode, criteria) if warp_mode == cv2.MOTION_HOMOGRAPHY : # Use warpPerspective for Homography im2_aligned = cv2.warpPerspective (im2, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) else : # Use warpAffine for Translation, Euclidean and Affine im2_aligned = cv2.warpAffine(im2, warp_matrix, (sz[1],sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP); newm.append(im2_aligned) # Show final results # cv2.imshow("Image 1", im1) # cv2.imshow("Image 2", im2) # cv2.imshow("Aligned Image 2", im2_aligned) # cv2.waitKey(0) # cv2.destroyAllWindows()
# warp_model = cv2.MOTION_TRANSLATION # <-- TRANSLATIONS is for 2D translation (ie No Scaling) warp_model = cv2.MOTION_HOMOGRAPHY # <-- HOMOGRAPHY is for 3D translation (takes care of skew) if warp_model == cv2.MOTION_HOMOGRAPHY : warp_matrix = np.eye(3, 3, dtype=np.float32) # homography 3x3 else : warp_matrix = np.eye(2, 3, dtype=np.float32) # translation 2x3 number_of_iterations = 5000; termination_eps = 1e-10; criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) (cc, warp_matrix) = cv2.findTransformECC(i1, i2, warp_matrix, warp_model, criteria) if warp_model == cv2.MOTION_HOMOGRAPHY : i2_aligned = cv2.warpPerspective(i2, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) i3_aligned = cv2.warpPerspective(i3, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP) else : i2_aligned = cv2.warpAffine(i2, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP); i3_aligned = cv2.warpAffine(i3, warp_matrix, (sz[1], sz[0]), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP); # cv2.imwrite('1_org_greyscaled.png', i1) cv2.imwrite('lcns13.png', i2_aligned) cv2.imwrite('lcn13.png', i3_aligned) cv2.imshow("Image 1", i1) cv2.imshow("Image 2", i2) cv2.imshow("Aligned Image 2", i2_aligned)