def test_simple(self):
    img = self.get_sample('stitching/a1.png')
    finder = cv.ORB.create()
    imgFea = cv.detail.computeImageFeatures2(finder, img)
    self.assertIsNotNone(imgFea)
    matcher = cv.detail_BestOf2NearestMatcher(False, 0.3)
    self.assertIsNotNone(matcher)
    matcher = cv.detail_AffineBestOf2NearestMatcher(False, False, 0.3)
    self.assertIsNotNone(matcher)
    matcher = cv.detail_BestOf2NearestRangeMatcher(2, False, 0.3)
    self.assertIsNotNone(matcher)
    estimator = cv.detail_AffineBasedEstimator()
    self.assertIsNotNone(estimator)
    estimator = cv.detail_HomographyBasedEstimator()
    self.assertIsNotNone(estimator)
    adjuster = cv.detail_BundleAdjusterReproj()
    self.assertIsNotNone(adjuster)
    adjuster = cv.detail_BundleAdjusterRay()
    self.assertIsNotNone(adjuster)
    adjuster = cv.detail_BundleAdjusterAffinePartial()
    self.assertIsNotNone(adjuster)
    adjuster = cv.detail_NoBundleAdjuster()
    self.assertIsNotNone(adjuster)
    compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_NO)
    self.assertIsNotNone(compensator)
    compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN)
    self.assertIsNotNone(compensator)
    compensator = cv.detail.ExposureCompensator_createDefault(cv.detail.ExposureCompensator_GAIN_BLOCKS)
    self.assertIsNotNone(compensator)
    seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
    self.assertIsNotNone(seam_finder)
    seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
    self.assertIsNotNone(seam_finder)
    seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR")
    self.assertIsNotNone(seam_finder)
    seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
    self.assertIsNotNone(seam_finder)
    seam_finder = cv.detail_DpSeamFinder("COLOR")
    self.assertIsNotNone(seam_finder)
    seam_finder = cv.detail_DpSeamFinder("COLOR_GRAD")
    self.assertIsNotNone(seam_finder)
    blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
    self.assertIsNotNone(blender)
    blender = cv.detail.Blender_createDefault(cv.detail.Blender_FEATHER)
    self.assertIsNotNone(blender)
    blender = cv.detail.Blender_createDefault(cv.detail.Blender_MULTI_BAND)
    self.assertIsNotNone(blender)
    timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_AS_IS)
    self.assertIsNotNone(timelapser)
    timelapser = cv.detail.Timelapser_createDefault(cv.detail.Timelapser_CROP)
    self.assertIsNotNone(timelapser)
def get_matcher(self):
    if self.matcher_type == 'affine':
        matcher = cv.detail_AffineBestOf2NearestMatcher(
            False, self.try_cuda, self.match_conf)
    else:
        matcher = cv.detail.BestOf2NearestMatcher_create(
            self.try_cuda, self.match_conf)
    return matcher
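# Hedged usage sketch (not from the original source): get_matcher above only reads
# self.matcher_type, self.try_cuda and self.match_conf, so a minimal holder class is
# enough to exercise it; the class name and values below are illustrative assumptions.
class _MatcherConfig:
    matcher_type = 'affine'
    try_cuda = False
    match_conf = 0.3
    get_matcher = get_matcher  # bind the function above as a method

_matcher = _MatcherConfig().get_matcher()  # returns a cv.detail_AffineBestOf2NearestMatcher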
def matcher_type_check(matcher_type, range_width, try_cuda, match_conf):
    if matcher_type == "affine":
        matcher = cv2.detail_AffineBestOf2NearestMatcher(
            False, try_cuda, match_conf)
    elif range_width == -1:
        matcher = cv2.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
    else:
        matcher = cv2.detail.BestOf2NearestRangeMatcher_create(
            range_width, try_cuda, match_conf)
    return matcher
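# Hedged usage sketch (assumption, not original code; presumes the 'import cv2'
# header this snippet uses): exercising each of the three branches of
# matcher_type_check with illustrative parameter values.
homography_matcher = matcher_type_check("homography", -1, False, 0.65)  # BestOf2NearestMatcher
range_matcher = matcher_type_check("homography", 5, False, 0.3)         # range-limited matcher
affine_matcher = matcher_type_check("affine", -1, False, 0.3)           # affine matcher
# Each returned matcher exposes apply2(features), as used elsewhere in this file.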
def __init__(self, matcher_type=DEFAULT_MATCHER, range_width=DEFAULT_RANGE_WIDTH, **kwargs):
    if matcher_type == "affine":
        """https://docs.opencv.org/master/d3/dda/classcv_1_1detail_1_1AffineBestOf2NearestMatcher.html"""  # noqa
        self.matcher = cv.detail_AffineBestOf2NearestMatcher(**kwargs)
    elif range_width == -1:
        """https://docs.opencv.org/master/d4/d26/classcv_1_1detail_1_1BestOf2NearestMatcher.html"""  # noqa
        self.matcher = cv.detail.BestOf2NearestMatcher_create(**kwargs)
    else:
        """https://docs.opencv.org/master/d8/d72/classcv_1_1detail_1_1BestOf2NearestRangeMatcher.html"""  # noqa
        self.matcher = cv.detail.BestOf2NearestRangeMatcher_create(
            range_width, **kwargs)
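# Hedged sketch (assumption): the __init__ above belongs to a matcher wrapper class
# whose name is not shown here; 'FeatureMatcher' is a hypothetical stand-in, and
# DEFAULT_MATCHER / DEFAULT_RANGE_WIDTH are assumed module constants such as
# "homography" and -1. Remaining kwargs are forwarded to the underlying cv.detail
# factory, so binding keywords like match_conf should pass through.
matcher_wrapper = FeatureMatcher(matcher_type="affine", match_conf=0.3)  # hypothetical class name
pairwise_matches = matcher_wrapper.matcher.apply2(features)              # 'features' assumed to exist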
def get_matcher(args):
    try_cuda = args.try_cuda
    matcher_type = args.matcher
    if args.match_conf is None:
        # Default confidences mirror the code below: 0.3 for ORB, 0.65 otherwise
        if args.features == 'orb':
            match_conf = 0.3
        else:
            match_conf = 0.65
    else:
        match_conf = args.match_conf
    range_width = args.rangewidth
    if matcher_type == "affine":
        matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf)
    elif range_width == -1:
        matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
    else:
        matcher = cv.detail.BestOf2NearestRangeMatcher_create(range_width, try_cuda, match_conf)
    return matcher
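# Hedged sketch (not part of the original script; assumes the usual 'import cv2 as cv'
# header): a minimal argparse configuration exposing exactly the attributes that
# get_matcher(args) reads (try_cuda, matcher, match_conf, features, rangewidth).
# Option names and defaults are assumptions.
import argparse

parser = argparse.ArgumentParser(description="matcher options")
parser.add_argument('--try_cuda', action='store_true')
parser.add_argument('--matcher', choices=('homography', 'affine'), default='homography')
parser.add_argument('--match_conf', type=float, default=None)
parser.add_argument('--features', choices=('orb', 'surf', 'sift'), default='orb')
parser.add_argument('--rangewidth', type=int, default=-1)

args = parser.parse_args([])   # empty argv: fall back to the defaults above
matcher = get_matcher(args)    # the ORB default selects match_conf = 0.3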
def main():
    img_names = [r'C:\Scratch\IPA_Data\FullRes\a0_nor.tif',
                 r'C:\Scratch\IPA_Data\FullRes\a1_nor.tif',
                 r'C:\Scratch\IPA_Data\FullRes\a2_nor.tif',
                 r'C:\Scratch\IPA_Data\FullRes\a3_nor.tif',
                 r'C:\Scratch\IPA_Data\FullRes\a4_nor.tif',
                 r'C:\Scratch\IPA_Data\FullRes\a5_nor.tif',
                 r'C:\Scratch\IPA_Data\FullRes\a6_nor.tif',
                 r'C:\Scratch\IPA_Data\FullRes\a7_nor.tif',
                 r'C:\Scratch\IPA_Data\FullRes\a8_nor.tif',
                 r'C:\Scratch\IPA_Data\FullRes\a9_nor.tif']
    # r'C:\Scratch\IPA_Data\FullRes\b0_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\b1_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\b2_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\b3_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\b4_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\b5_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\b6_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\b7_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\b8_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\b9_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c0_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c1_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c2_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c3_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c4_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c5_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c6_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c7_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c8_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\c9_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d0_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d1_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d2_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d3_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d4_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d5_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d6_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d7_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d8_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\d9_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e0_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e1_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e2_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e3_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e4_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e5_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e6_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e7_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e8_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\e9_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f0_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f1_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f2_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f3_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f4_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f5_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f6_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f7_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f8_nor.tif',
    # r'C:\Scratch\IPA_Data\FullRes\f9_nor.tif']
    print(img_names)

    # ================ DEFINE ALL PARAMETERS ================
    # Flags
    try_cuda = False
    work_megapix = 8
    features_type = "surf"
    matcher_type = "affine"
    estimator_type = "affine"
    match_conf = 0.75
    conf_thresh = 1.0
    ba_cost_func = "affine"
    ba_refine_mask = "xxxxx"
    wave_correct = "vert"
    save_graph_var = None

    # Compositing Flags
    warp_type = "affine"
    seam_megapix = 2.0
    seam_find_type = "gc_color"
    compose_megapix = -1
    expos_comp = "no"
    expos_comp_nr_feeds = 1
    # expos_comp_nr_filtering = 2
    expos_comp_block_size = 32
    blend_type = "multiband"
    blend_strength = 5
    result_name = "test_result_3.png"
    timelapse_name = None
    range_width = 8

    # Check whether wave correction is wanted, then set the boolean check value
    if wave_correct == 'no':
        do_wave_correct = False
    else:
        do_wave_correct = True

    # Check whether a graph file is to be created
    if save_graph_var is None:
        save_graph = False
    else:
        save_graph = True
        save_graph_to = save_graph_var

    # Check whether the exposure is to be compensated; if so, define which compensator to use
    if expos_comp == 'no':
        expos_comp_type = cv.detail.ExposureCompensator_NO
    elif expos_comp == 'gain':
        expos_comp_type = cv.detail.ExposureCompensator_GAIN
    elif expos_comp == 'gain_blocks':
        expos_comp_type = cv.detail.ExposureCompensator_GAIN_BLOCKS
    elif expos_comp == 'channel':
        expos_comp_type = cv.detail.ExposureCompensator_CHANNELS
    elif expos_comp == 'channel_blocks':
        expos_comp_type = cv.detail.ExposureCompensator_CHANNELS_BLOCKS
    else:
        print("Bad exposure compensation method")
        exit()

    # Check whether the timelapse (i.e. the intermediate layers) is to be output
    if timelapse_name is not None:
        timelapse = True
        if timelapse_name == "as_is":
            timelapse_type = cv.detail.Timelapser_AS_IS
        elif timelapse_name == "crop":
            timelapse_type = cv.detail.Timelapser_CROP
        else:
            print("Bad timelapse method")
            exit()
    else:
        timelapse = False

    # Check the feature type to be used and create the finder that will be used
    # TODO - See if there are other feature detectors which are more suitable
    if features_type == 'orb':
        finder = cv.ORB.create(500, 1.1, 8, 50, 0, 2, 0, 50, 20)
    elif features_type == 'surf':
        finder = cv.xfeatures2d_SURF.create(100, 8, 4, False, False)
    elif features_type == 'sift':
        finder = cv.xfeatures2d_SIFT.create()
    else:
        print("Unknown descriptor type")
        exit()

    # Pre-allocate other variables to work with
    seam_work_aspect = 1          # Seam aspect ratio
    full_img_sizes = []           # Sizes of the full images
    features = []                 # List for storing features
    images = []                   # List for storing the images
    is_work_scale_set = False     # Bool for working image scaling
    is_seam_scale_set = False     # Bool for seam scaling
    is_compose_scale_set = False  # Bool for composition image scaling

    # Iterate through the image names
    for name in img_names:
        # Read the image into a numpy array
        full_img = cv.imread(cv.samples.findFile(name))
        # Check whether the file could be read successfully
        if full_img is None:
            print("Cannot read image ", name)
            exit()
        # Add the image size to the list
        # TODO Could change this to be constant...
        full_img_sizes.append((full_img.shape[1], full_img.shape[0]))
        # Define the working scale for the images, based on the megapixel count entered
        if work_megapix < 0:
            # If a negative value is entered, use the true scale
            img = full_img
            work_scale = 1
            is_work_scale_set = True
        else:
            if is_work_scale_set is False:
                work_scale = min(1.0, np.sqrt(work_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
                is_work_scale_set = True
            img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale,
                            interpolation=cv.INTER_LINEAR_EXACT)
        # Define the scale at which the seams will be processed
        if is_seam_scale_set is False:
            seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
            seam_work_aspect = seam_scale / work_scale
            is_seam_scale_set = True
        # Get the image features for this image
        img_fea = cv.detail.computeImageFeatures2(finder, img)
        test_feat_img = cv.drawKeypoints(img, img_fea.getKeypoints(), None, (255, 0, 0), 4)
        cv.namedWindow('image', cv.WINDOW_NORMAL)
        cv.imshow('image', test_feat_img)
        cv.resizeWindow('image', int(full_img_sizes[0][0] / 10), int(full_img_sizes[0][1] / 10))
        cv.waitKey()
        features.append(img_fea)
        img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale,
                        interpolation=cv.INTER_LINEAR_EXACT)
        images.append(img)

    # Define the matcher type to be used for the features
    if matcher_type == "affine":
        matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf)
    elif range_width == -1:
        matcher = cv.detail_BestOf2NearestMatcher(try_cuda, match_conf)
    else:
        matcher = cv.detail_BestOf2NearestRangeMatcher(range_width, try_cuda, match_conf)

    # Apply the matcher to the features, obtaining matches between them
    p = matcher.apply2(features)
    # Free unused memory
    matcher.collectGarbage()

    # Save the graph if chosen
    if save_graph:
        f = open(save_graph_to, "w")
        f.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh))
        f.close()

    # Remove images whose matches are not above a confidence threshold
    indices = cv.detail.leaveBiggestComponent(features, p, match_conf)

    # Pre-allocate
    img_subset = []              # List to hold the subset of image arrays
    img_names_subset = []        # List of the names of the subset images
    full_img_sizes_subset = []   # Full-resolution sizes of the images in the subset
    num_images = len(indices)    # Number of images kept after thresholding the feature matches
    # TODO this appears to be the issue run into before...
    # the matching beforehand is producing 0 results

    # Iterate through the images that were matched and get lists of matches/images
    for i in range(num_images):
        img_names_subset.append(img_names[indices[i, 0]])             # Append the names
        img_subset.append(images[indices[i, 0]])                      # Append the actual image arrays
        full_img_sizes_subset.append(full_img_sizes[indices[i, 0]])   # Append their sizes

    # Update the lists of images and image names
    images = img_subset
    img_names = img_names_subset
    full_img_sizes = full_img_sizes_subset
    # Get the new number of matched images (shouldn't change in the mosaicing project)
    num_images = len(img_names)

    # Simple check that there are sufficient images
    if num_images < 2:
        print("Need more images")
        exit()

    # Create the configured estimator to determine approximate relative orientation parameters
    if estimator_type == "affine":
        estimator = cv.detail_AffineBasedEstimator()
    else:
        estimator = cv.detail_HomographyBasedEstimator()
    b, cameras = estimator.apply(features, p, None)
    # Check whether estimation passed, based on the boolean 'b'
    if not b:
        print("Homography estimation failed.")
        exit()

    # Iterate through the computed camera orientations
    for cam in cameras:
        # Convert the camera rotation matrix to float32
        cam.R = cam.R.astype(np.float32)

    # Define the bundle adjustment cost function
    # TODO read up on the documentation here
    if ba_cost_func == "reproj":
        adjuster = cv.detail_BundleAdjusterReproj()
    elif ba_cost_func == "ray":
        adjuster = cv.detail_BundleAdjusterRay()
    elif ba_cost_func == "affine":
        adjuster = cv.detail_BundleAdjusterAffinePartial()
    elif ba_cost_func == "no":
        adjuster = cv.detail_NoBundleAdjuster()
    else:
        print("Unknown bundle adjustment cost function: ", ba_cost_func)
        exit()

    # Set the confidence threshold for the adjuster
    adjuster.setConfThresh(1)

    # Pre-allocate the mask that determines which camera parameters to refine
    refine_mask = np.zeros((3, 3), np.uint8)
    # Determine which parameters to refine (or vice versa... not refine)
    # TODO check this
    if ba_refine_mask[0] == 'x':
        refine_mask[0, 0] = 1
    if ba_refine_mask[1] == 'x':
        refine_mask[0, 1] = 1
    if ba_refine_mask[2] == 'x':
        refine_mask[0, 2] = 1
    if ba_refine_mask[3] == 'x':
        refine_mask[1, 1] = 1
    if ba_refine_mask[4] == 'x':
        refine_mask[1, 2] = 1
    # Apply the refinement mask to the adjuster
    adjuster.setRefinementMask(refine_mask)

    # Recompute the camera orientation parameters with the refinement mask
    b, cameras = adjuster.apply(features, p, cameras)
    # Check whether the parameters were adjusted correctly
    if not b:
        print("Camera parameters adjusting failed.")
        exit()

    # Get the median focal length to scale the images accordingly
    # TODO probably remove this scaling as it is not required for this project; warped_image_scale should stay = 1
    focals = []
    for cam in cameras:
        focals.append(cam.focal)
    focals.sort()
    if len(focals) % 2 == 1:
        warped_image_scale = focals[len(focals) // 2]
    else:
        warped_image_scale = (focals[len(focals) // 2] + focals[len(focals) // 2 - 1]) / 2

    # Perform the wave correction
    # TODO adjust this... only performs a horizontal correction; need to implement a vertical correction, possibly both.
    # Potentially not required at all if the estimation of camera parameter bundle adjustment is performed well
    if do_wave_correct:
        rmats = []
        for cam in cameras:
            rmats.append(np.copy(cam.R))
        if wave_correct == 'vert':
            rmats = cv.detail.waveCorrect(rmats, cv.detail.WAVE_CORRECT_VERT)
        elif wave_correct == 'horiz':
            rmats = cv.detail.waveCorrect(rmats, cv.detail.WAVE_CORRECT_HORIZ)
        for idx, cam in enumerate(cameras):
            cam.R = rmats[idx]

    # Pre-allocation
    corners = []        # Top-left corners of the warped images
    masks_warped = []   # The masks of the warped regions
    images_warped = []  # The warped images
    sizes = []          # Sizes of the warped images
    masks = []          # Masks for the seams

    # Iterate through the images, with 'i' as the index, appending a pre-allocated mask per image
    for i in range(0, num_images):
        um = cv.UMat(255 * np.ones((images[i].shape[0], images[i].shape[1]), np.uint8))
        masks.append(um)

    # Create the warper used to distort the images according to the layout or shape they are to be stitched into
    warper = cv.PyRotationWarper(warp_type, warped_image_scale * seam_work_aspect)

    # Iterate through the images to create the seams
    for idx in range(0, num_images):
        # Get the respective camera matrix
        mat_k = cameras[idx].K().astype(np.float32)
        # Scale the K matrix by the seam aspect scale
        mat_k[0, 0] *= seam_work_aspect
        mat_k[0, 2] *= seam_work_aspect
        mat_k[1, 1] *= seam_work_aspect
        mat_k[1, 2] *= seam_work_aspect
        # Project the image into the warping shape
        # TODO Not sure if we actually need the images warped; the rotation and translation should suffice
        corner, image_wp = warper.warp(images[idx], mat_k, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
        corners.append(corner)
        sizes.append((image_wp.shape[1], image_wp.shape[0]))
        images_warped.append(image_wp)
        # Warp the masks as well
        p, mask_wp = warper.warp(masks[idx], mat_k, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
        masks_warped.append(mask_wp.get())

    # Pre-allocation and conversion of the warped images to floats
    images_warped_f = []
    for img in images_warped:
        imgf = img.astype(np.float32)
        images_warped_f.append(imgf)

    # If exposure correction is required... it shouldn't be, although there is one bad image in there, so quite possibly
    if cv.detail.ExposureCompensator_CHANNELS == expos_comp_type:
        compensator = cv.detail_ChannelsCompensator(expos_comp_nr_feeds)
        # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
    elif cv.detail.ExposureCompensator_CHANNELS_BLOCKS == expos_comp_type:
        compensator = cv.detail_BlocksChannelsCompensator(
            expos_comp_block_size, expos_comp_block_size, expos_comp_nr_feeds)
        # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
    else:
        compensator = cv.detail.ExposureCompensator_createDefault(expos_comp_type)

    # Apply the exposure compensator? Or set it up at least
    compensator.feed(corners=corners, images=images_warped, masks=masks_warped)

    # Define the type of seam finder to be used
    seam_finder = None
    if seam_find_type == "no":
        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
    elif seam_find_type == "voronoi":
        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
    elif seam_find_type == "gc_color":
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR")
    elif seam_find_type == "gc_colorgrad":
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
    elif seam_find_type == "dp_color":
        seam_finder = cv.detail_DpSeamFinder("COLOR")
    elif seam_find_type == "dp_colorgrad":
        seam_finder = cv.detail_DpSeamFinder("COLOR_GRAD")
    if seam_finder is None:
        print("Can't create the following seam finder ", seam_find_type)
        exit()

    # Find the seams
    # TODO potentially try using the non-warped images
    seam_finder.find(images_warped_f, corners, masks_warped)

    # Clear the variables from memory / use later
    imgListe = []
    images_warped = []
    images_warped_f = []
    masks = []

    # Clear or pre-allocate variables
    compose_scale = 1
    corners = []
    sizes = []
    blender = None
    timelapser = None
    compose_work_aspect = 1

    # Iterate through all the images again
    for idx, name in enumerate(img_names):
        # Read in the image and get the composition scale; should be left at 1...
        # TODO check the composition scale and whether this needs to be scaled
        full_img = cv.imread(name)
        # Compute the composition scale, work aspect ratio and warped image scale, and create a warper to match
        if not is_compose_scale_set:
            if compose_megapix > 0:
                compose_scale = min(1.0, np.sqrt(compose_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
            is_compose_scale_set = True
            compose_work_aspect = compose_scale / work_scale
            warped_image_scale *= compose_work_aspect
            warper = cv.PyRotationWarper(warp_type, warped_image_scale)
            for i in range(0, len(img_names)):
                # Adjust the camera parameters by the composition work aspect ratio
                cameras[i].focal *= compose_work_aspect
                cameras[i].ppx *= compose_work_aspect
                cameras[i].ppy *= compose_work_aspect
                # Compute the size of the scaled full image
                sz = (full_img_sizes[i][0] * compose_scale, full_img_sizes[i][1] * compose_scale)
                # Get the intrinsic camera matrix
                mat_k = cameras[i].K().astype(np.float32)
                # Compute the warped ROI from the rotation and intrinsic matrix
                # TODO this could possibly be just the rotation matrix
                # One possibility this isn't working is that it should be translating images...
                roi = warper.warpRoi(sz, mat_k, cameras[i].R)
                # Get the corners from the output parameters
                corners.append(roi[0:2])
                # Get the sizes from the output parameters
                sizes.append(roi[2:4])
        # Scale the image if the composition scale differs appreciably from 1, otherwise leave it as is
        if abs(compose_scale - 1) > 1e-1:
            img = cv.resize(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale,
                            interpolation=cv.INTER_LINEAR_EXACT)
        else:
            img = full_img
        # Create a tuple of the image size
        img_size = (img.shape[1], img.shape[0])
        # Convert the intrinsic matrix to float32
        mat_k = cameras[idx].K().astype(np.float32)
        # Warp the images accordingly...
        # TODO look at the interpolation and border values
        corner, image_warped = warper.warp(img, mat_k, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
        # Define the mask and warp it to the same shape
        mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8)
        p, mask_warped = warper.warp(mask, mat_k, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
        # Apply the exposure compensation
        compensator.apply(idx, corners[idx], image_warped, mask_warped)
        # Convert the image back to integer for minimal memory use
        image_warped_s = image_warped.astype(np.int16)
        image_warped = []  # Clear variable
        # Dilate the warped mask image
        dilated_mask = cv.dilate(masks_warped[idx], None)
        # Resize the dilated mask to create the seam mask
        seam_mask = cv.resize(dilated_mask, (mask_warped.shape[1], mask_warped.shape[0]), 0, 0, cv.INTER_LINEAR_EXACT)
        # Get the output seam
        mask_warped = cv.bitwise_and(seam_mask, mask_warped)
        # If the blender object hasn't been created yet
        if blender is None and not timelapse:
            blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
            dst_sz = cv.detail.resultRoi(corners=corners, sizes=sizes)
            # Get the blend width, based on the blend strength
            blend_width = np.sqrt(dst_sz[2] * dst_sz[3]) * blend_strength / 100
            # Check whether a usable width was computed
            if blend_width < 1:
                blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
            # Create a multi-band blender
            elif blend_type == "multiband":
                blender = cv.detail_MultiBandBlender()
                blender.setNumBands(int(np.log(blend_width) / np.log(2.) - 1.))
            # Create a feather blender
            elif blend_type == "feather":
                blender = cv.detail_FeatherBlender()
                blender.setSharpness(1. / blend_width)
            # Prepare the blender for the destination ROI
            blender.prepare(dst_sz)
        # If a timelapse type is passed, create the timelapser object
        elif timelapser is None and timelapse:
            timelapser = cv.detail.Timelapser_createDefault(timelapse_type)
            timelapser.initialize(corners, sizes)
        # If the timelapse parameter is passed
        if timelapse:
            # Initialise an array of ones of the right shape
            matones = np.ones((image_warped_s.shape[0], image_warped_s.shape[1]), np.uint8)
            # Add the warped image into the timelapser
            timelapser.process(image_warped_s, matones, corners[idx])
            # Get the index where the file name starts
            pos_s = img_names[idx].rfind("/")
            # Build the fixed file name
            if pos_s == -1:
                fixed_file_name = "fixed_" + img_names[idx]
            else:
                fixed_file_name = img_names[idx][:pos_s + 1] + "fixed_" + img_names[idx][pos_s + 1:]
            # Write the temporary partial image
            cv.imwrite(fixed_file_name, timelapser.getDst())
        else:
            # Pass the warped image into the blender
            blender.feed(cv.UMat(image_warped_s), mask_warped, corners[idx])

    # If the timelapse parameter is not passed
    if not timelapse:
        # Pre-allocate the results
        result = None
        result_mask = None
        # Get the blended results
        result, result_mask = blender.blend(result, result_mask)
        # Output the final result
        cv.imwrite(result_name, result)
        # Make the image shape fit into the window
        zoomx = 600.0 / result.shape[1]
        # Show the final output
        dst = cv.normalize(src=result, dst=None, alpha=255., norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
        dst = cv.resize(dst, dsize=None, fx=zoomx, fy=zoomx)
        cv.imshow(result_name, dst)
        cv.waitKey()

    print('Done')
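# Hedged entry-point sketch (assumption): the script above is presumably run directly
# with the usual header imports (import numpy as np; import cv2 as cv).
if __name__ == '__main__':
    main()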
def main():
    args = parser.parse_args()
    img_names = args.img_names
    print(img_names)
    preview = args.preview
    try_cuda = args.try_cuda
    work_megapix = args.work_megapix
    seam_megapix = args.seam_megapix
    compose_megapix = args.compose_megapix
    conf_thresh = args.conf_thresh
    features_type = args.features
    matcher_type = args.matcher
    estimator_type = args.estimator
    ba_cost_func = args.ba
    ba_refine_mask = args.ba_refine_mask
    wave_correct = args.wave_correct
    if wave_correct == 'no':
        do_wave_correct = False
    else:
        do_wave_correct = True
    if args.save_graph is None:
        save_graph = False
    else:
        save_graph = True
        save_graph_to = args.save_graph
    warp_type = args.warp
    if args.expos_comp == 'no':
        expos_comp_type = cv.detail.ExposureCompensator_NO
    elif args.expos_comp == 'gain':
        expos_comp_type = cv.detail.ExposureCompensator_GAIN
    elif args.expos_comp == 'gain_blocks':
        expos_comp_type = cv.detail.ExposureCompensator_GAIN_BLOCKS
    elif args.expos_comp == 'channel':
        expos_comp_type = cv.detail.ExposureCompensator_CHANNELS
    elif args.expos_comp == 'channel_blocks':
        expos_comp_type = cv.detail.ExposureCompensator_CHANNELS_BLOCKS
    else:
        print("Bad exposure compensation method")
        exit()
    expos_comp_nr_feeds = args.expos_comp_nr_feeds
    expos_comp_nr_filtering = args.expos_comp_nr_filtering
    expos_comp_block_size = args.expos_comp_block_size
    match_conf = args.match_conf
    seam_find_type = args.seam
    blend_type = args.blend
    blend_strength = args.blend_strength
    result_name = args.output
    if args.timelapse is not None:
        timelapse = True
        if args.timelapse == "as_is":
            timelapse_type = cv.detail.Timelapser_AS_IS
        elif args.timelapse == "crop":
            timelapse_type = cv.detail.Timelapser_CROP
        else:
            print("Bad timelapse method")
            exit()
    else:
        timelapse = False
    range_width = args.rangewidth
    if features_type == 'orb':
        finder = cv.ORB.create()
    elif features_type == 'surf':
        finder = cv.xfeatures2d_SURF.create()
    elif features_type == 'sift':
        finder = cv.xfeatures2d_SIFT.create()
    else:
        print("Unknown descriptor type")
        exit()
    seam_work_aspect = 1
    full_img_sizes = []
    features = []
    images = []
    is_work_scale_set = False
    is_seam_scale_set = False
    is_compose_scale_set = False
    for name in img_names:
        full_img = cv.imread(cv.samples.findFile(name))
        if full_img is None:
            print("Cannot read image ", name)
            exit()
        full_img_sizes.append((full_img.shape[1], full_img.shape[0]))
        if work_megapix < 0:
            img = full_img
            work_scale = 1
            is_work_scale_set = True
        else:
            if is_work_scale_set is False:
                work_scale = min(1.0, np.sqrt(work_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
                is_work_scale_set = True
            img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale,
                            interpolation=cv.INTER_LINEAR_EXACT)
        if is_seam_scale_set is False:
            seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
            seam_work_aspect = seam_scale / work_scale
            is_seam_scale_set = True
        imgFea = cv.detail.computeImageFeatures2(finder, img)
        features.append(imgFea)
        img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale,
                        interpolation=cv.INTER_LINEAR_EXACT)
        images.append(img)
    if matcher_type == "affine":
        matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf)
    elif range_width == -1:
        matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
    else:
        matcher = cv.detail.BestOf2NearestRangeMatcher_create(range_width, try_cuda, match_conf)
    p = matcher.apply2(features)
    matcher.collectGarbage()
    if save_graph:
        f = open(save_graph_to, "w")
        f.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh))
        f.close()
    indices = cv.detail.leaveBiggestComponent(features, p, 0.3)
    img_subset = []
    img_names_subset = []
    full_img_sizes_subset = []
    num_images = len(indices)
    for i in range(len(indices)):
        img_names_subset.append(img_names[indices[i, 0]])
        img_subset.append(images[indices[i, 0]])
        full_img_sizes_subset.append(full_img_sizes[indices[i, 0]])
    images = img_subset
    img_names = img_names_subset
    full_img_sizes = full_img_sizes_subset
    num_images = len(img_names)
    if num_images < 2:
        print("Need more images")
        exit()
    if estimator_type == "affine":
        estimator = cv.detail_AffineBasedEstimator()
    else:
        estimator = cv.detail_HomographyBasedEstimator()
    b, cameras = estimator.apply(features, p, None)
    if not b:
        print("Homography estimation failed.")
        exit()
    for cam in cameras:
        cam.R = cam.R.astype(np.float32)
    if ba_cost_func == "reproj":
        adjuster = cv.detail_BundleAdjusterReproj()
    elif ba_cost_func == "ray":
        adjuster = cv.detail_BundleAdjusterRay()
    elif ba_cost_func == "affine":
        adjuster = cv.detail_BundleAdjusterAffinePartial()
    elif ba_cost_func == "no":
        adjuster = cv.detail_NoBundleAdjuster()
    else:
        print("Unknown bundle adjustment cost function: ", ba_cost_func)
        exit()
    adjuster.setConfThresh(1)
    refine_mask = np.zeros((3, 3), np.uint8)
    if ba_refine_mask[0] == 'x':
        refine_mask[0, 0] = 1
    if ba_refine_mask[1] == 'x':
        refine_mask[0, 1] = 1
    if ba_refine_mask[2] == 'x':
        refine_mask[0, 2] = 1
    if ba_refine_mask[3] == 'x':
        refine_mask[1, 1] = 1
    if ba_refine_mask[4] == 'x':
        refine_mask[1, 2] = 1
    adjuster.setRefinementMask(refine_mask)
    b, cameras = adjuster.apply(features, p, cameras)
    if not b:
        print("Camera parameters adjusting failed.")
        exit()
    focals = []
    for cam in cameras:
        focals.append(cam.focal)
    focals.sort()
    if len(focals) % 2 == 1:
        warped_image_scale = focals[len(focals) // 2]
    else:
        warped_image_scale = (focals[len(focals) // 2] + focals[len(focals) // 2 - 1]) / 2
    if do_wave_correct:
        rmats = []
        for cam in cameras:
            rmats.append(np.copy(cam.R))
        rmats = cv.detail.waveCorrect(rmats, cv.detail.WAVE_CORRECT_HORIZ)
        for idx, cam in enumerate(cameras):
            cam.R = rmats[idx]
    corners = []
    mask = []
    masks_warped = []
    images_warped = []
    sizes = []
    masks = []
    for i in range(0, num_images):
        um = cv.UMat(255 * np.ones((images[i].shape[0], images[i].shape[1]), np.uint8))
        masks.append(um)
    warper = cv.PyRotationWarper(warp_type, warped_image_scale * seam_work_aspect)  # can the warper be nullptr?
    for idx in range(0, num_images):
        K = cameras[idx].K().astype(np.float32)
        swa = seam_work_aspect
        K[0, 0] *= swa
        K[0, 2] *= swa
        K[1, 1] *= swa
        K[1, 2] *= swa
        corner, image_wp = warper.warp(images[idx], K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
        corners.append(corner)
        sizes.append((image_wp.shape[1], image_wp.shape[0]))
        images_warped.append(image_wp)
        p, mask_wp = warper.warp(masks[idx], K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
        masks_warped.append(mask_wp.get())
    images_warped_f = []
    for img in images_warped:
        imgf = img.astype(np.float32)
        images_warped_f.append(imgf)
    if cv.detail.ExposureCompensator_CHANNELS == expos_comp_type:
        compensator = cv.detail_ChannelsCompensator(expos_comp_nr_feeds)
        # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
    elif cv.detail.ExposureCompensator_CHANNELS_BLOCKS == expos_comp_type:
        compensator = cv.detail_BlocksChannelsCompensator(
            expos_comp_block_size, expos_comp_block_size, expos_comp_nr_feeds)
        # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
    else:
        compensator = cv.detail.ExposureCompensator_createDefault(expos_comp_type)
    compensator.feed(corners=corners, images=images_warped, masks=masks_warped)
    seam_finder = None
    if seam_find_type == "no":
        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
    elif seam_find_type == "voronoi":
        seam_finder = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
    elif seam_find_type == "gc_color":
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR")
    elif seam_find_type == "gc_colorgrad":
        seam_finder = cv.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
    elif seam_find_type == "dp_color":
        seam_finder = cv.detail_DpSeamFinder("COLOR")
    elif seam_find_type == "dp_colorgrad":
        seam_finder = cv.detail_DpSeamFinder("COLOR_GRAD")
    if seam_finder is None:
        print("Can't create the following seam finder ", seam_find_type)
        exit()
    seam_finder.find(images_warped_f, corners, masks_warped)
    imgListe = []
    compose_scale = 1
    corners = []
    sizes = []
    images_warped = []
    images_warped_f = []
    masks = []
    blender = None
    timelapser = None
    compose_work_aspect = 1
    for idx, name in enumerate(img_names):
        # https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
        full_img = cv.imread(name)
        if not is_compose_scale_set:
            if compose_megapix > 0:
                compose_scale = min(1.0, np.sqrt(compose_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
            is_compose_scale_set = True
            compose_work_aspect = compose_scale / work_scale
            warped_image_scale *= compose_work_aspect
            warper = cv.PyRotationWarper(warp_type, warped_image_scale)
            for i in range(0, len(img_names)):
                cameras[i].focal *= compose_work_aspect
                cameras[i].ppx *= compose_work_aspect
                cameras[i].ppy *= compose_work_aspect
                sz = (full_img_sizes[i][0] * compose_scale, full_img_sizes[i][1] * compose_scale)
                K = cameras[i].K().astype(np.float32)
                roi = warper.warpRoi(sz, K, cameras[i].R)
                corners.append(roi[0:2])
                sizes.append(roi[2:4])
        if abs(compose_scale - 1) > 1e-1:
            img = cv.resize(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale,
                            interpolation=cv.INTER_LINEAR_EXACT)
        else:
            img = full_img
        img_size = (img.shape[1], img.shape[0])
        K = cameras[idx].K().astype(np.float32)
        corner, image_warped = warper.warp(img, K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
        mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8)
        p, mask_warped = warper.warp(mask, K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
        compensator.apply(idx, corners[idx], image_warped, mask_warped)
        image_warped_s = image_warped.astype(np.int16)
        image_warped = []
        dilated_mask = cv.dilate(masks_warped[idx], None)
        seam_mask = cv.resize(dilated_mask, (mask_warped.shape[1], mask_warped.shape[0]), 0, 0, cv.INTER_LINEAR_EXACT)
        mask_warped = cv.bitwise_and(seam_mask, mask_warped)
        if blender is None and not timelapse:
            blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
            dst_sz = cv.detail.resultRoi(corners=corners, sizes=sizes)
            blend_width = np.sqrt(dst_sz[2] * dst_sz[3]) * blend_strength / 100
            if blend_width < 1:
                blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
            elif blend_type == "multiband":
                blender = cv.detail_MultiBandBlender()
                blender.setNumBands(int(np.log(blend_width) / np.log(2.) - 1.))
            elif blend_type == "feather":
                blender = cv.detail_FeatherBlender()
                blender.setSharpness(1. / blend_width)
            blender.prepare(dst_sz)
        elif timelapser is None and timelapse:
            timelapser = cv.detail.Timelapser_createDefault(timelapse_type)
            timelapser.initialize(corners, sizes)
        if timelapse:
            matones = np.ones((image_warped_s.shape[0], image_warped_s.shape[1]), np.uint8)
            timelapser.process(image_warped_s, matones, corners[idx])
            pos_s = img_names[idx].rfind("/")
            if pos_s == -1:
                fixedFileName = "fixed_" + img_names[idx]
            else:
                fixedFileName = img_names[idx][:pos_s + 1] + "fixed_" + img_names[idx][pos_s + 1:]
            cv.imwrite(fixedFileName, timelapser.getDst())
        else:
            blender.feed(cv.UMat(image_warped_s), mask_warped, corners[idx])
    if not timelapse:
        result = None
        result_mask = None
        result, result_mask = blender.blend(result, result_mask)
        cv.imwrite(result_name, result)
        zoomx = 600.0 / result.shape[1]
        dst = cv.normalize(src=result, dst=None, alpha=255., norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
        dst = cv.resize(dst, dsize=None, fx=zoomx, fy=zoomx)
        cv.imshow(result_name, dst)
        cv.waitKey()
    print('Done')
def main(argv):
    exp_name = str(time.time())
    opts, args = getopt.getopt(argv, "n:")
    for opt, arg in opts:
        if opt == "-n":
            exp_name = arg
    dataset_path = 'b0-2014-07-11-11-00-49-1OG'  # 'b0-2014-07-21-12-55-35-EG' #'b2-2015-09-01-11-55-40-UG' #'picdata' #'picdata_120'
    MaxFrameId = 142    # 214 #243 #418 #143
    SubmapLength = 120  # 180 #120
    Resolution = 0.05
    print("OK")
    frame_poses = []
    submap_poses = []
    center_poses = []
    for frameIndex in range(MaxFrameId + 1):
        mapinfo_fname_tmp = "mapinfo_{}_{}.txt".format(frameIndex, SubmapLength)
        mapinfo_fname = os.path.join(dataset_path, mapinfo_fname_tmp)
        if not os.path.exists(mapinfo_fname):
            assert False, "file {} not exist".format(mapinfo_fname)
        framePose, submapPose = readMapinfo(mapinfo_fname)
        frameTf = msg2RigidTransform(framePose, "map", "base{}".format(frameIndex))
        submapTf = msg2RigidTransform(submapPose, "map", "origin{}".format(frameIndex))
        frame_poses.append(frameTf)
        submap_poses.append(submapTf)
    center_xy = np.zeros((MaxFrameId + 1, 2))
    feature_list = []
    kaze = cv2.KAZE_create()
    akaze = cv2.AKAZE_create()
    # fast = cv2.FastFeatureDetector_create()
    orb = cv2.ORB_create()
    brisk = cv2.BRISK_create()
    # sift = cv2.xfeatures2d.SIFT_create()
    # surf = cv2.xfeatures2d.SURF_create()
    matcher = cv2.detail_AffineBestOf2NearestMatcher()
    for frameIndex in range(MaxFrameId + 1):
        print("OCMAP: index: {}".format(frameIndex))
        mappng_fname_tmp = "output_int8_{}_{}.png".format(frameIndex, SubmapLength)
        # mappng_fname_tmp = "output_{}_{}.png".format(frameIndex, SubmapLength)
        mappng_fname = os.path.join(dataset_path, mappng_fname_tmp)
        if not os.path.exists(mappng_fname):
            assert False, "file {} not exist".format(mappng_fname)
        mappng = cv2.imread(mappng_fname, 0)
        mappng[mappng == 255] = 50
        mappng[mappng <= 45] = 255
        mappng[mappng < 55] = 225
        mappng[mappng <= 100] = 0
        keepy, keepx = np.where(mappng == 0)
        centerx = Resolution * keepx.sum() / keepx.size
        centery = Resolution * keepy.sum() / keepy.size
        print("local x: {} y: {}".format(centerx, centery))
        centerTf = RigidTransform(translation=[centerx, centery, 0],
                                  from_frame="origin{}".format(frameIndex),
                                  to_frame="center{}".format(frameIndex))
        center_poses.append(centerTf)
        map2centerTf = centerTf * submap_poses[frameIndex]
        center_xy[frameIndex, :] = map2centerTf.translation[0], map2centerTf.translation[1]
        print("global x: {} y: {}".format(center_xy[frameIndex, 0], center_xy[frameIndex, 1]))
        mappng = cv2.GaussianBlur(mappng, (3, 3), 0)
        features = cv2.detail.computeImageFeatures2(akaze, mappng)
        feature_list.append(features)
        if frameIndex == 0:
            img2 = cv2.drawKeypoints(mappng, features.getKeypoints(), None, color=(0, 255, 0), flags=0)
            plt.imshow(img2), plt.show()
    pose_dists = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
    center_dists = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
    for index in range(MaxFrameId + 1):
        for jndex in range(MaxFrameId + 1):
            print("GT: index: {} and jndex: {}".format(index, jndex))
            pose_dists[index, jndex] = np.linalg.norm(
                (frame_poses[index].translation - frame_poses[jndex].translation), ord=2)
            center_dists[index, jndex] = np.linalg.norm(
                ((center_poses[index] * submap_poses[index]).translation -
                 (center_poses[jndex] * submap_poses[jndex]).translation), ord=2)
    PR_matched = pose_dists < 6
    PR_matched_show = (PR_matched * 255).astype(np.uint8)
    cv2.imwrite("result/matched_{}.png".format(exp_name), PR_matched_show)
    np.save('result/pose_dists_{}.npy'.format(exp_name), pose_dists)
    np.save('result/center_dists_{}.npy'.format(exp_name), center_dists)
    np.save('result/center_xy_{}.npy'.format(exp_name), center_xy)
    transl_error = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
    rot_error = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
    size_error = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
    match_confidence = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
    for index in range(MaxFrameId + 1):
        for jndex in range(MaxFrameId + 1):
            matches_info = matcher.apply(feature_list[index], feature_list[jndex])
            match_confidence[index, jndex] = matches_info.confidence
            if matches_info.H is None:
                continue
            rotation = matches_info.H[0:2, 0:2]
            size = math.sqrt(math.pow(rotation[0, 0], 2) + math.pow(rotation[0, 1], 2))
            size_error[index, jndex] = size
            rotation = rotation / size
            rotation = np.pad(rotation, ((0, 1), (0, 1)), 'constant')
            rotation[2, 2] = 1.0
            translation = matches_info.H[0:3, 2]
            translation[2] = 0.0
            T = RigidTransform(rotation, translation, "origin{}".format(jndex), "origin{}".format(index))
            error = submap_poses[index].inverse() * T * submap_poses[jndex]
            # print("fromframe: {} and toframe: {}".format(error.from_frame, error.to_frame))
            transl_error[index, jndex] = np.linalg.norm(error.translation, ord=2) * Resolution
            rot_error[index, jndex] = math.atan(error.quaternion[3] / error.quaternion[0])
            print("Match: index: {} and jndex: {}, transl:{}, rot:{}".format(
                index, jndex, transl_error[index, jndex], rot_error[index, jndex]))
    np.save('result/trans_error_{}.npy'.format(exp_name), transl_error)
    np.save('result/rot_error_{}.npy'.format(exp_name), rot_error)
    np.save('result/size_error_{}.npy'.format(exp_name), size_error)
    np.save('result/match_confidence_{}.npy'.format(exp_name), match_confidence)
    tmp = 1
def compare_occmap(features1, features2):
    matcher = cv2.detail_AffineBestOf2NearestMatcher()
    matches_info = matcher.apply(features1, features2)
    return matches_info.confidence
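# Hedged usage sketch (assumption, not original code): compare_occmap expects two
# cv2.detail ImageFeatures objects such as those produced by computeImageFeatures2
# in the surrounding scripts; 'map_a' and 'map_b' stand in for preprocessed
# occupancy-grid images and are not defined in this file.
akaze = cv2.AKAZE_create()
features_a = cv2.detail.computeImageFeatures2(akaze, map_a)
features_b = cv2.detail.computeImageFeatures2(akaze, map_b)
print("match confidence:", compare_occmap(features_a, features_b))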
def main(argv):
    exp_name = str(time.time())
    opts, args = getopt.getopt(argv, "n:")
    for opt, arg in opts:
        if opt == "-n":
            exp_name = arg
    dataset_path = 'picdata'
    MaxFrameId = 418
    print("OK")
    frame_poses = np.zeros([0, 7])
    submap_poses = np.zeros([0, 7])
    for frameIndex in range(MaxFrameId + 1):
        mapinfo_fname_tmp = "mapinfo_{}_{}.txt".format(frameIndex, 180)
        mapinfo_fname = os.path.join(dataset_path, mapinfo_fname_tmp)
        if not os.path.exists(mapinfo_fname):
            assert False, "file {} not exist".format(mapinfo_fname)
        framePose, submapPose = readMapinfo(mapinfo_fname)
        frame_poses = np.vstack([frame_poses, framePose])
        submap_poses = np.vstack([submap_poses, submapPose])
    pose_dists = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
    for index in range(MaxFrameId + 1):
        for jndex in range(MaxFrameId + 1):
            print("GT: index: {} and jndex: {}".format(index, jndex))
            pose_dists[index, jndex] = np.linalg.norm(
                (frame_poses[index, 0:3] - frame_poses[jndex, 0:3]), ord=2)
    PR_matched = pose_dists < 6
    PR_matched_show = (PR_matched * 255).astype(np.uint8)
    cv2.imwrite("matched.png", PR_matched_show)
    feature_list = []
    kaze = cv2.KAZE_create()
    akaze = cv2.AKAZE_create()
    fast = cv2.FastFeatureDetector_create()
    orb = cv2.ORB_create()
    brisk = cv2.BRISK_create()
    sift = cv2.xfeatures2d.SIFT_create()
    surf = cv2.xfeatures2d.SURF_create()
    matcher = cv2.detail_AffineBestOf2NearestMatcher()
    for frameIndex in range(MaxFrameId + 1):
        print("OCMAP: index: {}".format(frameIndex))
        mappng_fname_tmp = "output_{}_{}.png".format(frameIndex, 180)
        mappng_fname = os.path.join(dataset_path, mappng_fname_tmp)
        if not os.path.exists(mappng_fname):
            assert False, "file {} not exist".format(mappng_fname)
        mappng = cv2.imread(mappng_fname, 0)
        mappng[mappng == 255] = 50
        mappng[mappng <= 45] = 255
        mappng[mappng < 55] = 225
        mappng[mappng <= 100] = 0
        mappng = cv2.GaussianBlur(mappng, (3, 3), 0)
        features = cv2.detail.computeImageFeatures2(akaze, mappng)
        feature_list.append(features)
        if frameIndex == 0:
            img2 = cv2.drawKeypoints(mappng, features.getKeypoints(), None, color=(0, 255, 0), flags=0)
            plt.imshow(img2), plt.show()
    trans_dists = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
    for index in range(MaxFrameId + 1):
        for jndex in range(MaxFrameId + 1):
            matches_info = matcher.apply(feature_list[index], feature_list[jndex])
            trans_dists[index, jndex] = matches_info.confidence
            print("Match: index: {} and jndex: {}, confidence:{}".format(
                index, jndex, matches_info.confidence))
    np.save('trans_dists_{}.npy'.format(exp_name), trans_dists)
    np.save('pose_dists_{}.npy'.format(exp_name), pose_dists)
    tmp = 1
def main(argv):
    exp_name = str(time.time())
    opts, args = getopt.getopt(argv, "n:")
    for opt, arg in opts:
        if opt == "-n":
            exp_name = arg
    dataset_path = 'PR_result'
    dataset_dirs = os.listdir(dataset_path)
    print(dataset_dirs)
    Resolution = 0.05
    for data_dir in dataset_dirs:
        file_names = os.listdir(os.path.join(dataset_path, data_dir))
        print(data_dir + str(file_names))
        MaxFrameId = 0
        for file_name in file_names:
            if file_name[-3:] == 'txt':
                frame_id = int(file_name.split('_')[1])
                if frame_id > MaxFrameId:
                    MaxFrameId = frame_id
        frame_poses = [RigidTransform()] * (MaxFrameId + 1)
        submap_poses = [RigidTransform()] * (MaxFrameId + 1)
        center_poses = [RigidTransform()] * (MaxFrameId + 1)
        feature_list = [None] * (MaxFrameId + 1)
        center_xy = np.zeros((MaxFrameId + 1, 2))
        for file_name in file_names:
            if file_name[-3:] == 'txt':
                frame_id = int(file_name.split('_')[1])
                mapinfo_fname = os.path.join(dataset_path, data_dir, file_name)
                if not os.path.exists(mapinfo_fname):
                    assert False, "file {} not exist".format(mapinfo_fname)
                framePose, submapPose = readMapinfo(mapinfo_fname)
                frameTf = msg2RigidTransform(framePose, "map", "base{}".format(frame_id))
                submapTf = msg2RigidTransform(submapPose, "map", "origin{}".format(frame_id))
                frame_poses[frame_id] = frameTf
                submap_poses[frame_id] = submapTf
        kaze = cv2.KAZE_create()
        akaze = cv2.AKAZE_create()
        orb = cv2.ORB_create()
        brisk = cv2.BRISK_create()
        matcher = cv2.detail_AffineBestOf2NearestMatcher()
        for file_name in file_names:
            if file_name[-3:] == 'png':
                frame_id = int(file_name.split('_')[2])
                mappng_fname = os.path.join(dataset_path, data_dir, file_name)
                if not os.path.exists(mappng_fname):
                    assert False, "file {} not exist".format(mappng_fname)
                mappng = cv2.imread(mappng_fname, 0)
                mappng[mappng == 255] = 50
                mappng[mappng <= 45] = 255
                mappng[mappng < 55] = 225
                mappng[mappng <= 100] = 0
                keepy, keepx = np.where(mappng == 0)
                if keepx.size == 0 or keepy.size == 0:
                    # plt.imshow(mappng), plt.show()
                    center_poses[frame_id] = RigidTransform(
                        frame_poses[frame_id].rotation,
                        frame_poses[frame_id].translation,
                        "origin{}".format(frame_id),
                        to_frame="center{}".format(frame_id))
                    center_xy[frame_id, :] = (center_poses[frame_id].translation[0],
                                              center_poses[frame_id].translation[1])
                    print("{} global x: {} y: {}".format(
                        file_name, center_xy[frame_id, 0], center_xy[frame_id, 1]))
                    # pdb.set_trace()
                else:
                    centerx = Resolution * keepx.sum() / keepx.size
                    centery = Resolution * keepy.sum() / keepy.size
                    print("{} local x: {} y: {}".format(file_name, centerx, centery))
                    centerTf = RigidTransform(
                        translation=[centerx, centery, 0],
                        from_frame="origin{}".format(frame_id),
                        to_frame="center{}".format(frame_id))
                    center_poses[frame_id] = centerTf
                    map2centerTf = centerTf * submap_poses[frame_id]
                    center_xy[frame_id, :] = (map2centerTf.translation[0],
                                              map2centerTf.translation[1])
                    print("{} global x: {} y: {}".format(
                        file_name, center_xy[frame_id, 0], center_xy[frame_id, 1]))
                mappng = cv2.GaussianBlur(mappng, (3, 3), 0)
                features = cv2.detail.computeImageFeatures2(akaze, mappng)
                feature_list[frame_id] = features
        pose_dists = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
        center_dists = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
        for index in range(MaxFrameId + 1):
            print("{} GT: index: {}".format(data_dir, index))
            for jndex in range(MaxFrameId + 1):
                pose_dists[index, jndex] = np.linalg.norm(
                    (frame_poses[index].translation - frame_poses[jndex].translation), ord=2)
                center_dists[index, jndex] = np.linalg.norm(
                    ((center_poses[index] * submap_poses[index]).translation -
                     (center_poses[jndex] * submap_poses[jndex]).translation), ord=2)
        PR_matched = pose_dists < 6
        PR_matched_show = (PR_matched * 255).astype(np.uint8)
        cv2.imwrite("result/matched_{}.png".format(data_dir), PR_matched_show)
        np.save('result/pose_dists_{}.npy'.format(data_dir), pose_dists)
        np.save('result/center_dists_{}.npy'.format(data_dir), center_dists)
        np.save('result/center_xy_{}.npy'.format(data_dir), center_xy)
        transl_error = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
        rot_error = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
        size_error = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
        match_confidence = np.zeros((MaxFrameId + 1, MaxFrameId + 1))
        for index in range(MaxFrameId + 1):
            for jndex in range(MaxFrameId + 1):
                if feature_list[index] is None or feature_list[jndex] is None:
                    continue
                if len(feature_list[index].getKeypoints()) < 2 or len(feature_list[jndex].getKeypoints()) < 2:
                    continue
                # print(len(feature_list[index].getKeypoints()))
                # print(len(feature_list[jndex].getKeypoints()))
                matches_info = matcher.apply(feature_list[index], feature_list[jndex])
                match_confidence[index, jndex] = matches_info.confidence
                if matches_info.H is None:
                    continue
                rotation = matches_info.H[0:2, 0:2]
                size = math.sqrt(math.pow(rotation[0, 0], 2) + math.pow(rotation[0, 1], 2))
                size_error[index, jndex] = size
                rotation = rotation / size
                rotation = np.pad(rotation, ((0, 1), (0, 1)), 'constant')
                rotation[2, 2] = 1.0
                translation = matches_info.H[0:3, 2]
                translation[2] = 0.0
                T = RigidTransform(rotation, translation, "origin{}".format(jndex), "origin{}".format(index))
                error = submap_poses[index].inverse() * T * submap_poses[jndex]
                # print("fromframe: {} and toframe: {}".format(error.from_frame, error.to_frame))
                transl_error[index, jndex] = np.linalg.norm(error.translation, ord=2) * Resolution
                rot_error[index, jndex] = math.atan(error.quaternion[3] / error.quaternion[0])
                print("{} Match: index: {} and jndex: {}, transl:{}, rot:{}".format(
                    data_dir, index, jndex, transl_error[index, jndex], rot_error[index, jndex]))
        np.save('result/trans_error_{}.npy'.format(data_dir), transl_error)
        np.save('result/rot_error_{}.npy'.format(data_dir), rot_error)
        np.save('result/size_error_{}.npy'.format(data_dir), size_error)
        np.save('result/match_confidence_{}.npy'.format(data_dir), match_confidence)
            seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
            seam_work_aspect = seam_scale / work_scale
            is_seam_scale_set = True
        imgFea = cv.detail.computeImageFeatures2(finder, img)
        features.append(imgFea)
        img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale,
                        interpolation=cv.INTER_LINEAR_EXACT)
        images.append(img)
    if matcher_type == "affine":
        matcher = cv.detail_AffineBestOf2NearestMatcher(
            False, try_cuda, match_conf)
    elif range_width == -1:
        matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
    else:
        matcher = cv.detail.BestOf2NearestRangeMatcher_create(
            range_width, try_cuda, match_conf)
    p = matcher.apply2(features)
    matcher.collectGarbage()
    if save_graph:
        f = open(save_graph_to, "w")
        f.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh))
        f.close()
    indices = cv.detail.leaveBiggestComponent(features, p, 0.3)
    img_subset = []
    img_names_subset = []
    full_img_sizes_subset = []
def main():
    match_conf = 0.3
    normal_files_directory = r'C:\Scratch\IPA_Data\Sampled\00_Normal_NIR'
    ambient_files_directory = r'C:\Scratch\IPA_Data\Sampled\15_Ambient_Combined\06_red_green_blue'
    normal_image_names = [r'a0_nor0.tif',
                          r'a1_nor0.tif']
    # r'a2_nor0.tif',
    # r'a3_nor0.tif',
    # r'a4_nor0.tif',
    # r'a5_nor0.tif',
    # r'a6_nor0.tif',
    # r'a7_nor0.tif',
    # r'a8_nor0.tif',
    # r'a9_nor0.tif']
    # r'b1_nor0.tif',
    # r'b1_nor0.tif',
    # r'b2_nor0.tif',
    # r'b3_nor0.tif',
    # r'b4_nor0.tif',
    # r'b5_nor0.tif',
    # r'b6_nor0.tif',
    # r'b7_nor0.tif',
    # r'b8_nor0.tif',
    # r'b9_nor0.tif',
    # r'c0_nor0.tif',
    # r'c1_nor0.tif',
    # r'c2_nor0.tif',
    # r'c3_nor0.tif',
    # r'c4_nor0.tif',
    # r'c5_nor0.tif',
    # r'c6_nor0.tif',
    # r'c7_nor0.tif',
    # r'c8_nor0.tif',
    # r'c9_nor0.tif',
    # r'd0_nor0.tif',
    # r'd1_nor0.tif',
    # r'd2_nor0.tif',
    # r'd3_nor0.tif',
    # r'd4_nor0.tif',
    # r'd5_nor0.tif',
    # r'd6_nor0.tif',
    # r'd7_nor0.tif',
    # r'd8_nor0.tif',
    # r'd9_nor0.tif',
    # r'e0_nor0.tif',
    # r'e1_nor0.tif',
    # r'e2_nor0.tif',
    # r'e3_nor0.tif',
    # r'e4_nor0.tif',
    # r'e5_nor0.tif',
    # r'e6_nor0.tif',
    # r'e7_nor0.tif',
    # r'e8_nor0.tif',
    # r'e9_nor0.tif',
    # r'f0_nor0.tif',
    # r'f1_nor0.tif',
    # r'f2_nor0.tif',
    # r'f3_nor0.tif',
    # r'f4_nor0.tif',
    # r'f5_nor0.tif',
    # r'f6_nor0.tif',
    # r'f7_nor0.tif',
    # r'f8_nor0.tif',
    # r'f9_nor0.tif']
    ambient_image_names = [r'a0_amb.tif',
                           r'a1_amb.tif']
    # r'a2_amb.tif',
    # r'a3_amb.tif',
    # r'a4_amb.tif',
    # r'a5_amb.tif',
    # r'a6_amb.tif',
    # r'a7_amb.tif',
    # r'a8_amb.tif',
    # r'a9_amb.tif']
    # r'b1_amb.tif',
    # r'b1_amb.tif',
    # r'b2_amb.tif',
    # r'b3_amb.tif',
    # r'b4_amb.tif',
    # r'b5_amb.tif',
    # r'b6_amb.tif',
    # r'b7_amb.tif',
    # r'b8_amb.tif',
    # r'b9_amb.tif',
    # r'c0_amb.tif',
    # r'c1_amb.tif',
    # r'c2_amb.tif',
    # r'c3_amb.tif',
    # r'c4_amb.tif',
    # r'c5_amb.tif',
    # r'c6_amb.tif',
    # r'c7_amb.tif',
    # r'c8_amb.tif',
    # r'c9_amb.tif',
    # r'd0_amb.tif',
    # r'd1_amb.tif',
    # r'd2_amb.tif',
    # r'd3_amb.tif',
    # r'd4_amb.tif',
    # r'd5_amb.tif',
    # r'd6_amb.tif',
    # r'd7_amb.tif',
    # r'd8_amb.tif',
    # r'd9_amb.tif',
    # r'e0_amb.tif',
    # r'e1_amb.tif',
    # r'e2_amb.tif',
    # r'e3_amb.tif',
    # r'e4_amb.tif',
    # r'e5_amb.tif',
    # r'e6_amb.tif',
    # r'e7_amb.tif',
    # r'e8_amb.tif',
    # r'e9_amb.tif',
    # r'f0_amb.tif',
    # r'f1_amb.tif',
    # r'f2_amb.tif',
    # r'f3_amb.tif',
    # r'f4_amb.tif',
    # r'f5_amb.tif',
    # r'f6_amb.tif',
    # r'f7_amb.tif',
    # r'f8_amb.tif',
    # r'f9_amb.tif']
    normal_image_paths = []
    ambient_image_paths = []
    for filename in normal_image_names:
        normal_image_paths.append(os.path.join(normal_files_directory, filename))
    for filename in ambient_image_names:
        ambient_image_paths.append(os.path.join(ambient_files_directory, filename))
    norm_imgs = []
    amb_imgs = []
    full_image = cv.imread(cv.samples.findFile(normal_image_paths[0]), 0)
    full_img_size = full_image.shape
    for img_name in normal_image_paths:
        img = cv.imread(cv.samples.findFile(img_name), 0)
        if img is None:
            print("can't read image " + img_name)
            sys.exit(-1)
        norm_imgs.append(img)
    for img_name in ambient_image_paths:
        img = cv.imread(cv.samples.findFile(img_name))
        if img is None:
            print("can't read image " + img_name)
            sys.exit(-1)
        amb_imgs.append(img)
    finder = cv.xfeatures2d.SURF_create(300)
    norm_kp1, norm_des1 = finder.detectAndCompute(norm_imgs[0], None)
    norm_kp2, norm_des2 = finder.detectAndCompute(norm_imgs[1], None)
    amb_kp1, amb_des1 = finder.detectAndCompute(amb_imgs[0], None)
    amb_kp2, amb_des2 = finder.detectAndCompute(amb_imgs[1], None)
    # test_feat_img = cv.drawKeypoints(amb_imgs[0], norm_kp1, None, (255, 0, 0), 4)
    # cv.namedWindow('image', cv.WINDOW_NORMAL)
    # cv.imshow('image', test_feat_img)
    # cv.resizeWindow('image', int(full_img_size[0]), int(full_img_size[1]))
    # cv.waitKey()
    #
    # test_feat_img = cv.drawKeypoints(norm_imgs[0], amb_kp1, None, (255, 0, 0), 4)
    # cv.namedWindow('image', cv.WINDOW_NORMAL)
    # cv.imshow('image', test_feat_img)
    # cv.resizeWindow('image', int(full_img_size[0]), int(full_img_size[1]))
    # cv.waitKey()
    #
    # test_feat_img = cv.drawKeypoints(amb_imgs[1], norm_kp2, None, (255, 0, 0), 4)
    # cv.namedWindow('image', cv.WINDOW_NORMAL)
    # cv.imshow('image', test_feat_img)
    # cv.resizeWindow('image', int(full_img_size[1]), int(full_img_size[1]))
    # cv.waitKey()
    #
    # test_feat_img = cv.drawKeypoints(norm_imgs[1], amb_kp2, None, (255, 0, 0), 4)
    # cv.namedWindow('image', cv.WINDOW_NORMAL)
    # cv.imshow('image', test_feat_img)
    # cv.resizeWindow('image', int(full_img_size[1]), int(full_img_size[1]))
    # cv.waitKey()

    comb_kp1 = np.concatenate([norm_kp1, amb_kp1])
    comb_kp2 = np.concatenate([norm_kp2, amb_kp2])
    comb_des1 = np.concatenate([norm_des1, amb_des1])
    comb_des2 = np.concatenate([norm_des2, amb_des2])

    # test_feat_img = cv.drawKeypoints(amb_imgs[0], comb_kp1, None, (255, 0, 0), 4)
    # cv.namedWindow('image', cv.WINDOW_NORMAL)
    # cv.imshow('image', test_feat_img)
    # cv.resizeWindow('image', int(full_img_size[0]), int(full_img_size[1]))
    # cv.waitKey()
    #
    # test_feat_img = cv.drawKeypoints(norm_imgs[0], comb_kp1, None, (255, 0, 0), 4)
    # cv.namedWindow('image', cv.WINDOW_NORMAL)
    # cv.imshow('image', test_feat_img)
    # cv.resizeWindow('image', int(full_img_size[0]), int(full_img_size[1]))
    # cv.waitKey()
    #
    # test_feat_img = cv.drawKeypoints(amb_imgs[1], comb_kp2, None, (255, 0, 0), 4)
    # cv.namedWindow('image', cv.WINDOW_NORMAL)
    # cv.imshow('image', test_feat_img)
    # cv.resizeWindow('image', int(full_img_size[1]), int(full_img_size[1]))
    # cv.waitKey()
    #
    # test_feat_img = cv.drawKeypoints(norm_imgs[1], comb_kp2, None, (255, 0, 0), 4)
    # cv.namedWindow('image', cv.WINDOW_NORMAL)
    # cv.imshow('image', test_feat_img)
    # cv.resizeWindow('image', int(full_img_size[1]), int(full_img_size[1]))
    # cv.waitKey()

    matcher = cv.detail_AffineBestOf2NearestMatcher(False, False, match_conf)
    temp_1 = cv.detail.computeImageFeatures(finder, norm_imgs[0])
    temp_2 = cv.detail.computeImageFeatures(finder, amb_imgs[0])
    temp = temp_1.append(temp_2)
def init_panorama(liste_images, pano):
    nb_images = len(liste_images)
    if nb_images < 2:
        print("At least 2 images are required")
        return False, pano
    algo_descripteur = cv.ORB.create(pano.nb_point_cle)
    descripteurs = cv.detail.computeImageFeatures(algo_descripteur, liste_images)
    if pano.matcher_type == "affine":
        algo_apparier = cv.detail_AffineBestOf2NearestMatcher(
            True, pano.try_cuda, pano.seuil_appariement)
    else:
        algo_apparier = cv.detail_AffineBestOf2NearestMatcher(
            False, pano.try_cuda, pano.seuil_appariement)
    appariement_image = algo_apparier.apply2(descripteurs)
    pano.indices = cv.detail.leaveBiggestComponent(descripteurs, appariement_image,
                                                   pano.seuil_confiance)
    nb_images = len(pano.indices)
    if nb_images < 2:
        print("Matching failed")
        return False, pano
    pano.remise_a_zero()
    if pano.type_estimateur == "affine":
        estimateur = cv.detail_AffineBasedEstimator()
    else:
        estimateur = cv.detail_HomographyBasedEstimator()
    ret, pano.cameras = estimateur.apply(descripteurs, appariement_image, None)
    if not ret:
        print("Model estimation failed.")
        return False, pano
    for cam in pano.cameras:
        cam.R = cam.R.astype(np.float32)
    if pano.fct_cout not in pano.cout:
        print("Unknown cost function: ", pano.fct_cout)
        return False, pano
    ajuster = pano.cout[pano.fct_cout]()
    ajuster.setConfThresh(pano.seuil_confiance)
    refine_mask = np.ones((3, 3), np.uint8)
    ajuster.setRefinementMask(refine_mask)
    ret, pano.cameras = ajuster.apply(descripteurs, appariement_image, pano.cameras)
    if not ret:
        print("Parameter adjustment failed.")
        return False, pano
    for cam in pano.cameras:
        pano.focales.append(cam.focal)
    pano.focales.sort()
    if len(pano.focales) % 2 == 1:
        pano.focale_moyenne = pano.focales[len(pano.focales) // 2]
    else:
        pano.focale_moyenne = (pano.focales[len(pano.focales) // 2] +
                               pano.focales[len(pano.focales) // 2 - 1]) / 2
    images_projetees = []
    images_projetees_float = []
    pano.composition = cv.PyRotationWarper(pano.surface_compo, pano.focale_moyenne)
    for i in range(nb_images):
        idx = pano.indices[i][0]
        cam_int = pano.cameras[idx].K().astype(np.float32)
        coins, image_wp = pano.composition.warp(liste_images[idx], cam_int,
                                                pano.cameras[idx].R,
                                                cv.INTER_LINEAR, cv.BORDER_REFLECT)
        pano.liste_pos_coin.append(coins)
        pano.liste_taille_masque.append((image_wp.shape[1], image_wp.shape[0]))
        images_projetees.append(image_wp)
        umat = cv.UMat(255 * np.ones((liste_images[pano.indices[i][0]].shape[0],
                                      liste_images[pano.indices[i][0]].shape[1]), np.uint8))
        _, mask_wp = pano.composition.warp(umat, cam_int, pano.cameras[idx].R,
                                           cv.INTER_NEAREST, cv.BORDER_CONSTANT)
        pano.liste_masque_compo.append(mask_wp)
    for img in images_projetees:
        imgf = img.astype(np.float32)
        images_projetees_float.append(imgf)
    pano.algo_correct_expo = cv.detail.ExposureCompensator_createDefault(
        pano.correction_exposition)
    pano.algo_correct_expo.feed(pano.liste_pos_coin, images_projetees,
                                pano.liste_masque_compo)
    if pano.fct_couture not in pano.liste_couture:
        print("Unknown seam type:", pano.fct_couture)
        return False, pano
    pano.couture[pano.fct_couture].find(images_projetees_float, pano.liste_pos_coin,
                                        pano.liste_masque_compo)
    if len(pano.indices) == len(liste_images):
        sauver_configuration(pano)
    return True, pano
            is_work_scale_set = True
        else:
            if is_work_scale_set is False:
                work_scale = min(1.0, np.sqrt(work_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
                is_work_scale_set = True
            img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale,
                            interpolation=cv.INTER_LINEAR_EXACT)
        if is_seam_scale_set is False:
            seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
            seam_work_aspect = seam_scale / work_scale
            is_seam_scale_set = True
        imgFea = cv.detail.computeImageFeatures2(finder, img)
        features.append(imgFea)
        img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale,
                        interpolation=cv.INTER_LINEAR_EXACT)
        images.append(img)
    if matcher_type == "affine":
        matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf)
    elif range_width == -1:
        matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
    else:
        matcher = cv.detail.BestOf2NearestRangeMatcher_create(range_width, try_cuda, match_conf)
    p = matcher.apply2(features)
    matcher.collectGarbage()
    if save_graph:
        f = open(save_graph_to, "w")
        f.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh))
        f.close()
    indices = cv.detail.leaveBiggestComponent(features, p, 0.3)
    img_subset = []
    img_names_subset = []
    full_img_sizes_subset = []
    num_images = len(indices)