def render_perspective_view_of_a_panorama(image, panoshot, perspectiveshot):
    """Render a perspective view of a panorama."""
    # Get destination pixel coordinates
    dst_shape = (perspectiveshot.camera.height, perspectiveshot.camera.width)
    dst_y, dst_x = np.indices(dst_shape).astype(np.float32)
    dst_pixels_denormalized = np.column_stack([dst_x.ravel(), dst_y.ravel()])

    dst_pixels = features.normalized_image_coordinates(
        dst_pixels_denormalized,
        perspectiveshot.camera.width,
        perspectiveshot.camera.height)

    # Convert to bearing
    dst_bearings = perspectiveshot.camera.pixel_bearings(dst_pixels)

    # Rotate to panorama reference frame
    rotation = np.dot(panoshot.pose.get_rotation_matrix(),
                      perspectiveshot.pose.get_rotation_matrix().T)

    rotated_bearings = np.dot(dst_bearings, rotation.T)

    # Project to panorama pixels
    src_x, src_y = panoshot.camera.project(
        (rotated_bearings[:, 0],
         rotated_bearings[:, 1],
         rotated_bearings[:, 2]))
    src_pixels = np.column_stack([src_x.ravel(), src_y.ravel()])
    src_pixels_denormalized = features.denormalized_image_coordinates(
        src_pixels, image.shape[1], image.shape[0])

    # Sample color
    colors = cv2.remap(image,
                       src_pixels_denormalized[:, 0].astype(np.float32),
                       src_pixels_denormalized[:, 1].astype(np.float32),
                       cv2.INTER_LINEAR)
    colors.shape = dst_shape + (-1,)
    return colors

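# Every snippet in this file funnels pixel coordinates through
# features.normalized_image_coordinates / denormalized_image_coordinates.
# The helpers below are only an illustrative sketch of the convention these
# snippets appear to assume (coordinates centered on the image center and
# scaled by the larger image dimension, with a half-pixel offset for pixel
# centers); they are not the library source and may differ in detail.
import numpy as np


def _normalized_image_coordinates_sketch(pixel_coords, width, height):
    # Map pixel coordinates to roughly [-0.5, 0.5] along the longer side.
    size = max(width, height)
    p = np.empty((len(pixel_coords), 2))
    p[:, 0] = (pixel_coords[:, 0] + 0.5 - width / 2.0) / size
    p[:, 1] = (pixel_coords[:, 1] + 0.5 - height / 2.0) / size
    return p


def _denormalized_image_coordinates_sketch(norm_coords, width, height):
    # Inverse of the sketch above: back to pixel coordinates.
    size = max(width, height)
    p = np.empty((len(norm_coords), 2))
    p[:, 0] = norm_coords[:, 0] * size - 0.5 + width / 2.0
    p[:, 1] = norm_coords[:, 1] * size - 0.5 + height / 2.0
    return p
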
def show_epipolar_lines(self, main_image_idx):
    if len(self.views) > 2:
        raise NotImplementedError("Not implemented yet for >2 views")
    img1 = self.views[0].current_image
    img2 = self.views[1].current_image
    img1_size = self.database.get_image_size(img1)
    img2_size = self.database.get_image_size(img2)
    matched_points = self.database.get_visible_points_coords(
        self.views[main_image_idx].current_image)
    matched_points_coords = convert_tuple_cords_to_list(matched_points)
    matched_points_coords = features.normalized_image_coordinates(
        matched_points_coords, img1_size[1], img1_size[0])
    color_idx = 0
    for point_idx, point in enumerate(matched_points_coords):
        image_pair = [img1, img2]
        line = calc_epipol_line(
            point, image_pair, self.database.get_path(), main_image_idx)
        denormalized_lines = features.denormalized_image_coordinates(
            line, img2_size[1], img2_size[0])
        for line_segment in denormalized_lines:
            circle = mpatches.Circle(
                (line_segment[0], line_segment[1]),
                3,
                color=distinct_colors[divmod(
                    hash(list(matched_points.keys())[point_idx]), 19)[1]])
            self.views[main_image_idx].plt_artists.append(circle)
            self.views[not main_image_idx].subplot.add_artist(circle)
        color_idx = color_idx + 1
    self.views[not main_image_idx].figure.canvas.draw_idle()

def _read_ground_control_points_list_line(line, projection, reference_lla, exif):
    words = line.split()
    easting, northing, alt, pixel_x, pixel_y = map(float, words[:5])
    shot_id = words[5]

    # Convert 3D coordinates
    if projection is not None:
        lon, lat = projection(easting, northing, inverse=True)
    else:
        lon, lat = easting, northing
    x, y, z = geo.topocentric_from_lla(
        lat, lon, alt,
        reference_lla['latitude'],
        reference_lla['longitude'],
        reference_lla['altitude'])

    # Convert 2D coordinates
    d = exif[shot_id]
    coordinates = features.normalized_image_coordinates(
        np.array([[pixel_x, pixel_y]]), d['width'], d['height'])[0]

    o = types.GroundControlPointObservation()
    o.lla = np.array([lat, lon, alt])
    o.coordinates = np.array([x, y, z])
    o.shot_id = shot_id
    o.shot_coordinates = coordinates
    return o

def render_perspective_view_of_a_panorama(
    image,
    panoshot,
    perspectiveshot,
    interpolation=cv2.INTER_LINEAR,
    borderMode=cv2.BORDER_WRAP,
):
    """Render a perspective view of a panorama."""
    # Get destination pixel coordinates
    dst_shape = (perspectiveshot.camera.height, perspectiveshot.camera.width)
    dst_y, dst_x = np.indices(dst_shape).astype(np.float32)
    dst_pixels_denormalized = np.column_stack([dst_x.ravel(), dst_y.ravel()])

    dst_pixels = features.normalized_image_coordinates(
        dst_pixels_denormalized,
        perspectiveshot.camera.width,
        perspectiveshot.camera.height)

    # Convert to bearing
    dst_bearings = perspectiveshot.camera.pixel_bearing_many(dst_pixels)

    # Rotate to panorama reference frame
    rotation = np.dot(panoshot.pose.get_rotation_matrix(),
                      perspectiveshot.pose.get_rotation_matrix().T)

    rotated_bearings = np.dot(dst_bearings, rotation.T)

    # Project to panorama pixels
    src_pixels = panoshot.camera.project_many(rotated_bearings)
    src_pixels_denormalized = features.denormalized_image_coordinates(
        src_pixels, image.shape[1], image.shape[0])
    src_pixels_denormalized.shape = dst_shape + (2,)

    # Sample color
    x = src_pixels_denormalized[..., 0].astype(np.float32)
    y = src_pixels_denormalized[..., 1].astype(np.float32)
    colors = cv2.remap(image, x, y, interpolation, borderMode=borderMode)

    return colors

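# Hypothetical usage sketch for the renderer above. The `panoshot`,
# `perspectiveshot`, and `data` objects are placeholders for whatever shot
# and data-access objects the surrounding code provides; only the keyword
# arguments come from the signature above.
#
#   pano_image = data.load_image(panoshot.id)
#   perspective = render_perspective_view_of_a_panorama(
#       pano_image, panoshot, perspectiveshot,
#       interpolation=cv2.INTER_CUBIC,     # smoother resampling than linear
#       borderMode=cv2.BORDER_WRAP)        # wrap across the 360-degree seam
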
def pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float, float]:
    """
    Transforms from pixels (in the viewing window) to normalized coordinates
    (in the whole geotiff)
    """
    x += self.image_window.col_off
    y += self.image_window.row_off
    h, w = self.image_manager.get_image_size(self.current_image)
    coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0]
    return coords.tolist()

def pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float, float]:
    """
    Transforms from pixels to normalized coordinates

    The view displays images at a reduced resolution for speed. We use the
    image manager to obtain the reduced coordinates to use for normalization.
    """
    h, w = self.image_manager.get_image_size(self.current_image)
    point = self.rotate_point(x, y, h, w, reverse=True)
    coords = features.normalized_image_coordinates(np.array([point]), w, h)[0]
    return coords.tolist()

def write_to_file(self, filename):
    data = {"points": []}
    for point_id, observations in self.points.items():
        point = {"id": point_id, "observations": []}
        for observation in observations:
            h, w = self.get_image_size(observation["shot_id"])
            scaled_projection = features.normalized_image_coordinates(
                np.array([observation["projection"]]), w, h)[0].tolist()
            point["observations"].append({
                "shot_id": observation["shot_id"],
                "projection": scaled_projection,
            })
        data["points"].append(point)

    with open(filename, 'wt') as fp:
        json.dump(data, fp, indent=4, sort_keys=True)

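# Shape of the JSON written by write_to_file above, with illustrative values
# only (point and shot ids are placeholders):
#
#   {
#       "points": [
#           {
#               "id": "point_0",
#               "observations": [
#                   {
#                       "shot_id": "IMG_0001.jpg",
#                       "projection": [0.012, -0.345]
#                   }
#               ]
#           }
#       ]
#   }
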
def _read_gcp_list_lines(lines, projection, reference, exif):
    points = {}
    for line in lines:
        words = line.split(None, 5)
        easting, northing, alt, pixel_x, pixel_y = map(float, words[:5])
        shot_id = words[5].strip()
        key = (easting, northing, alt)

        if key in points:
            point = points[key]
        else:
            # Convert 3D coordinates
            if np.isnan(alt):
                alt = 0
                has_altitude = False
            else:
                has_altitude = True
            if projection is not None:
                lon, lat = projection(easting, northing, inverse=True)
            else:
                lon, lat = easting, northing

            point = pymap.GroundControlPoint()
            point.id = "unnamed-%d" % len(points)
            point.lla = {"latitude": lat, "longitude": lon, "altitude": alt}
            point.has_altitude = has_altitude
            if reference:
                x, y, z = reference.to_topocentric(lat, lon, alt)
                point.coordinates.value = np.array([x, y, z])
            else:
                point.coordinates.reset()

            points[key] = point

        # Convert 2D coordinates
        d = exif[shot_id]
        coordinates = features.normalized_image_coordinates(
            np.array([[pixel_x, pixel_y]]), d["width"], d["height"])[0]

        o = pymap.GroundControlPointObservation()
        o.shot_id = shot_id
        o.projection = coordinates
        point.add_observation(o)

    return list(points.values())

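# Input format the gcp-list parsers in these snippets expect, one observation
# per line (values below are made up; the shot id is everything after the
# fifth column, so it may contain spaces):
#
#   <easting> <northing> <altitude> <pixel_x> <pixel_y> <shot_id>
#   500100.25 4180200.75 12.3 1024.0 768.0 IMG_0001.jpg
#
# If no projection is given, the first two columns are read directly as
# longitude / latitude; a NaN altitude marks the point as having no altitude.
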
def show_epipolar_lines(self, main_image_idx):
    img1_size = self.database.get_image_size(self.curr_images[0])
    img2_size = self.database.get_image_size(self.curr_images[1])
    matched_points = self.database.get_visible_points_coords(
        self.curr_images[main_image_idx])
    matched_points_coords = convert_tuple_cords_to_list(matched_points)
    matched_points_coords = features.normalized_image_coordinates(
        matched_points_coords, img1_size[1], img1_size[0])
    color_idx = 0
    for point_idx, point in enumerate(matched_points_coords):
        line = calc_epipol_line(
            point, self.curr_images, self.database.get_path(), main_image_idx)
        denormalized_lines = features.denormalized_image_coordinates(
            line, img2_size[1], img2_size[0])
        for line_segment in denormalized_lines:
            circle = mpatches.Circle(
                (line_segment[0], line_segment[1]),
                3,
                color=distinct_colors[divmod(
                    hash(list(matched_points.keys())[point_idx]), 19)[1]])
            self.plt_artists[main_image_idx].append(circle)
            self.subplots[not main_image_idx].add_artist(circle)
        color_idx = color_idx + 1
    self.figures[not main_image_idx].canvas.draw_idle()

def import_features(db, data, image_map, camera_map):
    cursor = db.cursor()
    cursor.execute("SELECT image_id, rows, cols, data FROM keypoints;")
    keypoints = {}
    colors = {}
    for row in cursor:
        image_id, n_rows, n_cols, arr = row
        filename, camera_id = image_map[image_id]
        cam = camera_map[camera_id]
        arr = np.fromstring(arr, dtype=np.float32).reshape((n_rows, n_cols))

        rgb = data.load_image(filename).astype(np.float32)
        xc = np.clip(arr[:, 1].astype(int), 0, rgb.shape[0] - 1)
        yc = np.clip(arr[:, 0].astype(int), 0, rgb.shape[1] - 1)
        colors[image_id] = rgb[xc, yc, :]

        arr[:, :2] = features.normalized_image_coordinates(
            arr[:, :2], cam.width, cam.height)
        if n_cols == 4:
            x, y, s, o = arr[:, 0], arr[:, 1], arr[:, 2], arr[:, 3]
        elif n_cols == 6:
            x, y = arr[:, 0], arr[:, 1]
            s, o = get_scale_orientation_from_affine(arr)
        elif n_cols == 2:
            x, y = arr[:, 0], arr[:, 1]
            s = np.zeros_like(x)
            o = np.zeros_like(x)
        else:
            raise ValueError
        s = s / max(cam.width, cam.height)
        keypoints[image_id] = np.vstack((x, y, s, o)).T

    cursor.execute("SELECT image_id, rows, cols, data FROM descriptors;")
    for row in cursor:
        image_id, n_rows, n_cols, arr = row
        filename, _ = image_map[image_id]
        descriptors = np.fromstring(arr, dtype=np.uint8).reshape(
            (n_rows, n_cols))
        kp = keypoints[image_id]
        features_data = features.FeaturesData(kp, descriptors,
                                              colors[image_id], None)
        data.save_features(filename, features_data)

    cursor.close()
    return keypoints

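# Reference for the keypoint column counts handled in import_features above
# (based on its branches; the 6-column case is assumed to be an affine
# keypoint layout from which scale and orientation are recovered by
# get_scale_orientation_from_affine):
#
#   2 columns -> x, y                      (scale and orientation zeroed)
#   4 columns -> x, y, scale, orientation
#   6 columns -> x, y + 2x2 affine shape   (scale/orientation derived)
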
def _read_ground_control_points_list_line(line, reference_lla, exif):
    words = line.split()
    lat, lon, alt, pixel_x, pixel_y = map(float, words[:5])
    shot_id = words[5]

    x, y, z = geo.topocentric_from_lla(
        lat, lon, alt,
        reference_lla['latitude'],
        reference_lla['longitude'],
        reference_lla['altitude'])

    d = exif[shot_id]

    o = types.GroundControlPointObservation()
    o.lla = np.array([lat, lon, alt])
    o.coordinates = np.array([x, y, z])
    o.shot_id = shot_id
    o.shot_coordinates = features.normalized_image_coordinates(
        np.array([[pixel_x, pixel_y]]), d['width'], d['height'])[0]
    return o

def _read_gcp_list_lines(lines, projection, reference, exif):
    points = {}
    for line in lines:
        words = line.split(None, 5)
        easting, northing, alt, pixel_x, pixel_y = map(float, words[:5])
        shot_id = words[5].strip()
        key = (easting, northing, alt)

        if key in points:
            point = points[key]
        else:
            # Convert 3D coordinates
            if np.isnan(alt):
                alt = 0
                has_altitude = False
            else:
                has_altitude = True
            if projection is not None:
                lon, lat = projection(easting, northing, inverse=True)
            else:
                lon, lat = easting, northing
            x, y, z = reference.to_topocentric(lat, lon, alt)

            point = types.GroundControlPoint()
            point.lla = np.array([lat, lon, alt])
            point.coordinates = np.array([x, y, z])
            point.has_altitude = has_altitude

            points[key] = point

        # Convert 2D coordinates
        d = exif[shot_id]
        coordinates = features.normalized_image_coordinates(
            np.array([[pixel_x, pixel_y]]), d['width'], d['height'])[0]

        o = types.GroundControlPointObservation()
        o.shot_id = shot_id
        o.shot_coordinates = coordinates
        point.observations.append(o)

    return list(points.values())

def render_perspective_view_of_a_panorama(image, panoshot, perspectiveshot,
                                          interpolation=cv2.INTER_LINEAR):
    """Render a perspective view of a panorama."""
    # Get destination pixel coordinates
    dst_shape = (perspectiveshot.camera.height, perspectiveshot.camera.width)
    dst_y, dst_x = np.indices(dst_shape).astype(np.float32)
    dst_pixels_denormalized = np.column_stack([dst_x.ravel(), dst_y.ravel()])

    dst_pixels = features.normalized_image_coordinates(
        dst_pixels_denormalized,
        perspectiveshot.camera.width,
        perspectiveshot.camera.height)

    # Convert to bearing
    dst_bearings = perspectiveshot.camera.pixel_bearing_many(dst_pixels)

    # Rotate to panorama reference frame
    rotation = np.dot(panoshot.pose.get_rotation_matrix(),
                      perspectiveshot.pose.get_rotation_matrix().T)

    rotated_bearings = np.dot(dst_bearings, rotation.T)

    # Project to panorama pixels
    src_x, src_y = panoshot.camera.project((rotated_bearings[:, 0],
                                            rotated_bearings[:, 1],
                                            rotated_bearings[:, 2]))
    src_pixels = np.column_stack([src_x.ravel(), src_y.ravel()])
    src_pixels_denormalized = features.denormalized_image_coordinates(
        src_pixels, image.shape[1], image.shape[0])
    src_pixels_denormalized.shape = dst_shape + (2,)

    # Sample color
    x = src_pixels_denormalized[..., 0].astype(np.float32)
    y = src_pixels_denormalized[..., 1].astype(np.float32)
    colors = cv2.remap(image, x, y, interpolation,
                       borderMode=cv2.BORDER_WRAP)

    return colors

def create_full_mosaic(args):
    log.setup()

    shot, data = args
    logger.info('Creating full mosaic for image {}'.format(shot.id))

    config = data.config
    start = timer()

    projection_type = shot.camera.projection_type

    r_map_x = None
    r_map_y = None
    dst_mask_x = None
    dst_mask_y = None

    if projection_type in ['perspective', 'brown', 'fisheye']:
        img = data.load_image(shot.id)

        camera = types.SphericalCamera()
        camera.id = "Spherical Projection Camera"

        # Determine the correct mosaic size from the focal length of the camera.
        # Limit this to a maximum of a 16K image, which is the highest
        # resolution currently supported by PVR.
        K_pix = shot.camera.get_K_in_pixel_coordinates()
        camera.height = int(np.clip(math.pi * K_pix[0, 0], 0, 8192))
        camera.width = int(np.clip(2 * math.pi * K_pix[0, 0], 0, 16384))

        shot_cam = shot.camera

        # Project shot's pixels to the spherical mosaic image
        src_shape = (shot_cam.height, shot_cam.width)
        src_y, src_x = np.indices(src_shape).astype(np.float32)

        src_pixels_denormalized = np.column_stack([src_x.ravel(), src_y.ravel()])
        src_pixels = features.normalized_image_coordinates(
            src_pixels_denormalized, shot_cam.width, shot_cam.height)

        # Convert to bearings
        src_bearings = shot_cam.pixel_bearing_many(src_pixels)

        # Project to spherical mosaic pixels
        dst_x, dst_y = camera.project((src_bearings[:, 0],
                                       src_bearings[:, 1],
                                       src_bearings[:, 2]))
        dst_pixels = np.column_stack([dst_x.ravel(), dst_y.ravel()])

        interp_mode = data.config.get('full_mosaic_proj_interpolation', 'linear')

        if interp_mode == 'linear':
            # Snap to pixel centers to generate a projection index mask. This
            # will be slower than finding the ROI using the projected border,
            # but it's far easier and covers wrap around and the poles with
            # minimal effort. It will also probably be more efficient when
            # wrap around or crossing the poles does occur.
            dst_pixels_denormalized_int = features.denormalized_image_coordinates(
                dst_pixels, camera.width, camera.height).astype(np.int32)

            dst_pixels_snap = features.normalized_image_coordinates(
                dst_pixels_denormalized_int.astype(np.float32),
                camera.width, camera.height)

            dst_bearings_re = camera.pixel_bearing_many(dst_pixels_snap)

            # Project mosaic pixel center bearings back into the source image
            src_re_x, src_re_y = shot_cam.project((dst_bearings_re[:, 0],
                                                   dst_bearings_re[:, 1],
                                                   dst_bearings_re[:, 2]))

            src_re_pixels = np.column_stack([src_re_x.ravel(), src_re_y.ravel()])
            src_re_denormalized = features.denormalized_image_coordinates(
                src_re_pixels, shot_cam.width, shot_cam.height)

            mosaic_img = initialize_mosaic_image(camera.width, camera.height, img)

            # Reshape arrays for cv2.remap efficiency reasons and due to the
            # SHRT_MAX limit of array size. Another option is to process in
            # chunks of a linear array of size SHRT_MAX; however, that approach
            # was probably 4x slower.
            x = src_re_denormalized[:, 0].reshape(src_x.shape).astype(np.float32)
            y = src_re_denormalized[:, 1].reshape(src_y.shape).astype(np.float32)

            r_map_x = x
            r_map_y = y

            # Sample source imagery colors
            colors = cv2.remap(img, x, y, cv2.INTER_LINEAR,
                               borderMode=cv2.BORDER_CONSTANT)

            dst_mask_y = dst_pixels_denormalized_int[:, 1].reshape(src_y.shape)
            dst_mask_x = dst_pixels_denormalized_int[:, 0].reshape(src_x.shape)

            mosaic_img[dst_mask_y, dst_mask_x] = colors

            blend_projection_border(mosaic_img, dst_mask_y, dst_mask_x)

            # Initialize blurring and alpha mask kernels
            # half_chunk_size = 75
            # border = 41
            # half_size = half_chunk_size + border
            # kernel_1d = cv2.getGaussianKernel(
            #     2*half_chunk_size+1,
            #     1.5*(0.3*((2*half_chunk_size+1-1)*0.5 - 1) + 0.8), cv2.CV_32F)
            # kernel_1d /= kernel_1d[half_chunk_size]
            # half_kernel_1d = kernel_1d[half_chunk_size:2*half_chunk_size]
            # alpha = np.zeros((2*half_chunk_size, 2*half_chunk_size, 3),
            #                  dtype=np.float32)
            # for y in range(0, 2*half_chunk_size):
            #     for x in range(0, 2*half_chunk_size):
            #         yt = y - half_chunk_size
            #         xt = x - half_chunk_size
            #         r = int(math.sqrt(yt*yt + xt*xt))
            #         if r > half_chunk_size-1:
            #             r = half_chunk_size-1
            #         kv = half_kernel_1d[r]
            #         alpha[y, x, 0] = alpha[y, x, 1] = alpha[y, x, 2] = kv
            #
            # # Grab the indices of pixels along the projected image border and
            # # blend into the background with a gaussian blur and alpha map.
            # dst_mask_y_border = np.concatenate([dst_mask_y[0:, 0],
            #                                     dst_mask_y[0:, -1],
            #                                     dst_mask_y[0, 0:],
            #                                     dst_mask_y[-1, 0:]])
            # dst_mask_x_border = np.concatenate([dst_mask_x[0:, 0],
            #                                     dst_mask_x[0:, -1],
            #                                     dst_mask_x[0, 0:],
            #                                     dst_mask_x[-1, 0:]])
            # dst_mask_border = np.column_stack([dst_mask_y_border,
            #                                    dst_mask_x_border])
            # for border_pix in dst_mask_border[::75]:
            #     border_y = border_pix[0]
            #     border_x = border_pix[1]
            #     sub_img = mosaic_img[border_y - half_size:border_y + half_size,
            #                          border_x - half_size:border_x + half_size].copy()
            #     sub_rng = border + 2*half_chunk_size
            #     sub_img[border:sub_rng, border:sub_rng] = cv2.GaussianBlur(
            #         sub_img[border:sub_rng, border:sub_rng], (81, 81), 0)
            #     mosaic_img[border_y - half_chunk_size:border_y + half_chunk_size,
            #                border_x - half_chunk_size:border_x + half_chunk_size] = \
            #         np.multiply(sub_img[border:sub_rng, border:sub_rng].astype(np.float32), alpha) + \
            #         np.multiply(mosaic_img[border_y - half_chunk_size:border_y + half_chunk_size,
            #                                border_x - half_chunk_size:border_x + half_chunk_size].astype(np.float32),
            #                     1 - alpha)
            # cv2.imwrite('c:\\alpha.png', alpha)

        elif interp_mode == 'nearest':
            # Implementing nearest this way, rather than just changing the
            # interpolation function of cv2.remap above, will be more efficient
            # because we avoid the reprojection back to the source image and
            # sample it directly using our index mask.
            dst_pixels_denormalized = features.denormalized_image_coordinates(
                dst_pixels, camera.width, camera.height)

            # Create a full equirectangular index image with all zero indices
            # for x and y
            fdst_y, fdst_x = np.zeros(
                (2, camera.height, camera.width)).astype(np.float32)

            # Use the projected indices to swap in the source image indices.
            x = dst_pixels_denormalized[..., 0].astype(np.int32)
            y = dst_pixels_denormalized[..., 1].astype(np.int32)

            fdst_x[y, x] = src_pixels_denormalized[..., 0]
            fdst_y[y, x] = src_pixels_denormalized[..., 1]

            r_map_x = fdst_x
            r_map_y = fdst_y

            mosaic_img = cv2.remap(img, fdst_x, fdst_y, cv2.INTER_NEAREST,
                                   borderMode=cv2.BORDER_CONSTANT)
        else:
            raise NotImplementedError(
                'Interpolation type not supported: {}'.format(interp_mode))

    data.save_full_mosaic_image(os.path.splitext(shot.id)[0], mosaic_img)

    end = timer()
    report = {
        "image": shot.id,
        "wall_time": end - start,
    }
    data.save_report(io.json_dumps(report),
                     'full_mosaic_reprojection/{}.json'.format(shot.id))

    return (r_map_x, r_map_y, dst_mask_x, dst_mask_y)

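# The mosaic projection in create_full_mosaic is driven by a config option
# read via data.config.get('full_mosaic_proj_interpolation', 'linear').
# A hypothetical config.yaml entry selecting the nearest-neighbour path
# would be:
#
#   full_mosaic_proj_interpolation: nearest   # or 'linear' (the default)
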
def detect(args):
    image, tags, data = args

    logger.info('Extracting {} features for image {}'.format(
        data.feature_type().upper(), image))

    DEBUG = 0

    # check if features already exist
    if not data.feature_index_exists(image):
        mask = data.mask_as_array(image)
        if mask is not None:
            logger.info('Found mask to apply for image {}'.format(image))
        preemptive_max = data.config.get('preemptive_max', 200)
        p_unsorted, f_unsorted, c_unsorted = features.extract_features(
            data.image_as_array(image), data.config, mask)
        if len(p_unsorted) == 0:
            return

        #===== prune features in tags =====#
        if data.config.get('prune_features_on_tags', False):
            # setup
            img = cv2.imread(os.path.join(data.data_path, 'images', image))
            [height, width, _] = img.shape
            p_denorm = features.denormalized_image_coordinates(
                p_unsorted, width, height)

            # expand tag contour with grid points beyond unit square
            expn = 2.0
            gridpts = np.array(
                [[-expn, expn], [expn, expn], [expn, -expn], [-expn, -expn]],
                dtype='float32')

            # find features to prune
            rm_list = []
            for tag in tags:
                # contour from tag region
                contours = np.array(tag.corners)

                if DEBUG > 0:
                    for i in range(0, 3):
                        cv2.line(img,
                                 (tag.corners[i, 0].astype('int'),
                                  tag.corners[i, 1].astype('int')),
                                 (tag.corners[i + 1, 0].astype('int'),
                                  tag.corners[i + 1, 1].astype('int')),
                                 [0, 255, 0], 12)
                    cv2.line(img,
                             (tag.corners[3, 0].astype('int'),
                              tag.corners[3, 1].astype('int')),
                             (tag.corners[0, 0].astype('int'),
                              tag.corners[0, 1].astype('int')),
                             [0, 255, 0], 12)

                # scale contour outward
                H = np.array(tag.homography, dtype='float32')
                contours_expanded = cv2.perspectiveTransform(
                    np.array([gridpts]), H)

                # for each point
                for pidx in range(0, len(p_unsorted)):
                    # point
                    pt = p_denorm[pidx, 0:2]
                    # point in contour
                    inout = cv2.pointPolygonTest(
                        contours_expanded.astype('int'), (pt[0], pt[1]), False)
                    # check result
                    if inout >= 0:
                        rm_list.append(pidx)

            # prune features
            p_unsorted = np.delete(p_unsorted, np.array(rm_list), axis=0)
            f_unsorted = np.delete(f_unsorted, np.array(rm_list), axis=0)
            c_unsorted = np.delete(c_unsorted, np.array(rm_list), axis=0)

            # debug
            if DEBUG > 0:
                p_denorm = np.delete(p_denorm, np.array(rm_list), axis=0)
                for pidx in range(0, len(p_denorm)):
                    pt = p_denorm[pidx, 0:2]
                    cv2.circle(img,
                               (pt[0].astype('int'), pt[1].astype('int')),
                               5, [0, 0, 255], -1)
                cv2.namedWindow('ShowImage', cv2.WINDOW_NORMAL)
                height, width, channels = img.shape
                showw = max(752, width / 4)
                showh = max(480, height / 4)
                cv2.resizeWindow('ShowImage', showw, showh)
                cv2.imshow('ShowImage', img)
                cv2.waitKey(0)
        #===== prune features in tags =====#

        # sort for preemptive
        size = p_unsorted[:, 2]
        order = np.argsort(size)
        p_sorted = p_unsorted[order, :]
        f_sorted = f_unsorted[order, :]
        c_sorted = c_unsorted[order, :]
        p_pre = p_sorted[-preemptive_max:]
        f_pre = f_sorted[-preemptive_max:]

        # save
        data.save_features(image, p_sorted, f_sorted, c_sorted)
        data.save_preemptive_features(image, p_pre, f_pre)

        if data.config.get('matcher_type', 'FLANN') == 'FLANN':
            index = features.build_flann_index(f_sorted, data.config)
            data.save_feature_index(image, index)

    #===== tag features =====#
    if data.config.get('use_apriltags', False) or data.config.get(
            'use_arucotags', False) or data.config.get('use_chromatags', False):
        # setup
        try:
            tags_all = data.load_tag_detection()
        except:
            return
        tags = tags_all[image]
        pt = []
        ft = []
        ct = []
        it = []
        imexif = data.load_exif(image)

        # for each tag in image
        for tag in tags:
            # normalize corners
            img = cv2.imread(os.path.join(data.data_path, 'images', image))
            [height, width, _] = img.shape
            #print 'width = ',str(imexif['width'])
            #print 'height= ',str(imexif['height'])
            #print 'width2= ',str(width)
            #print 'heigh2= ',str(height)
            norm_tag_corners = features.normalized_image_coordinates(
                tag.corners, width, height)  # imexif['width'], imexif['height'])

            # for each corner of tag
            for r in range(0, 4):
                # tag corners
                pt.append(norm_tag_corners[r, :])
                # tag id
                ft.append(tag.id)
                # colors
                ct.append(tag.colors[r, :])
                # corner id (0,1,2,3)
                it.append(r)

        # if tag features found
        if pt:
            pt = np.array(pt)
            ft = np.array(ft)
            ct = np.array(ct)
            it = np.array(it)
            data.save_tag_features(image, pt, ft, it, ct)

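# Config flags consulted by detect() above, shown as a hypothetical
# config.yaml fragment (the names come from the data.config.get calls;
# the values shown are just the defaults used in the code):
#
#   preemptive_max: 200
#   prune_features_on_tags: false
#   matcher_type: FLANN
#   use_apriltags: false
#   use_arucotags: false
#   use_chromatags: false
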