def run(img_path, cfg):
    '''Wrapper over OpenCV SURF.

    Parameters
    ----------
    img_path (str): Path to images.
    cfg: (Namespace): Configuration.
    '''

    common = cfg.method_dict['config_common']
    kp_name = common['keypoint'].lower()

    # Pick the detector variant; 'surf-lowth' drops the Hessian threshold
    # to zero so that (nearly) all responses survive detection.
    if kp_name == 'surf-def':
        feature = cv2.xfeatures2d.SURF_create()
    elif kp_name == 'surf-lowth':
        feature = cv2.xfeatures2d.SURF_create(hessianThreshold=0)
    else:
        raise RuntimeError('Unknown local feature type')

    # Grayscale, full-frame image.
    img, _ = load_image(img_path, use_color_image=False, crop_center=False)

    # Detect keypoints and extract descriptors in a single pass.
    kp, desc = feature.detectAndCompute(img, None)

    # Convert to the benchmark's (x, y, scale, angle, score) layout,
    # keeping at most `num_keypoints` entries.
    kp, desc = convert_opencv_kp_desc(kp, desc, common['num_keypoints'])

    return {
        'kp': [p[0:2] for p in kp],
        'scale': [p[2] for p in kp],
        'angle': [p[3] for p in kp],
        'score': [p[4] for p in kp],
        'descs': desc,
    }
def run(img_path, cfg):
    '''Wrapper over OpenCV ORB.

    Parameters
    ----------
    img_path (str): Path to images.
    cfg: (Namespace): Configuration.
    '''

    num_kp = cfg.method_dict['config_common']['num_keypoints']

    # ORB caps the number of detected features directly at creation time.
    feature = cv2.ORB_create(nfeatures=num_kp)

    # Grayscale, full-frame image.
    img, _ = load_image(img_path, use_color_image=False, crop_center=False)

    # Detect keypoints and extract descriptors in a single pass.
    kp, desc = feature.detectAndCompute(img, None)

    # Convert to the benchmark's (x, y, scale, angle, score) layout.
    kp, desc = convert_opencv_kp_desc(kp, desc, num_kp)

    result = {}
    result['kp'] = [p[0:2] for p in kp]
    # Also expose scale/angle/score, for consistency with the SURF and
    # SIFT wrappers — the converted keypoints already carry all five
    # fields, so this adds information without changing existing keys.
    result['scale'] = [p[2] for p in kp]
    result['angle'] = [p[3] for p in kp]
    result['score'] = [p[4] for p in kp]
    result['descs'] = desc
    return result
def run(img_path, cfg):
    '''Wrapper over OpenCV SIFT.

    Parameters
    ----------
    img_path (str): Path to images.
    cfg: (Namespace): Configuration.

    Valid keypoint methods: "sift-def" (standard detection threshold)
    and "sift-lowth" (lowered detection threshold to extract 8000
    features). Optional suffixes: "-clahe" (applies CLAHE over the
    image).

    Valid descriptors methods: "sift" and "rootsift". Optional suffixes:
    "-clahe" (applies CLAHE over the image), "upright" (sets keypoint
    orientations to 0, removing duplicates).
    '''

    kp_name = cfg.method_dict['config_common']['keypoint'].lower()
    desc_name = cfg.method_dict['config_common']['descriptor'].lower()
    num_kp = cfg.method_dict['config_common']['num_keypoints']

    # Strict whole-name lookup tables (rather than substring checks) so
    # typos or reordered flags fail loudly instead of being silently
    # accepted.
    # detector name -> (use_lower_det_th, use_clahe_det)
    det_opts = {
        'sift-def': (False, False),
        'sift-lowth': (True, False),
        'sift-def-clahe': (False, True),
        'sift-lowth-clahe': (True, True),
    }
    # descriptor name -> (use_rootsift, use_clahe_desc, use_upright)
    desc_opts = {
        'sift': (False, False, False),
        'rootsift': (True, False, False),
        'sift-clahe': (False, True, False),
        'rootsift-clahe': (True, True, False),
        'sift-upright': (False, False, True),
        'rootsift-upright': (True, False, True),
        'sift-clahe-upright': (False, True, True),
        'rootsift-clahe-upright': (True, True, True),
    }

    if kp_name not in det_opts:
        raise ValueError('Unknown detector')
    if desc_name not in desc_opts:
        raise ValueError('Unknown descriptor')
    use_lower_det_th, use_clahe_det = det_opts[kp_name]
    use_rootsift, use_clahe_desc, use_upright = desc_opts[desc_name]

    # Initialize the feature extractor. The hugely negative thresholds
    # effectively disable contrast/edge filtering so that enough
    # keypoints survive for the "lowth" setting.
    if use_lower_det_th:
        feature = cv2.xfeatures2d.SIFT_create(contrastThreshold=-10000,
                                              edgeThreshold=-10000)
    else:
        feature = cv2.xfeatures2d.SIFT_create()

    # Image used for detection (optionally CLAHE-equalized; CLAHE is
    # applied on a color load).
    if use_clahe_det:
        img_det, _ = load_image(img_path,
                                use_color_image=True,
                                crop_center=False)
        img_det = l_clahe(img_det)
    else:
        img_det, _ = load_image(img_path,
                                use_color_image=False,
                                crop_center=False)

    # Image used for descriptor extraction (may differ from the
    # detection image when only one of the two uses CLAHE).
    if use_clahe_desc:
        img_desc, _ = load_image(img_path,
                                 use_color_image=True,
                                 crop_center=False)
        img_desc = l_clahe(img_desc)
    else:
        img_desc, _ = load_image(img_path,
                                 use_color_image=False,
                                 crop_center=False)

    # Detect keypoints.
    kp = feature.detect(img_det, None)

    if use_upright:
        # Zero out orientations, skipping any keypoint whose response
        # equals its predecessor's (assumes duplicates are adjacent in
        # the detector's output — TODO confirm).
        unique_kp = []
        for idx, point in enumerate(kp):
            if idx > 0 and point.response == kp[idx - 1].response:
                continue
            point.angle = 0
            unique_kp.append(point)
        unique_kp, unique_desc = feature.compute(img_desc, unique_kp, None)
        # Keep only the num_kp strongest responses.
        order = np.argsort(np.array([p.response for p in unique_kp]))[::-1]
        keep = order[:min(len(unique_kp), num_kp)]
        kp = np.array(unique_kp)[keep]
        desc = unique_desc[keep]
    else:
        kp, desc = feature.compute(img_desc, kp, None)

    # RootSIFT: L1-normalize each descriptor, then take the element-wise
    # square root.
    if use_rootsift:
        desc /= desc.sum(axis=1, keepdims=True) + 1e-8
        desc = np.sqrt(desc)

    # Convert opencv keypoints into our format.
    kp, desc = convert_opencv_kp_desc(kp, desc, num_kp)

    result = {}
    result['kp'] = [p[0:2] for p in kp]
    result['scale'] = [p[2] for p in kp]
    result['angle'] = [p[3] for p in kp]
    result['score'] = [p[4] for p in kp]
    result['descs'] = desc
    return result