def test_binary_descriptors_rotation_crosscheck_true():
    """Verify matched keypoints and their descriptors between an image and
    its rotated version against the expected keypoint pairs, with
    cross_check enabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([0, 2, 3, 4, 5, 6, 9, 11, 12, 13, 14, 17, 18,
                             19, 21, 22, 23, 26, 27, 28, 29, 31, 32, 33, 34,
                             35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46])
    exp_matches2 = np.array([0, 2, 3, 1, 4, 6, 5, 7, 13, 10, 9, 11, 15, 8,
                             14, 12, 16, 18, 19, 21, 20, 24, 25, 26, 28, 27,
                             22, 23, 29, 30, 31, 32, 35, 33, 34, 36])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
def test_corner_peaks():
    response = np.zeros((5, 5))
    response[2:4, 2:4] = 1

    corners = corner_peaks(response, exclude_border=False)
    assert len(corners) == 1

    corners = corner_peaks(response, exclude_border=False, min_distance=0)
    assert len(corners) == 4
def process(self, img2, image_gray):
    # img2 = warp(img2)
    patch_size = [640]
    img2 = rgb2gray(img2)
    image_gray = rgb2gray(img2)  # note: overwrites the passed-in image_gray

    blobs_dog = blob_dog(image_gray, min_sigma=0.2, max_sigma=225,
                         sigma_ratio=1.6, threshold=.5)
    # blob radius is taken as-is from the sigma column returned by blob_dog
    blobs = [blobs_dog]
    colors = ['black']
    titles = ['Difference of Gaussian']
    sequence = zip(blobs, colors, titles)

    # plt.imshow(img2)
    # plt.axis("equal")
    # plt.show()

    for blobs, color, title in sequence:
        print(len(blobs))
        for blob in blobs:
            y, x, r = blob
            plotx = x
            ploty = y
            for i in range(3):
                keypoints1 = corner_peaks(corner_harris(Array.image_arr[i]),
                                          min_distance=1)
                keypoints2 = corner_peaks(corner_harris(img2), min_distance=1)

                extractor = BRIEF(patch_size=30, mode="uniform")

                extractor.extract(Array.image_arr[i], keypoints1)
                keypoints1 = keypoints1[extractor.mask]
                descriptors1 = extractor.descriptors

                extractor.extract(img2, keypoints2)
                keypoints2 = keypoints2[extractor.mask]
                descriptors2 = extractor.descriptors

                matches12 = match_descriptors(descriptors1, descriptors2,
                                              cross_check=True)
                # print(keypoints1, keypoints2)
                # print(matches12)

                for pizdezh in matches12:
                    X = keypoints2[pizdezh[1]][1]
                    Y = keypoints2[pizdezh[1]][0]
                    if sqrt((plotx - X)**2 + (ploty - Y)**2) < r:
                        seen = [{
                            "type": Array.type_arr[i],
                            "center_shift": (plotx - 160 / 2) * -0.02,
                            "distance": image_gray[int(y)][int(x)] / 0.08,
                        }]
                        print(seen)
                        data.seen.add(seen)
                        break
def test_corner_peaks():
    response = np.zeros((10, 10))
    response[2:5, 2:5] = 1

    corners = corner_peaks(response, exclude_border=False, min_distance=10,
                           threshold_rel=0)
    assert len(corners) == 1

    corners = corner_peaks(response, exclude_border=False, min_distance=1)
    assert len(corners) == 4

    corners = corner_peaks(response, exclude_border=False, min_distance=1,
                           indices=False)
    assert np.sum(corners) == 4
def featurize(img_name):
    """Load an image and convert it into a dictionary of features"""
    img = plt.imread(os.path.join('stimuli', img_name + '.png'))
    height, width, _ = img.shape
    features = defaultdict(int)
    for y in range(height):
        for x in range(width):
            features['red'] += img[y][x][0]
            features['green'] += img[y][x][1]
            features['blue'] += img[y][x][2]
            features['alpha'] += img[y][x][3]
    grey = color.rgb2grey(img)
    for y in range(height):
        for x in range(width):
            for key, value in per_pixel(grey, y, x):
                features[key] += value
    # Normalize over image size
    for key, value in features.items():
        features[key] = float(value) / height / width
    features['blob'] = feature.blob_dog(grey).shape[0]
    features['corners'] = feature.corner_peaks(
        feature.corner_harris(grey)).shape[0]
    return features
def test_uniform_mode(): """Verify the computed BRIEF descriptors with expected for uniform mode.""" img = data.coins() keypoints = corner_peaks(corner_harris(img), min_distance=5, threshold_abs=0, threshold_rel=0.1) extractor = BRIEF(descriptor_size=8, sigma=2, mode="uniform") extractor.extract(img, keypoints[:8]) expected = np.array( [ [False, False, False, True, True, True, False, False], [True, True, True, False, True, False, False, True], [True, True, True, False, True, True, False, True], [True, True, True, True, False, True, False, True], [True, True, True, True, True, True, False, False], [True, True, True, True, True, True, True, True], [False, False, False, True, True, True, True, True], [False, True, False, True, False, True, True, True], ], dtype=bool, ) assert_array_equal(extractor.descriptors, expected)
def extract_corner_harris(patch):
    """Extract four corner points using the Harris corner detection algorithm."""
    # Find corners with Harris corner detection
    coords = corner_peaks(corner_harris(patch, k=0.1), min_distance=5)
    coords_subpix = corner_subpix(patch, coords, window_size=13)

    # Find the nearest detected point for each image corner
    dim = patch.shape
    corners = [(0, 0), (dim[0], 0), (dim[0], dim[1]), (0, dim[1])]
    dest_points = [[] for _ in range(4)]
    for i in range(4):
        dest_points[i] = search_closest_points(corners[i], coords_subpix)

    # Check for degenerate (coincident) points
    try:
        epsilon = 1e-10
        for i in range(4):
            for j in range(i + 1, 4):
                if calc_distance(dest_points[i], dest_points[j]) < epsilon:
                    print('Error point')
                    return []
    except TypeError:
        return []

    # Reverse (y, x) positions to (x, y)
    for i in range(4):
        dest_points[i][1], dest_points[i][0] = dest_points[i][0], dest_points[i][1]
    return dest_points
def test_corner_orientations_lena():
    img = rgb2gray(data.lena())
    corners = corner_peaks(corner_fast(img, 11, 0.35))
    expected = np.array([-1.9195897, -3.03159624, -1.05991162,
                         -2.89573739, -2.61607644, 2.98660159])
    actual = corner_orientations(img, corners, octagon(3, 2))
    assert_almost_equal(actual, expected)
def test_corner_orientations_square():
    square = np.zeros((12, 12))
    square[3:9, 3:9] = 1
    corners = corner_peaks(corner_fast(square, 9), min_distance=1)
    actual_orientations = corner_orientations(square, corners, octagon(3, 2))
    actual_orientations_degrees = np.rad2deg(actual_orientations)
    expected_orientations_degree = np.array([45.0, 135.0, -45.0, -135.0])
    assert_array_equal(actual_orientations_degrees,
                       expected_orientations_degree)
def dumb_matcher(img1, img2):
    kps = lambda img: feature.corner_peaks(feature.corner_harris(img),
                                           min_distance=2)
    kp1 = kps(img1)
    kp2 = kps(img2)
    to_set = lambda aoa: set(map(lambda x: (x[0], x[1]), aoa))
    s1 = to_set(kp1)
    s2 = to_set(kp2)
    return float(len(s1 & s2) * 2) / (len(s1) + len(s2))
def find_corners(path, min_distance=5):
    """Find corners in an image at path.

    Returns the image and the corner lists.
    """
    from skimage.feature import corner_harris, corner_peaks
    img = imread(path, flatten=True)
    corners = corner_peaks(corner_harris(img), min_distance=min_distance)
    return img, corners
def peak_corner_detector(distance_map, threshold, min_d, num_peaks=6):
    """Thin wrapper around skimage.feature.corner_peaks.

    Unlike skimage.feature.peak_local_max, corner_peaks returns a single
    peak per plateau of identical values (see the sketch below).

    :param distance_map:
    :param threshold:
    :param min_d:
    :return:
    """
    return corner_peaks(distance_map, threshold_rel=threshold,
                        min_distance=min_d, num_peaks=num_peaks)
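# The difference alluded to in the docstring above, per the skimage docs:
# peak_local_max returns every pixel of a plateau of identical values, while
# corner_peaks keeps only one peak per plateau. A minimal illustration:
import numpy as np
from skimage.feature import corner_peaks, peak_local_max

response = np.zeros((5, 5))
response[2:4, 2:4] = 1  # 2x2 plateau of identical maxima

print(peak_local_max(response))  # four coordinates: the whole plateau
print(corner_peaks(response))    # a single coordinate, e.g. [[2, 2]]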
def corners(provider):
    """Number of corners."""
    gray = provider.as_gray()
    # TODO: custom parameters would give rise to exceptions of mis-matched shapes
    coords = corner_peaks(corner_harris(gray))  # min_distance=5
    coords_subpix = corner_subpix(gray, coords)  # window_size=13
    return len(coords_subpix)
def test_match_keypoints_brief_lena_translation():
    """Test matched keypoints between lena image and its translated version."""
    img = data.lena()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0, translation=(15, 20))
    translated_img = tf.warp(img, tform)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    descriptors1, keypoints1 = brief(img, keypoints1, descriptor_size=512)

    keypoints2 = corner_peaks(corner_harris(translated_img), min_distance=5)
    descriptors2, keypoints2 = brief(translated_img, keypoints2,
                                     descriptor_size=512)

    matched_keypoints = match_keypoints_brief(keypoints1, descriptors1,
                                              keypoints2, descriptors2,
                                              threshold=0.10)

    assert_array_equal(matched_keypoints[:, 0, :],
                       matched_keypoints[:, 1, :] + [20, 15])
def test_match_keypoints_brief_lena_rotation():
    """Verify matched keypoints result between lena image and its rotated
    version with the expected keypoint pairs."""
    img = data.lena()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.10, translation=(0, 0))
    rotated_img = tf.warp(img, tform)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    descriptors1, keypoints1 = brief(img, keypoints1, descriptor_size=512)

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5)
    descriptors2, keypoints2 = brief(rotated_img, keypoints2,
                                     descriptor_size=512)

    matched_keypoints = match_keypoints_brief(keypoints1, descriptors1,
                                              keypoints2, descriptors2,
                                              threshold=0.07)

    expected = np.array([[[263, 272], [234, 298]],
                         [[271, 120], [258, 146]],
                         [[323, 164], [305, 195]],
                         [[414,  70], [405, 111]],
                         [[435, 181], [415, 223]],
                         [[454, 176], [435, 221]]])

    assert_array_equal(matched_keypoints, expected)
def test_corner_fast_lena():
    img = rgb2gray(data.lena())
    expected = np.array([[ 67, 157],
                         [204, 261],
                         [247, 146],
                         [269, 111],
                         [318, 158],
                         [386,  73],
                         [413,  70],
                         [435, 180],
                         [455, 177],
                         [461, 160]])
    actual = corner_peaks(corner_fast(img, 12, 0.3))
    assert_array_equal(actual, expected)
def test_subpix_border():
    img = np.zeros((50, 50))
    img[1:25, 1:25] = 255
    img[25:-1, 25:-1] = 255
    corner = corner_peaks(corner_harris(img), min_distance=1)
    subpix = corner_subpix(img, corner, window_size=11)
    ref = np.array([[ 0.52040816,  0.52040816],
                    [ 0.52040816, 24.47959184],
                    [24.47959184,  0.52040816],
                    [24.5       , 24.5       ],
                    [24.52040816, 48.47959184],
                    [48.47959184, 24.52040816],
                    [48.47959184, 48.47959184]])
    assert_almost_equal(subpix, ref)
def nameTheShape(file):
    coords = corner_peaks(corner_harris(file), min_distance=5)
    noCorners = len(coords)
    if noCorners == 3:
        return "triangle"
    elif noCorners == 4:
        return "quadrilateral"
    elif noCorners == 5:
        return "pentagon"
    else:
        return "ERROR!!!"
def test_corner_fast_astronaut():
    img = rgb2gray(data.astronaut())
    expected = np.array([[101, 198], [140, 205], [141, 242], [177, 156],
                         [188, 113], [197, 148], [213, 117], [223, 375],
                         [232, 266], [245, 137], [249, 171], [300, 244],
                         [305,  57], [325, 245], [339, 242], [346, 279],
                         [353, 172], [358, 307], [362, 252], [362, 328],
                         [363, 192], [364, 147], [369, 159], [374, 171],
                         [379, 183], [387, 195], [390, 149], [401, 197],
                         [403, 162], [413, 181], [444, 310], [464, 251],
                         [476, 250], [489, 155], [492, 139], [494, 169],
                         [496, 266]])
    actual = corner_peaks(corner_fast(img, 12, 0.3), min_distance=10,
                          threshold_rel=0)
    assert_array_equal(actual, expected)
def conrecs():
    # im = feature.corner_shi_tomasi(im).corner_harris(im)
    keypoints1 = feature.corner_peaks(feature.corner_shi_tomasi(im),
                                      min_distance=1)
    print(keypoints1)
    extractor = feature.BRIEF()
    extractor.extract(im, keypoints1)
    keys = keypoints1[extractor.mask]
    fig, ax = plt.subplots(figsize=(18, 13))
    ax.imshow(im, cmap=plt.cm.gray)
    for pair in keys:
        # corner_peaks returns (row, col), so plot col as x and row as y
        plt.scatter(pair[1], pair[0])
def main(argv):
    image = io.imread(argv[0], True)
    smooth = gaussian(image, sigma=4, mode='reflect')
    binary = image > threshold_otsu(smooth)
    skeleton = skeletonize_3d(invert(binary))
    coords = corner_peaks(corner_harris(binary, k=0.2, sigma=4),
                          min_distance=5)
    coords_subpix = corner_subpix(binary, coords, window_size=13)

    fig, ax = plt.subplots()
    ax.imshow(binary, interpolation='nearest', cmap=plt.cm.gray)
    ax.plot(coords[:, 1], coords[:, 0], '+r', markersize=15)
    # ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
    ax.axis((0, 600, 600, 0))
    plt.show()
def make_dp_mask(dp_cropped):
    dp_eroded = erosion(dp_cropped)
    dp_sobel = sobel(dp_eroded)
    dp_mask = dp_sobel > (dp_sobel.max() - dp_sobel.min()) / 10
    dp_closed = closing(dp_mask)
    dp_filled = ndi.binary_fill_holes(dp_closed)
    dp_small_removed = remove_small_objects(dp_filled, 100)
    dp_distance = ndi.distance_transform_edt(dp_small_removed)
    dp_local_max = corner_peaks(dp_distance, indices=False,
                                labels=dp_small_removed, min_distance=10)
    dp_watershed_markers = ndi.label(dp_local_max)[0]
    dp_ws = watershed(-dp_distance, dp_watershed_markers,
                      mask=dp_small_removed)
    return dp_ws
def getKeypoints(self, img):
    Ix = cv2.Sobel(src=img, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=self.ksize)
    Iy = cv2.Sobel(src=img, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=self.ksize)
    Ixx = Ix * Ix
    Iyy = Iy * Iy
    Ixy = Ix * Iy
    # corner response from the gradient products (a Harris-style measure)
    response = Ixx * Iyy - self.k * (Ixy**2)
    args = corner_peaks(response, threshold_rel=self.k, exclude_border=8)
    return args
def test_corner_orientations_astronaut():
    img = rgb2gray(data.astronaut())
    corners = corner_peaks(corner_fast(img, 11, 0.35))
    expected = np.array([-1.75220190e+00, 2.01197383e+00, -2.01162417e+00,
                         -1.88247204e-01, 1.19134149e+00, -6.61151410e-01,
                         -2.99143370e+00, 2.17103132e+00, -7.52950306e-04,
                         1.25854853e+00, 2.43573659e+00, -1.69230287e+00,
                         -9.88548213e-01, 1.47154532e+00, -1.65449964e+00,
                         1.09650167e+00, 1.07812134e+00, -1.68885773e+00,
                         -1.64397304e+00, 3.09780364e+00, -3.49561988e-01,
                         -1.46554357e+00, -2.81524886e+00, 8.12701702e-01,
                         2.47305654e+00, -1.63869275e+00, 5.46905279e-02,
                         -4.40598471e-01, 3.14918803e-01, -1.76069982e+00,
                         3.05330950e+00, 2.39291733e+00, -1.22091334e-01,
                         -3.09279990e-01, 1.45931342e+00])
    actual = corner_orientations(img, corners, octagon(3, 2))
    assert_almost_equal(actual, expected)
def main(inputfilename, outputfilename):
    # Load image
    image = imread(inputfilename)
    image = rgb2gray(image)

    # Apply corner detection algorithm
    corners = corner_harris(image)
    coords = corner_peaks(corners, min_distance=5)
    coords_subpix = corner_subpix(image, coords, window_size=13)

    # Display the image
    fig, ax = plt.subplots()
    ax.imshow(image, interpolation='nearest', cmap=plt.cm.gray)
    ax.plot(coords[:, 1], coords[:, 0], '.b', markersize=3)
    ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
    ax.axis((0, 350, 350, 0))
    plt.savefig(outputfilename)
def fast_skimage(self, image, **kwargs):
    coords_subpix = np.zeros_like(image)
    cornerness_matrix = sf.corner_peaks(
        sf.corner_fast(image, 16, 0.8), min_distance=1)  # no_of_detected_points * 2
    coords_subpix = sf.corner_subpix(image, cornerness_matrix,
                                     window_size=13, alpha=kwargs["alpha"])
    display.draw_points(image, cornerness_matrix, '_', self.path[2:-1],
                        method_name=kwargs['method'], name=self.name,
                        sp=coords_subpix)
    return cornerness_matrix, coords_subpix
def shi_tomasi_skimage(self, image, **kwargs):
    coords_subpix = np.zeros_like(image)
    cornerness_matrix = sf.corner_peaks(sf.corner_shi_tomasi(image),
                                        min_distance=1)
    coords_subpix = sf.corner_subpix(image, cornerness_matrix,
                                     window_size=13, alpha=kwargs["alpha"])
    display.draw_points(image, cornerness_matrix, '_', self.path[2:-1],
                        method_name=kwargs['method'], name=self.name,
                        sp=coords_subpix)
    print("detected points: ", cornerness_matrix.shape[0])
    return cornerness_matrix, coords_subpix
def get_head_tail(image, radius=12, sigma=4, min_distance=10):
    """Make a head/tail mask of a worm.

    :param image: binary worm image
    :param radius: radius used around point
    :param sigma: Harris detector radius
    :param min_distance: distance between head and tail
    :return: mask of head and tail
    """
    hc = corner_harris(image, sigma=sigma)
    cp = corner_peaks(hc, min_distance=min_distance, num_peaks=2)
    mask = np.zeros_like(image)
    for c in cp:
        rr, cc = circle(c[0], c[1], radius, shape=mask.shape)
        mask[rr, cc] = 1
    return image & mask
def harris_skimage(self, image, num_peaks, **kwargs):
    coords_subpix = np.zeros_like(image)
    # larger min_distance -> fewer points
    cornerness_matrix = sf.corner_peaks(sf.corner_harris(image),
                                        min_distance=1, num_peaks=num_peaks)
    # sub-pixel accuracy
    coords_subpix = sf.corner_subpix(image, cornerness_matrix,
                                     window_size=13, alpha=kwargs["alpha"])
    display.draw_points(image, cornerness_matrix, '_', self.path[2:-1],
                        method_name=kwargs['method'], name=self.name,
                        sp=coords_subpix, counter=kwargs["counter"])
    print("detected points: ", cornerness_matrix.shape[0])
    return cornerness_matrix, coords_subpix
def corner_demo():
    image = io.imread("D:/images/home.jpg")
    gray = color.rgb2gray(image)
    coords = feature.corner_peaks(feature.corner_harris(gray), min_distance=5)

    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    ax = axes.ravel()
    ax[0].imshow(image)
    ax[0].set_title("Input")
    ax[1].imshow(image)
    ax[1].set_title("harris corner detection")
    ax[0].axis('off')
    ax[1].axis('off')
    ax[1].plot(coords[:, 1], coords[:, 0], color='red', marker='o',
               linestyle='None', markersize=4)
    fig.tight_layout()
    plt.show()
def get_corner_peaks_by_label(image, labels, **kwargs):
    """Wrapper for skimage.feature.corner_peaks that optimizes the labels feature.

    INPUTS
    ------
    image: Image to find local maxima in
    labels: binary mask denoting regions to search for local maxima in

    The indices feature is not supported.
    """
    peaks = numpy.zeros_like(image, dtype=bool)
    for props in measure.regionprops(labels):
        chunk = crop_from_bounding_box(props.bbox, image)
        chunk_peaks = feature.corner_peaks(chunk, labels=props.image, **kwargs)
        modify_with_bounding_box(props.bbox, peaks, chunk_peaks)
    return peaks
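# `crop_from_bounding_box` and `modify_with_bounding_box` are not defined in
# this snippet; a minimal sketch of what they are assumed to do, under the
# assumption that corner_peaks is called with indices=False (mask output):
import numpy

def crop_from_bounding_box(bbox, image):
    """Return the window of `image` covered by a regionprops-style bbox."""
    min_row, min_col, max_row, max_col = bbox
    return image[min_row:max_row, min_col:max_col]

def modify_with_bounding_box(bbox, target, values):
    """OR `values` into the bbox window of the boolean `target` in place."""
    min_row, min_col, max_row, max_col = bbox
    numpy.logical_or(target[min_row:max_row, min_col:max_col], values,
                     out=target[min_row:max_row, min_col:max_col])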
def corners_2_xy(img):
    # img = img.copy()
    # img[img < 127] = 0
    # img[img > 127] = 255
    local_peaks = corner_peaks(img, min_distance=5, threshold_rel=0.5,
                               indices=True)
    local_peaks = np.array(local_peaks, dtype=np.float64)
    height, width = img.shape
    width /= 3
    # keep peaks from the middle third of the image
    col1m = (local_peaks[:, 1] >= width) & (local_peaks[:, 1] < 2 * width)
    peaks = local_peaks[col1m]
    # normalize: rows to [0, 1], columns relative to the middle third
    peaks[:, 0] /= height
    peaks[:, 1] -= width
    peaks[:, 1] /= width
    return peaks
def test_corner_orientations_astronaut():
    img = rgb2gray(data.astronaut())
    corners = corner_peaks(corner_fast(img, 11, 0.35), min_distance=10,
                           threshold_abs=0, threshold_rel=0.1)
    expected = np.array([-1.75220190e+00, 2.01197383e+00, -2.01162417e+00,
                         -1.88247204e-01, 1.19134149e+00, -6.61151410e-01,
                         -2.99143370e+00, 2.17103132e+00, -7.52950306e-04,
                         1.25854853e+00, 2.43573659e+00, -1.69230287e+00,
                         -9.88548213e-01, 1.47154532e+00, -1.65449964e+00,
                         1.09650167e+00, 1.07812134e+00, -1.68885773e+00,
                         -1.64397304e+00, 3.09780364e+00, -3.49561988e-01,
                         -1.46554357e+00, -2.81524886e+00, 8.12701702e-01,
                         2.47305654e+00, -1.63869275e+00, 5.46905279e-02,
                         -4.40598471e-01, 3.14918803e-01, -1.76069982e+00,
                         3.05330950e+00, 2.39291733e+00, -1.22091334e-01,
                         -3.09279990e-01, 1.45931342e+00])
    actual = corner_orientations(img, corners, octagon(3, 2))
    assert_almost_equal(actual, expected)
def forward(ob):
    """Takes a raw (768, 1024, 3) uint8 screen and returns a list of VNC events.

    The browser window indents the origin of MiniWob by 75 pixels from the
    top and 10 pixels from the left. The first 50 pixels along the height
    are the query.
    """
    if ob is None:
        return []
    x = ob['vision']
    crop = x[75:75 + 50 + 160, 10:10 + 160, :]  # miniwob coordinates crop
    square = x[75 + 50:75 + 50 + 160, 10:10 + 160, :]
    gray = rgb2gray(square)
    print(gray)
    coords = corner_peaks(corner_harris(gray), min_distance=5)
    coords_subpix = corner_subpix(gray, coords, window_size=13)

    newy = coords_subpix[:, 0]
    newx = coords_subpix[:, 1]
    newy = newy[np.logical_not(np.isnan(newy))]
    newx = newx[np.logical_not(np.isnan(newx))]
    goal_y, goal_x = np.mean(newy) + 125, np.mean(newx) + 10
    if math.isnan(goal_y) or math.isnan(goal_x):
        return []
    print(goal_y, goal_x)

    # 1. move to x,y with left button released, and click there (2. and 3.)
    action = [
        universe.spaces.PointerEvent(goal_x, goal_y, 0),
        universe.spaces.PointerEvent(goal_x, goal_y, 1),
        universe.spaces.PointerEvent(goal_x, goal_y, 0),
    ]
    return action
def test_uniform_mode(dtype):
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins().astype(dtype)

    keypoints = corner_peaks(corner_harris(img), min_distance=5,
                             threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')
    extractor.extract(img, keypoints[:8])

    expected = np.array([[1, 1, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 0, 0, 1, 0, 0],
                         [1, 1, 0, 0, 1, 0, 0, 0],
                         [0, 0, 0, 1, 1, 1, 1, 1],
                         [1, 1, 1, 0, 0, 1, 0, 0],
                         [1, 1, 1, 1, 0, 1, 0, 0],
                         [1, 1, 0, 0, 0, 1, 0, 0],
                         [0, 1, 1, 1, 0, 1, 1, 1]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2)
    extractor.extract(img, keypoints[:8])

    expected = np.array([[False,  True, False, False,  True, False,  True, False],
                         [ True, False,  True,  True, False,  True, False, False],
                         [ True, False, False,  True, False,  True, False,  True],
                         [ True,  True,  True,  True, False,  True, False,  True],
                         [ True,  True,  True, False, False,  True,  True,  True],
                         [False, False, False, False,  True, False, False, False],
                         [False,  True, False, False,  True, False,  True, False],
                         [False, False, False, False, False, False, False, False]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
def show_features(self, gd_file):
    r_img = self.cp.resize_img(PIL.Image.open(gd_file),
                               base_width=g_prisma_image_size,
                               keep_size=False)
    l_img = np.float32(r_img.convert('L'))
    ll_img = np.float32(l_img / 255)
    coords = corner_peaks(corner_harris(ll_img), min_distance=5)
    coords_subpix = corner_subpix(ll_img, coords, window_size=25)

    plt.figure(figsize=(8, 8))
    plt.imshow(r_img, interpolation='nearest')
    plt.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r',
             markersize=15, mew=5)
    plt.plot(coords[:, 1], coords[:, 0], '.b', markersize=7)
    plt.axis('off')
    plt.show()
def calculate_descriptors(X):
    extractor = BRIEF()
    Descriptors = []
    for i in range(len(X)):
        Im = np.asarray(X[i, :, :, :], dtype='float32')
        Max = np.amax(Im)
        Im = Im / Max
        Im = rgb2gray(Im)
        keypoints = corner_peaks(corner_harris(Im), min_distance=5)
        extractor.extract(Im, keypoints)
        Temp = extractor.descriptors
        Descriptors.append(
            np.asarray(np.round(np.average(Temp, axis=0)), dtype='int32'))
    Descriptors_matrix = np.zeros([len(X), 256])
    for i in range(len(X)):
        Descriptors_matrix[i, :] = Descriptors[i]
    return Descriptors_matrix
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = rgb2gray(data.lena())

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')
    extractor.extract(img, keypoints[:8])

    expected = np.array([[ True, False,  True, False, False,  True, False, False],
                         [False,  True, False, False,  True,  True,  True,  True],
                         [ True, False, False, False, False, False, False, False],
                         [False,  True,  True, False, False, False,  True, False],
                         [False, False, False, False, False, False,  True, False],
                         [False,  True, False, False,  True, False, False, False],
                         [False, False,  True,  True, False, False,  True,  True],
                         [ True,  True, False, False, False, False, False, False]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
def corners_2_xy(outputs):
    output = outputs['output_likelihood']
    output = torch.sigmoid(output)
    edges, corners = torch.chunk(output, 2, dim=1)
    corner1 = 255 * corners
    corner1[corner1 > 127] = 255
    corner1[corner1 < 127] = 0
    corner1 = torch.cat((corner1, corner1, corner1), dim=-1)
    corner1 = torch.squeeze(corner1)
    array = corner1.detach().cpu().numpy().astype(np.uint8)

    local_peaks = corner_peaks(array, min_distance=5, threshold_rel=0.5,
                               indices=True)
    local_peaks = np.array(local_peaks, dtype=np.float64)
    height, width = array.shape
    width /= 3
    # keep peaks from the middle third, then normalize as in corners_2_xy above
    col1m = (local_peaks[:, 1] >= width) & (local_peaks[:, 1] < 2 * width)
    peaks = local_peaks[col1m]
    peaks[:, 0] /= height
    peaks[:, 1] -= width
    peaks[:, 1] /= width
    return peaks
def get_pnodes(self):
    corners = np.array(corner_peaks(corner_harris(self.map_), min_distance=1))
    print("Corners located!")
    corner_map = np.zeros(self.map_.shape)
    for corner in corners:
        corner_map[corner[0], corner[1]] = 1

    selem_mat = np.ones((12, 12))
    for _ in range(5):
        corner_map = dilation(corner_map)
    for _ in range(1):
        corner_map = opening(corner_map, selem=selem_mat)
    for _ in range(8):
        corner_map = dilation(corner_map)
    print("Pseudo-nodes located!")

    p_nodes = []
    for i in range(len(corner_map)):
        for j in range(len(corner_map[i])):
            if corner_map[i, j] > 0.5:
                p_nodes.append([i, j])
    return p_nodes
def kitchen_rosenfeld_skimage(self, image, threshold_abs_kr, **kwargs):
    coords_subpix = np.zeros_like(image)
    cornerness_matrix = sf.corner_peaks(
        sf.corner_kitchen_rosenfeld(image, mode='constant'), min_distance=1,
        threshold_abs=threshold_abs_kr, threshold_rel=0.3)
    coords_subpix = sf.corner_subpix(image, cornerness_matrix,
                                     window_size=13, alpha=kwargs["alpha"])
    display.draw_points(image, cornerness_matrix, '_', self.path[2:-1],
                        method_name=kwargs['method'], name=self.name,
                        sp=coords_subpix)
    print("detected points: ", cornerness_matrix.shape[0])
    return cornerness_matrix, coords_subpix
def centre_button(ob):
    if ob is None:
        return -1, -1
    x = ob['vision']
    crop = x[75:75 + 50 + 160, 10:10 + 160, :]  # miniwob coordinates crop
    square = x[75 + 50:75 + 50 + 160, 10:10 + 160, :]
    gray = rgb2gray(square)
    coords = corner_peaks(corner_harris(gray), min_distance=5)
    coords_subpix = corner_subpix(gray, coords, window_size=13)
    newy = coords_subpix[:, 0]
    newx = coords_subpix[:, 1]
    newy = newy[np.logical_not(np.isnan(newy))]
    newx = newx[np.logical_not(np.isnan(newx))]
    goal_y, goal_x = np.mean(newy) + 125, np.mean(newx) + 10
    if math.isnan(goal_y) or math.isnan(goal_x):
        return -1, -1
    return goal_y, goal_x
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5,
                             threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2)
    extractor.extract(img, keypoints[:8])

    expected = np.array([[False,  True, False, False,  True, False,  True, False],
                         [ True, False,  True,  True, False,  True, False, False],
                         [ True, False, False,  True, False,  True, False,  True],
                         [ True,  True,  True,  True, False,  True, False,  True],
                         [ True,  True,  True, False, False,  True,  True,  True],
                         [False, False, False, False,  True, False, False, False],
                         [False,  True, False, False,  True, False,  True, False],
                         [False, False, False, False, False, False, False, False]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')
    extractor.extract(img, keypoints[:8])

    expected = np.array([[False, False, False,  True,  True,  True, False, False],
                         [ True,  True,  True, False,  True, False, False,  True],
                         [ True,  True,  True, False,  True,  True, False,  True],
                         [ True,  True,  True,  True, False,  True, False,  True],
                         [ True,  True,  True,  True,  True,  True, False, False],
                         [ True,  True,  True,  True,  True,  True,  True,  True],
                         [False, False, False,  True,  True,  True,  True,  True],
                         [False,  True, False,  True, False,  True,  True,  True]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
def foerstner_skimage(self, image, num_peaks, **kwargs):
    w, q = sf.corner_foerstner(image)
    q_min = 0.9
    w_min = 0.1
    foerstner = (q > q_min) * (w > w_min) * w
    cornerness_matrix = sf.corner_peaks(foerstner, min_distance=1,
                                        num_peaks=num_peaks)
    coords_subpix = sf.corner_subpix(image, cornerness_matrix,
                                     window_size=13, alpha=kwargs["alpha"])
    display.draw_points(image, cornerness_matrix, '_', self.path[2:-1],
                        method_name=kwargs['method'], name=self.name,
                        sp=coords_subpix)
    print("detected points: ", cornerness_matrix.shape[0])
    return cornerness_matrix, coords_subpix
def get_bbox_(self, data: TransformedImageData, image: array) -> BBox:
    contours = find_contours(image, .5)
    if len(contours) != 1:
        return BBox.from_image(image)
    image = sobel(image)
    coords = corner_peaks(corner_harris(image), threshold_rel=0, num_peaks=4)
    x_values = sorted(coords[:, 1])
    y_values = sorted(coords[:, 0])
    min_x, max_x = x_values[1:3]
    min_y, max_y = y_values[1:3]
    if self.debug_level >= DebugLevel.REPORT:
        imshow(image, cmap="gray")
        plot(coords[:, 1], coords[:, 0], '+r', markersize=15)
        self.savers_['corners'].save(data.name)
    return BBox(min_x, min_y, max_x, max_y)
def find_keypoints(img, scheme="SURF", radius=None): if scheme == "SURF": detector = cv2.xfeatures2d.SURF_create(hessianThreshold=400, nOctaves=4, nOctaveLayers=3, extended=False, upright=True) elif scheme == "SIFT": detector = cv2.xfeatures2d.SIFT_create(nOctaveLayers=3, sigma=1.3) elif scheme == "BRISK": detector = cv2.BRISK_create(thresh=30, octaves=3) elif scheme == "ORB": detector = cv2.ORB_create(nfeatures=10000) if scheme not in ["HARRIS"]: kps = detector.detect(img, None) else: cnrs = corner_peaks(corner_harris(img), min_distance=radius) kps = [FakeCVFpt(xy) for xy in cnrs] return kps
def briefRotLite(im, compareX, compareY, uX, uY):
    locs, desc = None, None
    # YOUR CODE HERE
    method = feat.corner_harris(im, sigma=1.5)
    locs = feat.corner_peaks(method, min_distance=2)
    patch_width = 9
    # Load the matrices that we saved:
    comp_x = sio.loadmat('testPattern.mat')['compareX'][0]
    comp_y = sio.loadmat('testPattern.mat')['compareY'][0]
    unrav_x = np.unravel_index(comp_x + 40, (patch_width, patch_width))
    unrav_y = np.unravel_index(comp_y + 40, (patch_width, patch_width))
    # Second-moment matrix of the keypoint locations
    I = np.dot(locs.T, locs)
    # Compute the principal direction (d) from the SVD of I
    _, _, SVD = np.linalg.svd(I)
    d = np.array(SVD[0, :])
    # Build the rotation matrix from the principal direction
    R = np.array([[d[0], d[1]],
                  [-d[1], d[0]]])
    # Unravel the test pattern to find the new locations of Y
    y = np.unravel_index(comp_y + 40, (patch_width, patch_width))
    # Rotate the coordinates with the rotation matrix
    y = np.dot(R, y)
    y_range = y.shape[1]
    # Update the new locations
    y_ = np.array([9 * y[0, i] + y[1, i] for i in range(y_range)]) - 40
    # Recompute BRIEF with the rotated pattern to find the keypoints
    locs, desc = computeBrief(im, locs, comp_x, comp_y, unrav_x, unrav_y)
    return locs, desc
def classify(self, image, model, args):
    '''uniformly sampled points'''
    Worig, Horig = image.size()
    num_points = int(args.get('points', 100))
    border = float(args.get('border', 5))
    border = int(round(border * np.mean([Worig, Horig]) / 100.0))

    # get gray scale image for salient point detection
    pix = image.pixels(
        operations='slice=,,1,1&resize=%s,%s,BC,MX&depth=8,d,u&remap=gray&format=tiff'
        % (self.side, self.side))
    W, H = pix.shape[0:2]

    # compute scaling factor
    sx, sy = (1.0, 1.0)
    if Worig != W or Horig != H:
        sx = float(W) / Worig
        sy = float(H) / Horig
        log.debug('Classify: Original image is larger, use scaling factors: %s,%s',
                  sx, sy)

    # scale params to resized image
    border = int(round(border * sx))

    pts, num_points_x, num_points_y, sw, sh = distribute_points(
        num_points, W, H, border, equal=False, return_all=True)

    # detect salient points
    pts = corner_peaks(corner_shi_tomasi(pix), min_distance=int(sw * 0.3),
                       exclude_border=border, indices=True,
                       num_peaks=num_points)

    # re-scale points
    points = [(p[0] / sx, p[1] / sy) for p in pts]

    return classify_points(image, model, args, points, 'Salient points')
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)

    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i], kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)

    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
                  # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE (one possible completion is sketched below this function)
    pass
    ### END YOUR CODE

    return panorama
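# The ### YOUR CODE HERE block above is left unfilled in the source. Below is
# a hedged, self-contained sketch of one possible completion using skimage's
# own RANSAC and warping; the assignment this snippet comes from most likely
# relies on its own helper functions instead, so treat this as an assumption,
# not the original solution.
import numpy as np
from skimage import transform as tf
from skimage.measure import ransac


def stitch_from_matches(imgs, keypoints, matches):
    # Chain robust affine fits so every image maps into imgs[0]'s frame.
    tforms = [tf.AffineTransform()]  # identity for the reference image
    for i in range(len(imgs) - 1):
        src = keypoints[i + 1][matches[i][:, 1]][:, ::-1]  # (row, col) -> (x, y)
        dst = keypoints[i][matches[i][:, 0]][:, ::-1]
        model, _ = ransac((src, dst), tf.AffineTransform, min_samples=3,
                          residual_threshold=2, max_trials=500)
        # Compose: imgs[i+1] -> imgs[i] -> ... -> imgs[0]
        tforms.append(tf.AffineTransform(tforms[-1].params @ model.params))

    # Warp every image's corners forward to size the common output canvas.
    all_corners = []
    for img, t in zip(imgs, tforms):
        h, w = img.shape[:2]
        all_corners.append(t(np.array([[0, 0], [w, 0], [w, h], [0, h]], float)))
    all_corners = np.vstack(all_corners)
    offset = all_corners.min(axis=0)
    out_shape = tuple(np.ceil(all_corners.max(axis=0) - offset)[::-1].astype(int))

    # Accumulate the warped images, averaging where they overlap (border
    # interpolation artifacts are ignored in this sketch).
    shift = tf.SimilarityTransform(translation=-offset)
    acc = np.zeros(out_shape)
    cnt = np.zeros(out_shape)
    for img, t in zip(imgs, tforms):
        warped = tf.warp(img, (t + shift).inverse, output_shape=out_shape,
                         cval=-1)
        valid = warped != -1
        acc[valid] += warped[valid]
        cnt[valid] += 1
    return acc / np.maximum(cnt, 1)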
def _detect_octave(self, octave_image):
    # Extract keypoints for current octave
    fast_response = corner_fast(octave_image, self.fast_n,
                                self.fast_threshold)
    keypoints = corner_peaks(fast_response, min_distance=1)

    if len(keypoints) == 0:
        return (np.zeros((0, 2), dtype=np.double),
                np.zeros((0, ), dtype=np.double),
                np.zeros((0, ), dtype=np.double))

    mask = _mask_border_keypoints(octave_image.shape, keypoints,
                                  distance=16)
    keypoints = keypoints[mask]

    orientations = corner_orientations(octave_image, keypoints,
                                       OFAST_MASK)

    harris_response = corner_harris(octave_image, method='k',
                                    k=self.harris_k)
    responses = harris_response[keypoints[:, 0], keypoints[:, 1]]

    return keypoints, orientations, responses
def show_corners(image, title=None, ax=None, min_distance=20, corners=None):
    """Display a list of corners overlapping an image"""
    if not ax:
        fig, ax = plt.subplots(1)
    if corners is None:
        corners = ft.corner_peaks(ft.corner_harris(image),
                                  min_distance=min_distance)
    ax.imshow(image)
    # Convert coordinates to x and y lists
    y_corner, x_corner = zip(*corners)
    ax.plot(x_corner, y_corner, 'o')  # Plot corners
    if title:
        plt.title(title)
    ax.set_xlim(0, image.shape[1])
    ax.set_ylim(image.shape[0], 0)  # images use inverted y axes
    # fig.set_size_inches(np.array(fig.get_size_inches()) * 1.5)
    # plt.show()
    print("Number of corners:", len(corners))
def calc_blob_property(b, keys=None):
    props = {}
    # greycomatrix expects pixel values in [0, 256)
    greycovmat = greycomatrix(np.nan_to_num((b / 2.) + 128), [2], [0], 256,
                              symmetric=True, normed=True)
    props['grey_dissimilarity'] = greycoprops(greycovmat, 'dissimilarity')[0, 0]
    props['grey_energy'] = greycoprops(greycovmat, 'energy')[0, 0]
    props['aspect_ratio'] = b.shape[0] * 1. / b.shape[1]
    props['size'] = np.count_nonzero(~np.isnan(b))
    props['threshold_otsu'] = threshold_otsu(np.nan_to_num(b))
    b_bw = (b > props['threshold_otsu'])
    # fraction of non-zero pixels
    props['extent'] = 1. * np.count_nonzero(b_bw) / props['size']
    ncluster = ndimage.label(b_bw, structure=np.ones((3, 3)))[1]
    props['ncluster_frac'] = ncluster * 1. / np.count_nonzero(b_bw)
    corners = corner_peaks(corner_harris(b_bw), min_distance=1)
    props['corner_frac'] = len(corners) * 100. / np.count_nonzero(b_bw)
    props['median_intensity'] = np.nanmedian(abs(b))
    props['max_intensity'] = np.nanmax(abs(b))
    if keys:
        return [props[k] for k in keys]
    else:
        return props
checkerboard = data.checkerboard()
img_orig = np.zeros(list(checkerboard.shape) + [3])
img_orig[..., 0] = checkerboard
gradient_r, gradient_c = np.mgrid[0:img_orig.shape[0], 0:img_orig.shape[1]]
img_orig[..., 1] = gradient_r
img_orig[..., 2] = gradient_c
img_orig = rescale_intensity(img_orig)
img_orig_gray = rgb2gray(img_orig)

# warp synthetic image
tform = AffineTransform(scale=(0.9, 0.9), rotation=0.2, translation=(20, -10))
img_warped = warp(img_orig, tform.inverse, output_shape=(200, 200))
img_warped_gray = rgb2gray(img_warped)

# extract corners using Harris' corner measure
coords_orig = corner_peaks(corner_harris(img_orig_gray), threshold_rel=0.001,
                           min_distance=5)
coords_warped = corner_peaks(corner_harris(img_warped_gray),
                             threshold_rel=0.001, min_distance=5)

# determine sub-pixel corner position
coords_orig_subpix = corner_subpix(img_orig_gray, coords_orig, window_size=10)
coords_warped_subpix = corner_subpix(img_warped_gray, coords_warped,
                                     window_size=10)


def gaussian_weights(window_ext, sigma=1):
    y, x = np.mgrid[-window_ext:window_ext + 1, -window_ext:window_ext + 1]
    g = np.zeros(y.shape, dtype=np.double)
    g[:] = np.exp(-0.5 * (x**2 / sigma**2 + y**2 / sigma**2))
    g /= 2 * np.pi * sigma * sigma
    return g
def getMinorMajorRatio(image):
    image = image.copy()
    # Create the thresholded image to eliminate some of the background
    imagethr = np.where(image > np.mean(image), 0., 1.0)
    imagethr2 = np.where(image > np.mean(image) - 2 * np.std(image), 0., 1.0)

    # Dilate the image
    imdilated = morphology.dilation(imagethr, np.ones((4, 4)))

    # Create the label list
    label_list = measure.label(imdilated)
    label_list2 = imagethr2 * label_list
    label_list = imagethr * label_list
    label_list2 = label_list2.astype(int)
    label_list = label_list.astype(int)

    region_list = measure.regionprops(label_list, intensity_image=image)
    region_list2 = measure.regionprops(label_list2, intensity_image=image)
    maxregion, max2ndregion = getLargestRegions(region_list, label_list,
                                                imagethr)
    maxregion2, max2ndregion2 = getLargestRegions(region_list2, label_list2,
                                                  imagethr2)

    # guard against cases where the segmentation fails by providing zeros
    ratio = 0.0
    fillratio = 0.0
    largeeigen = 0.0
    smalleigen = 0.0
    eigenratio = 0.0
    solidity = 0.0
    perimratio = 0.0
    arearatio = 0.0
    orientation = 0.0
    centroid = (0.0, 0.0)
    cornercenter = 0.0
    cornerstd = 0.0
    lrdiff = 0.0
    tbdiff = 0.0
    hu1 = hu2 = hu3 = hu12 = hu13 = hu23 = 0.0
    whu1 = whu2 = whu3 = whu12 = whu13 = whu23 = 0.0
    extent = 0.0
    minintensity = maxintensity = meanintensity = 0.0
    intensityratio1 = intensityratio2 = intensityratio3 = 0.0

    if (maxregion is not None) and (maxregion.major_axis_length != 0.0):
        corners = corner_peaks(corner_harris(maxregion.image), min_distance=5)
        corners_subpix = corner_subpix(maxregion.image, corners,
                                       window_size=13)
        cornercentercoords = np.nanmean(corners_subpix, axis=0)
        cornerstdcoords = np.nanstd(corners_subpix, axis=0)
        ratio = maxregion.minor_axis_length * 1.0 / maxregion.major_axis_length
        largeeigen = maxregion.inertia_tensor_eigvals[0]
        smalleigen = maxregion.inertia_tensor_eigvals[1]
        fillratio = 0.0 if (maxregion2 is None or maxregion2.minor_axis_length == 0.0) else maxregion2.filled_area / (maxregion2.minor_axis_length * maxregion2.major_axis_length)
        solidity = 0.0 if maxregion2 is None else maxregion2.solidity
        hu1 = maxregion.moments_hu[1]
        hu2 = maxregion.moments_hu[2]
        hu3 = maxregion.moments_hu[3]
        hu12 = 0.0 if hu1 == 0.0 else hu2 / hu1
        hu13 = 0.0 if hu1 == 0.0 else hu3 / hu1
        hu23 = 0.0 if hu2 == 0.0 else hu3 / hu2
        whu1 = maxregion.weighted_moments_hu[1]
        whu2 = maxregion.weighted_moments_hu[2]
        whu3 = maxregion.weighted_moments_hu[3]
        whu12 = 0.0 if whu1 == 0.0 else whu2 / whu1
        whu13 = 0.0 if whu1 == 0.0 else whu3 / whu1
        whu23 = 0.0 if whu2 == 0.0 else whu3 / whu2
        extent = maxregion.extent
        minintensity = maxregion.min_intensity
        meanintensity = maxregion.mean_intensity
        maxintensity = maxregion.max_intensity
        intensityratio1 = 0.0 if maxintensity == 0.0 else meanintensity / maxintensity
        intensityratio2 = 0.0 if maxintensity == 0.0 else minintensity / maxintensity
        intensityratio3 = 0.0 if meanintensity == 0.0 else minintensity / meanintensity
        perimratio = 0.0 if maxregion.minor_axis_length == 0.0 else maxregion.perimeter / (maxregion.minor_axis_length * 4.0 + maxregion.major_axis_length * 4.0)
        eigenratio = 0.0 if largeeigen == 0.0 else smalleigen / largeeigen
        orientation = maxregion.orientation
        centroid = maxregion.centroid
        cornercentercoords = np.absolute(cornercentercoords - centroid) / maxregion.major_axis_length
        cornercenter = np.linalg.norm(cornercentercoords)
        cornerstdcoords = np.absolute(cornerstdcoords) / maxregion.major_axis_length
        cornerstd = np.linalg.norm(cornerstdcoords)
        left = np.sum(maxregion.image[:, maxregion.image.shape[1] // 2:])
        if maxregion.image.shape[1] % 2 == 0:
            right = np.sum(maxregion.image[:, :maxregion.image.shape[1] // 2])
        else:
            right = np.sum(maxregion.image[:, :maxregion.image.shape[1] // 2 + 1])
        lrdiff = np.abs((right - left) / (right + left))
        top = np.sum(maxregion.image[maxregion.image.shape[0] // 2:, :])
        if maxregion.image.shape[0] % 2 == 0:
            bottom = np.sum(maxregion.image[:maxregion.image.shape[0] // 2, :])
        else:
            bottom = np.sum(maxregion.image[:maxregion.image.shape[0] // 2 + 1, :])
        tbdiff = np.abs((top - bottom) / (top + bottom))
    else:
        cornercentercoords = (0.0, 0.0)
        cornerstdcoords = (0.0, 0.0)

    if (maxregion is not None) and (max2ndregion is not None):
        arearatio = max2ndregion.area / maxregion.area
    # print(perimratio)

    if np.isnan(cornercenter):
        cornercenter = 0.0
    if sum(np.isnan(cornercentercoords)) > 0.0:
        cornercentercoords = np.array([0.0, 0.0])
    if math.isnan(cornerstd):
        cornerstd = 0.0
    if sum(np.isnan(cornerstdcoords)) > 0.0:
        cornerstdcoords = np.array([0.0, 0.0])

    return (minintensity, meanintensity, maxintensity, intensityratio1,
            intensityratio2, intensityratio3, extent, lrdiff, tbdiff,
            cornercenter, cornercentercoords, cornerstd, cornerstdcoords,
            ratio, fillratio, eigenratio, solidity, hu1, hu2, hu3, hu12,
            hu13, hu23, whu1, whu2, whu3, whu12, whu13, whu23, perimratio,
            arearatio, orientation, centroid)
# HARRIS CORNER DETECTION
#
# Corner detection is based upon the change of the position vector with
# respect to arc length. The measure approximates the autocorrelation
# function in the direction (u, v). A measure of curvature is given by the
# minimum value obtained by considering the shifts (u, v) in the four main
# directions, that is, (1,0), (0,-1), (0,1) and (-1,0). The minimum is
# chosen because it agrees with the following two observations. First, if
# the pixel is in an edge defining a straight line, the response is small
# for a shift along the edge and large for a shift perpendicular to the
# edge; in this case, we should choose the small value since the curvature
# of the edge is small. Secondly, if the edge defines a corner, then all
# the shifts produce a large value. Thus, if we also choose the minimum,
# this value indicates high curvature.

from skimage import data
import matplotlib.pyplot as plt
from skimage.feature import corner_harris, corner_subpix, corner_peaks
from skimage.transform import warp, AffineTransform

tform = AffineTransform(scale=(1.3, 1.1), rotation=0, shear=0,
                        translation=(0, 0))
image = warp(data.coins(), tform.inverse, output_shape=(500, 500))

coords = corner_peaks(corner_harris(image), min_distance=5)
coords_subpix = corner_subpix(image, coords, window_size=13)

plt.gray()
plt.imshow(image, interpolation='nearest')
plt.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15, mew=5)
plt.plot(coords[:, 1], coords[:, 0], '.b', markersize=7)
plt.axis('off')
plt.show()
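# A small self-contained sketch (not from the original text) of the
# min-over-four-shifts autocorrelation measure the comment block above
# describes, evaluated at a single pixel (r, c):
import numpy as np

def min_shift_response(img, r, c, half=1):
    """Minimum sum of squared differences over the four main shifts."""
    win = img[r - half:r + half + 1, c - half:c + half + 1].astype(float)
    responses = []
    for dr, dc in [(1, 0), (0, -1), (0, 1), (-1, 0)]:
        shifted = img[r + dr - half:r + dr + half + 1,
                      c + dc - half:c + dc + half + 1].astype(float)
        responses.append(np.sum((win - shifted) ** 2))
    # small on straight edges (shift along the edge), large only at corners
    return min(responses)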
""" from skimage import data from skimage import transform as tf from skimage.feature import (match_descriptors, corner_peaks, corner_harris, plot_matches, BRIEF) from skimage.color import rgb2gray import matplotlib.pyplot as plt img1 = rgb2gray(data.astronaut()) tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100)) img2 = tf.warp(img1, tform) img3 = tf.rotate(img1, 25) keypoints1 = corner_peaks(corner_harris(img1), min_distance=5) keypoints2 = corner_peaks(corner_harris(img2), min_distance=5) keypoints3 = corner_peaks(corner_harris(img3), min_distance=5) extractor = BRIEF() extractor.extract(img1, keypoints1) keypoints1 = keypoints1[extractor.mask] descriptors1 = extractor.descriptors extractor.extract(img2, keypoints2) keypoints2 = keypoints2[extractor.mask] descriptors2 = extractor.descriptors extractor.extract(img3, keypoints3) keypoints3 = keypoints3[extractor.mask]
def featureExtractImage():
    malpa = io.imread('data/malpa.png')
    malpa = equalize_hist(rgb2gray(malpa))
    corners = corner_peaks(corner_harris(malpa), min_distance=2)
    show_corners(corners, malpa)