def featurePointMatching(image0, image1, decimation=1, n_keypoints=750):
    """Estimate a transformation model between two images."""
    # Optionally decimate both images (reduce their size) before detection.
    image0 = transform.rescale(image0, 1 / float(decimation))
    image1 = transform.rescale(image1, 1 / float(decimation))

    orb = ORB(n_keypoints=n_keypoints, fast_threshold=0.05)  # ORB detector

    # Get the keypoints and descriptors from the first image
    orb.detect_and_extract(image0)
    keypoints1 = orb.keypoints
    descriptors1 = orb.descriptors

    # Get the keypoints and descriptors from the second image
    orb.detect_and_extract(image1)
    keypoints2 = orb.keypoints
    descriptors2 = orb.descriptors

    # Match the descriptors
    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Select matched keypoints from both images, flipping (row, col) to (x, y)
    src = keypoints1[matches12[:, 0]][:, ::-1]
    dst = keypoints2[matches12[:, 1]][:, ::-1]

    # Robustly estimate the transform with the RANSAC algorithm
    model_robust, inliers = \
        ransac((src, dst), transform.EuclideanTransform,
               min_samples=6, residual_threshold=2)

    # Everything that is not an inlier is an outlier
    outliers = ~inliers
    return model_robust, inliers, outliers, src, dst
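# A follow-up sketch (not from the source) showing one way to consume the model
# returned above. With decimation=1, the estimated transform maps image0
# coordinates to image1 coordinates, so passing it to skimage.transform.warp as
# the inverse map resamples image1 into image0's frame.
model_robust, inliers, outliers, src, dst = featurePointMatching(image0, image1)
image1_aligned = transform.warp(image1, model_robust)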
def findH(img1, img2):
    from skimage.feature import ORB, match_descriptors

    # Convert both images to grayscale.
    img1_gray = skimage.color.rgb2gray(img1)
    img2_gray = skimage.color.rgb2gray(img2)

    # Detect and extract ORB keypoints and descriptors in each image.
    detector_extractor1 = ORB(n_keypoints=3000)
    detector_extractor1.detect_and_extract(img1_gray)
    detector_extractor2 = ORB(n_keypoints=3000)
    detector_extractor2.detect_and_extract(img2_gray)

    matches = match_descriptors(detector_extractor1.descriptors,
                                detector_extractor2.descriptors)
    match_pts1 = detector_extractor1.keypoints[matches[:, 0]].astype(int)
    match_pts2 = detector_extractor2.keypoints[matches[:, 1]].astype(int)

    # Flip (row, col) to (x, y) and estimate H with RANSAC.
    match_pts1 = np.flip(match_pts1, axis=1)
    match_pts2 = np.flip(match_pts2, axis=1)
    H_2to1, _ = computeHransac(match_pts1, match_pts2)
    H_2to1 = H_2to1 / H_2to1[2, 2]
    print('transform H:')
    print(H_2to1)
    return H_2to1
def test_keypoints_orb_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
    detector_extractor.detect(img)
    exp_rows = np.array(
        [435., 435.6, 376., 455., 434.88, 269., 375.6, 310.8, 413., 311.04])
    exp_cols = np.array(
        [180., 180., 156., 176., 180., 111., 156., 172.8, 70., 172.8])
    exp_scales = np.array([1., 1.2, 1., 1., 1.44, 1., 1.2, 1.2, 1., 1.728])
    exp_orientations = np.array([
        -175.64733392, -167.94842949, -148.98350192, -142.03599837,
        -176.08535837, -53.08162354, -150.89208271, 97.7693776,
        -173.4479964, 38.66312042
    ])
    exp_response = np.array([
        0.96770745, 0.81027306, 0.72376257, 0.5626413, 0.5097993,
        0.44351774, 0.39154173, 0.39084861, 0.39063076, 0.37602487
    ])
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)
    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def test_keypoints_orb_less_than_desired_no_of_keypoints(dtype):
    _img = _convert(img, dtype)
    detector_extractor = ORB(n_keypoints=15, fast_n=12, fast_threshold=0.33,
                             downscale=2, n_scales=2)
    detector_extractor.detect(_img)
    exp_rows = np.array([108., 203., 140., 65., 58.])
    exp_cols = np.array([293., 267., 202., 130., 291.])
    exp_scales = np.array([1., 1., 1., 1., 1.])
    exp_orientations = np.array(
        [151.93906, -56.90052, -79.46341, -59.42996, -158.26941])
    exp_response = np.array(
        [-0.1764169, 0.2652126, -0.0324343, 0.0400902, 0.2667641])
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 3)
    detector_extractor.detect_and_extract(_img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def epipolar_rectify(imL, imR, show_matches=True):
    descriptor_extractor = ORB(n_keypoints=2000)

    descriptor_extractor.detect_and_extract(imL)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(imR)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2,
                                  metric='hamming', cross_check=True)

    pts1 = keypoints1[matches12[:, 0], :]
    pts2 = keypoints2[matches12[:, 1], :]

    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    res, H1, H2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F, imL.shape, 10)

    if show_matches:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        plot_matches(ax, imL, imR, keypoints1, keypoints2, matches12)

    return H1, H2
def iris_scan_orb(request):
    from skimage import io
    from skimage.feature import match_descriptors, ORB
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS3.jpg'))  # Query image
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS6.jpg'))  # Comparison image

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query descriptors

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparison descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
    # print("Matched: ", len(matches12), " of ", len(descriptors1))
    percent = len(matches12) / len(descriptors1) * 100
    # print("Percent Match - ", percent, "%")
    # if percent > 80:
    #     print("Matched!")
    # else:
    #     print("Not Matched!")
    return render(request, 'scan.html', {'percent': percent})
def test_keypoints_orb_less_than_desired_no_of_keypoints():
    img = rgb2gray(lena())
    detector_extractor = ORB(n_keypoints=15, fast_n=12, fast_threshold=0.33,
                             downscale=2, n_scales=2)
    detector_extractor.detect(img)
    exp_rows = np.array([67., 247., 269., 413., 435., 230., 264., 330., 372.])
    exp_cols = np.array([157., 146., 111., 70., 180., 136., 336., 148., 156.])
    exp_scales = np.array([1., 1., 1., 1., 1., 2., 2., 2., 2.])
    exp_orientations = np.array([
        -105.76503839, -96.28973044, -53.08162354, -173.4479964,
        -175.64733392, -106.07927215, -163.40016243, 75.80865813,
        -154.73195911
    ])
    exp_response = np.array([
        0.13197835, 0.24931321, 0.44351774, 0.39063076, 0.96770745,
        0.04935129, 0.21431068, 0.15826555, 0.42403573
    ])
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)
    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def feature_registration(src, dst):
    """Register dst to src using feature detection."""
    # First convert both images to grayscale.
    src_gray = rgb2gray(src)
    dst_gray = rgb2gray(dst)

    # Scale down.
    src_scaled = transform.rescale(src_gray, 0.25)
    dst_scaled = transform.rescale(dst_gray, 0.25)

    orb = ORB(n_keypoints=1000, fast_threshold=0.05)

    orb.detect_and_extract(src_scaled)
    src_keypoints = orb.keypoints
    src_descriptors = orb.descriptors

    orb.detect_and_extract(dst_scaled)
    dst_keypoints = orb.keypoints
    dst_descriptors = orb.descriptors

    matches = match_descriptors(src_descriptors, dst_descriptors,
                                cross_check=True)

    # Flip matched keypoints from (row, col) to (x, y).
    src_points = src_keypoints[matches[:, 0]][:, ::-1]
    dst_points = dst_keypoints[matches[:, 1]][:, ::-1]

    model, inliers = ransac(
        (src_points, dst_points), ProjectiveTransform,
        min_samples=4, residual_threshold=2)
    return model, inliers
def iris_scan_orb_android(file_name):
    from skimage import io
    from skimage.feature import match_descriptors, ORB
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/' + file_name))  # Query image
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS9.jpg'))  # Comparison image

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query descriptors

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparison descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
    percent = len(matches12) / len(descriptors1) * 100
    return percent
def test_keypoints_orb_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
    detector_extractor.detect(img)
    exp_rows = np.array(
        [141., 108., 214.56, 131., 214.272, 67., 206., 177., 108., 141.])
    exp_cols = np.array(
        [323., 328., 282.24, 292., 281.664, 85., 260., 284., 328.8, 267.])
    exp_scales = np.array([1, 1, 1.44, 1, 1.728, 1, 1, 1, 1.2, 1])
    exp_orientations = np.array([
        -53.97446153, 59.5055285, -96.01885186, -149.70789506,
        -94.70171899, -45.76429535, -51.49752849, 113.57081195,
        63.30428063, -79.56091118
    ])
    exp_response = np.array([
        1.01168357, 0.82934145, 0.67784179, 0.57176438, 0.56637459,
        0.52248355, 0.43696175, 0.42992376, 0.37700486, 0.36126832
    ])
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)
    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def get_descriptor(img):
    descriptor_extractor = ORB(n_keypoints=100)
    descriptor_extractor.detect_and_extract(img)
    keypoints = descriptor_extractor.keypoints
    descriptors = descriptor_extractor.descriptors
    return keypoints, descriptors
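# A minimal sketch (not from the source) pairing get_descriptor with skimage's
# match_descriptors; img_a and img_b are assumed to be 2-D grayscale arrays
# already loaded by the caller.
from skimage.feature import match_descriptors

kp_a, desc_a = get_descriptor(img_a)
kp_b, desc_b = get_descriptor(img_b)
matches_ab = match_descriptors(desc_a, desc_b, cross_check=True)  # rows: (idx_a, idx_b)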
def test_keypoints_orb_less_than_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=15, fast_n=12, fast_threshold=0.33,
                             downscale=2, n_scales=2)
    detector_extractor.detect(img)
    exp_rows = np.array([58., 65., 108., 140., 203.])
    exp_cols = np.array([291., 130., 293., 202., 267.])
    exp_scales = np.array([1., 1., 1., 1., 1.])
    exp_orientations = np.array([
        -158.26941428, -59.42996346, 151.93905955, -79.46341354, -56.90052451
    ])
    exp_response = np.array(
        [0.2667641, 0.04009017, -0.17641695, -0.03243431, 0.26521259])
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)
    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def detect_kp_desc(img, method='orb', n_keypoints=2000, **args):
    """Find keypoints and their descriptors on the image.

    img: `np.array` of shape == WxHx3
        RGB image
    method: str
        Name of the method to use. Options are: ['orb', 'lf-net']
    n_keypoints: int
        Number of keypoints to find
    **args: dict
        Other parameters to pass to the keypoint detector without any changes

    return: tuple (2,)
        Coordinates and descriptors of the found keypoints
    """
    if method == 'orb':
        detector_extractor = ORB(n_keypoints=n_keypoints, **args)
        # detector_extractor = cv2.ORB_create(nfeatures=n_keypoints, **args)
    elif method == 'lf-net':
        # https://github.com/vcg-uvic/lf-net-release
        raise NotImplementedError()
    else:
        raise ValueError('Unknown method: %s' % method)

    detector_extractor.detect_and_extract(rgb2gray(img).astype(np.float64))
    return detector_extractor.keypoints, detector_extractor.descriptors
def test_keypoints_orb_less_than_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=15, fast_n=12, fast_threshold=0.33,
                             downscale=2, n_scales=2)
    detector_extractor.detect(img)
    exp_rows = np.array([67., 247., 269., 413., 435., 230., 264., 330., 372.])
    exp_cols = np.array([157., 146., 111., 70., 180., 136., 336., 148., 156.])
    exp_scales = np.array([1., 1., 1., 1., 1., 2., 2., 2., 2.])
    exp_orientations = np.array([
        -105.76503839, -96.28973044, -53.08162354, -173.4479964,
        -175.64733392, -106.07927215, -163.40016243, 75.80865813,
        -154.73195911
    ])
    exp_response = np.array([
        0.13197835, 0.24931321, 0.44351774, 0.39063076, 0.96770745,
        0.04935129, 0.21431068, 0.15826555, 0.42403573
    ])
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)
    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def test_descriptor_orb():
    detector_extractor = ORB(fast_n=12, fast_threshold=0.20)
    exp_descriptors = np.array([
        [ True, False,  True,  True, False, False, False, False, False, False],
        [False, False,  True,  True, False,  True,  True, False,  True,  True],
        [ True, False, False, False,  True, False,  True,  True,  True, False],
        [ True, False, False,  True, False,  True,  True, False, False, False],
        [False,  True,  True,  True, False, False, False,  True,  True, False],
        [False, False, False, False, False,  True, False,  True,  True,  True],
        [False,  True,  True,  True,  True, False, False,  True, False,  True],
        [ True,  True,  True, False,  True,  True,  True,  True, False, False],
        [ True,  True, False,  True,  True,  True,  True, False, False, False],
        [ True, False, False, False, False,  True, False, False,  True,  True],
        [ True, False, False, False,  True,  True,  True, False, False, False],
        [False, False,  True, False,  True, False, False,  True, False, False],
        [False, False,  True,  True, False, False, False, False, False,  True],
        [ True,  True, False, False, False,  True,  True,  True,  True,  True],
        [ True,  True,  True, False, False,  True, False,  True,  True, False],
        [False,  True,  True, False, False,  True,  True,  True,  True,  True],
        [ True,  True,  True, False, False, False, False,  True,  True,  True],
        [False, False, False, False,  True, False, False,  True,  True, False],
        [False,  True, False, False,  True, False, False, False,  True,  True],
        [ True, False,  True, False, False, False,  True,  True, False, False]
    ], dtype=bool)
    detector_extractor.detect(img)
    detector_extractor.extract(img, detector_extractor.keypoints,
                               detector_extractor.scales,
                               detector_extractor.orientations)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])
    detector_extractor.detect_and_extract(img)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])
def orb(img_path):
    image = PIL.Image.open(img_path).convert('L')
    img1 = np.array(image)
    img2 = tf.rotate(img1, 180)

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    fig, ax = plt.subplots(nrows=2, ncols=1)
    plt.gray()
    plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
    ax[0].axis('off')
    ax[0].set_title(img_path)
    plt.show()
    return matches12.shape[0]
def test_descriptor_orb():
    detector_extractor = ORB(fast_n=12, fast_threshold=0.20)
    exp_descriptors = np.array([
        [0, 1, 1, 1, 0, 1, 0, 1, 0, 1],
        [1, 1, 1, 0, 0, 1, 0, 0, 1, 1],
        [1, 0, 1, 1, 0, 0, 1, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
        [0, 1, 0, 0, 0, 0, 0, 0, 1, 0],
        [1, 1, 0, 1, 1, 1, 0, 0, 1, 1],
        [1, 1, 0, 1, 0, 0, 1, 0, 1, 1],
        [0, 0, 1, 0, 1, 0, 0, 1, 1, 0],
        [1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
        [0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        [1, 1, 0, 1, 0, 1, 0, 0, 1, 1],
        [1, 1, 1, 0, 0, 0, 1, 1, 1, 0],
        [1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 0, 1, 1, 1, 1, 0, 0],
        [1, 1, 0, 0, 1, 0, 0, 1, 0, 1],
        [1, 1, 0, 0, 0, 0, 1, 0, 0, 1],
        [0, 0, 0, 0, 1, 1, 1, 0, 1, 0],
        [0, 0, 0, 0, 1, 1, 1, 0, 0, 1],
        [0, 0, 0, 0, 0, 1, 1, 0, 1, 1],
        [0, 0, 0, 0, 1, 0, 1, 0, 1, 1]], dtype=bool)
    detector_extractor.detect(img)
    detector_extractor.extract(img, detector_extractor.keypoints,
                               detector_extractor.scales,
                               detector_extractor.orientations)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])
    detector_extractor.detect_and_extract(img)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])
def feature_extractor(image_path, options=None):
    """Extracts a set of features (described in the config file) from an image.

    Args:
        image_path: the path to the image
        options: the configuration file settings. In this case the settings
            should contain the relevant image-processing feature options.

    Return:
        an array of features, depending on the config options
    """
    features = []
    image = imread(image_path)

    if 'grey_required' in options:
        grey_image = rgb2grey(image)

    # GLCM features
    if 'glcm' in options:
        glcm_config = options['glcm']
        glcm_features = glcm.glcm_features(grey_image, glcm_config['modes'])
        features.append(glcm_features)

    # ORB features
    if 'orb' in options:
        orb_config = options['orb']
        orb_extractor = ORB(downscale=orb_config['downscale'],
                            n_scales=orb_config['n_scales'],
                            n_keypoints=orb_config['n_keypoints'],
                            fast_n=orb_config['fast_n'],
                            fast_threshold=orb_config['fast_threshold'],
                            harris_k=orb_config['harris_k'])
        orb_extractor.detect_and_extract(grey_image)
        # TODO add these to the config system
        features.append(orb_extractor.keypoints.tolist())
        # features.append(orb_extractor.scales.tolist())
        # features.append(orb_extractor.orientations.tolist())
        # features.append(orb_extractor.responses.tolist())
        # features.append(orb_extractor.descriptors.tolist())

    if 'kmeans' in options:
        k_image = np.array(image, dtype=np.float64) / 255
        w, h, d = original_shape = tuple(k_image.shape)
        assert d == 3
        image_array = np.reshape(k_image, (w * h, d))
        kmeans = KMeans(
            n_clusters=options['kmeans']['clusters']).fit(image_array)
        features.append(kmeans.cluster_centers_.tolist())

    return list(flatten.flatten(features))
def calc_orb(*imgs):
    descriptor_extractor = ORB(n_keypoints=100)
    for c_img in imgs:
        descriptor_extractor.detect_and_extract(c_img)
        yield {
            "keypoints": descriptor_extractor.keypoints,
            "descriptors": descriptor_extractor.descriptors,
        }
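# A usage sketch for calc_orb (an assumption: img_a and img_b are grayscale
# arrays). Each yielded dict holds the keypoints and descriptors of one input
# image, in order.
from skimage.feature import match_descriptors

feats_a, feats_b = calc_orb(img_a, img_b)
matches = match_descriptors(feats_a["descriptors"], feats_b["descriptors"],
                            cross_check=True)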
def register_image_pair(idx, path_img_target, path_img_source, path_out):
    """Register two images together.

    :param int idx: empty parameter for using the function in parallel
    :param str path_img_target: path to the target image
    :param str path_img_source: path to the source image
    :param str path_out: path for exporting the output
    :return tuple(str,float):
    """
    start = time.time()
    # load and denoise reference image
    img_target = io.imread(path_img_target)[..., :3]
    img_target = denoise_wavelet(img_target, wavelet_levels=7,
                                 multichannel=True)
    img_target_gray = rgb2gray(img_target)

    # load and denoise moving image
    img_source = io.imread(path_img_source)[..., :3]
    img_source = denoise_bilateral(img_source, sigma_color=0.05,
                                   sigma_spatial=2, multichannel=True)
    img_source_gray = rgb2gray(img_source)

    # detect ORB features on both images
    detector_target = ORB(n_keypoints=150)
    detector_source = ORB(n_keypoints=150)
    detector_target.detect_and_extract(img_target_gray)
    detector_source.detect_and_extract(img_source_gray)

    matches = match_descriptors(detector_target.descriptors,
                                detector_source.descriptors)
    # robustly estimate affine transform model with RANSAC
    model, _ = ransac(
        (detector_target.keypoints[matches[:, 0]],
         detector_source.keypoints[matches[:, 1]]),
        AffineTransform, min_samples=25, max_trials=500,
        residual_threshold=0.9,
    )

    # warp the target image with the estimated transformation
    path_img_warped = os.path.join(path_out, NAME_IMAGE_WARPED % idx)
    if model:
        img_warped = warp(img_target, model.inverse,
                          output_shape=img_target.shape[:2])
        try:
            io.imsave(path_img_warped, img_warped)
        except Exception:
            traceback.print_exc()
    else:
        warnings.warn("Image registration failed.", RuntimeWarning)
        path_img_warped = None
    # summarise experiment
    execution_time = time.time() - start
    return path_img_warped, execution_time
def selectFeatures(useList):
    DataSet = []
    LabelSet = []
    lengthV = []
    trainPaths = ['./fruit/' + c + '_train/' for c in classes]
    testPaths = ['./fruit/' + c + ' test/' for c in classes]
    for c in range(len(classes)):
        className = classes[c]
        path = trainPaths[c]
        detector = CENSURE()
        detector2 = ORB(n_keypoints=50)
        detector3 = BRIEF(patch_size=49)
        files = os.listdir(path)
        # sample 100 files per class
        files = random.sample(files, 100)
        nfiles = len(files)
        for i in range(nfiles):
            featureVector = []
            infile = files[i]
            img = io.imread(path + infile, as_grey=True)
            hist = np.histogram(img, bins=256)
            img = resize(img, (400, 400))
            detector2.detect_and_extract(img)
            detector.detect(img)
            fd = hog(img, orientations=9, pixels_per_cell=(32, 32),
                     cells_per_block=(1, 1), visualise=False)
            for h in hist:
                fd = np.append(fd, h)
            if useList[0]:
                fd = np.append(fd, [np.array(detector.keypoints).flatten()])
            if useList[1]:
                fd = np.append(fd, detector2.keypoints)
            if useList[2]:
                fd = np.append(fd, edgeExtract(img, 100))
            l1 = len(fd)
            corners = corner_peaks(corner_harris(img), min_distance=1)
            if useList[3]:
                fd = np.append(fd, corners)
            lengthV.append(len(fd))
            DataSet.append(fd)
            ind = classes.index(className)
            LabelSet.append(ind)
    # pad all feature vectors to the maximum length
    max_len = np.amax(lengthV)
    lengthV = []
    DataSet2 = []
    for d in DataSet:
        d = np.pad(d, (0, max_len - len(d)), 'constant')
        DataSet2.append(d)
        lengthV.append(len(d))
    DataSet = DataSet2
    res = 0
    # perform gridsearch with one thread
    if __name__ == '__main__':
        res = gridSearch(DataSet, LabelSet, False)
    return res
def get_matrix(image_tif_bgrn, image_jpg_bgr, verbose=False):
    """Get similarity transform matrix.

    ORB limitation: https://github.com/scikit-image/scikit-image/issues/1472
    """
    im_tif_adjusted = match_color_curve_tif2jpg(image_tif_bgrn, image_jpg_bgr)
    jpg_gray = cv2.cvtColor(image_jpg_bgr, cv2.COLOR_BGR2GRAY).astype(np.uint8)
    tif_gray = cv2.cvtColor(im_tif_adjusted,
                            cv2.COLOR_BGR2GRAY).astype(np.uint8)

    # This number of keypoints is large enough for robust results,
    # but low enough to run quickly.
    number_of_keypoints = 100

    # Initialize ORB
    orb = ORB(n_keypoints=number_of_keypoints, fast_threshold=0.05)
    orb2 = ORB(n_keypoints=number_of_keypoints, fast_threshold=0.05)

    try:
        # Detect keypoints
        orb.detect_and_extract(jpg_gray)
        keypoints_jpg = orb.keypoints
        descriptors_jpg = orb.descriptors
        orb2.detect_and_extract(tif_gray)
        keypoints_tif = orb2.keypoints
        descriptors_tif = orb2.descriptors
    except IndexError:
        raise KeypointDetectionException('ORB Keypoint detection failed')

    # Match descriptors between images
    matches = match_descriptors(descriptors_jpg, descriptors_tif,
                                cross_check=True)

    # Select keypoints from
    #   * source (image to be registered)
    #   * target (reference image)
    src = keypoints_jpg[matches[:, 0]][:, ::-1]
    dst = keypoints_tif[matches[:, 1]][:, ::-1]

    model_robust, inliers = ransac((src, dst), TranslationTransform,
                                   min_samples=4, residual_threshold=1,
                                   max_trials=300)
    if verbose:
        print(inliers)
        print("number of matching keypoints", np.sum(inliers))

    if inliers is None or np.sum(inliers) < 3 or model_robust is None:
        raise ValueError('Possible mismatched JPG and TIF')

    if is_translational(model_robust):
        # We assume src and dst are not rotated relative to each other; remove
        # any rotational noise introduced during normalization/centering in the
        # transform estimate function.
        model_robust.params[0, 0] = 1.0
        model_robust.params[1, 1] = 1.0
        return model_robust
    else:
        raise ValueError('Invalid Model')
def get_displacement(image0, image1):
    """Get the displacement (in pixels) between two images using scikit-image.

    Likely less accurate than an OpenCV-based implementation.

    :param image0: reference image
    :param image1: target image
    :return: median (row, col) displacement
    """
    from skimage.feature import (match_descriptors, ORB, plot_matches)
    from skimage.color import rgb2gray
    from scipy.spatial.distance import hamming
    from scipy import misc

    image0_gray = rgb2gray(image0)
    image1_gray = rgb2gray(image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(image0_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(image1_gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches based on Hamming distance; smaller is better.
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # Collect displacements from the first 10 matches.
    dx_list = []
    dy_list = []
    for mat in matches12[:10]:
        # Get the matching keypoints for each of the images;
        # keypoints are stored as (row, col).
        img1_idx = mat[0]
        img2_idx = mat[1]
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dx_list.append(abs(x1 - x2))
        dy_list.append(abs(y1 - y2))

    dx_median = np.median(np.asarray(dx_list, dtype=np.double))
    dy_median = np.median(np.asarray(dy_list, dtype=np.double))
    # plot_matches(image0, image1, descriptors1, descriptors2, matches12[:10])
    return dx_median, dy_median
class PanormaGroup(GroupChecker):
    """Check whether a new image can be considered part of a panorama with the
    previous images.

    Based on: http://nbviewer.ipython.org/github/scikit-image/skimage-demos/blob/master/pano/pano.ipynb?raw=true
    """
    def __init__(self, name, startGID=0):
        super(PanormaGroup, self).__init__(name, startGID=startGID)
        # "Oriented FAST and rotated BRIEF" feature detector
        self.orb = ORB(n_keypoints=4000, fast_threshold=0.05)
        # self.ImagesWithOverlap = []  # List to store images which have overlap
        # List of tuples storing ORB (keypoints, descriptors)
        self.ImagesKeypointsDescriptors = []
        # Minus one to compensate for the increment which will happen for
        # the first image
        self.CurrentGroupID -= 1

    def NextGID(self, image):
        """Calculate the next Group ID for the input image."""
        NewImg = self.LoadImage(image, Greyscale=True, scale=0.25)
        self.orb.detect_and_extract(NewImg)
        NewImgKeyDescr = (self.orb.keypoints, self.orb.descriptors)
        for PreImgKeyDescr in reversed(self.ImagesKeypointsDescriptors):
            # Check for overlap
            matcheOfDesc = match_descriptors(PreImgKeyDescr[1],
                                             NewImgKeyDescr[1],
                                             cross_check=True)
            # Select keypoints from the source (image to be registered)
            # and target (reference image)
            src = NewImgKeyDescr[0][matcheOfDesc[:, 1]][:, ::-1]
            dst = PreImgKeyDescr[0][matcheOfDesc[:, 0]][:, ::-1]
            model_robust, inliers = ransac((src, dst), ProjectiveTransform,
                                           min_samples=4,
                                           residual_threshold=1,
                                           max_trials=300)
            NumberOfTrueMatches = np.sum(inliers)  # len(inliers[inliers])
            if NumberOfTrueMatches > 100:
                # Image has overlap
                logger.debug('Image {0} found a match! (No: of Matches={1})'
                             .format(image, NumberOfTrueMatches))
                break
            else:
                logger.debug('Image {0} not matching..(No: of Matches={1})'
                             .format(image, NumberOfTrueMatches))
                continue
        else:
            # None of the images in the for loop has any overlap,
            # so this starts a new group.
            self.ImagesKeypointsDescriptors = []  # Erase all previous group items
            # self.ImagesWithOverlap = []
            # Increment Group ID
            self.CurrentGroupID += 1
            logger.debug('Starting a new Panorama group (GID={0})'
                         .format(self.CurrentGroupID))
        # Append the latest image to the current group
        self.ImagesKeypointsDescriptors.append(NewImgKeyDescr)
        # self.ImagesWithOverlap.append(NewImg)
        # Return the current group ID
        return self.CurrentGroupID
def main():
    # save_frames()
    meta_dir = '../scratch/matching_im'
    in_dir = os.path.join(meta_dir, 'sir_holger')
    im_format = 'si_%d_20181102111500.jpg'
    for views in [[0, 1], [1, 2], [2, 3], [3, 0]]:
        ims = [os.path.join(in_dir, im_format % view) for view in views]
        out_file = os.path.join(in_dir, 'correspondences_%d_%d.jpg'
                                % (views[0], views[1]))
        img_left = rescale(io.imread(ims[0]), scale=0.25).squeeze()
        img_right = rescale(io.imread(ims[1]), scale=0.25).squeeze()

        # Find sparse feature correspondences between left and right image.
        descriptor_extractor = ORB()

        descriptor_extractor.detect_and_extract(img_left)
        keypoints_left = descriptor_extractor.keypoints
        descriptors_left = descriptor_extractor.descriptors

        descriptor_extractor.detect_and_extract(img_right)
        keypoints_right = descriptor_extractor.keypoints
        descriptors_right = descriptor_extractor.descriptors

        matches = match_descriptors(descriptors_left, descriptors_right,
                                    cross_check=True)

        # Estimate the epipolar geometry between the left and right image.
        model, inliers = ransac((keypoints_left[matches[:, 0]],
                                 keypoints_right[matches[:, 1]]),
                                FundamentalMatrixTransform, min_samples=8,
                                residual_threshold=1, max_trials=5000)

        inlier_keypoints_left = keypoints_left[matches[inliers, 0]]
        inlier_keypoints_right = keypoints_right[matches[inliers, 1]]

        print(f"Number of matches: {matches.shape[0]}")
        print(f"Number of inliers: {inliers.sum()}")

        plt.figure()
        plt.gray()
        plot_matches(plt.gca(), img_left, img_right, keypoints_left,
                     keypoints_right, matches[inliers], only_matches=True)
        plt.gca().axis("off")
        plt.gca().set_title("Inlier correspondences")
        plt.savefig(out_file)
        plt.close()
        print(out_file)
        visualize.writeHTMLForFolder(in_dir)
def getORB(img, kpn=200):
    # Set up the ORB feature extractor.
    descriptor_extractor = ORB(n_keypoints=kpn)
    # Detect keypoints and compute their descriptors.
    descriptor_extractor.detect_and_extract(img)
    keys = descriptor_extractor.keypoints
    # Convert the boolean descriptors to numbers.
    descs = descriptor_extractor.descriptors * 1.0
    return keys, descs
def image_features_orb(img, keypoints):
    # X is the feature vector, with one row of features per image.
    Xsize = 2 * keypoints
    X = np.zeros(Xsize, dtype=float)
    # Extract keypoints using scikit-image's ORB detector.
    orb = ORB(downscale=1.2, n_scales=8, n_keypoints=keypoints,
              fast_n=4, fast_threshold=0.00001, harris_k=0.01)
    orb.detect_and_extract(img)
    X[0:Xsize] = np.reshape(orb.keypoints, (1, Xsize))
    return X
def load_descriptors(file_names, num_keypoints=200):
    # Load each image and extract its ORB descriptors.
    descriptor_extractor = ORB(n_keypoints=num_keypoints)
    descriptors = []
    for im_path in file_names:
        img = plt.imread("data/" + im_path)
        img = rgb2gray(img)
        descriptor_extractor.detect_and_extract(img)
        descriptors.append(descriptor_extractor.descriptors)
    return np.array(descriptors)
def orb_extractor(img, n_keypoints=100):
    """Try ORB binary descriptors using binaries created by Otsu's method."""
    # Note: n_keypoints must be passed by keyword, since ORB's first
    # positional parameter is `downscale`.
    descriptor_extractor = ORB(n_keypoints=n_keypoints)

    # Extract descriptors for the original image.
    descriptor_extractor.detect_and_extract(img)
    keypoints = descriptor_extractor.keypoints
    descriptors = descriptor_extractor.descriptors

    return keypoints, descriptors
def calculate(self, resource):
    except_image_only(resource)
    im = image2numpy(resource.image, remap='gray')
    extractor = ORB()
    extractor.detect_and_extract(im)
    return (extractor.descriptors,
            extractor.keypoints[:, 0],
            extractor.keypoints[:, 1],
            extractor.responses,
            extractor.scales,
            extractor.orientations)
def orb_descriptors(img, keypoints=800):
    """Compute ORB descriptors.

    Params:
        img - the image
        keypoints - maximum number of detector keypoints
    Return:
        np.array - array of descriptors (at least two-dimensional)
    """
    extractor = ORB(n_keypoints=keypoints)
    extractor.detect_and_extract(img)
    return extractor.descriptors
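# Example usage (a sketch, not from the source; img_a and img_b are
# illustrative names): since ORB descriptors are binary, match_descriptors
# uses the Hamming metric for them by default.
from skimage.feature import match_descriptors

d1 = orb_descriptors(img_a)
d2 = orb_descriptors(img_b)
pairs = match_descriptors(d1, d2, cross_check=True)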
def HarryPotterize():
    # We use ORB descriptors, but you could use something else.
    from skimage.feature import ORB, match_descriptors

    # YOUR CODE HERE
    im1 = skimage.io.imread('../data/cv_cover.jpg')
    gray1 = skimage.color.rgb2gray(im1)
    im2 = skimage.io.imread('../data/cv_desk.png')
    gray2 = skimage.color.rgb2gray(im2)
    im3 = skimage.io.imread('../data/hp_cover.jpg')

    # Specify the ORB descriptor
    orb = ORB(n_keypoints=3000)

    # Detect features in image 1
    orb.detect_and_extract(gray1)
    locations_1 = orb.keypoints
    descriptors_1 = orb.descriptors

    # Detect features in image 2
    orb.detect_and_extract(gray2)
    locations_2 = orb.keypoints
    descriptors_2 = orb.descriptors

    # Match descriptors
    m = match_descriptors(descriptors_1, descriptors_2)

    # Locations of matching points in the first image, as homogeneous (x, y, 1)
    l_m_1 = locations_1[m[:, 0]]
    locs0 = np.flip(l_m_1, 1)
    one_array_1 = np.ones([locs0.shape[0], 1])
    locations_first = np.hstack((locs0, one_array_1))

    # Locations of matching points in the second image
    l_m_2 = locations_2[m[:, 1]]
    locs1 = np.flip(l_m_2, 1)
    one_array_2 = np.ones([locs1.shape[0], 1])
    locations_second = np.hstack((locs1, one_array_2))

    # Compute the RANSAC homography
    bestH2to1, inliers = computeHransac(locations_second, locations_first)

    # Generate a composite image
    composite_img = compositeH(bestH2to1, resize(im3, im1.shape), im2)
    skimage.io.imshow(composite_img)
    skimage.io.show()

    # Print the final H matrix
    H_final = bestH2to1 / bestH2to1[2, 2]
    print(H_final)
    return
def FindRetinaFeatures(Image):
    """Find strong features in the image to use for registration.

    :param Image: the image to find the features in
    :return: keypoints and descriptors
    """
    Image = gaussian_filter(Image, 3)
    orb = ORB(n_keypoints=200)
    orb.detect_and_extract(Image)
    keypoints = orb.keypoints
    descriptors = orb.descriptors
    return keypoints, descriptors
def orb_feature(image):
    """Extract ORB features from an image.

    :param image: BGR image (OpenCV convention)
    :return: keypoint coordinates
    """
    image_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    orb = ORB(n_keypoints=50)
    orb.detect_and_extract(image_gray)
    descriptors = orb.descriptors
    keypoints = orb.keypoints
    return keypoints
def orb_extractor_generator(stack):
    """ORB binary descriptor generator.

    Yields one descriptor object per image. The descriptor object encodes
    both keypoints and descriptors.
    """
    # Set parameters
    number_of_keypoints = 10

    # Build a descriptor_extractor object for each image in the stack.
    for img in stack:
        descriptor_extractor = ORB(n_keypoints=number_of_keypoints)
        descriptor_extractor.detect_and_extract(img)
        yield descriptor_extractor
def getDisplacement(Image0, Image1):
    Image0Gray = rgb2gray(Image0)
    Image1Gray = rgb2gray(Image1)

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(Image0Gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(Image1Gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches based on Hamming distance; smaller is better.
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # Collect displacements from the first 10 matches.
    dxList = []
    dyList = []
    for mat in matches12[:10]:
        # Get the matching keypoints for each of the images;
        # keypoints are stored as (row, col).
        img1_idx = mat[0]
        img2_idx = mat[1]
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dxList.append(abs(x1 - x2))
        dyList.append(abs(y1 - y2))

    dxMedian = np.median(np.asarray(dxList, dtype=np.double))
    dyMedian = np.median(np.asarray(dyList, dtype=np.double))
    # plot_matches expects an Axes plus keypoints (not descriptors), e.g.:
    # fig, ax = plt.subplots()
    # plot_matches(ax, Image0, Image1, keypoints1, keypoints2, matches12[:10])
    return dxMedian, dyMedian
def get_translation_tool(self, n_keypoints=1000):
    # Convert images to grayscale.
    src_image = rgb2gray(self.src_image)
    dst_image = rgb2gray(self.dst_image)

    # Instantiate an ORB object, which can extract features and descriptors
    # from images. Set the number of features to find (more = more accurate).
    descriptor_extractor = ORB(n_keypoints=n_keypoints)

    # Extract features and descriptors from the source image.
    descriptor_extractor.detect_and_extract(src_image)
    self.keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    # Extract features and descriptors from the destination image.
    descriptor_extractor.detect_and_extract(dst_image)
    self.keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    # Match the descriptors, rating how similar each pair is.
    self.matches12 = match_descriptors(descriptors1, descriptors2,
                                       cross_check=True)

    # Select coordinates from the source and destination images based on the
    # indices returned by match_descriptors.
    src = self.keypoints1[self.matches12[:, 0]][:, ::-1]
    dst = self.keypoints2[self.matches12[:, 1]][:, ::-1]

    # Filter out the outliers and estimate the transformation matrix from the
    # inliers only.
    model_robust, inliers = \
        ransac((src, dst), ProjectiveTransform,
               min_samples=4, residual_threshold=2)

    # "model_robust" contains the transformation matrix and can translate any
    # coordinate point from the source to the destination image.
    return model_robust, inliers
import numpy as np
import matplotlib.pyplot as plt

from skimage import data
from skimage.color import rgb2gray
from skimage.feature import match_descriptors, ORB, plot_matches
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform

np.random.seed(0)

img_left, img_right, groundtruth_disp = data.stereo_motorcycle()
img_left, img_right = map(rgb2gray, (img_left, img_right))

# Find sparse feature correspondences between left and right image.
descriptor_extractor = ORB()

descriptor_extractor.detect_and_extract(img_left)
keypoints_left = descriptor_extractor.keypoints
descriptors_left = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img_right)
keypoints_right = descriptor_extractor.keypoints
descriptors_right = descriptor_extractor.descriptors

matches = match_descriptors(descriptors_left, descriptors_right,
                            cross_check=True)

# Estimate the epipolar geometry between the left and right image.
model, inliers = ransac((keypoints_left[matches[:, 0]],
                         keypoints_right[matches[:, 1]]),
                        FundamentalMatrixTransform, min_samples=8,
                        residual_threshold=1, max_trials=5000)
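# A plausible continuation (it mirrors the identical RANSAC pattern in the
# multi-view snippet earlier in this section): keep only the inlier
# correspondences and report the counts.
inlier_keypoints_left = keypoints_left[matches[inliers, 0]]
inlier_keypoints_right = keypoints_right[matches[inliers, 1]]
print(f"Number of matches: {matches.shape[0]}")
print(f"Number of inliers: {inliers.sum()}")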
def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    fig, axes = plt.subplots(7, 7, figsize=(7, 6), sharex=True, sharey=True)
    fig.delaxes(axes[0][0])
    ssims = numpy.zeros((len(BurgerElement.__members__),
                         len(BurgerElement.__members__)), dtype=float)
    mses = numpy.zeros((len(BurgerElement.__members__),
                        len(BurgerElement.__members__)), dtype=float)
    for i, layer in enumerate(BurgerElement.__members__):
        template = os.path.join(canonical_dir, '%s.png' % layer)
        img1 = imread(template)
        # img1_padded = numpy.zeros((WIDTH, HEIGHT, 3), dtype=numpy.uint8)
        img1_padded = numpy.resize([255, 255, 255], (WIDTH, HEIGHT, 3))
        s = img1.shape
        w = s[0]
        h = s[1]
        nb = img1_padded.shape[0]
        na = img1.shape[0]
        lower1 = (nb) // 2 - (na // 2)
        upper1 = (nb // 2) + (na // 2)
        nb = img1_padded.shape[1]
        na = img1.shape[1]
        lower2 = (nb) // 2 - (na // 2)
        upper2 = (nb // 2) + (na // 2)
        img1_padded[lower1:upper1, lower2:upper2] = img1
        img1_padded_float = img1_padded.astype(numpy.float64) / 255.
        print(img1_padded_float.shape)
        img1_gray = rgb2gray(img1_padded_float)
        descriptor_extractor = ORB()
        try:
            descriptor_extractor.detect_and_extract(img1_gray)
        except RuntimeError:
            continue
        keypoints1 = descriptor_extractor.keypoints
        descriptors1 = descriptor_extractor.descriptors
        axes[i][0].imshow(img1_padded_float)
        axes[i][0].set_title("Template image")
        for j, layer2 in enumerate(BurgerElement.__members__):
            rot, tx, ty, scale = get_random_orientation()
            img2 = draw_example(layer2, WIDTH, HEIGHT, rot, tx, ty, scale)
            # match = os.path.join(canonical_dir, '%s.png' % layer2)
            # img2 = imread(match)
            img2_padded = numpy.resize([255, 255, 255], (WIDTH, HEIGHT, 3))
            s = img2.shape
            img2_padded[:s[0], :s[1]] = img2
            img2_padded_float = img2_padded.astype(numpy.float64) / 255.
            img2_gray = rgb2gray(img2_padded_float)
            try:
                descriptor_extractor.detect_and_extract(img2_gray)
            except RuntimeError:
                continue
            keypoints2 = descriptor_extractor.keypoints
            descriptors2 = descriptor_extractor.descriptors
            matches12 = match_descriptors(descriptors1, descriptors2,
                                          cross_check=True)
            src = keypoints2[matches12[:, 1]][:, ::-1]
            dst = keypoints1[matches12[:, 0]][:, ::-1]
            model_robust, inliers = \
                ransac((src, dst), SimilarityTransform,
                       min_samples=4, residual_threshold=2)
            if not model_robust:
                print("bad")
                continue
            img2_transformed = transform.warp(img2_padded_float,
                                              model_robust.inverse,
                                              mode='constant', cval=1)
            sub = img2_transformed - img1_padded_float
            ssim = compare_ssim(img2_transformed, img1_padded_float,
                                win_size=5, multichannel=True)
            mse = compare_mse(img2_transformed, img1_padded_float)
            ssims[i, j] = ssim
            mses[i, j] = mse
            axes[0][j].imshow(img2_padded_float)
            axes[0][j].set_title("Match image")
            axes[i][j].imshow(img2_transformed)
            axes[i][j].set_title("Transformed image")
            axes[i][j].set_xlabel("SSIM: %9.4f MSE: %9.4f" % (ssim, mse))
            # ax = plt.gca()
            # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)
    print(ssims)
    print(numpy.argmax(ssims, axis=1))
    print(numpy.argmin(mses, axis=1))
    plt.show()
height, width, channels = img.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
del img

# Make a PIL image so we can use PIL.Image.thumbnail to resize if needed.
gray_ = Image.fromarray(gray)

# If the dimensions exceed the desired size, resize keeping the aspect ratio.
m, n = 512, 512
if height > m or width > n:
    gray_.thumbnail((m, n), Image.ANTIALIAS)

orb = ORB(n_keypoints=100)
try:
    # ORB expects a numpy array, so convert the PIL image back first.
    orb.detect_and_extract(np.asarray(gray_))
except IndexError:
    print(file_names[i] + " had an issue.")
    issues.append(file_names[i])
    continue
kp = orb.keypoints
des = orb.descriptors
print(len(des))

# Store keypoint features.
temp_array = []
temp = pickle_keypoints(kp, des)
temp_array.append(temp)
pickle.dump(temp_array, open("features/" + folder1 + "/orb/gray/" +
                             file_names[i][:-5] + "_orb.pkl", "wb"))
# temp = str(float((i+1)*100/N))
def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    template = os.path.join(canonical_dir, 'patty.png')
    img1 = imread(template)
    # img1_padded = numpy.zeros((256, 256, 3), dtype=numpy.uint8)
    img1_padded = numpy.resize([255, 255, 255], (256, 256, 3))
    s = img1.shape
    img1_padded[:s[0], :s[1]] = img1
    img1_gray = rgb2gray(img1)
    descriptor_extractor = ORB()
    descriptor_extractor.detect_and_extract(img1_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors
    # g = glob.glob(os.path.join(image_base_dir, 'patty*.nobox.png'))
    # for moving in g:
    while True:
        rot, tx, ty, scale = get_random_orientation()
        # img2 = imread(moving)
        img2 = draw_example('patty', 256, 256, rot, tx, ty, scale)
        img2_gray = rgb2gray(img2)
        try:
            descriptor_extractor.detect_and_extract(img2_gray)
        except RuntimeError:
            continue
        keypoints2 = descriptor_extractor.keypoints
        descriptors2 = descriptor_extractor.descriptors
        matches12 = match_descriptors(descriptors1, descriptors2,
                                      cross_check=True)
        src = keypoints2[matches12[:, 1]][:, ::-1]
        dst = keypoints1[matches12[:, 0]][:, ::-1]
        model_robust, inliers = \
            ransac((src, dst), SimilarityTransform,
                   min_samples=4, residual_threshold=2)
        if not model_robust:
            print("bad")
            continue
        img2_transformed = transform.warp(img2, model_robust.inverse,
                                          mode='constant', cval=1)
        img1_padded_float = img1_padded.astype(numpy.float64) / 255.
        sub = img2_transformed - img1_padded_float
        print(compare_ssim(img2_transformed, img1_padded_float,
                           win_size=5, multichannel=True))
        fig, axes = plt.subplots(2, 2, figsize=(7, 6),
                                 sharex=True, sharey=True)
        ax = axes.ravel()
        ax[0].imshow(img1_padded_float)
        ax[1].imshow(img2)
        ax[1].set_title("Template image")
        ax[2].imshow(img2_transformed)
        ax[2].set_title("Matched image")
        ax[3].imshow(sub)
        ax[3].set_title("Subtracted image")
        # plt.gray()
        # ax = plt.gca()
        # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)
        plt.show()
# Transform the image using the skimage.transform library.
# "rotate" does what you might expect.
schroedinger_rotate = tf.rotate(schroedinger, 180)

# This sets up a transformation that changes the image's scale, rotates it,
# and moves it. "warp" then applies that transformation to the image.
tform = tf.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                           translation=(0, -200))
schroedinger_warped = tf.warp(schroedinger, tform)

# ORB is an algorithm that detects good features in an image and then
# describes them in a compact way. The descriptions can then be matched
# across multiple images.
descriptor_extractor = ORB(n_keypoints=200)

# Apply the ORB algorithm to our images.
descriptor_extractor.detect_and_extract(schroedinger)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(schroedinger_rotate)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(schroedinger_warped)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors

# See which descriptors match across the images.
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
def test_no_descriptors_extracted_orb():
    img = np.ones((128, 128))
    detector_extractor = ORB()
    with testing.raises(RuntimeError):
        detector_extractor.detect_and_extract(img)
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
import numpy as np

test = np.array([2, 6, 4, 8, 9])

descriptor_extractor = ORB(n_keypoints=200)
# Note: this raises an error, since ORB expects a 2-D grayscale image,
# not a 1-D array.
descriptor_extractor.detect_and_extract(test)
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

img1 = rgb2gray(data.coins())
img2 = tf.rotate(img1, 180)
tform = tf.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                           translation=(0, -200))
img3 = tf.warp(img1, tform)

descriptor_extractor = ORB(n_keypoints=200)

descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img3)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

fig, ax = plt.subplots(nrows=2, ncols=1)
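# A plausible continuation following the standard scikit-image gallery pattern:
# visualize both match sets on the subplot axes created above.
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()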