Example #1
def epipolar_rectify(imL,imR,show_matches=True):
    descriptor_extractor = ORB(n_keypoints=2000)
    
    descriptor_extractor.detect_and_extract(imL)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors    
    
    descriptor_extractor.detect_and_extract(imR)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors        
    
    matches12 = match_descriptors(descriptors1, descriptors2,metric='hamming', cross_check=True)
    
    pts1=keypoints1[matches12[:,0],:]
    pts2=keypoints2[matches12[:,1],:]    
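    # Note: skimage's ORB returns keypoints in (row, col) order, while OpenCV routines
    # such as cv2.findFundamentalMat expect (x, y) coordinates, so flipping the
    # columns (e.g. pts[:, ::-1]) may be needed before the calls below.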
    
    
    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_RANSAC)
    pts1 = pts1[mask.ravel()==1]
    pts2 = pts2[mask.ravel()==1]
    
    res,H1,H2=cv2.stereoRectifyUncalibrated(pts1,pts2,F,imL.shape,10)
    
    if show_matches:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        plot_matches(ax, imL, imR, keypoints1, keypoints2, matches12)    
    
    return H1,H2
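A brief usage sketch for the function above, assuming imL and imR are grayscale images loaded elsewhere (hypothetical names); note that cv2.warpPerspective takes the output size as (width, height):

H1, H2 = epipolar_rectify(imL, imR, show_matches=False)
h, w = imL.shape
rectL = cv2.warpPerspective(imL.astype('float32'), H1, (w, h))
rectR = cv2.warpPerspective(imR.astype('float32'), H2, (w, h))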
Example #2
    def __init__(self, test_image, orb_file, verbose, testall):
        self.test_image = test_image
        self.orb_file = orb_file
        self.verbose = verbose
        self.testall = testall
        self.test_image_shape = None

        # training ORB features
        self.train_orb_features = np.load(self.orb_file)

        self.keypoints_test = None
        self.descriptors_test = None
        self.matches = None

        # test set (30 images from the original data)
        self.test_dir = "./test/"
        self.labels_file = os.path.join(self.test_dir, "labels.txt")
        self.test_dict = defaultdict(list)

        # predictions
        self.predicted_kprc = [
        ]  # (n, 2) for n train image features, stores n predicted keypoint in r, c format
        self.predicted_centroid_rc = np.array(
            [0, 0])  # final predicted r, c location of the object

        # clustering params
        self.db_thresh = 25  # 25 pixel DBSCAN clustering threshold
        self.descriptor_extractor = ORB(n_keypoints=50,
                                        fast_n=9,
                                        fast_threshold=0.15)

        # evaluation
        self.pck_at_dot05_thresh = 0.05
Example #3
def iris_scan_orb(request):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS3.jpg'))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS6.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # print("Matched: ", len(matches12), " of ", len(descriptors1))
    percent = len(matches12) / len(descriptors1) * 100

    # print("Percent Match - ", percent, "%")

    """if percent > 80:
        print("Matched!")
    else:
        print("Not Matched!")"""

    return render(request, 'scan.html', {'percent': percent})
Example #4
def test_keypoints_orb_less_than_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=15, fast_n=12,
                             fast_threshold=0.33, downscale=2, n_scales=2)
    detector_extractor.detect(img)

    exp_rows = np.array([  67.,  247.,  269.,  413.,  435.,  230.,  264.,
                          330.,  372.])
    exp_cols = np.array([ 157.,  146.,  111.,   70.,  180.,  136.,  336.,
                          148.,  156.])

    exp_scales = np.array([ 1.,  1.,  1.,  1.,  1.,  2.,  2.,  2.,  2.])

    exp_orientations = np.array([-105.76503839,  -96.28973044,  -53.08162354,
                                 -173.4479964 , -175.64733392, -106.07927215,
                                 -163.40016243,   75.80865813, -154.73195911])

    exp_response = np.array([ 0.13197835,  0.24931321,  0.44351774,
                              0.39063076,  0.96770745,  0.04935129,
                              0.21431068,  0.15826555,  0.42403573])

    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)

    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
Example #5
def test_keypoints_orb_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
    detector_extractor.detect(img)

    exp_rows = np.array([ 435.  ,  435.6 ,  376.  ,  455.  ,  434.88,  269.  ,
                          375.6 ,  310.8 ,  413.  ,  311.04])
    exp_cols = np.array([ 180. ,  180. ,  156. ,  176. ,  180. ,  111. ,
                          156. ,  172.8,   70. ,  172.8])

    exp_scales = np.array([ 1.   ,  1.2  ,  1.   ,  1.   ,  1.44 ,  1.   ,
                            1.2  ,  1.2  ,  1.   ,  1.728])

    exp_orientations = np.array([-175.64733392, -167.94842949, -148.98350192,
                                 -142.03599837, -176.08535837,  -53.08162354,
                                 -150.89208271,   97.7693776 , -173.4479964 ,
                                 38.66312042])
    exp_response = np.array([ 0.96770745,  0.81027306,  0.72376257,
                              0.5626413 ,  0.5097993 ,  0.44351774,
                              0.39154173,  0.39084861,  0.39063076,
                              0.37602487])

    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)

    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
Example #6
def test_keypoints_orb_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
    detector_extractor.detect(img)

    exp_rows = np.array([ 141.   ,  108.   ,  214.56 ,  131.   ,  214.272,
                           67.   ,  206.   ,  177.   ,  108.   ,  141.   ])
    exp_cols = np.array([ 323.   ,  328.   ,  282.24 ,  292.   ,  281.664,
                           85.   ,  260.   ,  284.   ,  328.8  ,  267.   ])

    exp_scales = np.array([1,  1,  1.44,  1,  1.728, 1, 1, 1, 1.2, 1])

    exp_orientations = np.array([ -53.97446153,   59.5055285 ,  -96.01885186,
                                 -149.70789506,  -94.70171899,  -45.76429535,
                                  -51.49752849,  113.57081195,   63.30428063,
                                  -79.56091118])
    exp_response = np.array([ 1.01168357,  0.82934145,  0.67784179,  0.57176438,
                              0.56637459,  0.52248355,  0.43696175,  0.42992376,
                              0.37700486,  0.36126832])

    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)

    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
Example #7
def detect_kp_desc(img, method='orb', n_keypoints=2000, **args):
    """Find keypoints and their descriptors on the image.
    
    img:
        `np.array` of shape == WxHx3
        RGB image
    method:
        str
        Name of the method to use. Options are: ['orb', 'lf-net']
    n_keypoints:
        int
        Number of keypoints to find
    **args:
        dict
        Other parameters to pass to the keypoints detector without any changes
    
    return:
        tuple (2,)
        Coordinates and descriptors of found keypoints
    """

    if method == 'orb':
        detector_extractor = ORB(n_keypoints=n_keypoints, **args)
        # detector_extractor = cv2.ORB_create(nfeatures=n_keypoints, **args)
    elif method == 'lf-net':
        # https://github.com/vcg-uvic/lf-net-release
        raise NotImplementedError()
    detector_extractor.detect_and_extract(rgb2gray(img).astype(np.float64))
    return detector_extractor.keypoints, detector_extractor.descriptors
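A minimal usage sketch for detect_kp_desc above; the file name is hypothetical, and io is assumed to be skimage.io as in the other examples:

from skimage import io

img = io.imread('scene.jpg')  # any (H, W, 3) RGB image
keypoints, descriptors = detect_kp_desc(img, method='orb', n_keypoints=500)
print(keypoints.shape, descriptors.shape)  # roughly (N, 2) and (N, 256)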
Example #8
def test_keypoints_orb_less_than_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=15, fast_n=12,
                             fast_threshold=0.33, downscale=2, n_scales=2)
    detector_extractor.detect(img)

    exp_rows = np.array([  58.,   65.,  108.,  140.,  203.])
    exp_cols = np.array([ 291.,  130.,  293.,  202.,  267.])

    exp_scales = np.array([1., 1., 1., 1., 1.])

    exp_orientations = np.array([-158.26941428,  -59.42996346,  151.93905955,
                                  -79.46341354,  -56.90052451])

    exp_response = np.array([ 0.2667641 ,  0.04009017, -0.17641695, -0.03243431,
                              0.26521259])

    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)

    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
Example #9
def iris_scan_orb_android(file_name):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/'+ file_name))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS9.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    percent = len(matches12) / len(descriptors1) * 100

    return percent
Example #10
def get_descriptor(img):
    descriptor_extractor = ORB(n_keypoints=100)

    descriptor_extractor.detect_and_extract(img)
    keypoints = descriptor_extractor.keypoints
    descriptors = descriptor_extractor.descriptors
    return keypoints, descriptors
Example #11
def feature_extractor(image_path, options=None):
    """Extracts a set of features (described in config file) from an image.
    Args:
        image_path: the path to the image
        options: the configuration file settings. In this case the settings should contain
                 relevant image processing feature options.
    Return:
        an array of features, depending on the config options
    """

    features = []

    image = imread(image_path)

    if 'grey_required' in options:
        grey_image = rgb2grey(image)

    # GLCM features
    if 'glcm' in options:
        glcm_config = options['glcm']

        glcm_features = glcm.glcm_features(grey_image, glcm_config['modes'])

        features.append(glcm_features)

    # ORB features
    if 'orb' in options:
        orb_config = options['orb']

        orb_extractor = ORB(downscale=orb_config['downscale'],
                            n_scales=orb_config['n_scales'],
                            n_keypoints=orb_config['n_keypoints'],
                            fast_n=orb_config['fast_n'],
                            fast_threshold=orb_config['fast_threshold'],
                            harris_k=orb_config['harris_k'])

        orb_extractor.detect_and_extract(grey_image)

        # TODO add these to config system
        features.append(orb_extractor.keypoints.tolist())
        # features.append(orb_extractor.scales.tolist())
        # features.append(orb_extractor.orientations.tolist())
        # features.append(orb_extractor.responses.tolist())

        # features.append(orb_extractor.descriptors.tolist())

    if 'kmeans' in options:
        k_image = np.array(image, dtype=np.float64) / 255

        w, h, d = original_shape = tuple(k_image.shape)
        assert d == 3
        image_array = np.reshape(k_image, (w * h, d))

        kmeans = KMeans(
            n_clusters=options['kmeans']['clusters']).fit(image_array)

        features.append(kmeans.cluster_centers_.tolist())

    return list(flatten.flatten(features))
Example #12
def main():
    baseImg = loadResized("base.jpg", 600, 410)
    atlasImg = loadResized("atlas.jpg", 600, 410)

    orb = (ORB(n_keypoints=800,
               fast_threshold=0.05), ORB(n_keypoints=800, fast_threshold=0.05))
    orb[0].detect_and_extract(baseImg)
    orb[1].detect_and_extract(atlasImg)
    baseData = [orb[0].keypoints, orb[0].descriptors]
    atlasData = [orb[1].keypoints, orb[1].descriptors]

    match = match_descriptors(baseData[1], atlasData[1])

    dst = baseData[0][match[:, 0]][:, ::-1]
    src = atlasData[0][match[:, 1]][:, ::-1]

    robust, inliers = ransac((src, dst),
                             ProjectiveTransform,
                             min_samples=4,
                             residual_threshold=1,
                             max_trials=300)

    r, c = baseImg.shape[:2]
    corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])
    warpedCorners = robust(corners)

    allCorners = np.vstack((warpedCorners, corners))
    cornerMin = np.min(allCorners, axis=0)
    cornerMax = np.max(allCorners, axis=0)
    outputShape = (cornerMax - cornerMin)
    outputShape = np.ceil(outputShape[::-1]).astype(int)

    offSet = SimilarityTransform(translation=cornerMin)
    atlasWarped = warp(atlasImg,
                       offSet.inverse,
                       order=3,
                       output_shape=outputShape,
                       cval=-1)
    atlasMask = (atlasWarped != -1)
    atlasWarped[~atlasMask] = 0

    fig, ax = plt.subplots(figsize=(12, 12))
    diffImg = atlasWarped - baseImg
    ax.imshow(diffImg, cmap="gray")
    ax.axis("off")
    plt.show()

    compare(atlasWarped, baseImg, figsize=(12, 10))

    costs = generateCosts(np.abs(atlasWarped - baseImg), atlasWarped & baseImg)
    fig, ax = plt.subplots(figsize=(15, 12))
    ax.imshow(costs, cmap="gray", interpolation="none")
    ax.axis("off")

    outputImg = cv2.addWeighted(baseImg, .3, atlasImg, 1, 0)

    cv2.imshow("Output", outputImg)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Пример #13
0
def calc_orb(*imgs):
    descriptor_extractor = ORB(n_keypoints=100)
    for c_img in imgs:
        descriptor_extractor.detect_and_extract(c_img)
        yield {
            "keypoints": descriptor_extractor.keypoints,
            "descriptors": descriptor_extractor.descriptors,
        }
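A minimal sketch of consuming the generator above, assuming img_a and img_b are grayscale images that are already loaded (hypothetical names):

features = list(calc_orb(img_a, img_b))  # one dict of keypoints/descriptors per image
print(features[0]["keypoints"].shape, features[0]["descriptors"].shape)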
Example #14
def selectFeatures(useList):
    DataSet = []
    LabelSet = []
    lengthV = []
    trainPaths = ['./fruit/' + c + '_train/' for c in classes]
    testPaths = ['./fruit/' + c + ' test/' for c in classes]
    for c in range(len(classes)):
        className = classes[c]
        path = trainPaths[c]
        detector = CENSURE()
        detector2 = ORB(n_keypoints=50)
        detector3 = BRIEF(patch_size=49)
        files = os.listdir(path)
        #sample
        files = random.sample(files, 100)
        nfiles = len(files)
        for i in range(nfiles):
            featureVector = []
            infile = files[i]
            img = io.imread(path + infile, as_grey=True)
            hist = np.histogram(img, bins=256)
            img = resize(img, (400, 400))
            detector2.detect_and_extract(img)
            detector.detect(img)
            a = fd = hog(img,
                         orientations=9,
                         pixels_per_cell=(32, 32),
                         cells_per_block=(1, 1),
                         visualise=False)
            for h in hist:
                fd = np.append(fd, h)
            if (useList[0]):
                fd = np.append(fd, [np.array(detector.keypoints).flatten()])
            if (useList[1]):
                fd = np.append(fd, detector2.keypoints)
            if (useList[2]):
                fd = np.append(fd, edgeExtract(img, 100))
            l1 = len(fd)
            corners = corner_peaks(corner_harris(img), min_distance=1)
            if (useList[3]):
                fd = np.append(fd, corners)
            lengthV.append(len(fd))
            DataSet.append(fd)
            ind = classes.index(className)
            LabelSet.append(ind)
    max = np.amax(lengthV)
    lengthV = []
    DataSet2 = []
    for d in DataSet:
        d = np.pad(d, (0, max - len(d)), 'constant')
        DataSet2.append(d)
        lengthV.append(len(d))
    DataSet = DataSet2
    res = 0
    #perform gridsearch with one thread
    if __name__ == '__main__':
        res = gridSearch(DataSet, LabelSet, False)
        return res
Example #15
    def __init__(self, name, startGID=0):
        super(PanormaGroup, self).__init__(name, startGID=startGID)
        # "Oriented FAST and rotated BRIEF" feature detector
        self.orb = ORB(n_keypoints=4000, fast_threshold=0.05)
        #        self.ImagesWithOverlap = []  # List to store images which has overlap
        self.ImagesKeypointsDescriptors = [
        ]  # List of tuples storing ORB (keypoints, descriptors)
        # Minus one to compensate for the increment which will happen for the first image
        self.CurrentGroupID -= 1
Example #16
class PanormaGroup(GroupChecker):
    """ This is to check wheter the new image can be 
    considered to be part of a panorama of previous image 
    Based on: http://nbviewer.ipython.org/github/scikit-image/skimage-demos/blob/master/pano/pano.ipynb?raw=true """

    def __init__(self, name, startGID = 0):
        super(PanormaGroup,self).__init__(name, startGID = startGID)
        # "Oriented FAST and rotated BRIEF" feature detector
        self.orb = ORB(n_keypoints=4000, fast_threshold=0.05)
#        self.ImagesWithOverlap = []  # List to store images which has overlap
        self.ImagesKeypointsDescriptors = []  # List of tuples storing ORB (keypoints, descriptors)
        # Minus one to compensate for the increment which will happen for the first image
        self.CurrentGroupID -= 1
        
    def NextGID(self,image):
        """ Calculates the next Group ID for the input image """
        NewImg = self.LoadImage(image,Greyscale=True,scale=0.25)
        self.orb.detect_and_extract(NewImg)
        NewImgKeyDescr = (self.orb.keypoints, self.orb.descriptors)

        for PreImgKeyDescr in reversed(self.ImagesKeypointsDescriptors):
            # Check for overlap
            matcheOfDesc = match_descriptors(PreImgKeyDescr[1], NewImgKeyDescr[1], cross_check=True)

            # Select keypoints from the source (image to be registered)
            # and target (reference image)
            src = NewImgKeyDescr[0][matcheOfDesc[:, 1]][:, ::-1]
            dst = PreImgKeyDescr[0][matcheOfDesc[:, 0]][:, ::-1]

            model_robust, inliers = ransac((src, dst), ProjectiveTransform,
                                           min_samples=4, residual_threshold=1, max_trials=300)                
                
            NumberOfTrueMatches = np.sum(inliers)  #len(inliers[inliers])

            if NumberOfTrueMatches > 100 :
                # Image has overlap
                logger.debug('Image {0} found a match! (No: of Matches={1})'.format(image,NumberOfTrueMatches))
                break
            else :
                logger.debug('Image {0} not matching..(No: of Matches={1})'.format(image,NumberOfTrueMatches))
                continue

        else:
            # None of the images in the for loop has any overlap...So this is a new Group
            self.ImagesKeypointsDescriptors = [] # Erase all previous group items
            # self.ImagesWithOverlap = [] 

            # Increment Group ID
            self.CurrentGroupID += 1
            logger.debug('Starting a new Panorama group (GID={0})'.format(self.CurrentGroupID))

        # Append the latest image to the current group
        self.ImagesKeypointsDescriptors.append(NewImgKeyDescr) 
        # self.ImagesWithOverlap.append(NewImg)

        # Return the current  group ID
        return self.CurrentGroupID
Example #17
def get_displacement(image0, image1):
    """
    Gets the displacement (in pixels) between two images using scikit-image;
    probably not as accurate as the OpenCV version.

    :param image0: reference image
    :param image1: target image
    :return:
    """
    from skimage.feature import (match_descriptors, ORB, plot_matches)
    from skimage.color import rgb2gray
    from scipy.spatial.distance import hamming
    from scipy import misc
    image0_gray = rgb2gray(image0)
    image1_gray = rgb2gray(image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(image0_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(image1_gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches based on distance.  Least distance
    # is better
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # collect displacement from the first 10 matches
    dx_list = []
    dy_list = []
    for mat in matches12[:10]:
        # Get the matching key points for each of the images
        img1_idx = mat[0]
        img2_idx = mat[1]

        # x - columns
        # y - rows
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dx_list.append(abs(x1 - x2))
        dy_list.append(abs(y1 - y2))

    dx_median = np.median(np.asarray(dx_list, dtype=np.double))
    dy_median = np.median(np.asarray(dy_list, dtype=np.double))
    # plot_matches(image0, image1, descriptors1, descriptors2, matches12[:10])
    return dx_median, dy_median
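A minimal usage sketch, assuming numpy is imported as np at module level and that two consecutive RGB frames exist on disk (the file names are hypothetical):

from skimage import io

frame0 = io.imread('frame_000.png')
frame1 = io.imread('frame_001.png')
dx, dy = get_displacement(frame0, frame1)
print(dx, dy)  # median per-axis shift, in pixels, over the 10 closest matches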
Example #18
def load_descriptors(file_names, num_keypoints=200):
    # Load images
    descriptor_extractor = ORB(n_keypoints=num_keypoints)
    descriptors = []
    for im_path in file_names:
        img = plt.imread("data/" + im_path)
        img = rgb2gray(img)
        descriptor_extractor.detect_and_extract(img)
        descriptors.append(descriptor_extractor.descriptors)
    return np.array(descriptors)
Example #19
def image_features_orb(img,keypoints):
     # X is the feature vector with one row of features per image
     #
     Xsize=2*keypoints
     X=np.zeros(Xsize, dtype=float)
     # extract patches using scikit library.
     orb=ORB(downscale=1.2, n_scales=8, n_keypoints=keypoints, fast_n=4, fast_threshold=0.00001, harris_k=0.01)
     orb.detect_and_extract(img)
     X[0:Xsize] = np.reshape(orb.keypoints,(1, Xsize))
     return X
Example #20
def getORB(img, kpn=200):
    # extract features
    descriptor_extractor = ORB(n_keypoints=kpn)
    # get the good stuff
    descriptor_extractor.detect_and_extract(img)
    keys = descriptor_extractor.keypoints
    # convert bool to nums
    descs = descriptor_extractor.descriptors * 1.0

    return keys, descs
Example #21
def image_features_orb(img,keypoints):
     # X is the feature vector with one row of features per image
     #
     Xsize=2*keypoints
     X=np.zeros(Xsize, dtype=float)
     # extract patches using scikit library.
     orb=ORB(downscale=1.2, n_scales=8, n_keypoints=keypoints, fast_n=4, fast_threshold=0.00001, harris_k=0.01)
     orb.detect_and_extract(img)
     X[0:Xsize] = np.reshape(orb.keypoints,(1, Xsize))
     return X
Example #22
def orb_extractor(img, n_keypoints=100):
    """Try orb binary descriptor using binaries created by Otsu's method."""

    descriptor_extractor = ORB(n_keypoints=n_keypoints)
    """ Extract descriptors for the original images """

    descriptor_extractor.detect_and_extract(img)
    keypoints = descriptor_extractor.keypoints
    descriptors = descriptor_extractor.descriptors

    return (keypoints, descriptors)
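The docstring above mentions binaries created by Otsu's method, but the binarization itself is not part of the function. A minimal sketch of that preprocessing, assuming threshold_otsu from skimage.filters and a grayscale input img (hypothetical name); whether enough FAST keypoints survive on a purely binary image depends on the input:

from skimage.filters import threshold_otsu

binary = (img > threshold_otsu(img)).astype(float)
keypoints, descriptors = orb_extractor(binary, n_keypoints=100)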
Example #23
 def calculate(self, resource):
     except_image_only(resource)
     im = image2numpy(resource.image, remap='gray')
     extractor = ORB()
     extractor.detect_and_extract(im)
     return (extractor.descriptors,
             extractor.keypoints[:,0],
             extractor.keypoints[:,1],
             extractor.responses,
             extractor.scales,
             extractor.orientations)
Example #24
def orb_extractor(img, n_keypoints=100):
    """Try orb binary descriptor using binaries created by Otsu's method."""

    descriptor_extractor = ORB(n_keypoints=n_keypoints)

    """ Extract descriptors for the original images """
    
    descriptor_extractor.detect_and_extract(img)
    keypoints = descriptor_extractor.keypoints
    descriptors = descriptor_extractor.descriptors

    return(keypoints, descriptors)
Example #25
def orb_descriptors(img, keypoints=800):
    """
        Compute ORB descriptors.
        Params:
            img - image
            keypoints - maximum number of detector keypoints
        Return:
            np.array - array of descriptors (at least two-dimensional)
    """
    extractor = ORB(n_keypoints=keypoints)
    extractor.detect_and_extract(img)
    return extractor.descriptors
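A minimal sketch of comparing two images with the descriptors returned above, assuming img1 and img2 are grayscale images (hypothetical names) and match_descriptors comes from skimage.feature:

from skimage.feature import match_descriptors

d1 = orb_descriptors(img1)
d2 = orb_descriptors(img2)
matches = match_descriptors(d1, d2, cross_check=True)
print(len(matches), 'cross-checked matches')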
Example #26
def FindRetinaFeatures(Image):
    """
    this function finds strong features in the image to use for registration.
    :param Image: the images to find the features in
    :return:
    """
    Image = gaussian_filter(Image, 3)
    orb = ORB(n_keypoints=200)
    orb.detect_and_extract(Image)
    keypoints = orb.keypoints
    descriptors = orb.descriptors
    return keypoints, descriptors
Example #27
def orb_feature(image):
    """
    提取图像的orb特征
    :param image:
    :return:
    """
    image_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    orb = ORB(n_keypoints=50)
    orb.detect_and_extract(image_gray)
    descriptors = orb.descriptors
    keypoints = orb.keypoints
    return keypoints
Example #28
def orb_extractor_generator(stack):
    """Orb binary descriptor generator
    
    This returns a descriptor object. The descriptor object encodes both keypoints and descriptors.
    """
    """Set parameters"""
    number_of_keypoints = 10
    """Get descriptor_extractor object"""
    for img in stack:
        descriptor_extractor = ORB(n_keypoints=number_of_keypoints)
        descriptor_extractor.detect_and_extract(img)

        yield descriptor_extractor
Example #29
    def __next__(self):
        from skimage import transform as tf
        from skimage.feature import (match_descriptors, corner_harris,
                                     corner_peaks, ORB, plot_matches)
        from skimage.color import rgb2gray

        img1 = rgb2gray(self.image)
        descriptor_extractor = ORB(n_keypoints=200)
        descriptor_extractor.detect_and_extract(img1)
        keypoints1 = descriptor_extractor.keypoints
        descriptors1 = descriptor_extractor.descriptors
        for x,y in keypoints1:
            yield ((x,y))
Example #30
def orb(img):
    img1 = rgb2gray(img)
    img2 = transform.rotate(img1, 180)
    tform = transform.AffineTransform(scale=(1.3, 1.1),
                                      rotation=0.5,
                                      translation=(0, -200))
    img3 = transform.warp(img1, tform)

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(img3)
    keypoints3 = descriptor_extractor.keypoints
    descriptors3 = descriptor_extractor.descriptors

    matches1 = match_descriptors(descriptors1, descriptors2, cross_check=True)
    matches2 = match_descriptors(descriptors1, descriptors3, cross_check=True)

    return np.hstack(
        (keypoints1[matches1[:, 0]].ravel(), keypoints2[matches2[:, 1]].ravel()))
Example #31
def test_descriptor_orb():
    detector_extractor = ORB(fast_n=12, fast_threshold=0.20)

    exp_descriptors = np.array(
        [[0, 1, 1, 1, 0, 1, 0, 1, 0, 1], [1, 1, 1, 0, 0, 1, 0, 0, 1, 1],
         [1, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
         [0, 1, 0, 0, 0, 0, 0, 0, 1, 0], [1, 1, 0, 1, 1, 1, 0, 0, 1, 1],
         [1, 1, 0, 1, 0, 0, 1, 0, 1, 1], [0, 0, 1, 0, 1, 0, 0, 1, 1, 0],
         [1, 0, 0, 0, 1, 0, 0, 0, 0, 1], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
         [1, 1, 0, 1, 0, 1, 0, 0, 1, 1], [1, 1, 1, 0, 0, 0, 1, 1, 1, 0],
         [1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 1, 1, 1, 1, 0, 0],
         [1, 1, 0, 0, 1, 0, 0, 1, 0, 1], [1, 1, 0, 0, 0, 0, 1, 0, 0, 1],
         [0, 0, 0, 0, 1, 1, 1, 0, 1, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 1],
         [0, 0, 0, 0, 0, 1, 1, 0, 1, 1], [0, 0, 0, 0, 1, 0, 1, 0, 1, 1]],
        dtype=bool)
    detector_extractor.detect(img)
    detector_extractor.extract(img, detector_extractor.keypoints,
                               detector_extractor.scales,
                               detector_extractor.orientations)
    assert_equal(exp_descriptors, detector_extractor.descriptors[100:120,
                                                                 10:20])

    detector_extractor.detect_and_extract(img)
    assert_equal(exp_descriptors, detector_extractor.descriptors[100:120,
                                                                 10:20])
Example #32
def find_orb(img, keypoints=500):
    """Find keypoints and their descriptors in image.

    img ((W, H, 3)  np.ndarray) : 3-channel image
    n_keypoints (int) : number of keypoints to find

    Returns:
        (N, 2)  np.ndarray : keypoints
        (N, 256)  np.ndarray, type=np.bool  : descriptors
    """
    extractor = ORB(n_keypoints=keypoints)
    grey_img = rgb2gray(img)
    extractor.detect_and_extract(grey_img)
    return extractor.keypoints, extractor.descriptors
Example #33
def findH(img1, img2):
    from skimage.feature import ORB, match_descriptors

    # load image
    img1_gray = skimage.color.rgb2gray(img1)
    img2_gray = skimage.color.rgb2gray(img2)

    # extract points
    detector_extractor1 = ORB(n_keypoints=3000)
    detector_extractor1.detect_and_extract(img1_gray)
    detector_extractor2 = ORB(n_keypoints=3000)
    detector_extractor2.detect_and_extract(img2_gray)
    matches = match_descriptors(detector_extractor1.descriptors,
                                detector_extractor2.descriptors)
    match_pts1 = detector_extractor1.keypoints[matches[:, 0]].astype(int)
    match_pts2 = detector_extractor2.keypoints[matches[:, 1]].astype(int)

    # call RANSAC
    match_pts1 = np.flip(match_pts1, axis=1)
    match_pts2 = np.flip(match_pts2, axis=1)
    H_2to1, _ = computeHransac(match_pts1, match_pts2)
    H_2to1 = H_2to1 / H_2to1[2, 2]
    print('transform H:')
    print(H_2to1)

    return H_2to1
Example #34
def keypoint_extractor(img_dir_list):
    # orb = cv2.ORB_create(nfeatures=100)
    orb = ORB(n_keypoints=100)
    descriptors = []
    for index, i in enumerate(img_dir_list):
        img_name = os.path.join(base_path, img_list + "\\" + i)
        # image = cv2.imread(img_name)
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = io.imread(img_name)
        image = color.rgb2gray(image)
        # train_keypoints, train_descriptor = orb.detectAndCompute(image , None)
        orb.detect_and_extract(image)
        descriptors.append(orb.descriptors)
        print(index)
    return descriptors
Example #35
    def extract_features(self):
        '''Extract interest points and their feature vector (descriptors)'''

        self.keypoints, self.descriptors, self.corners = [], [], np.empty(
            (self.num_imgs, 4, 2))
        orb = ORB(n_keypoints=1000, fast_threshold=0.05)

        for idx, img in enumerate(self.grays):
            orb.detect_and_extract(img)
            self.keypoints.append(orb.keypoints)
            self.descriptors.append(orb.descriptors)

            # get 4 corners of images
            r, c = img.shape
            self.corners[idx] = np.array([[0, 0], [0, r], [c, 0], [c, r]])
Example #36
def FindRetinaFeatures(Image):
    """
    this function finds strong features in the image to use for registration.
    :param Image: the images to find the features in
    :return:
    """
    Image = read_image(Image, 1)
    plt.imshow(Image, cmap='gray')
    orb = ORB(n_keypoints=50, harris_k=20)
    orb.detect_and_extract(Image)
    key_points = orb.keypoints
    plt.scatter(key_points[:, 0], key_points[:, 1])
    plt.ylim(BOTTOM_CAPTION, 0)  # removing the caption at the bottom
    plt.title("Retina Features")
    plt.show()
Example #37
def find_orb(img, n_keypoints=N_KEYPOINTS):
    """Find keypoints and their descriptors in image.

    img ((W, H, 3)  np.ndarray) : 3-channel image
    n_keypoints (int) : number of keypoints to find

    Returns:
        (N, 2)  np.ndarray : keypoints
        (N, 256)  np.ndarray, type=np.bool  : descriptors
    """

    # your code here
    orb = ORB(n_keypoints=n_keypoints)
    orb.detect_and_extract(rgb2gray(img))
    return (orb.keypoints, orb.descriptors)
Example #38
def register_image_pair(idx, path_img_target, path_img_source, path_out):
    """ register two images together

    :param int idx: empty parameter for using the function in parallel
    :param str path_img_target: path to the target image
    :param str path_img_source: path to the source image
    :param str path_out: path for exporting the output
    :return tuple(str,float):
    """
    start = time.time()
    # load and denoise reference image
    img_target = io.imread(path_img_target)[..., :3]
    img_target = denoise_wavelet(img_target,
                                 wavelet_levels=7,
                                 multichannel=True)
    img_target_gray = rgb2gray(img_target)

    # load and denoise moving image
    img_source = io.imread(path_img_source)[..., :3]
    img_source = denoise_bilateral(img_source,
                                   sigma_color=0.05,
                                   sigma_spatial=2,
                                   multichannel=True)
    img_source_gray = rgb2gray(img_source)

    # detect ORB features on both images
    detector_target = ORB(n_keypoints=150)
    detector_source = ORB(n_keypoints=150)
    detector_target.detect_and_extract(img_target_gray)
    detector_source.detect_and_extract(img_source_gray)
    matches = match_descriptors(detector_target.descriptors,
                                detector_source.descriptors)
    # robustly estimate affine transform model with RANSAC
    model, _ = ransac(
        (detector_target.keypoints[matches[:, 0]],
         detector_source.keypoints[matches[:, 1]]),
        AffineTransform,
        min_samples=25,
        max_trials=500,
        residual_threshold=0.9,
    )

    # warping source image with estimated transformations
    path_img_warped = os.path.join(path_out, NAME_IMAGE_WARPED % idx)
    if model:
        img_warped = warp(img_target,
                          model.inverse,
                          output_shape=img_target.shape[:2])
        try:
            io.imsave(path_img_warped, img_warped)
        except Exception:
            traceback.print_exc()
    else:
        warnings.warn("Image registration failed.", RuntimeWarning)
        path_img_warped = None
    # summarise experiment
    execution_time = time.time() - start
    return path_img_warped, execution_time
Example #39
def keypoint_extractor_test(img_dir_list):
    # orb = cv2.ORB_create(nfeatures=100)
    orb = ORB(n_keypoints=100)
    descriptors = []
    keypoints = []
    for i in img_dir_list:
        # image = cv2.imread(img_name)
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = io.imread(i)
        image = color.rgb2gray(image)
        # train_keypoints, train_descriptor = orb.detectAndCompute(image , None)
        orb.detect_and_extract(image)
        descriptors.append(orb.descriptors)
        keypoints.append(orb.keypoints)

    return descriptors, keypoints
Example #40
def get_matrix(image_tif_bgrn, image_jpg_bgr, verbose=False):
    """Get similarity transform matrix
	ORB Limitation: https://github.com/scikit-image/scikit-image/issues/1472 """
    im_tif_adjusted = match_color_curve_tif2jpg(image_tif_bgrn, image_jpg_bgr)
    jpg_gray = cv2.cvtColor(image_jpg_bgr, cv2.COLOR_BGR2GRAY).astype(np.uint8)
    tif_gray = cv2.cvtColor(im_tif_adjusted,
                            cv2.COLOR_BGR2GRAY).astype(np.uint8)

    number_of_keypoints = 100

    # Initialize ORB
    # This number of keypoints is large enough for robust results,
    # but low enough to run quickly.
    orb = ORB(n_keypoints=number_of_keypoints, fast_threshold=0.05)
    orb2 = ORB(n_keypoints=number_of_keypoints, fast_threshold=0.05)
    try:
        # Detect keypoints
        orb.detect_and_extract(jpg_gray)
        keypoints_jpg = orb.keypoints
        descriptors_jpg = orb.descriptors
        orb2.detect_and_extract(tif_gray)
        keypoints_tif = orb2.keypoints
        descriptors_tif = orb2.descriptors
    except IndexError:
        raise KeypointDetectionException('ORB Keypoint detection failed')

    # Match descriptors between images
    matches = match_descriptors(descriptors_jpg,
                                descriptors_tif,
                                cross_check=True)

    # Select keypoints from
    #   * source (image to be registered)
    #   * target (reference image)
    src = keypoints_jpg[matches[:, 0]][:, ::-1]
    dst = keypoints_tif[matches[:, 1]][:, ::-1]

    model_robust, inliers = ransac((src, dst),
                                   TranslationTransform,
                                   min_samples=4,
                                   residual_threshold=1,
                                   max_trials=300)
    if verbose:
        print(inliers)
        print("number of matching keypoints", np.sum(inliers))

    if inliers is None or np.sum(inliers) < 3 or model_robust is None:
        raise ValueError('Possible mismatched JPG and TIF')

    if is_translational(model_robust):
        # we assume src and dst are not rotated relative to each other
        # get rid of any rotational noise introduced during normalization/centering in transform estimate function
        model_robust.params[0, 0] = 1.0
        model_robust.params[1, 1] = 1.0
        return model_robust
    else:
        raise ValueError('Invalid Model')
Example #41
    def __init__(self, train_dir, verbose):
        self.train_dir = train_dir
        self.verbose = verbose

        self.labels_file = os.path.join(self.train_dir, "labels.txt")
        self.train_on_full_data = True
        self.n_train = 100
        self.train_dict = defaultdict(list)

        self.thresh = 25  # 25 pixel threshold to decide if the detected ORB keypoint is within this threshold of the ground truth object location

        self.descriptor_extractor = ORB(n_keypoints=50,
                                        fast_n=9,
                                        fast_threshold=0.15)
        self.all_train_descriptors = []
        self.train_descriptor_outfile = os.path.join(os.getcwd(),
                                                     "train_descriptors.npy")
Example #42
    def __init__(self, name, startGID = 0):
        super(PanormaGroup,self).__init__(name, startGID = startGID)
        # "Oriented FAST and rotated BRIEF" feature detector
        self.orb = ORB(n_keypoints=4000, fast_threshold=0.05)
#        self.ImagesWithOverlap = []  # List to store images which has overlap
        self.ImagesKeypointsDescriptors = []  # List of tuples storing ORB (keypoints, descriptors)
        # Minus one to compensate for the increment which will happen for the first image
        self.CurrentGroupID -= 1
Example #43
def getDisplacement(Image0, Image1):
    Image0Gray = rgb2gray(Image0)
    Image1Gray = rgb2gray(Image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(Image0Gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(Image1Gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches based on distance.  Least distance
    # is better
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # collect displacement from the first 10 matches
    dxList = []
    dyList = []
    for mat in matches12[:10]:
        # Get the matching keypoints for each of the images
        img1_idx = mat[0]
        img2_idx = mat[1]

        # x - columns
        # y - rows
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dxList.append(abs(x1 - x2))
        dyList.append(abs(y1 - y2))

    dxMedian = np.median(np.asarray(dxList, dtype=np.double))
    dyMedian = np.median(np.asarray(dyList, dtype=np.double))
    fig, ax = plt.subplots(nrows=1, ncols=1)
    plot_matches(ax, Image0, Image1, keypoints1, keypoints2, matches12[:10])
    return dxMedian, dyMedian
Example #44
def test_descriptor_orb():
    detector_extractor = ORB(fast_n=12, fast_threshold=0.20)

    exp_descriptors = np.array([[ True, False,  True,  True, False, False, False, False, False, False],
                                [False, False,  True,  True, False,  True,  True, False,  True,  True],
                                [ True, False, False, False,  True, False,  True,  True,  True, False],
                                [ True, False, False,  True, False,  True,  True, False, False, False],
                                [False,  True,  True,  True, False, False, False,  True,  True, False],
                                [False, False, False, False, False,  True, False,  True,  True,  True],
                                [False,  True,  True,  True,  True, False, False,  True, False,  True],
                                [ True,  True,  True, False,  True,  True,  True,  True, False, False],
                                [ True,  True, False,  True,  True,  True,  True, False, False, False],
                                [ True, False, False, False, False,  True, False, False,  True,  True],
                                [ True, False, False, False,  True,  True,  True, False, False, False],
                                [False, False,  True, False,  True, False, False,  True, False, False],
                                [False, False,  True,  True, False, False, False, False, False,  True],
                                [ True,  True, False, False, False,  True,  True,  True,  True,  True],
                                [ True,  True,  True, False, False,  True, False,  True,  True, False],
                                [False,  True,  True, False, False,  True,  True,  True,  True,  True],
                                [ True,  True,  True, False, False, False, False,  True,  True,  True],
                                [False, False, False, False,  True, False, False,  True,  True, False],
                                [False,  True, False, False,  True, False, False, False,  True,  True],
                                [ True, False,  True, False, False, False,  True,  True, False, False]], dtype=bool)

    detector_extractor.detect(img)
    detector_extractor.extract(img, detector_extractor.keypoints,
                               detector_extractor.scales,
                               detector_extractor.orientations)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])

    detector_extractor.detect_and_extract(img)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])
Example #45
    def get_translation_tool(self, n_keypoints=1000):

        # Convert images to grayscale
        src_image = rgb2gray(self.src_image)
        dst_image = rgb2gray(self.dst_image)

        # Initiate an ORB class object which can extract features & descriptors from images.
        # Set the amount of features that should be found (more = more accurate)
        descriptor_extractor = ORB(n_keypoints=n_keypoints)

        # Extract features and descriptors from source image
        descriptor_extractor.detect_and_extract(src_image)
        self.keypoints1 = descriptor_extractor.keypoints
        descriptors1 = descriptor_extractor.descriptors

        # Extract features and descriptors from destination image
        descriptor_extractor.detect_and_extract(dst_image)
        self.keypoints2 = descriptor_extractor.keypoints
        descriptors2 = descriptor_extractor.descriptors

        # Matches the descriptors and gives them rating as to how similar they are
        self.matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

        # Selects the coordinates from source image and destination image based on the
        # indices given from the match_descriptors function.
        src = self.keypoints1[self.matches12[:, 0]][:, ::-1]
        dst = self.keypoints2[self.matches12[:, 1]][:, ::-1]

        # Filters out the outliers and generates the transformation matrix based on only the inliers
        model_robust, inliers = \
            ransac((src, dst), ProjectiveTransform,
                min_samples=4, residual_threshold=2)

        # This returns the object "model_robust" which contains the tranformation matrix and
        # uses that to translate any coordinate point from source to destination image.
        return model_robust, inliers
Example #46
    # Read image from file, then inspect the image dimensions
    img = cv2.imread("/media/dick/External/KaggleRetina/" + folder1 + "/" + file_names[i],1)
    print(file_names[i]),
    height, width, channels = img.shape

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    del img
    # Make a PIL image so we can use PIL.Image.thumbnail to resize if needed
    gray_ = Image.fromarray(gray)

    # Check if dimensions are above desired, if so then resize keeping aspect ratio
    m, n = 512,512
    if height > m or width > n:
        gray_.thumbnail((m,n), Image.ANTIALIAS)

    orb = ORB(n_keypoints=100)

    try:
        orb.detect_and_extract(np.asarray(gray_))  # convert the PIL image back to an ndarray for skimage
    except IndexError:
        print(file_names[i] + " had an issue.")
        issues.append(file_names[i])
        continue
    kp = orb.keypoints
    des = orb.descriptors
    print(len(des))

    #Store keypoint features 
    temp_array = []
    temp = pickle_keypoints(kp, des) 
    temp_array.append(temp)
Example #47
def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    fig, axes = plt.subplots(7, 7, figsize=(7, 6), sharex=True, sharey=True)

    fig.delaxes(axes[0][0])

    ssims = numpy.zeros( (len(BurgerElement.__members__), len(BurgerElement.__members__)), dtype=float)
    mses = numpy.zeros( (len(BurgerElement.__members__), len(BurgerElement.__members__)), dtype=float)
                         
    for i, layer in enumerate(BurgerElement.__members__):
        template = os.path.join(canonical_dir, '%s.png' % layer)

        img1 = imread(template)
        # img1_padded = numpy.zeros( (WIDTH, HEIGHT,3), dtype=numpy.uint8)
        img1_padded = numpy.resize( [255,255,255], (WIDTH, HEIGHT, 3))
        s = img1.shape
        w = s[0]
        h = s[1]
        nb = img1_padded.shape[0]
        na = img1.shape[0]
        lower1 = (nb) // 2 - (na // 2)
        upper1 = (nb // 2) + (na // 2)
        nb = img1_padded.shape[1]
        na = img1.shape[1]
        lower2 = (nb) // 2 - (na // 2)
        upper2 = (nb // 2) + (na // 2)
        img1_padded[lower1:upper1, lower2:upper2] = img1
        img1_padded_float = img1_padded.astype(numpy.float64)/255.
        print(img1_padded_float.shape)
        img1_gray = rgb2gray(img1_padded_float)

        descriptor_extractor = ORB()

        try:
            descriptor_extractor.detect_and_extract(img1_gray)
        except RuntimeError:
            continue
        
        keypoints1 = descriptor_extractor.keypoints
        descriptors1 = descriptor_extractor.descriptors

        axes[i][0].imshow(img1_padded_float)
        axes[i][0].set_title("Template image")

        for j, layer2 in enumerate(BurgerElement.__members__):

            rot, tx, ty, scale = get_random_orientation()
            img2 = draw_example(layer2, WIDTH, HEIGHT, rot, tx, ty, scale)

            # match = os.path.join(canonical_dir, '%s.png' % layer2)
            # img2 = imread(match)

            img2_padded = numpy.resize( [255,255,255], (WIDTH, HEIGHT, 3))
            s = img2.shape
            img2_padded[:s[0], :s[1]] = img2
            img2_padded_float = img2_padded.astype(numpy.float64)/255.
            img2_gray = rgb2gray(img2_padded_float)

            try:
                descriptor_extractor.detect_and_extract(img2_gray)
            except RuntimeError:
                continue

            keypoints2 = descriptor_extractor.keypoints
            descriptors2 = descriptor_extractor.descriptors

            matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

            src = keypoints2[matches12[:, 1]][:, ::-1]
            dst = keypoints1[matches12[:, 0]][:, ::-1]

            model_robust, inliers = \
                ransac((src, dst), SimilarityTransform,
                       min_samples=4, residual_threshold=2)
            if not model_robust:
                print "bad"
                continue
            img2_transformed = transform.warp(img2_padded_float, model_robust.inverse, mode='constant', cval=1)
            sub = img2_transformed - img1_padded_float
            ssim = compare_ssim(img2_transformed, img1_padded_float, win_size=5, multichannel=True)
            mse = compare_mse(img2_transformed, img1_padded_float)
            ssims[i,j] = ssim
            mses[i,j] = mse

            axes[0][j].imshow(img2_padded_float)
            axes[0][j].set_title("Match image")

            axes[i][j].imshow(img2_transformed)
            axes[i][j].set_title("Transformed image")
            axes[i][j].set_xlabel("SSIM: %9.4f MSE: %9.4f" % (ssim, mse))

        # ax = plt.gca()
        # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)

    print(ssims)
    print(numpy.argmax(ssims, axis=1))
    print(numpy.argmin(mses, axis=1))
                       
    plt.show()
Example #48
def test_no_descriptors_extracted_orb():
    img = np.ones((128, 128))
    detector_extractor = ORB()
    with testing.raises(RuntimeError):
        detector_extractor.detect_and_extract(img)
Example #49
def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    template = os.path.join(canonical_dir, 'patty.png')

    img1 = imread(template)
    # img1_padded = numpy.zeros( (256, 256,3), dtype=numpy.uint8)
    img1_padded = numpy.resize( [255,255,255], (256, 256, 3))
    s = img1.shape
    img1_padded[:s[0], :s[1]] = img1
    img1_gray = rgb2gray(img1)

    descriptor_extractor = ORB()

    descriptor_extractor.detect_and_extract(img1_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    # g = glob.glob(os.path.join(image_base_dir, 'patty*.nobox.png'))
    # for moving in g:
    while True:
        rot, tx, ty, scale = get_random_orientation()
        # img2 = imread(moving)
        img2 = draw_example('patty', 256, 256, rot, tx, ty, scale)
        img2_gray = rgb2gray(img2)

        try:
            descriptor_extractor.detect_and_extract(img2_gray)
        except RuntimeError:
            continue
        
        keypoints2 = descriptor_extractor.keypoints
        descriptors2 = descriptor_extractor.descriptors

        matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

        src = keypoints2[matches12[:, 1]][:, ::-1]
        dst = keypoints1[matches12[:, 0]][:, ::-1]

        model_robust, inliers = \
            ransac((src, dst), SimilarityTransform,
                   min_samples=4, residual_threshold=2)
        if not model_robust:
            print "bad"
            continue
        img2_transformed = transform.warp(img2, model_robust.inverse, mode='constant', cval=1)
        img1_padded_float = img1_padded.astype(numpy.float64)/255.
        sub = img2_transformed - img1_padded_float
        print(compare_ssim(img2_transformed, img1_padded_float, win_size=5, multichannel=True))
        fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True)
        ax = axes.ravel()

        ax[0].imshow(img1_padded_float)
        ax[1].imshow(img2)
        ax[1].set_title("Template image")
        ax[2].imshow(img2_transformed)
        ax[2].set_title("Matched image")
        ax[3].imshow(sub)
        ax[3].set_title("Subtracted image")
        # plt.gray()

        # ax = plt.gca()
        # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)


        plt.show()
Example #50
import numpy as np
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import match_descriptors, ORB, plot_matches
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform
import matplotlib.pyplot as plt

np.random.seed(0)

img_left, img_right, groundtruth_disp = data.stereo_motorcycle()
img_left, img_right = map(rgb2gray, (img_left, img_right))

# Find sparse feature correspondences between left and right image.

descriptor_extractor = ORB()

descriptor_extractor.detect_and_extract(img_left)
keypoints_left = descriptor_extractor.keypoints
descriptors_left = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img_right)
keypoints_right = descriptor_extractor.keypoints
descriptors_right = descriptor_extractor.descriptors

matches = match_descriptors(descriptors_left, descriptors_right,
                            cross_check=True)

# Estimate the epipolar geometry between the left and right image.

model, inliers = ransac((keypoints_left[matches[:, 0]],
                         keypoints_right[matches[:, 1]]),
                        FundamentalMatrixTransform, min_samples=8,
                        residual_threshold=1, max_trials=5000)
Example #51
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
import numpy as np


test = np.array([2,6,4,8,9])

descriptor_extractor = ORB(n_keypoints=200)

descriptor_extractor.detect_and_extract(test)
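# Note: ORB.detect_and_extract expects a 2D grayscale image, so passing a 1D
# array like `test` above raises an error; this snippet fails as written.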
Example #52
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt


img1 = rgb2gray(data.coins())
img2 = tf.rotate(img1, 180)
tform = tf.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                           translation=(0, -200))
img3 = tf.warp(img1, tform)

descriptor_extractor = ORB(n_keypoints=200)

descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img3)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
Example #53
from scipy import misc
from skimage import transform as tf
from skimage.feature import (match_descriptors, ORB, plot_matches)

schroedinger = misc.imread('Schroedinger.jpg')
# Transform the image using the skimage.transform library
# "rotate" does what you might expect
schroedinger_rotate = tf.rotate(schroedinger, 180)
# This sets up a transformation that changes the image's scale, rotates it,
# and moves it. "warp" then applies that transformation to the image
tform = tf.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                           translation=(0, -200))
schroedinger_warped = tf.warp(schroedinger, tform)

# ORB is an algorithm that detects good features in an image and then
# describes them in a compact way. The descriptions can then be matched
# across multiple images.
descriptor_extractor = ORB(n_keypoints=200)

# Apply the ORB algorithm to our images
descriptor_extractor.detect_and_extract(schroedinger)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(schroedinger_rotate)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(schroedinger_warped)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors

# See which descriptors match across the images