Example #1
    def run(self):
        self.results = np.array([])

        # Let subscriber know the total
        self.startProgress.emit(config.NISSL_COUNT)

        # For updating the progress in the UI
        self.total = 1

        # Compute SIFT for region before starting
        self.kp1, self.des1 = feature.extract_sift(self.im)

        # Set multithreading if capable
        if config.MULTITHREAD:
            pool = ThreadPool(processes=cv2.getNumberOfCPUs())
        else:
            pool = ThreadPool(processes=1)

        # Total number of images to compare against
        nissl = range(1, config.NISSL_COUNT + 1)

        # Begin mapping process
        pool.map(self.process_level, nissl)
        pool.close()
        pool.join()

        # Tell UI the results
        self.endProgress.emit(self.results)
Example #2
def match(im):
    # Reference atlas plate to match the region against
    atlas = feature.im_read('scripts_testing/plate-34.jpg')

    # `affine` and `sift` are module-level: a flag selecting the affine-invariant
    # extractor versus a plain cv2 SIFT instance
    if affine:
        kp, des = feature.extract_sift(im)
        kp2, des2 = feature.extract_sift(atlas)
    else:
        kp, des = sift.detectAndCompute(im, None)
        kp2, des2 = sift.detectAndCompute(atlas, None)

    # Match descriptors between the region and the atlas plate
    match = feature.match(im, kp, des, atlas, kp2, des2)
    print("Matches: ", len(match.matches))
    print("Inliers: ", match.inlier_count)

    # Show the match visualizations
    plt.figure(figsize=(10, 10))
    plt.imshow(match.result)
    plt.figure(figsize=(10, 10))
    plt.imshow(match.result2)

    # Return the estimated homography
    return match.H
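
feature.match itself is not shown on this page. Judging by the fields the example reads back (matches, inlier_count, result, H), it performs descriptor matching followed by a robust homography fit. Below is a minimal sketch of that general technique with plain OpenCV, assuming a brute-force ratio test and cv2.findHomography with RANSAC; the function name, signature and return values are illustrative, not the project's actual API.

import cv2
import numpy as np

def match_sketch(im1, kp1, des1, im2, kp2, des2, ratio=0.75):
    # Lowe ratio test over brute-force knn matches (illustrative, not the project's code)
    bf = cv2.BFMatcher(cv2.NORM_L2)
    pairs = bf.knnMatch(des1, des2, k=2)
    good = [p[0] for p in pairs if len(p) == 2 and p[0].distance < ratio * p[1].distance]

    if len(good) < 4:
        # Not enough correspondences to estimate a homography
        return None, good, 0, None

    # Robust homography fit; the mask marks RANSAC inliers
    src = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    inliers = int(mask.sum()) if mask is not None else 0

    # Side-by-side visualization of the surviving matches
    result = cv2.drawMatches(im1, kp1, im2, kp2, good, None,
                             matchesMask=mask.ravel().tolist() if mask is not None else None)
    return H, good, inliers, result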
Example #3
def draw_kp(im):
    # Affine-invariant extraction versus plain SIFT detection
    if affine:
        kp, des = feature.extract_sift(im)
    else:
        kp = sift.detect(im)

    print("Keypoints: ", len(kp))

    # Draw rich keypoints (scale and orientation) on top of the image
    im_kp = cv2.drawKeypoints(im, kp, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    plt.figure(figsize=(10, 10))
    plt.imshow(im_kp)
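
Every example on this page calls feature.extract_sift(im) and receives a (keypoints, descriptors) pair; the `affine` branches above suggest it is an affine-invariant (ASIFT-style) variant. The helper itself is not shown here. A minimal sketch of the plain, non-affine equivalent with OpenCV, assuming a colour or grayscale input image:

import cv2

def extract_sift_sketch(im):
    # Work on a grayscale copy if the image has colour channels
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) if im.ndim == 3 else im
    # cv2.SIFT_create() in OpenCV >= 4.4; cv2.xfeatures2d.SIFT_create() in older builds
    sift = cv2.SIFT_create()
    kp, des = sift.detectAndCompute(gray, None)
    return kp, des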
Example #4
    def set_im_region(self, im_region):
        logger.info("Image Region Shape: {0}", im_region.shape)
        h, w, c = im_region.shape  # NumPy shape order is (rows, cols, channels)

        # Check if the user would like to see the region keypoints
        if config.UI_SHOW_KP:
            kp, des = feature.extract_sift(im_region)
            import cv2
            im_region = cv2.drawKeypoints(
                im_region,
                kp,
                None,
                flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            logger.info("Keypoints drawn for region")

        # Check if we should save the region for testing
        if config.UI_SAVE_REGION:
            feature.im_write('region.png', im_region)

        # Reduce the size of the region to a reasonable range.
        # scipy.misc.imresize takes an integer as a percentage of the original size;
        # note it was removed in SciPy 1.3, so this needs an older SciPy with Pillow installed.
        import scipy.misc as misc
        reduction_percent = int(config.RESIZE_WIDTH / w * 100)
        im_region = misc.imresize(im_region, reduction_percent)
        logger.info("Resized region to {0}", im_region.shape)

        # Save the original selection for actual matching
        self.region = im_region

        self.canvas_region.imshow(im_region)
        self.canvas_input.clear_corners()
        self.btn_find_match.setEnabled(True)
        self.slider_ratio_test.setEnabled(True)

        if config.UI_WARP:
            self.btn_warp.setEnabled(True)
            self.btn_reset.setEnabled(True)
            self.slider_warp_points.setEnabled(True)
            self.slider_warp_disp.setEnabled(True)

        if config.UI_ANGLE:
            self.slider_angle.setEnabled(True)
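
The resize step above relies on scipy.misc.imresize, which was removed in SciPy 1.3. A rough equivalent with cv2.resize, assuming the intent is to bring the region down to config.RESIZE_WIDTH pixels wide while keeping the aspect ratio (a sketch, not the project's code):

import cv2

def resize_to_width(im_region, target_width):
    # cv2.resize expects the target size as (width, height)
    h, w = im_region.shape[:2]
    scale = target_width / float(w)
    new_size = (target_width, max(1, int(round(h * scale))))
    return cv2.resize(im_region, new_size, interpolation=cv2.INTER_AREA)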
Example #5
# (`s_data` and `s_im` are defined earlier in the script, not shown in this excerpt)
s_label = s_data['labels']

pw_data = np.load('atlas_pw.npz')
pw_im = pw_data['images']
pw_label = pw_data['labels']

# Prepare the similarity matrix: one row per s image, one column per pw atlas image
similarity_matrix = np.zeros((len(s_im), len(pw_im)))
print("Similarity Matrix Shape", similarity_matrix.shape)

# Precompute SIFT keypoints and descriptors for the atlas images
kp = []
des = []
for j in range(len(pw_im)):
    im2 = pw_im[j]
    kp2, des2 = feature.extract_sift(im2)
    kp.append(kp2)
    des.append(des2)

# Begin matching
start_time = timer()
for i in range(similarity_matrix.shape[0]):
    print("Matching S", s_label[i])
    im1 = s_im[i]
    kp1, des1 = feature.extract_sift(im1)

    for j in range(similarity_matrix.shape[1]):
        im2 = pw_im[j]
        kp2 = kp[j]
        des2 = des[j]
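
        # The example is truncated here. Judging by the interface used in
        # Example #2 (feature.match returning an inlier_count), the inner loop
        # presumably matches each pair and records a similarity score.
        # The two lines below are an assumed continuation, not the original code:
        m = feature.match(im1, kp1, des1, im2, kp2, des2)
        similarity_matrix[i, j] = m.inlier_count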