# Example #1
def main(argv=None):  # pylint: disable=unused-argument
    """Program entrance.

    Detects SIFT keypoints on two images, extracts deep (GeoDesc) descriptors
    for them with a frozen TensorFlow model, matches the deep descriptors
    (and optionally plain SIFT descriptors for comparison), and displays the
    match visualization in an OpenCV window until a key is pressed.
    """
    # Create SIFT detector.
    sift_wrapper = SiftWrapper(n_sample=FLAGS.max_kpt_num)
    sift_wrapper.half_sigma = FLAGS.half_sigma
    sift_wrapper.pyr_off = FLAGS.pyr_off
    sift_wrapper.ori_off = FLAGS.ori_off
    sift_wrapper.create()
    # Create deep feature extractor.
    graph = load_frozen_model(FLAGS.model_path, print_nodes=False)
    # Use the session as a context manager so it is always closed, even if
    # feature extraction or matching raises (the original leaked it on error).
    with tf.Session(graph=graph) as sess:
        # Extract deep features from both images.
        deep_feat1, cv_kpts1, img1 = extract_deep_features(sift_wrapper,
                                                           sess,
                                                           FLAGS.img1_path,
                                                           qtz=False)
        deep_feat2, cv_kpts2, img2 = extract_deep_features(sift_wrapper,
                                                           sess,
                                                           FLAGS.img2_path,
                                                           qtz=False)
        # Match features by OpenCV brute-force matcher (CPU).
        matcher_wrapper = MatcherWrapper()
        # The ratio criterion is set to 0.89 for GeoDesc as described in the paper.
        deep_good_matches, deep_mask = matcher_wrapper.get_matches(
            deep_feat1,
            deep_feat2,
            cv_kpts1,
            cv_kpts2,
            ratio=0.89,
            cross_check=True,
            info='deep')

        deep_display = matcher_wrapper.draw_matches(img1, cv_kpts1, img2, cv_kpts2,
                                                    deep_good_matches, deep_mask)
        # Compare with SIFT.
        if FLAGS.cf_sift:
            sift_feat1 = sift_wrapper.compute(img1, cv_kpts1)
            sift_feat2 = sift_wrapper.compute(img2, cv_kpts2)
            # Standard 0.80 ratio test for raw SIFT descriptors.
            sift_good_matches, sift_mask = matcher_wrapper.get_matches(
                sift_feat1,
                sift_feat2,
                cv_kpts1,
                cv_kpts2,
                ratio=0.80,
                cross_check=True,
                info='sift')
            sift_display = matcher_wrapper.draw_matches(img1, cv_kpts1, img2,
                                                        cv_kpts2,
                                                        sift_good_matches,
                                                        sift_mask)
            # Stack SIFT result on top of the deep-feature result.
            display = np.concatenate((sift_display, deep_display), axis=0)
        else:
            display = deep_display

        cv2.imshow('display', display)
        cv2.waitKey()
# Example #2
    def get_data(self, seq_idx, dense_desc):
        """Load one image sequence: images, keypoints, patches and homographies.

        Args:
            seq_idx: Index into ``self.seqs`` selecting the sequence folder.
            dense_desc: If True, skip patch extraction (``patches`` is None).

        Returns:
            Tuple of (seq_name, hseq_data) where ``hseq_data`` is an
            ``HSeqData`` whose lists are filled with one entry per image
            (images 1..6 of the sequence).
        """
        # Fixed seed so any randomized keypoint sampling is reproducible.
        random.seed(0)
        if self.suffix is None:
            # No precomputed keypoint files: detect SIFT keypoints on the fly.
            sift_wrapper = SiftWrapper(n_feature=self.sample_num, peak_thld=0.04)
            sift_wrapper.ori_off = self.upright
            sift_wrapper.create()

        hseq_data = HSeqData()
        seq_name = self.seqs[seq_idx]

        for img_idx in range(1, 7):
            # Read precomputed image-level features.
            img_feat = np.load(os.path.join(seq_name, '%d_img_feat.npy' % img_idx))
            # Read image and convert to grayscale for SIFT.
            img = cv2.imread(os.path.join(seq_name, '%d.ppm' % img_idx))
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img_size = img.shape

            if self.suffix is None:
                npy_kpts, cv_kpts = sift_wrapper.detect(gray)
                if not dense_desc:
                    sift_wrapper.build_pyramid(gray)
                    patches = sift_wrapper.get_patches(cv_kpts)
                else:
                    patches = None
            else:
                # Load precomputed keypoints/patches from a pickle file.
                with open(os.path.join(seq_name, ('%d' + self.suffix + '.pkl') % img_idx), 'rb') as handle:
                    data_dict = pickle.load(handle, encoding='latin1')
                npy_kpts = data_dict['npy_kpts']
                if not dense_desc:
                    patches = data_dict['patches']
                else:
                    patches = None

            kpt_num = npy_kpts.shape[0]
            # Compose per-keypoint 2x3 affine crop matrices (flattened to 6),
            # expressed in normalized [-1, 1] image coordinates.
            # NOTE(review): columns of npy_kpts appear to be
            # (x, y, scale, orientation) — inferred from usage; confirm
            # against SiftWrapper.detect.
            crop_mat = np.zeros((kpt_num, 6))
            # Rely on the SIFT orientation estimation.
            m_cos = np.cos(-npy_kpts[:, 3]) * self.patch_scale * npy_kpts[:, 2]
            m_sin = np.sin(-npy_kpts[:, 3]) * self.patch_scale * npy_kpts[:, 2]
            crop_mat[:, 0] = m_cos / float(img_size[1])
            crop_mat[:, 1] = m_sin / float(img_size[1])
            crop_mat[:, 2] = (npy_kpts[:, 0] - img_size[1] / 2.) / (img_size[1] / 2.)
            crop_mat[:, 3] = -m_sin / float(img_size[0])
            crop_mat[:, 4] = m_cos / float(img_size[0])
            crop_mat[:, 5] = (npy_kpts[:, 1] - img_size[0] / 2.) / (img_size[0] / 2.)
            # Keep only the (x, y) coordinates from here on.
            npy_kpts = npy_kpts[:, 0:2]

            # Read the ground-truth homography relating image 1 to image_idx.
            # Use a context manager so the file handle is closed (the original
            # open(...).read() leaked it).
            if img_idx > 1:
                with open(os.path.join(seq_name, 'H_1_%d' % img_idx)) as homo_file:
                    homo_mat = homo_file.read().splitlines()
                homo_mat = np.array([float(i) for i in ' '.join(homo_mat).split()])
                homo_mat = np.reshape(homo_mat, (3, 3))
            else:
                homo_mat = None

            hseq_data.img.append(img)
            hseq_data.kpt_param.append(crop_mat)
            hseq_data.patch.append(patches)
            hseq_data.coord.append(npy_kpts)
            # Fixed: the attribute name was garbled to 'h**o' by the scraper;
            # it must match the homography list on HSeqData.
            hseq_data.homo.append(homo_mat)
            hseq_data.img_feat.append(img_feat)

        return seq_name, hseq_data