Example #1
    def check_val_rep(num_points=25):
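        # Measure single-scale keypoint repeatability on the validation set and
        # return the mean score over all image pairs.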
        total_rep_avg = []
        num_examples = dataset_class.get_num_patches(True)
        fetches = [src_score_maps_activation, dst_score_maps_activation]

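        # Iterate over every validation pair: source/destination images plus the homographies relating them.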
        for _ in tqdm(range(num_examples)):
            images_batch, images_dst_batch, h_src_2_dst_batch, h_dst_2_src_batch = sess.run(next_val_batch)

            feed_dict = {
                input_network_src: images_batch,
                input_network_dst: images_dst_batch,
                h_src_2_dst: h_src_2_dst_batch,
                h_dst_2_src: h_dst_2_src_batch,
                phase_train: False,
                dimension_image: np.array(images_batch.shape[:3], dtype=np.int32),
                dimension_image_dst: np.array(images_dst_batch.shape[:3], dtype=np.int32),
            }

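            # Run the detector to obtain the score maps of both images.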
            src_scores, dst_scores = sess.run(fetches, feed_dict=feed_dict)

            # Apply NMS
            src_scores = rep_tools.apply_nms(src_scores[0, :, :, 0], args.nms_size)
            dst_scores = rep_tools.apply_nms(dst_scores[0, :, :, 0], args.nms_size)

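            # Build the dst->src homography and compute masks of the region visible in both views.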
            hom = geo_tools.prepare_homography(h_dst_2_src_batch[0])
            mask_src, mask_dst = geo_tools.create_common_region_masks(hom, images_batch[0].shape, images_dst_batch[0].shape)

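            # Discard detections that fall outside the shared region.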
            src_scores = np.multiply(src_scores, mask_src)
            dst_scores = np.multiply(dst_scores, mask_dst)

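            # Keep the top num_points keypoints from each masked score map.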
            src_pts = geo_tools.get_point_coordinates(src_scores, num_points=num_points, order_coord='xysr')
            dst_pts = geo_tools.get_point_coordinates(dst_scores, num_points=num_points, order_coord='xysr')

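            # Project the destination keypoints into the source image frame.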
            dst_to_src_pts = geo_tools.apply_homography_to_points(dst_pts, hom)

            repeatability_results = rep_tools.compute_repeatability(src_pts, dst_to_src_pts)

            total_rep_avg.append(repeatability_results['rep_single_scale'])
        return np.asarray(total_rep_avg).mean()
Example #2
    def extract_features(image):
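        # Score the image at multiple scales (Gaussian pyramid plus optional upsampled
        # levels), gather multi-scale keypoints and compute a descriptor for each one.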

        pyramid = pyramid_gaussian(image,
                                   max_layer=args.pyramid_levels,
                                   downscale=args.scale_factor_levels)

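        # Score maps for the downscaled pyramid levels.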
        score_maps = {}
        for (j, resized) in enumerate(pyramid):
            im = resized.reshape(1, resized.shape[0], resized.shape[1], 1)

            feed_dict = {
                input_network: im,
                phase_train: False,
                dimension_image: np.array([1, im.shape[1], im.shape[2]], dtype=np.int32),
            }

            im_scores = sess.run(maps, feed_dict=feed_dict)

            im_scores = geo_tools.remove_borders(im_scores, borders=args.border_size)
            score_maps['map_' + str(j + 1 + args.upsampled_levels)] = im_scores[0, :, :, 0]

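        # Optionally score upsampled versions of the image as extra, finer levels.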
        if args.upsampled_levels:
            for j in range(args.upsampled_levels):
                factor = args.scale_factor_levels**(args.upsampled_levels - j)
                up_image = cv2.resize(image, (0, 0), fx=factor, fy=factor)

                im = np.reshape(up_image,
                                (1, up_image.shape[0], up_image.shape[1], 1))

                feed_dict = {
                    input_network: im,
                    phase_train: False,
                    dimension_image: np.array([1, im.shape[1], im.shape[2]], dtype=np.int32),
                }

                im_scores = sess.run(maps, feed_dict=feed_dict)

                im_scores = geo_tools.remove_borders(im_scores, borders=args.border_size)
                score_maps['map_' + str(j + 1)] = im_scores[0, :, :, 0]

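        # Collect keypoints level by level, mapping their coordinates back to the
        # resolution of the original image.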
        im_pts = []
        for idx_level in range(levels):

            scale_value = args.scale_factor_levels ** (idx_level - args.upsampled_levels)
            scale_factor = 1. / scale_value

            h_scale = np.asarray([[scale_factor, 0., 0.],
                                  [0., scale_factor, 0.],
                                  [0., 0., 1.]])
            h_scale_inv = np.linalg.inv(h_scale)
            h_scale_inv = h_scale_inv / h_scale_inv[2, 2]

            num_points_level = point_level[idx_level]
            if idx_level > 0:
                res_points = int(np.asarray(point_level[:idx_level + 1]).sum() - len(im_pts))
                num_points_level = res_points

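            # Non-maximum suppression, then pick the strongest points at this level.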
            im_scores = rep_tools.apply_nms(
                score_maps['map_' + str(idx_level + 1)], args.nms_size)
            im_pts_tmp = geo_tools.get_point_coordinates(
                im_scores, num_points=num_points_level, order_coord='xysr')

            im_pts_tmp = geo_tools.apply_homography_to_points(
                im_pts_tmp, h_scale_inv)

            if not idx_level:
                im_pts = im_pts_tmp
            else:
                im_pts = np.concatenate((im_pts, im_pts_tmp), axis=0)

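        # Reorder coordinates if requested and keep only the strongest num_points overall.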
        if args.order_coord == 'yxsr':
            im_pts = np.asarray([[p[1], p[0], p[2], p[3]] for p in im_pts])

        im_pts = im_pts[(-1 * im_pts[:, 3]).argsort()]
        im_pts = im_pts[:args.num_points]

        # Extract descriptor from features
        descriptors = []
        im = image.reshape(1, image.shape[0], image.shape[1], 1)
        for idx_desc_batch in range(int(len(im_pts) / 250 + 1)):
            points_batch = im_pts[idx_desc_batch * 250:(idx_desc_batch + 1) * 250]

            if not len(points_batch):
                break

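            # Crop patches around the current batch of keypoints with the graph's patch-extraction op.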
            feed_dict = {
                input_network: im,
                phase_train: False,
                kpts_coord: points_batch[:, :2],
                kpts_scale: args.scale_factor * points_batch[:, 2],
                kpts_batch: np.zeros(len(points_batch)),
                dimension_image: np.array([1, im.shape[1], im.shape[2]], dtype=np.int32),
            }

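            # Extract 32x32 patches and describe them with the PyTorch descriptor network.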
            patch_batch = sess.run(input_patches, feed_dict=feed_dict)
            patch_batch = np.reshape(patch_batch, (patch_batch.shape[0], 1, 32, 32))
            data_a = torch.from_numpy(patch_batch).cuda()
            with torch.no_grad():
                out_a = model(data_a)
            desc_batch = out_a.data.cpu().numpy().reshape(-1, 128)
            if idx_desc_batch == 0:
                descriptors = desc_batch
            else:
                descriptors = np.concatenate([descriptors, desc_batch], axis=0)

        return im_pts, descriptors