Example #1
def sc_ringkeys_auc():
    struct_filename = '/media/admini/LENOVO/dataset/kitti/lidar_odometry/birdview_dataset/tmp/struct_file_05_ds.txt'
    images_info = make_images_info(struct_filename=struct_filename)
    print(len(images_info))

    positions = np.array(
        [image_info['position'] for image_info in images_info])
    print(positions.shape)

    data_file = '/home/admini/yanhao/large-scale-pointcloud-matching/pnv-kitti05.npy'
    descriptors = np.load(data_file)
    print(descriptors.shape)

    descriptors = torch.Tensor(descriptors)
    positions = torch.Tensor(positions)
    # pairwise squared L2 distances between all descriptors, via broadcasting
    diff = descriptors[..., None] - descriptors.transpose(0, 1)[None, ...]
    score_matrix = (1 - torch.einsum('mdn,mdn->mn', diff, diff)).numpy()

    # two frames form a true match if their positions lie within the search radius
    diff = positions[..., None] - positions.transpose(0, 1)[None, ...]
    label_matrix = (torch.einsum('mdn,mdn->mn', diff, diff) <
                    (args.positive_search_radius**2)).numpy()

    print('AUC:',
          roc_auc_score(label_matrix.reshape(-1), score_matrix.reshape(-1)))
    pass
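The broadcast-and-einsum pattern above is a one-shot way to build the full N x N matrix of squared L2 distances. A minimal, self-contained sketch (synthetic tensors, no dataset files needed) confirming it matches torch.cdist:

import torch

# synthetic stand-in for the loaded descriptors: N vectors of dimension D
N, D = 5, 8
desc = torch.randn(N, D)

# (N, D, 1) - (1, D, N) broadcasts to (N, D, N); contracting d*d over D
# yields the squared L2 distance between every pair of rows
diff = desc[..., None] - desc.transpose(0, 1)[None, ...]
sq_dist = torch.einsum('mdn,mdn->mn', diff, diff)

# the same quantity via the built-in pairwise-distance helper
reference = torch.cdist(desc, desc) ** 2

print(torch.allclose(sq_dist, reference, atol=1e-5))  # True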
Example #2
def top_k_m2dp():
    k = 25
    positive_radius = 3
    struct_filename = '/media/admini/LENOVO/dataset/kitti/lidar_odometry/birdview_dataset/tmp/struct_file_one_scan_02.txt'
    images_info = make_images_info(struct_filename=struct_filename)
    print(len(images_info))

    positions = np.array(
        [image_info['position'] for image_info in images_info])
    print(positions.shape)

    data_file = '/home/admini/yanhao/large-scale-pointcloud-matching/m2dp-kitti02.mat'
    descriptors = scio.loadmat(data_file)['descriptors'][:-1]
    print(descriptors.shape)

    # faiss requires C-contiguous float32 input
    descriptors = np.asarray(descriptors, order='C').astype('float32')

    database_descriptors, query_descriptors, database_positions, query_positions = \
        train_test_split(descriptors, positions, test_size=0.4, random_state=10)

    index = faiss.IndexFlatL2(database_descriptors.shape[-1])
    index.add(database_descriptors)
    topk_score_overall = np.zeros(k)
    for descriptor, position in zip(query_descriptors, query_positions):
        distances, indices = index.search(descriptor.reshape(1, -1), k)
        candidate_positions = database_positions[indices[0]]
        diff = candidate_positions - position
        # a candidate is correct if it lies within positive_radius of the query
        is_true_result = (diff * diff).sum(axis=1) < positive_radius**2
        # entry i is True once any of the top-(i+1) candidates is correct
        topk_score = is_true_result.cumsum() > 0
        topk_score_overall += topk_score
    topk_score_overall /= len(query_descriptors)
    print(topk_score_overall)
    print(database_descriptors.shape)
    print(query_descriptors.shape)
    print(database_positions.shape)
    print(query_positions.shape)

    pass
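The `is_true_result.cumsum() > 0` line carries the whole top-k metric: entry i turns True as soon as any of the first i+1 candidates is a true positive, so averaging these vectors over all queries yields recall@1 through recall@k. A toy sketch of just that step (hand-picked booleans, no faiss index needed):

import numpy as np

# one query's ranked candidates: is each within the positive radius?
is_true_result = np.array([False, False, True, False, True])

# cumsum > 0 means "a correct candidate appeared within the top i+1"
topk_hit = is_true_result.cumsum() > 0
print(topk_hit)  # [False False  True  True  True]

# summed over queries and divided by the query count, this vector
# becomes recall@1 .. recall@k, exactly as in top_k_m2dp() above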
Example #3
def main():
    images_info_validate = make_images_info(struct_filename=os.path.join(
        args.dataset_dir, 'struct_file_' + args.sequence_validate + '.txt'))

    validate_images_dir = os.path.join(args.dataset_dir,
                                       args.sequence_validate)

    validate_dataset = SuperglueDataset(
        images_info=images_info_validate,
        images_dir=validate_images_dir,
        positive_search_radius=args.positive_search_radius,
        meters_per_pixel=args.meters_per_pixel)
    validate_data_loader = DataLoader(validate_dataset,
                                      batch_size=1,
                                      shuffle=True)

    # SIFT alternative (created but unused; the ORB detector is validated below)
    sift = cv2.SIFT_create(nfeatures=191,
                           contrastThreshold=0.002,
                           edgeThreshold=15,
                           sigma=1.2)
    orb = cv2.ORB_create(nfeatures=args.number_of_features)
    validate_detector(orb, validate_data_loader)

    pass
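`validate_detector` is defined elsewhere in this project, so only the detector setup is visible here. A minimal sketch of what running an OpenCV detector on a bird's-eye-view image looks like (synthetic image; the nfeatures value is an arbitrary stand-in):

import cv2
import numpy as np

# synthetic 8-bit single-channel image standing in for a bird's-eye view
img = (np.random.rand(400, 400) * 255).astype(np.uint8)

orb = cv2.ORB_create(nfeatures=200)
keypoints, descriptors = orb.detectAndCompute(img, None)

# descriptors is None when no keypoints are found
print(len(keypoints), None if descriptors is None else descriptors.shape)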
Example #4
def verify():
    """
    Verify that the keypoints from SuperPoint+SuperGlue are correctly
    labelled by the ground-truth relative pose.
    """
    images_dir = os.path.join(args.dataset_dir, args.sequence)
    images_info = make_images_info(
        struct_filename=os.path.join(args.dataset_dir, 'struct_file_' +
                                     args.sequence + '.txt'))
    dataset = SuperglueDataset(
        images_info=images_info,
        images_dir=images_dir,
        positive_search_radius=args.positive_search_radius,
        meters_per_pixel=args.meters_per_pixel)
    data_loader = DataLoader(dataset, batch_size=1, shuffle=True)

    saved_model_file = os.path.join(args.saved_model_path,
                                    'superglue-lidar-birdview.pth.tar')

    config = {
        'superpoint': {
            'nms_radius': 4,
            'keypoint_threshold': 0.005,
            'max_keypoints': 200,
        },
        'superglue': {
            'weights': 'outdoor',
            'sinkhorn_iterations': 100,
            'match_threshold': 0.2,
        }
    }

    model = Matching(config)
    model_checkpoint = torch.load(saved_model_file,
                                  map_location=lambda storage, loc: storage)
    model.load_state_dict(model_checkpoint)
    print("Loaded model checkpoint from '{}'.".format(saved_model_file))
    device = torch.device(
        'cuda' if torch.cuda.is_available() and args.use_gpu else 'cpu')
    model.to(device)

    torch.set_grad_enabled(False)

    for target, source, T_target_source in data_loader:
        # iteration += 1
        assert (source.shape == target.shape)
        B, C, H, W = source.shape  # torch image tensors are (B, C, H, W)
        target = target.to(device)
        source = source.to(device)
        pred = model({'image0': target, 'image1': source})
        target_kpts = pred['keypoints0'][0].cpu()
        source_kpts = pred['keypoints1'][0].cpu()
        if len(target_kpts) == 0 or len(source_kpts) == 0:
            continue

        # in SuperGlue/numpy/tensor, coordinates are (i, j), which correspond to (v, u) in PIL Image/OpenCV
        target_kpts_in_meters = target_kpts * args.meters_per_pixel - 50
        source_kpts_in_meters = source_kpts * args.meters_per_pixel - 50
        match_mask_ground_truth = make_ground_truth_matrix(
            target_kpts_in_meters, source_kpts_in_meters, T_target_source[0],
            args.tolerance_in_meters)
        target_image_raw = target[0][0].cpu().numpy()
        source_image_raw = source[0][0].cpu().numpy()
        # replicate to 3 channels and scale up intensities for display
        target_image_raw = np.stack([target_image_raw] * 3, -1) * 30
        source_image_raw = np.stack([source_image_raw] * 3, -1) * 30

        cv2.imshow('target_image_raw', target_image_raw)

        # target_kpts = np.round(target_kpts.numpy()).astype(int)

        T_target_source = T_target_source[0].numpy()
        source_kpts = source_kpts.numpy()

        source_kpts_in_meters = pts_from_pixel_to_meter(
            source_kpts, args.meters_per_pixel)
        print('T_target_source:\n', T_target_source)
        source_kpts_in_meters_in_target_img = [
            (T_target_source[:3, :3] @ np.array(
                [source_kpt[0], source_kpt[1], 0]) +
             T_target_source[:3, 3])[:2]
            for source_kpt in source_kpts_in_meters
        ]
        source_kpts_in_meters_in_target_img = np.array(
            source_kpts_in_meters_in_target_img)

        source_kpts_in_target_img = pts_from_meter_to_pixel(
            source_kpts_in_meters_in_target_img, args.meters_per_pixel)

        source_kpts = np.round(source_kpts).astype(int)
        source_kpts_in_target_img = np.round(source_kpts_in_target_img).astype(
            int)

        target_image_poi = target_image_raw.copy()
        source_image_poi = source_image_raw.copy()
        for (x0, y0), (x1, y1) in zip(source_kpts, source_kpts_in_target_img):
            # c = c.tolist()
            # cv2.line(target_image, (x0, y0), (x0 + 50, y0 + 50),
            #          color=[255,0,0], thickness=1, lineType=cv2.LINE_AA)
            # display line end-points as circles
            cv2.circle(target_image_poi, (x1, y1),
                       2, (0, 255, 0),
                       1,
                       lineType=cv2.LINE_AA)
            cv2.circle(source_image_poi, (x0, y0),
                       2, (255, 0, 0),
                       1,
                       lineType=cv2.LINE_AA)
            # cv2.circle(out, (x1 + margin + W0, y1), 2, c, -1,
            #            lineType=cv2.LINE_AA)

        cv2.imshow('target_image', target_image_poi)
        cv2.imshow('source_image', source_image_poi)
        cv2.waitKey(0)

    torch.set_grad_enabled(True)
    pass
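The list comprehension in verify() applies the ground-truth pose point by point: rotate the (x, y, 0) keypoint and add the translation. The same mapping can be written as a single vectorized operation; a sketch with a hypothetical pose, assuming keypoints are (x, y) in meters with z = 0:

import numpy as np

# hypothetical ground-truth pose: rotation about z plus a translation
theta = np.pi / 6
T_target_source = np.eye(4)
T_target_source[:2, :2] = [[np.cos(theta), -np.sin(theta)],
                           [np.sin(theta),  np.cos(theta)]]
T_target_source[:3, 3] = [1.0, -2.0, 0.0]

# keypoints in meters in the source frame, z assumed to be 0
source_kpts_in_meters = np.array([[0.0, 0.0], [3.0, 4.0], [-1.0, 2.5]])

# vectorized equivalent of the per-point loop:
pts_3d = np.hstack([source_kpts_in_meters,
                    np.zeros((len(source_kpts_in_meters), 1))])
in_target = (T_target_source[:3, :3] @ pts_3d.T).T + T_target_source[:3, 3]
print(in_target[:, :2])  # (x, y) in the target frame, in meters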
Example #5
def visualize_matching_all():
    """
    Visualize the feature-point matching pipeline.
    """
    images_dir = os.path.join(args.dataset_dir, args.sequence)
    images_info = make_images_info(
        struct_filename=os.path.join(args.dataset_dir, 'struct_file_' +
                                     args.sequence + '.txt'))
    dataset = SuperglueDataset(
        images_info=images_info,
        images_dir=images_dir,
        positive_search_radius=args.positive_search_radius,
        meters_per_pixel=args.meters_per_pixel,
        return_filename=True)
    data_loader = DataLoader(dataset, batch_size=1, shuffle=False)

    saved_model_file = os.path.join(args.saved_model_path,
                                    'spsg-lidar-birdview.pth.tar')

    config = {
        'superpoint': {
            'nms_radius': 4,
            'keypoint_threshold': 0.005,
            'max_keypoints': 200,
        },
        'superglue': {
            'weights': 'outdoor',
            'sinkhorn_iterations': 100,
            'match_threshold': 0.1,
        }
    }

    model = Matching(config)
    model_checkpoint = torch.load(saved_model_file,
                                  map_location=lambda storage, loc: storage)
    model.load_state_dict(model_checkpoint)
    print("Loaded model checkpoint from '{}'.".format(saved_model_file))
    device = torch.device(
        'cuda' if torch.cuda.is_available() and args.use_gpu else 'cpu')
    model.to(device)

    torch.set_grad_enabled(False)

    for target, source, T_target_source, target_filename, source_filename in data_loader:
        # iteration += 1
        assert (source.shape == target.shape)
        print(target_filename[0])
        print(source_filename[0])
        B, C, H, W = source.shape  # torch image tensors are (B, C, H, W)
        target = target.to(device)
        source = source.to(device)
        pred = model({'image0': target, 'image1': source})
        target_kpts = pred['keypoints0'][0].cpu()
        source_kpts = pred['keypoints1'][0].cpu()
        if len(target_kpts) == 0 or len(source_kpts) == 0:
            continue

        # in SuperGlue/numpy/tensor, coordinates are (i, j), which correspond to (v, u) in PIL Image/OpenCV
        target_kpts_in_meters = target_kpts * args.meters_per_pixel - 50
        source_kpts_in_meters = source_kpts * args.meters_per_pixel - 50
        match_mask_ground_truth = make_ground_truth_matrix(
            target_kpts_in_meters, source_kpts_in_meters, T_target_source[0],
            args.tolerance_in_meters)
        target_image_raw = target[0][0].cpu().numpy()
        source_image_raw = source[0][0].cpu().numpy()
        # replicate to 3 channels and scale up intensities for display
        target_image_raw = np.stack([target_image_raw] * 3, -1) * 10
        source_image_raw = np.stack([source_image_raw] * 3, -1) * 10

        cv2.imshow('target_image_raw', target_image_raw)
        cv2.imshow('source_image_raw', source_image_raw)

        # target_kpts = np.round(target_kpts.numpy()).astype(int)

        T_target_source = T_target_source[0].numpy()
        source_kpts = source_kpts.numpy()
        target_kpts = target_kpts.numpy()

        source_kpts_in_meters = pts_from_pixel_to_meter(
            source_kpts, args.meters_per_pixel)
        print('T_target_source:\n', T_target_source)
        source_kpts_in_meters_in_target_img = [
            (T_target_source[:3, :3] @ np.array(
                [source_kpt[0], source_kpt[1], 0]) +
             T_target_source[:3, 3])[:2]
            for source_kpt in source_kpts_in_meters
        ]
        source_kpts_in_meters_in_target_img = np.array(
            source_kpts_in_meters_in_target_img)

        source_kpts_in_target_img = pts_from_meter_to_pixel(
            source_kpts_in_meters_in_target_img, args.meters_per_pixel)

        source_kpts = np.round(source_kpts).astype(int)
        source_kpts_in_target_img = np.round(source_kpts_in_target_img).astype(
            int)

        target_image_poi = visualize_poi(target_image_raw.copy(), target_kpts,
                                         (0, 1, 0))
        source_image_poi = visualize_poi(source_image_raw.copy(), source_kpts,
                                         (1, 0, 0))
        # target_image_poi = target_image_raw.copy()
        # source_image_poi = source_image_raw.copy()
        # for (x0, y0), (x1, y1) in zip(source_kpts, target_kpts):
        #     # c = c.tolist()
        #     # cv2.line(target_image, (x0, y0), (x0 + 50, y0 + 50),
        #     #          color=[255,0,0], thickness=1, lineType=cv2.LINE_AA)
        #     # display line end-points as circles
        #     cv2.circle(target_image_poi, (x1, y1), 4, (0, 255, 0), 1, lineType=cv2.LINE_AA)
        #     cv2.circle(source_image_poi, (x0, y0), 4, (255, 0, 0), 1, lineType=cv2.LINE_AA)
        #     # cv2.circle(out, (x1 + margin + W0, y1), 2, c, -1,
        #     #            lineType=cv2.LINE_AA)

        cv2.imshow('target_image_poi', target_image_poi)
        cv2.imshow('source_image_poi', source_image_poi)

        matches = pred['matches0'][0].cpu().numpy()

        valid = matches > -1  # -1 marks an unmatched target keypoint
        target_kpts_matched = target_kpts[valid]
        source_kpts_matched = source_kpts[matches[valid]]

        # Matching visualize
        match_image = visualize_matching(target_image_poi, source_image_poi,
                                         target_kpts_matched,
                                         source_kpts_matched)

        W, H = 480, 460
        h_margin = 10
        v_margin = 10
        window_image = np.ones((2 * H + 2 * v_margin, 2 * W + h_margin, 3))
        window_image[:H, :W] = cv2.resize(target_image_raw, (W, H),
                                          interpolation=cv2.INTER_NEAREST)
        window_image[:H, -W:] = cv2.resize(source_image_raw, (W, H),
                                           interpolation=cv2.INTER_NEAREST)
        window_image[H + v_margin:, :] = cv2.resize(
            match_image, (2 * W + h_margin, H + v_margin),
            interpolation=cv2.INTER_NEAREST)

        cv2.imshow('match_image', match_image)
        cv2.imshow("window_image", window_image)
        cv2.waitKey(0)

        # margin = 10
        # match_image = np.ones((H, 2 * W + margin))
        # match_image = np.stack([match_image] * 3, -1)
        #
        # match_image[:, :W] = target_image_poi
        # match_image[:, W + margin:] = source_image_poi
        #
        #
        # for (x0, y0), (x1, y1) in zip(target_kpts_matched, source_kpts_matched):
        #     cv2.line(match_image, (x0, y0), (x1 + margin + W, y1),
        #              color=[0.9, 0.9, 0], thickness=1, lineType=cv2.LINE_AA)
        #     # display line end-points as circles
        #     # cv2.circle(match_image, (x0, y0), 2, (0, 255, 0), -1, lineType=cv2.LINE_AA)
        #     # cv2.circle(match_image, (x1 + margin + W, y1), 2, (255, 0, 0), -1,
        #     #            lineType=cv2.LINE_AA)

    torch.set_grad_enabled(True)
    pass
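SuperGlue's matches0 output encodes the assignment as one index per target keypoint, with -1 meaning unmatched; the mask-and-gather near the end of the loop above is all it takes to recover matched coordinate pairs. A toy sketch with hand-written indices:

import numpy as np

target_kpts = np.array([[10, 10], [20, 30], [40, 5], [7, 8]])
source_kpts = np.array([[11, 12], [19, 29], [41, 6]])

# matches[i] = index of the source keypoint matched to target keypoint i,
# or -1 if target keypoint i has no match
matches = np.array([2, -1, 0, 1])

valid = matches > -1
target_matched = target_kpts[valid]
source_matched = source_kpts[matches[valid]]

for t, s in zip(target_matched, source_matched):
    print(t, '<->', s)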
Example #6
def spi_vlad_roc_auc():
    images_info_validate = make_images_info(struct_filename=os.path.join(
        args.dataset_dir, 'struct_file_' + args.sequence_validate + '.txt'))

    validate_images_dir = os.path.join(args.dataset_dir,
                                       args.sequence_validate)

    # validate_database_images_info, validate_query_images_info = train_test_split(images_info_validate,
    #                                                                              test_size=0.4, random_state=20)

    base_model = BaseModel(300, 300)
    dim = 256

    # Define model for embedding
    net_vlad = NetVLAD(num_clusters=args.num_clusters,
                       dim=dim,
                       alpha=1.0,
                       outdim=args.final_dim)
    model = EmbedNet(base_model, net_vlad)

    saved_model_file = os.path.join(args.saved_model_path,
                                    'model-to-check-top1.pth.tar')

    model_checkpoint = torch.load(saved_model_file,
                                  map_location=lambda storage, loc: storage)
    model.load_state_dict(model_checkpoint)
    dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(dev)
    print("Loaded model checkpoint from '{}'.".format(saved_model_file))

    validate_dataset = PureDataset(images_info=images_info_validate,
                                   images_dir=validate_images_dir)
    validate_data_loader = DataLoader(validate_dataset, batch_size=1)

    descriptors = []
    positions = []
    torch.set_grad_enabled(False)
    i = 0
    for query, query_info in tqdm(validate_data_loader):
        # print(query.shape)
        netvlad_encoding = model(query.to(dev)).cpu().view(-1)
        # print(netvlad_encoding.shape)
        descriptors.append(netvlad_encoding)
        position = query_info['position'].view(3)
        positions.append(position)
        i = i + 1
        # if i > 100:
        #     break
        # print(netvlad_encoding)
    descriptors = torch.cat(descriptors).view(-1, args.final_dim)
    # print(descriptors.shape)

    N = len(descriptors)
    # pairwise squared L2 distances between NetVLAD descriptors
    diff = descriptors[..., None] - descriptors.transpose(0, 1)[None, ...]
    score_matrix = (1 - torch.einsum('mdn,mdn->mn', diff, diff)).numpy()
    # print(score_matrix)

    positions = torch.cat(positions).view(-1, 3)
    # positive pair if positions lie within the positive search radius
    diff = positions[..., None] - positions.transpose(0, 1)[None, ...]
    label_matrix = (torch.einsum('mdn,mdn->mn', diff, diff) <
                    (args.positive_search_radius**2)).numpy()
    # print(label_matrix.reshape(-1))
    # print(score_matrix.reshape(-1))

    print('AUC:',
          roc_auc_score(label_matrix.reshape(-1), score_matrix.reshape(-1)))

    # print('F1-score:', f1_score(label_matrix.reshape(-1), score_matrix.reshape(-1)))

    precision, recall, thresholds = precision_recall_curve(
        label_matrix.reshape(-1), score_matrix.reshape(-1))
    print(recall, precision)
    plt.plot(recall, precision, lw=1)

    # plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label="Luck")
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel("Recall Rate")
    plt.ylabel("Precision Rate")
    plt.show()

    descriptors = descriptors.numpy()
    np.save('spi-vlad-kitti' + args.sequence_validate + '.npy', descriptors)

    torch.set_grad_enabled(True)
    # label_matrix =
    pass
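For reference, precision_recall_curve and roc_auc_score need nothing beyond flattened binary labels and similarity scores, so the evaluation tail of the function can be exercised on synthetic inputs (no model or dataset required); a minimal sketch:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, roc_auc_score

rng = np.random.default_rng(0)
labels = rng.integers(0, 2, size=1000)             # binary ground truth
scores = labels + rng.normal(0.0, 0.8, size=1000)  # noisy similarity scores

print('AUC:', roc_auc_score(labels, scores))

precision, recall, thresholds = precision_recall_curve(labels, scores)
plt.plot(recall, precision, lw=1)
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.show()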
Example #7
def main():
    images_info_train = make_images_info(struct_filename=os.path.join(
        args.dataset_dir, 'struct_file_' + args.sequence_train + '.txt'))
    images_info_validate = make_images_info(struct_filename=os.path.join(
        args.dataset_dir, 'struct_file_' + args.sequence_validate + '.txt'))

    train_images_dir = os.path.join(args.dataset_dir, args.sequence_train)
    validate_images_dir = os.path.join(args.dataset_dir,
                                       args.sequence_validate)

    # train_database_images_info, train_query_images_info = train_test_split(images_info_train, test_size=0.1,
    #                                                                        random_state=23)
    # validate_database_images_info, validate_query_images_info = train_test_split(images_info_validate, test_size=0.2,
    #                                                                              random_state=23)

    train_dataset = SuperglueDataset(
        images_info=images_info_train,
        images_dir=train_images_dir,
        positive_search_radius=args.positive_search_radius,
        meters_per_pixel=args.meters_per_pixel)
    validate_dataset = SuperglueDataset(
        images_info=images_info_validate,
        images_dir=validate_images_dir,
        positive_search_radius=args.positive_search_radius,
        meters_per_pixel=args.meters_per_pixel)
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True)
    validate_data_loader = DataLoader(validate_dataset,
                                      batch_size=1,
                                      shuffle=True)

    saved_model_file = os.path.join(args.saved_model_path,
                                    'spsg-rotation-invariant.pth.tar')
    # saved_model_file = os.path.join(args.saved_model_path, 'spsg-juxin.pth.tar')

    config = {
        'superpoint': {
            'nms_radius': 4,
            'keypoint_threshold': 0.005,
            'max_keypoints': 400,
        },
        'superglue': {
            'weights': 'outdoor',
            'sinkhorn_iterations': 100,
            'match_threshold': 0.2,
        }
    }
    device = torch.device(
        'cuda' if torch.cuda.is_available() and args.use_gpu else 'cpu')
    model = Matching(config).to(device)

    if args.load_checkpoints:
        model_checkpoint = torch.load(
            saved_model_file, map_location=lambda storage, loc: storage)
        model.load_state_dict(model_checkpoint)
        print("Loaded model checkpoint from '{}'.".format(saved_model_file))

    optimizer = optim.Adam(list(model.superglue.parameters()) +
                           list(model.superpoint.convDa.parameters()) +
                           list(model.superpoint.convDb.parameters()),
                           lr=args.learning_rate)
    viz = visdom.Visdom()
    train_loss = viz.scatter(X=np.asarray([[0, 0]]))
    train_precision = viz.scatter(X=np.asarray([[0, 0]]))
    train_recall = viz.scatter(X=np.asarray([[0, 0]]))
    train_true_pairs = viz.scatter(X=np.asarray([[0, 0]]))
    viz_train = {
        'viz': viz,
        'train_loss': train_loss,
        'train_precision': train_precision,
        'train_recall': train_recall,
        'train_true_pairs': train_true_pairs,
    }

    viz_validate = {
        'viz': viz,
        'validate_precision': train_precision,
        'validate_recall': train_recall,
        'validate_true_pairs': train_true_pairs,
    }

    for epoch in range(1, args.epochs + 1):
        # run validation before every training epoch
        validate(epoch,
                 model,
                 validate_data_loader,
                 viz_validate=viz_validate)
        # validate_sift(sift_matcher, validate_data_loader)
        train(epoch, model, optimizer, train_data_loader, viz_train=viz_train)
        torch.save(model.state_dict(), saved_model_file)
        print("Saved model to '{}'.".format(saved_model_file))

    pass
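Note that the optimizer above steps only SuperGlue's parameters plus SuperPoint's descriptor head (convDa/convDb), leaving the keypoint detector frozen. The same selective-training pattern in isolation, on a hypothetical two-part module:

import torch
import torch.nn as nn
import torch.optim as optim

class TwoPart(nn.Module):
    # hypothetical model: a backbone we want frozen, a head we train
    def __init__(self):
        super().__init__()
        self.backbone = nn.Linear(8, 8)
        self.head = nn.Linear(8, 2)

    def forward(self, x):
        return self.head(self.backbone(x))

model = TwoPart()

# only the head's parameters are handed to the optimizer; the backbone
# still receives gradients but is never updated by optimizer.step()
optimizer = optim.Adam(list(model.head.parameters()), lr=1e-3)

loss = model(torch.randn(4, 8)).sum()
loss.backward()
optimizer.step()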
Example #8
            self.images_info[pos_index]['orientation'])
        T_query_positive = np.linalg.inv(
            T_w_query) @ T_w_positive @ T_rot_wrt_z(theta_in_deg / 180 * np.pi)

        # convert translation to pixels
        # T_query_positive[:3,3] = T_query_positive[:3,3] / self.meters_per_pixel
        if self.return_filename:
            return query, positive, T_query_positive, \
                   os.path.join(self.images_dir, self.images_info[index]['image_file']), \
                   os.path.join(self.images_dir, self.images_info[pos_index]['image_file'])
        else:
            return query, positive, T_query_positive


if __name__ == '__main__':
    # dataset_dir = '/media/admini/My_data/0904/dataset'
    dataset_dir = '/media/admini/lavie/dataset/birdview_dataset'
    sequence = '00'
    positive_search_radius = 8
    images_info = make_images_info(
        struct_filename=os.path.join(dataset_dir, 'struct_file_' + sequence +
                                     '.txt'))
    images_dir = os.path.join(dataset_dir, sequence)
    dataset = SuperglueDataset(images_info=images_info,
                               images_dir=images_dir,
                               positive_search_radius=positive_search_radius)
    data_loader = DataLoader(dataset, batch_size=2, shuffle=True)
    # with tqdm(data_loader) as tq:
    for item in data_loader:
        print(item)
    print(len(dataset))
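`T_rot_wrt_z`, used when composing T_query_positive in the `__getitem__` fragment above, is presumably a homogeneous rotation about the z-axis. A plausible sketch of such a helper, assuming the 4x4 convention of the surrounding code (the real implementation may differ):

import numpy as np

def T_rot_wrt_z(theta):
    # guessed helper: 4x4 homogeneous transform rotating theta radians about z
    c, s = np.cos(theta), np.sin(theta)
    T = np.eye(4)
    T[:2, :2] = [[c, -s],
                 [s,  c]]
    return T

# composing a relative pose as in the dataset fragment:
# T_query_positive = inv(T_w_query) @ T_w_positive @ T_rot_wrt_z(angle)
print(T_rot_wrt_z(np.pi / 2).round(3))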