def download_models(main_folder):
    """
    This downloads all models used in this project.
    :param main_folder: The folder in which this file is located
    :return unet_folder: returns the path to the UNetNormalGen dir
    """
    # check if the models are already there, if not download them
    unet_folder = os.path.abspath(os.path.join(main_folder, "UNetNormalGen"))
    model_folder = os.path.join(unet_folder, "model")
    if not os.path.exists(model_folder):
        print("Download all models, by running the 'download_models.py'")
        download_all_models()
    return unet_folder
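
# A minimal usage sketch (hypothetical, not part of the original snippet):
# assumes `os` is imported and `download_all_models` is provided by the
# accompanying download_models.py script.
if __name__ == "__main__":
    main_folder = os.path.dirname(os.path.abspath(__file__))
    unet_folder = download_models(main_folder)
    print("UNetNormalGen directory:", unet_folder)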
Example #2
def main():
    parser = argparse.ArgumentParser(
        description='Patch-NetVLAD-Feature-Extract')
    parser.add_argument(
        '--config_path',
        type=str,
        default=join(PATCHNETVLAD_ROOT_DIR, 'configs/performance.ini'),
        help=
        'File name (with extension) to an ini file that stores most of the configuration data for patch-netvlad'
    )
    parser.add_argument(
        '--dataset_file_path',
        type=str,
        required=True,
        help=
        'Full path (with extension) to a text file that stores the save location and name of all images in the dataset folder'
    )
    parser.add_argument(
        '--dataset_root_dir',
        type=str,
        default='',
        help=
        'If the files in dataset_file_path are relative, use dataset_root_dir as prefix.'
    )
    parser.add_argument('--output_features_dir',
                        type=str,
                        default=join(PATCHNETVLAD_ROOT_DIR, 'output_features'),
                        help='Path to store all patch-netvlad features')
    parser.add_argument('--nocuda',
                        action='store_true',
                        help='If set, use CPU only; otherwise use GPU.')

    opt = parser.parse_args()
    print(opt)

    configfile = opt.config_path
    assert os.path.isfile(configfile)
    config = configparser.ConfigParser()
    config.read(configfile)

    cuda = not opt.nocuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run with --nocuda")

    device = torch.device("cuda" if cuda else "cpu")

    encoder_dim, encoder = get_backend()
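    # get_backend() returns the CNN encoder (VGG-16 in the reference
    # Patch-NetVLAD implementation) together with its output channel dimension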

    if not os.path.isfile(opt.dataset_file_path):
        opt.dataset_file_path = join(PATCHNETVLAD_ROOT_DIR,
                                     'dataset_imagenames',
                                     opt.dataset_file_path)

    dataset = PlaceDataset(None, opt.dataset_file_path, opt.dataset_root_dir,
                           None, config['feature_extract'])

    # must resume to do extraction
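    # e.g. resumePath='patchnetvlad/pretrained_models/mapillary_WPCA' with
    # num_pcs='512' builds '.../mapillary_WPCA512.pth.tar' (illustrative values)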
    resume_ckpt = (config['global_params']['resumePath'] +
                   config['global_params']['num_pcs'] + '.pth.tar')

    # backup: try whether resume_ckpt is relative to PATCHNETVLAD_ROOT_DIR
    if not isfile(resume_ckpt):
        resume_ckpt = join(PATCHNETVLAD_ROOT_DIR, resume_ckpt)
        if not isfile(resume_ckpt):
            from download_models import download_all_models
            download_all_models(ask_for_permission=True)

    if isfile(resume_ckpt):
        print("=> loading checkpoint '{}'".format(resume_ckpt))
        checkpoint = torch.load(resume_ckpt,
                                map_location=lambda storage, loc: storage)
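        # sanity check: the checkpoint's PCA layer must match the configured
        # num_pcs, and num_clusters is recovered from the stored centroids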
        assert checkpoint['state_dict']['WPCA.0.bias'].shape[0] == int(config['global_params']['num_pcs'])
        config['global_params']['num_clusters'] = str(checkpoint['state_dict']['pool.centroids'].shape[0])

        model = get_model(encoder,
                          encoder_dim,
                          opt,
                          config['global_params'],
                          append_pca_layer=True)

        if int(config['global_params']['nGPU']) > 1 and torch.cuda.device_count() > 1:
            model.encoder = nn.DataParallel(model.encoder)
            model.pool = nn.DataParallel(model.pool)

        model.load_state_dict(checkpoint['state_dict'])
        model = model.to(device)
        print("=> loaded checkpoint '{}'".format(resume_ckpt, ))
    else:
        raise FileNotFoundError(
            "=> no checkpoint found at '{}'".format(resume_ckpt))

    feature_extract(dataset, model, device, opt, config)

    # Free cached GPU memory; PyTorch does not always release it automatically after a run.
    torch.cuda.empty_cache()
    print('\n\nDone. Finished extracting and saving features')
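
# A hypothetical invocation of this script (all paths are illustrative):
#   python feature_extract.py \
#       --dataset_file_path=pitts30k_imageNames_index.txt \
#       --dataset_root_dir=/path/to/pitts30k \
#       --output_features_dir=patchnetvlad/output_features/pitts30k_index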
Example #3
def main():
    parser = argparse.ArgumentParser(description='Patch-NetVLAD-Match-Two')
    parser.add_argument('--config_path', type=str, default=join(PATCHNETVLAD_ROOT_DIR, 'configs/performance.ini'),
                        help='File name (with extension) to an ini file that stores most of the configuration data for patch-netvlad')
    parser.add_argument('--nocuda', action='store_true', help='If set, use CPU only; otherwise use GPU.')

    opt = parser.parse_args()
    print(opt)

    configfile = opt.config_path
    assert os.path.isfile(configfile)
    config = configparser.ConfigParser()
    config.read(configfile)

    cuda = not opt.nocuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run with --nocuda")

    device = torch.device("cuda" if cuda else "cpu")

    encoder_dim, encoder = get_backend()

    # must load from a resume to do extraction
    resume_ckpt = config['global_params']['resumePath'] + config['global_params']['num_pcs'] + '.pth.tar'

    # backup: try whether resume_ckpt is relative to script path
    if not isfile(resume_ckpt):
        resume_ckpt = join(PATCHNETVLAD_ROOT_DIR, resume_ckpt)
        if not isfile(resume_ckpt):
            from download_models import download_all_models
            download_all_models(ask_for_permission=True)

    if isfile(resume_ckpt):
        print("=> loading checkpoint '{}'".format(resume_ckpt))
        checkpoint = torch.load(resume_ckpt, map_location=lambda storage, loc: storage)
        assert checkpoint['state_dict']['WPCA.0.bias'].shape[0] == int(config['global_params']['num_pcs'])
        config['global_params']['num_clusters'] = str(checkpoint['state_dict']['pool.centroids'].shape[0])

        model = get_model(encoder, encoder_dim, config['global_params'], append_pca_layer=True)

        if int(config['global_params']['nGPU']) > 1 and torch.cuda.device_count() > 1:
            model.encoder = nn.DataParallel(model.encoder)
            model.pool = nn.DataParallel(model.pool)

        model.load_state_dict(checkpoint['state_dict'])
        model = model.to(device)
        print("=> loaded checkpoint '{}'".format(resume_ckpt, ))
    else:
        raise FileNotFoundError("=> no checkpoint found at '{}'".format(resume_ckpt))

    vid = cv2.VideoCapture(0)
    _, last_frame = vid.read()

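    # Compare each new frame against a stored reference frame. The success
    # flag from vid.read() is ignored here, so `frame` may be None if the
    # capture fails. Press 'q' to quit, or 'n' to make the current frame the
    # new reference.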
    while True:
        _, frame = vid.read()

        match_two(model, device, config, frame, last_frame, None)

        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
        elif key & 0xFF == ord('n'):
            last_frame = frame

    vid.release()
    cv2.destroyAllWindows()
    # Free cached GPU memory; PyTorch does not always release it automatically after a run.
    torch.cuda.empty_cache()
    print('Done')
Example #4
def main():
    parser = argparse.ArgumentParser(description='Patch-NetVLAD-Match-Two')
    parser.add_argument(
        '--config_path',
        type=str,
        default=join(PATCHNETVLAD_ROOT_DIR, 'configs/performance.ini'),
        help=
        'File name (with extension) to an ini file that stores most of the configuration data for patch-netvlad'
    )
    parser.add_argument('--first_im_path',
                        type=str,
                        default=join(PATCHNETVLAD_ROOT_DIR,
                                     'example_images/tokyo_db.png'),
                        help='Full path (with extension) to an image file')
    parser.add_argument(
        '--second_im_path',
        type=str,
        default=join(PATCHNETVLAD_ROOT_DIR, 'example_images/tokyo_query.jpg'),
        help='Full path (with extension) to another image file')
    parser.add_argument(
        '--plot_save_path',
        type=str,
        default=join(PATCHNETVLAD_ROOT_DIR, 'results'),
        help=
        'Path plus optional prefix pointing to a location to save the output matching plot'
    )
    parser.add_argument('--nocuda',
                        action='store_true',
                        help='If set, use CPU only; otherwise use GPU.')

    opt = parser.parse_args()
    print(opt)

    configfile = opt.config_path
    assert os.path.isfile(configfile)
    config = configparser.ConfigParser()
    config.read(configfile)

    cuda = not opt.nocuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run with --nocuda")

    device = torch.device("cuda" if cuda else "cpu")

    encoder_dim, encoder = get_backend()

    # must load from a resume to do extraction
    resume_ckpt = (config['global_params']['resumePath'] +
                   config['global_params']['num_pcs'] + '.pth.tar')

    model = get_model(encoder,
                      encoder_dim,
                      opt,
                      config['global_params'],
                      append_pca_layer=True)

    if int(config['global_params']['nGPU']) > 1 and torch.cuda.device_count() > 1:
        model.encoder = nn.DataParallel(model.encoder)
        model.pool = nn.DataParallel(model.pool)

    # backup: try whether resume_ckpt is relative to script path
    if not isfile(resume_ckpt):
        resume_ckpt = join(PATCHNETVLAD_ROOT_DIR, resume_ckpt)
        if not isfile(resume_ckpt):
            from download_models import download_all_models
            download_all_models(ask_for_permission=True)

    if isfile(resume_ckpt):
        print("=> loading checkpoint '{}'".format(resume_ckpt))
        checkpoint = torch.load(resume_ckpt,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'])
        model = model.to(device)
        print("=> loaded checkpoint '{}'".format(resume_ckpt, ))
    else:
        raise FileNotFoundError(
            "=> no checkpoint found at '{}'".format(resume_ckpt))

    match_two(model, device, opt, config)

    # Free cached GPU memory; PyTorch does not always release it automatically after a run.
    torch.cuda.empty_cache()
    print('Done')
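
# A hypothetical invocation using the default example images (paths are illustrative):
#   python match_two.py \
#       --first_im_path=patchnetvlad/example_images/tokyo_db.png \
#       --second_im_path=patchnetvlad/example_images/tokyo_query.jpg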