Example #1
    def __call__(self, network, device, logger):
        stopwatch = StopWatch()

        # extract database and query vectors
        print('>> {}: database images...'.format(self.dataset))
        vecs = extract_vectors(network, self.images, self.image_size, self.transforms, device=device)
        print('>> {}: query images...'.format(self.dataset))
        if self.images == self.qimages and set(self.bbxs) == {None}:
            qvecs = vecs.clone()
        else:
            qvecs = extract_vectors(network, self.qimages, self.image_size, self.transforms, device=device, bbxs=self.bbxs)
        stopwatch.lap("extract_descriptors")

        print('>> {}: Evaluating...'.format(self.dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        averages, scores = compute_map_and_print(self.dataset, ranks, self.gnd)
        stopwatch.lap("compute_score")

        first_score = scores[list(scores.keys())[0]]
        logger(None, len(first_score), "dataset", stopwatch.reset(), "scalar/time")
        logger(None, len(first_score), "score_avg", averages, "scalar/score")

        assert len({len(x) for x in scores.values()}) == 1
        for i, _ in enumerate(first_score):
            logger(i, len(first_score), "score", {x: scores[x][i] for x in scores}, "scalar/score")
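All of these examples share the same search-and-rank step: descriptors are stacked as D x N column matrices, similarity is a plain dot product (the vectors are assumed to be L2-normalized), and each query's ranking comes from sorting its score column. A minimal self-contained sketch of that step on toy data (the names here are illustrative, not taken from the examples):

import numpy as np

# toy descriptors: D x N database matrix and D x Nq query matrix (one column per image)
rng = np.random.default_rng(0)
vecs = rng.standard_normal((128, 1000)).astype(np.float32)
qvecs = rng.standard_normal((128, 5)).astype(np.float32)

# L2-normalize the columns so the dot product equals cosine similarity
vecs /= np.linalg.norm(vecs, axis=0, keepdims=True)
qvecs /= np.linalg.norm(qvecs, axis=0, keepdims=True)

# similarity and ranking, exactly as in the examples
scores = np.dot(vecs.T, qvecs)       # N x Nq similarity matrix
ranks = np.argsort(-scores, axis=0)  # ranks[:, q] lists database indices best-first for query q
top10 = ranks[:10, 0]                # ten best database images for the first query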
Example #2
def test(datasets, net):
    print(">> Evaluating network on test datasets...")
    image_size = 1024

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()
    # set up the transform
    normalize = transforms.Normalize(mean=net.meta["mean"],
                                     std=net.meta["std"])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    # Lw = None
    Lw = net.meta["Lw"]["retrieval-SfM-120k"]["ss"]

    # evaluate on test datasets
    # datasets = args.test_datasets.split(",")
    for dataset in datasets:
        start = time.time()

        print(">> {}: Extracting...".format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), "test"))
        images = [cfg["im_fname"](cfg, i) for i in range(cfg["n"])]
        qimages = [cfg["qim_fname"](cfg, i) for i in range(cfg["nq"])]
        bbxs = [tuple(cfg["gnd"][i]["bbx"]) for i in range(cfg["nq"])]

        # extract database and query vectors
        print(">> {}: database images...".format(dataset))
        vecs = extract_vectors(net, images, image_size, transform)
        print(">> {}: query images...".format(dataset))
        qvecs = extract_vectors(net, qimages, image_size, transform, bbxs)

        print(">> {}: Evaluating...".format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg["gnd"])

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw["m"], Lw["P"])
            qvecs_lw = whitenapply(qvecs, Lw["m"], Lw["P"])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + " + whiten", ranks, cfg["gnd"])

        print(">> {}: elapsed time: {}".format(dataset,
                                               htime(time.time() - start)))
Example #3
def get_scores_whitening(whiten_key,
                         net,
                         transform,
                         ms,
                         msp,
                         image_size,
                         setup_network=True,
                         gpu=True):
    """Learn scores whitening for the given network."""
    if whiten_key in LEARNED_WHITENING:
        return LEARNED_WHITENING[whiten_key]

    else:
        print("Learning scores whitening...")

        # extract whitening vectors
        wvecs = extract_vectors(net,
                                train_images,
                                image_size,
                                transform,
                                ms=ms,
                                msp=msp,
                                setup_network=setup_network,
                                gpu=gpu)

        # learning whitening
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        # cache learned whitening
        LEARNED_WHITENING[whiten_key] = Lw

        print("Whitening learned and cached.")
        return Lw
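A hedged usage sketch for the caching behavior above: the first call with a given key extracts vectors and learns the whitening, later calls with the same key return the cached result from LEARNED_WHITENING. The key string and argument values below are illustrative only, assuming a net and transform already set up as in the other examples:

# first call: learns and caches under 'retrieval-SfM-30k-ss' (illustrative key)
Lw = get_scores_whitening('retrieval-SfM-30k-ss', net, transform, ms=[1], msp=1, image_size=1024)
# second call with the same key: returned straight from the cache, no extraction
Lw_cached = get_scores_whitening('retrieval-SfM-30k-ss', net, transform, ms=[1], msp=1, image_size=1024)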
Example #4
def learning_lw(net):
    net.cuda()
    net.eval()
    # set up the transform
    normalize = transforms.Normalize(mean=net.meta["mean"], std=net.meta["std"])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    test_whiten = "retrieval-SfM-30k"
    print(">> {}: Learning whitening...".format(test_whiten))

    # loading db
    db_root = os.path.join(get_data_root(), "train", test_whiten)
    ims_root = os.path.join(db_root, "ims")
    db_fn = os.path.join(db_root, "{}-whiten.pkl".format(test_whiten))
    with open(db_fn, "rb") as f:
        db = pickle.load(f)
    images = [cid2filename(db["cids"][i], ims_root) for i in range(len(db["cids"]))]

    # extract whitening vectors
    print(">> {}: Extracting...".format(args.test_whiten))
    wvecs = extract_vectors(net, images, 1024, transform)

    # learning whitening
    print(">> {}: Learning...".format(args.test_whiten))
    wvecs = wvecs.numpy()
    m, P = whitenlearn(wvecs, db["qidxs"], db["pidxs"])
    Lw = {"m": m, "P": P}
    return Lw
Example #5
File: test.py  Project: wzb1005/mdir
def _compute_whitening(whitening, net, image_size, transform, ms, msp):
    # compute whitening
    start = time.time()

    print('>> {}: Learning whitening...'.format(whitening))

    # loading db
    db_root = os.path.join(get_data_root(), 'train', whitening)
    ims_root = os.path.join(db_root, 'ims')
    db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(whitening))
    with open(db_fn, 'rb') as f:
        db = pickle.load(f)
    images = [
        cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))
    ]

    # extract whitening vectors
    print('>> {}: Extracting...'.format(whitening))
    wvecs = extract_vectors(net, images, image_size, transform, ms=ms, msp=msp)

    # learning whitening
    print('>> {}: Learning...'.format(whitening))
    wvecs = wvecs.numpy()
    m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
    Lw = {'m': m, 'P': P}

    elapsed = time.time() - start
    print('>> {}: elapsed time: {}'.format(whitening, htime(elapsed)))

    return Lw, elapsed
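Once Lw is learned, the examples that use it (see Examples #2 and #12) apply it to both database and query descriptors and rank again. A minimal sketch of that step; the import path is an assumption based on the cirtorch-style helpers these examples rely on, so adjust it to the actual project layout:

import numpy as np
from cirtorch.utils.whiten import whitenapply  # assumed import path

def rank_with_whitening(vecs, qvecs, Lw):
    # vecs: D x N database descriptors, qvecs: D x Nq query descriptors, Lw: {'m': ..., 'P': ...}
    vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
    qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])
    scores = np.dot(vecs_lw.T, qvecs_lw)
    return np.argsort(-scores, axis=0)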
Example #6
    def constructfeature(self, hash_size, input_dim, num_hashtables):
        multiscale = '[1]'
        print(">> Loading network:\n>>>> '{}'".format(self.network))
        # state = load_url(PRETRAINED[args.network], model_dir=os.path.join(get_data_root(), 'networks'))
        state = torch.load(self.network)
        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they don't exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False
        # network initialization
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])
        print(">>>> loaded network: ")
        print(net.meta_repr())
        # setting up the multi-scale parameters
        ms = list(eval(multiscale))
        print(">>>> Evaluating scales: {}".format(ms))
        # moving network to gpu and eval mode
        if torch.cuda.is_available():
            net.cuda()
        net.eval()

        # set up the transform
        normalize = transforms.Normalize(
            mean=net.meta['mean'],
            std=net.meta['std']
        )
        transform = transforms.Compose([
            transforms.ToTensor(),
            normalize
        ])

        # extract database and query vectors
        print('>> database images...')
        images = ImageProcess(self.img_dir).process()
        vecs, img_paths = extract_vectors(net, images, 1024, transform, ms=ms)
        feature_dict = dict(zip(img_paths, list(vecs.detach().cpu().numpy().T)))
        # index
        lsh = LSHash(hash_size=int(hash_size), input_dim=int(input_dim), num_hashtables=int(num_hashtables))
        for img_path, vec in feature_dict.items():
            lsh.index(vec.flatten(), extra_data=img_path)

        # ## save the index model
        # with open(self.feature_path, "wb") as f:
        #     pickle.dump(feature_dict, f)
        # with open(self.index_path, "wb") as f:
        #     pickle.dump(lsh, f)

        print("extract feature is done")
        return feature_dict, lsh
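A sketch of how the returned lsh index might then be queried with a new descriptor. This assumes the lshash-style query() method, where each result is ((stored_vector, extra_data), distance); verify the signature against the LSHash implementation actually imported here:

# qvec: 1-D numpy descriptor of the query image, same length as input_dim
results = lsh.query(qvec.flatten(), num_results=5)  # assumed lshash-style API
for (stored_vec, img_path), dist in results:
    print(img_path, dist)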
Example #7
File: train.py  Project: tmcortes/BELoss
def testUkbench(net, eConfig):

    #datasets = eConfig['test-datasets'].split(',')
    #results = []
    #
    #for dataset in datasets:
    #    results.append((dataset, np.random.rand(1)[0]))
    #
    #return results

    print('>> Evaluating network on test datasets...')

    # for testing we use image size of max 1024
    dataset = 'ukbench'
    image_size = 362

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    dbpath = os.path.join(get_data_root(), 'test', 'ukbench', 'full')
    images = [
        os.path.join(dbpath, 'ukbench{:05d}.jpg'.format(i))
        for i in range(10200)
    ]
    labels = np.arange(10200, dtype=int) // 4

    # extract database and query vectors
    print('>> {}: database images...'.format(dataset))
    X = extract_vectors(net, images, image_size, transform)

    print('>> {}: Evaluating...'.format(dataset))

    # rank the similarities
    X = X.numpy()
    scores = np.dot(X.T, X)
    ranks = np.argsort(-scores, axis=1)
    ranks = ranks[:, 0:4]

    # compute the average accuracy for the first 4 entries
    ranksLabel = labels[ranks]
    accs = np.sum(ranksLabel == np.repeat(labels[:, np.newaxis], 4, axis=1),
                  axis=1)
    avgAcc = np.mean(accs)
    print('avgAcc: {:.6f}'.format(avgAcc))

    return [('ukbench', avgAcc)]
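For reference, the UKBench metric computed above counts how many of the top-4 results share the query's group label (four images per object, so each query scores between 0 and 4, and the average is the usual N-S score). A toy check of that exact computation:

import numpy as np

labels = np.arange(8) // 4                    # two groups of four images: [0 0 0 0 1 1 1 1]
# pretend top-4 ranks, one row per query image
ranks = np.array([[0, 1, 4, 2],               # query 0: three of the four retrieved images are correct
                  [4, 5, 6, 7]] + [[0, 1, 2, 3]] * 6)
ranksLabel = labels[ranks]
accs = np.sum(ranksLabel == np.repeat(labels[:, np.newaxis], 4, axis=1), axis=1)
print(accs, accs.mean())                      # per-query counts and their average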
Example #8
    def test_feature(self):
        multiscale = '[1]'
        print(">> Loading network:\n>>>> '{}'".format(self.network))
        # state = load_url(PRETRAINED[args.network], model_dir=os.path.join(get_data_root(), 'networks'))
        state = torch.load(self.network)
        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they don't exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False
        # network initialization
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])
        print(">>>> loaded network: ")
        print(net.meta_repr())
        # setting up the multi-scale parameters
        ms = list(eval(multiscale))
        print(">>>> Evaluating scales: {}".format(ms))
        # moving network to gpu and eval mode
        if torch.cuda.is_available():
            net.cuda()
        net.eval()

        # set up the transform
        normalize = transforms.Normalize(
            mean=net.meta['mean'],
            std=net.meta['std']
        )
        transform = transforms.Compose([
            transforms.ToTensor(),
            normalize
        ])

        # extract database and query vectors
        print('>> database images...')
        images = ImageProcess(self.img_dir).process()
        vecs, img_paths = extract_vectors(net, images, 1024, transform, ms=ms)
        feature_dict = dict(zip(img_paths, list(vecs.detach().cpu().numpy().T)))
        return feature_dict
Example #9
def query(query_path, bbx=None):
    global net, transform, ms, msp, vect
    q = [query_path]

    if bbx is not None:
        bbxs = [bbx]
    else:
        bbxs = None
    q2vecs = extract_vectors(net,
                             q,
                             1024,
                             transform,
                             bbxs=bbxs,
                             ms=ms,
                             msp=msp)
    scores = np.dot(vect.T, q2vecs)
    ranks = np.argsort(-scores, axis=0)
    ranks = ranks.reshape(5063)
    name = []
    for i in range(10):
        name += [img_list[ranks[i]]]
    return ranks, name
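The reshape(5063) above hard-codes the database size (5063 is the Oxford5k image count), and the top-10 loop can be written without it. A small generalized sketch, where db_vecs and db_names are illustrative stand-ins for the global vect and img_list used above:

import numpy as np

def rank_names(qvec, db_vecs, db_names, top_k=10):
    # qvec: D (or D x 1) query descriptor; db_vecs: D x N database matrix, one column per image
    scores = np.dot(db_vecs.T, qvec).ravel()   # one similarity score per database image
    ranks = np.argsort(-scores)                # works for any database size
    return ranks[:top_k], [db_names[i] for i in ranks[:top_k]]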
Example #10
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError(
                'Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test dataset are downloaded
    # and download if they are not
    download_train(get_data_root())
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:

        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(),
                                                    'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they don't exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get(
            'local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:

        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(
            args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta[
            'regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:

            print('>> {}: Whitening is precomputed, loading it...'.format(
                args.whitening))

            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']

        else:

            # if we evaluate networks from path we should save/load whitening
            # not to compute it every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(
                    args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(
                    args.whitening))
                Lw = torch.load(whiten_fn)

            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train',
                                       args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root,
                                     '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [
                    cid2filename(db['cids'][i], ims_root)
                    for i in range(len(db['cids']))
                ]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net,
                                        images,
                                        args.image_size,
                                        transform,
                                        ms=ms,
                                        msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(
                        args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening,
                                               htime(time.time() - start)))

    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        images = get_imlist("E:\\PycharmProjects\\image-retrieval\\holiday2\\")
        names = []
        for i, img_path in enumerate(images):
            img_name = os.path.split(img_path)[1]
            print(img_name)
            names.append(img_name)
        # prepare config structure for the test dataset
        # cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        # images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        # try:
        #     bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        # except:
        #     bbxs = None  # for holidaysmanrot and copydays

        # names = []
        # for i, img_path in enumerate(images):
        #     img_name = os.path.split(img_path)[1]
        #     print(img_name)
        #     names.append(img_name)

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net,
                               images,
                               args.image_size,
                               transform,
                               ms=ms,
                               msp=msp)

        # convert to numpy
        vecs = vecs.numpy()
        vecs = vecs.T
        print("--------------------------------------------------")
        print("      writing feature extraction results ...")
        print("--------------------------------------------------")
        output = "gem_res_holiday_3.h5"
        h5f = h5py.File(output, 'w')
        h5f.create_dataset('dataset_1', data=vecs)
        h5f.create_dataset('dataset_2', data=np.string_(names))
        h5f.close()

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))
Example #11

# %%
# extract whitening vectors
print('>> {}: Extracting...'.format(whitening))
split_num = 4
extract_num = int(len(images) / split_num)
num_list = list(range(0, len(images) + 1, extract_num))
num_list[-1] = len(images)
# %%
part = [0]
for k in part:
    print('>>>> extract part {} of {}'.format(k + 1, split_num))
    wvecs = extract_vectors(net,
                            images[num_list[k]:num_list[k + 1]],
                            image_size,
                            transform,
                            ms=ms,
                            msp=msp)
    wvecs = wvecs.numpy()
    print('>>>> save whitening vecs to pkl...')
    wvecs_file_path = os.path.join(
        get_data_root(), 'whitening_vecs{}_of_{}.pkl'.format(k + 1, split_num))
    wvecs_file = open(wvecs_file_path, 'wb')
    pickle.dump(wvecs, wvecs_file)
    wvecs_file.close()
    print('>>>> whitening_vecs{}_of_{}.pkl save done...'.format(
        k + 1, split_num))

# %%
print('>>>> load whitening vecs from pkl...')
split_num = 4
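The fragment stops here; a plausible continuation, given how the parts were written above, is to reload the four pickles and concatenate them column-wise before learning the whitening as in the other examples (the paths simply mirror the ones used when saving):

import os
import pickle
import numpy as np

parts = []
for k in range(split_num):
    wvecs_file_path = os.path.join(
        get_data_root(), 'whitening_vecs{}_of_{}.pkl'.format(k + 1, split_num))
    with open(wvecs_file_path, 'rb') as f:
        parts.append(pickle.load(f))
wvecs = np.concatenate(parts, axis=1)  # D x N, columns back in the original image order
# m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])  # as in the other examples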
Example #12
def main():
    args = parser.parse_args()

    # check if test dataset are downloaded
    # and download if they are not
    #download_train(get_data_root())
    #download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(),
                                                    'networks'))
        else:
            state = torch.load(args.network_path)
        net = init_network(model=state['meta']['architecture'],
                           pooling=state['meta']['pooling'],
                           whitening=state['meta']['whitening'],
                           mean=state['meta']['mean'],
                           std=state['meta']['std'],
                           pretrained=False)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        offtheshelf = args.network_offtheshelf.split('-')
        if len(offtheshelf) == 3:
            if offtheshelf[2] == 'whiten':
                offtheshelf_whiten = True
            else:
                raise (RuntimeError(
                    "Incorrect format of the off-the-shelf network. Examples: resnet101-gem | resnet101-gem-whiten"
                ))
        else:
            offtheshelf_whiten = False
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(
            args.network_offtheshelf))
        net = init_network(model=offtheshelf[0],
                           pooling=offtheshelf[1],
                           whitening=offtheshelf_whiten)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if args.multiscale:
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()
    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:

            print('>> {}: Whitening is precomputed, loading it...'.format(
                args.whitening))

            if args.multiscale:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']

        else:

            print('>> {}: Learning whitening...'.format(args.whitening))

            # loading db
            db_root = os.path.join(get_data_root(), 'train', args.whitening)
            ims_root = os.path.join(db_root, 'ims')
            db_fn = os.path.join(db_root,
                                 '{}-whiten.pkl'.format(args.whitening))
            with open(db_fn, 'rb') as f:
                db = pickle.load(f)
            images = [
                cid2filename(db['cids'][i], ims_root)
                for i in range(len(db['cids']))
            ]

            # extract whitening vectors
            print('>> {}: Extracting...'.format(args.whitening))
            wvecs = extract_vectors(net,
                                    images,
                                    args.image_size,
                                    transform,
                                    ms=ms,
                                    msp=msp)

            # learning whitening
            print('>> {}: Learning...'.format(args.whitening))
            wvecs = wvecs.numpy()
            m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
            Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.whitening,
                                               htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net,
                               images,
                               args.image_size,
                               transform,
                               ms=ms,
                               msp=msp)
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                args.image_size,
                                transform,
                                bbxs=bbxs,
                                ms=ms,
                                msp=msp)

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg['gnd'])

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))
Example #13
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError(
                'Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test dataset are downloaded
    # and download if they are not
    download_train(get_data_root())
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network
    # pretrained networks (downloaded automatically)
    print(">> Loading network:\n>>>> '{}'".format(args.network))
    state = load_url(PRETRAINED[args.network],
                     model_dir=os.path.join(get_data_root(), 'networks'))
    # parsing net params from meta
    # architecture, pooling, mean, std required
    # the rest has default values, in case they don't exist
    net_params = {}
    net_params['architecture'] = state['meta']['architecture']
    net_params['pooling'] = state['meta']['pooling']
    net_params['local_whitening'] = state['meta'].get('local_whitening', False)
    net_params['regional'] = state['meta'].get('regional', False)
    net_params['whitening'] = state['meta'].get('whitening', False)
    net_params['mean'] = state['meta']['mean']
    net_params['std'] = state['meta']['std']
    net_params['pretrained'] = False
    # network initialization
    net = init_network(net_params)
    net.load_state_dict(state['state_dict'])

    print(">>>> loaded network: ")
    print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    print(">>>> Evaluating scales: {}".format(ms))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        try:
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        except:
            bbxs = None  # for holidaysmanrot and copydays

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform, ms=ms)
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                args.image_size,
                                transform,
                                bbxs=bbxs,
                                ms=ms)

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)

        top_k = 100
        ranks_fnames_qs = []
        for q_id in range(len(cfg["qimlist"])):
            ranks_q = list(ranks[:top_k, q_id])
            ranks_fname_per_q = []
            for img_id in ranks_q:
                ranks_fname_per_q.append(cfg["imlist"][img_id])
            ranks_fnames_qs.append(ranks_fname_per_q)

        compute_map_and_print(dataset, ranks, cfg['gnd'])
        compute_map_and_print_top_k(dataset, ranks_fnames_qs, cfg['gnd'],
                                    cfg["imlist"])

        sys.exit()
        with open(dataset + "_gl18_tl_resnet101_gem_w_m.pkl", "wb") as f:
            data = {"ranks": ranks, "db_images": images, "q_images": qimages}
            pickle.dump(data, f)

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))
Example #14
def main():

	arg_parser = argparse.ArgumentParser()
	arg_parser.add_argument('train_images_path')
	arg_parser.add_argument('test_images_path')
	arg_parser.add_argument('predictions_path')
	args = arg_parser.parse_args()

	imsize=480
	train_path=args.train_images_path
	test_path=args.test_images_path
	outfile=args.predictions_path
	##read data##
	data_list = os.listdir(train_path)  # assumed source of the database file names ('ipath' was undefined)
	train_images = get_imlist(train_path)
	test_images = get_imlist(test_path)
	##RAMAC##
	RAMAC = extract_feature(train_images, 'resnet101', imsize)
	RAMAC_test = extract_feature(test_images, 'resnet101', imsize)
	##UEL##
	normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
	transform_train = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomCrop(size=224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
	transform_test = transforms.Compose([
        transforms.ToPILImage(),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
	net = resnet101(pretrained=True,low_dim=128)
	model_path = './model/UEL.t'#After training UEL
	net.load_state_dict(torch.load(model_path))
	
	imset = DataLoader(path = train_path, transform=transform_test)
	train_loader = torch.utils.data.DataLoader(imset, batch_size=32, shuffle=False, num_workers=0)
	UEL = obtainf(net, train_loader)
	
	imset = DataLoader(path = test_path, transform=transform_test)
	test_loader = torch.utils.data.DataLoader(imset, batch_size=32, shuffle=False, num_workers=0)
	UEL_test = obtainf(net, test_loader)
	##GEM##
	image_size=1024
	multiscale='[1, 2**(1/2), 1/2**(1/2)]'
	state = torch.load('./model/retrievalSfM120k-vgg16-gem-b4dcdc6.pth')
	
	net_params = {}
	net_params['architecture'] = state['meta']['architecture']
	net_params['pooling'] = state['meta']['pooling']
	net_params['local_whitening'] = state['meta'].get('local_whitening', False)
	net_params['regional'] = state['meta'].get('regional', False)
	net_params['whitening'] = state['meta'].get('whitening', False)
	net_params['mean'] = state['meta']['mean']
	net_params['std'] = state['meta']['std']
	net_params['pretrained'] = False
	# load network
	net = init_network(net_params)
	net.load_state_dict(state['state_dict'])
        
	# if whitening is precomputed
	if 'Lw' in state['meta']:
		net.meta['Lw'] = state['meta']['Lw']
	ms = list(eval(multiscale))
	msp = net.pool.p.item()
	
	net.cuda()
	net.eval()
	# set up the transform
	normalize = transforms.Normalize(
		mean=net.meta['mean'],
		std=net.meta['std']
    )
	transform = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])
	
	GEM = extract_vectors(net,train_images , 480, transform, ms=ms, msp=msp).numpy().T
	GEM_test = extract_vectors(net,test_images , 480, transform, ms=ms, msp=msp).numpy().T
	##Retrieval##
	feats=np.concatenate((RAMAC,UEL,GEM),axis=1).astype('float32')
	query_feat=np.concatenate((RAMAC_test, UEL_test,GEM_test),axis=1).astype('float32')
	##diffusion##
	kq, kd = 7, 50
	gamma=80
	diffusion = Diffusion(feats, '/')
	offline = diffusion.get_offline_results(1024, kd)
	print('[search] 1) k-NN search')
	sims, ids = diffusion.knn.search(query_feat, kq)
	sims = sims ** gamma
	qr_num = ids.shape[0]
	print('[search] 2) linear combination')
	all_scores = np.empty((qr_num, 7), dtype=np.float32)
	all_ranks = np.empty((qr_num, 7), dtype=int)
	for i in range(qr_num):
		scores = sims[i] @ offline[ids[i]]
		parts = np.argpartition(-scores, 7)[:7]
		ranks = np.argsort(-scores[parts])
		all_scores[i] = scores[parts][ranks]
		all_ranks[i] = parts[ranks]
	I = all_ranks
	##output##
	out=pd.DataFrame(list(map(lambda x: x.split('/')[-1].split('.jpg')[0], test_images)))
	out['1']=pd.DataFrame(I)[0].map(lambda x:data_list[x].split('.')[0] )
	out['2']=pd.DataFrame(I)[1].map(lambda x:data_list[x].split('.')[0] )
	out['3']=pd.DataFrame(I)[2].map(lambda x:data_list[x].split('.')[0] )
	out['4']=pd.DataFrame(I)[3].map(lambda x:data_list[x].split('.')[0] )
	out['5']=pd.DataFrame(I)[4].map(lambda x:data_list[x].split('.')[0] )
	out['6']=pd.DataFrame(I)[5].map(lambda x:data_list[x].split('.')[0] )
	out['7']=pd.DataFrame(I)[6].map(lambda x:data_list[x].split('.')[0] )
	out.to_csv(outfile,index=None,header=None)
Example #15
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError(
                'Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test dataset are downloaded
    # and download if they are not
    #download_train(get_data_root())
    #download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:

        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(),
                                                    'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they don't exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get(
            'local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:

        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(
            args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta[
            'regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:

            print('>> {}: Whitening is precomputed, loading it...'.format(
                args.whitening))

            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']

        else:

            # if we evaluate networks from path we should save/load whitening
            # not to compute it every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(
                    args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(
                    args.whitening))
                Lw = torch.load(whiten_fn)

            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train',
                                       args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root,
                                     '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [
                    cid2filename(db['cids'][i], ims_root)
                    for i in range(len(db['cids']))
                ]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net,
                                        images,
                                        args.image_size,
                                        transform,
                                        ms=ms,
                                        msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(
                        args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening,
                                               htime(time.time() - start)))

    else:
        Lw = None
    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        #cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = []
        gallery_file = open(
            '/home/zzd/University1652-Baseline/gallery_name.txt')
        for line in gallery_file:
            images.append('/home/zzd/University1652-Baseline/' +
                          line.replace('\n', '')[2:])
        #qimages = [cfg['qim_fname'](cfg,i) for i in range(cfg['nq'])]
        qimages = []
        query_file = open('/home/zzd/University1652-Baseline/query_name.txt')
        for line in query_file:
            qimages.append('/home/zzd/University1652-Baseline/' +
                           line.replace('\n', '')[2:])

        gallery_label = get_id(images)
        query_label = get_id(qimages)

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        gallery_feature = extract_vectors(net,
                                          images,
                                          args.image_size,
                                          transform,
                                          ms=ms,
                                          msp=msp)
        gallery_feature = torch.transpose(gallery_feature, 0, 1)
        print('>> {}: query images...'.format(dataset))
        query_feature = extract_vectors(net,
                                        qimages,
                                        args.image_size,
                                        transform,
                                        ms=ms,
                                        msp=msp)
        query_feature = torch.transpose(query_feature, 0, 1)
        result = {
            'gallery_f': gallery_feature.numpy(),
            'gallery_label': gallery_label,
            'query_f': query_feature.numpy(),
            'query_label': query_label
        }
        scipy.io.savemat('pytorch_result.mat', result)
        os.system('python evaluate_gpu.py')
        print('>> {}: Evaluating...'.format(dataset))
Example #16
def main():
    args = parser.parse_args()

    # loading network from path
    if args.network_path is not None:

        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(),
                                                    'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they don't exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get(
            'local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        if "epoch" in state:
            print("Model after {} epochs".format(state["epoch"]))
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:

        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(
            args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters: test both single scale and multiscale
    ms_singlescale = [1]
    msp_singlescale = 1

    ms_multiscale = list(eval(args.multiscale))
    msp_multiscale = 1
    if (len(ms_multiscale) > 1 and net.meta['pooling'] == 'gem'
            and not net.meta['regional'] and not net.meta['whitening']):
        msp_multiscale = net.pool.p.item()
    print(">> Set-up multiscale:")
    print(">>>> ms: {}".format(ms_multiscale))
    print(">>>> msp: {}".format(msp_multiscale))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()
        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(
                args.whitening))
            Lw = net.meta['Lw'][args.whitening]
        else:
            # if we evaluate networks from path we should save/load whitening
            # not to compute it every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(
                    args.whitening)
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(
                    args.whitening))
                Lw = torch.load(whiten_fn)
            else:
                Lw = {}
                for whiten_type, ms, msp in zip(
                    ["ss", "ms"], [ms_singlescale, ms_multiscale],
                    [msp_singlescale, msp_multiscale]):
                    print('>> {0}: Learning whitening {1}...'.format(
                        args.whitening, whiten_type))

                    # loading db
                    db_root = os.path.join(get_data_root(), 'train',
                                           args.whitening)
                    ims_root = os.path.join(db_root, 'ims')
                    db_fn = os.path.join(
                        db_root, '{}-whiten.pkl'.format(args.whitening))
                    with open(db_fn, 'rb') as f:
                        db = pickle.load(f)
                    images = [
                        cid2filename(db['cids'][i], ims_root)
                        for i in range(len(db['cids']))
                    ]

                    # extract whitening vectors
                    print('>> {}: Extracting...'.format(args.whitening))
                    wvecs = extract_vectors(net,
                                            images,
                                            args.image_size,
                                            transform,
                                            ms=ms,
                                            msp=msp)

                    # learning whitening
                    print('>> {}: Learning...'.format(args.whitening))
                    wvecs = wvecs.numpy()
                    m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                    Lw[whiten_type] = {'m': m, 'P': P}

                    print('>> {}: elapsed time: {}'.format(
                        args.whitening, htime(time.time() - start)))

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(
                        args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        for whiten_type, ms, msp in zip(["ss", "ms"],
                                        [ms_singlescale, ms_multiscale],
                                        [msp_singlescale, msp_multiscale]):
            print('>> Extracting feature on {0}, whitening {1}'.format(
                dataset, whiten_type))

            # prepare config structure for the test dataset
            cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
            images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
            qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

            # extract database and query vectors
            print('>> {}: database images...'.format(dataset))
            vecs = extract_vectors(net,
                                   images,
                                   args.image_size,
                                   transform,
                                   ms=ms,
                                   msp=msp)
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net,
                                    qimages,
                                    args.image_size,
                                    transform,
                                    bbxs=bbxs,
                                    ms=ms,
                                    msp=msp)

            print('>> {}: Evaluating...'.format(dataset))

            # convert to numpy
            vecs = vecs.numpy()
            qvecs = qvecs.numpy()

            # search, rank, and print
            scores = np.dot(vecs.T, qvecs)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset, ranks, cfg['gnd'])

            if Lw is not None:
                # whiten the vectors
                vecs_lw = whitenapply(vecs, Lw[whiten_type]['m'],
                                      Lw[whiten_type]['P'])
                qvecs_lw = whitenapply(qvecs, Lw[whiten_type]['m'],
                                       Lw[whiten_type]['P'])

                # search, rank, and print
                scores = np.dot(vecs_lw.T, qvecs_lw)
                ranks = np.argsort(-scores, axis=0)
                compute_map_and_print(
                    dataset + ' + whiten {}'.format(whiten_type), ranks,
                    cfg['gnd'])

            print('>> {}: elapsed time: {}'.format(dataset,
                                                   htime(time.time() - start)))
Example #17
File: train.py  Project: tmcortes/BELoss
def testHolidays(net, eConfig, dataset, Lw):

    print('>> Evaluating network on test dataset: {}'.format(dataset))

    # for testing we use image size of max 1024
    image_size = 1024

    ms = [1]
    msp = 1
    if (eConfig['multiscale']):
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # read the images and generate paths and queries-positive indexes
    dbpath = os.path.join(get_data_root(), 'test', 'holidays')
    ext = 'jpg' if dataset == 'holidays' else 'rjpg'
    images = sorted(os.listdir(os.path.join(dbpath, ext)))
    with open(os.path.join(dbpath, 'straight_gnd_holidays.pkl'), 'rb') as f:
        queries = pickle.load(f)
        positives = pickle.load(f)

    qidx = []
    pidx = []
    for i in range(len(queries)):

        qidx.append(images.index(queries[i]))

        aux = []
        for j in range(len(positives[i])):
            aux.append(images.index(positives[i][j]))
        pidx.append(aux)

    # extract database and query vectors
    print('>> {}: database images...'.format(dataset))
    X = extract_vectors(net, [os.path.join(dbpath, ext, n) for n in images],
                        image_size,
                        transform,
                        ms=ms,
                        msp=msp)

    print('>> {}: Evaluating...'.format(dataset))

    # rank the similarities
    X = X.numpy()

    if (Lw is not None):
        X = whitenapply(X, Lw['m'], Lw['P'])

    scores = np.dot(X.T, X)
    ranks = np.argsort(-scores, axis=1)
    ranks = ranks[qidx, 1::]

    APs = []
    for i, r in enumerate(ranks):
        trueRanks = np.isin(r, pidx[i])
        trueRanks = np.where(trueRanks)[0]
        APs.append(compute_ap(trueRanks, len(pidx[i])))

    mAP = np.mean(APs)
    print(">> {}: mAP {:.2f}".format(dataset, mAP * 100))

    # return the dataset label and its mAP
    return (dataset + ('+ multiscale' if eConfig['multiscale'] else ''), mAP)
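
The Holidays evaluation above relies on compute_ap(positions, n_positives), whose implementation is not shown here. Average precision over a ranked list can be sketched as follows (a hypothetical stand-in, not necessarily the repository's version):

import numpy as np

def average_precision(positions, n_positives):
    # positions: sorted 0-based ranks at which the true positives appear
    positions = np.sort(np.asarray(positions))
    precisions = (np.arange(len(positions)) + 1.0) / (positions + 1.0)
    return precisions.sum() / n_positives if n_positives else 0.0

# e.g. positives found at ranks 0 and 3 out of 2 relevant images
print(average_precision([0, 3], 2))  # 0.75

Example #18
0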
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset))

    # check if the test datasets are downloaded
    # and download them if they are not
    # download_train(get_data_root())
    # download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:

        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path], model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest have default values in case they are missing
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False
        net_params['multi_layer_cat'] = state['meta']['multi_layer_cat']

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])
        
        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']
        
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        
        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    print(">> image size: {}".format(args.image_size))
    ms = list(eval(args.multiscale))
    if len(ms)>1 and net.meta['pooling'] == 'gem' and not net.meta['regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))            
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(
        mean=net.meta['mean'],
        std=net.meta['std']
    )
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            
            if len(ms)>1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']

        else:

            # if we evaluate a network loaded from a path, save/load the whitening
            # so it is not recomputed every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
                Lw = torch.load(whiten_fn)

            else:
                print('>> {}: Learning whitening...'.format(args.whitening))
                
                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
                
                # learning whitening 
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time()-start)))

    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets: 
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg,i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg,i) for i in range(cfg['nq'])]
        # bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

        print('>> not using bbxs...')
        bbxs = None

        # key_url_list = ParseData(os.path.join(get_data_root(), 'index.csv'))
        # index_image_path = os.path.join(get_data_root(), 'resize_index_image')
        # images = [os.path.join(index_image_path, key_url_list[i][0]) for i in range(len(key_url_list))]
        # key_url_list = ParseData(os.path.join(get_data_root(), 'test.csv'))
        # test_image_path = os.path.join(get_data_root(), 'resize_test_image')
        # qimages = [os.path.join(test_image_path, key_url_list[i][0]) for i in range(len(key_url_list))]
        # # bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

        # csvfile = open(os.path.join(get_data_root(), 'index_clear.csv'), 'r')
        # csvreader = csv.reader(csvfile)
        # images = [line[:1][0] for line in csvreader]
        #
        # csvfile = open(os.path.join(get_data_root(), 'test_clear.csv'), 'r')
        # csvreader = csv.reader(csvfile)
        # qimages = [line[:1][0] for line in csvreader]

        # bbxs = None
        
        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
        # vecs = torch.randn(2048, 5063)
        # vecs = torch.randn(2048, 4993)

        # hxq modified
        # bbxs = None
        # print('>> set no bbxs...')
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)
        
        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)

        # hxq modified, test add features map for retrieval
        # vecs = [vecs[i].numpy() for i in range(len(vecs))]
        # qvecs_temp = np.zeros((qvecs[0].shape[0], len(qvecs)))
        # for i in range(len(qvecs)):
        #     qvecs_temp[:, i] = qvecs[i][:, 0].numpy()
        # qvecs = qvecs_temp
        #
        # scores = np.zeros((len(vecs), qvecs.shape[-1]))
        # for i in range(len(vecs)):
        #     scores[i, :] = np.amax(np.dot(vecs[i].T, qvecs), 0)

        ranks = np.argsort(-scores, axis=0)
        mismatched_info = compute_map_and_print(dataset, ranks, cfg['gnd'], kappas=[1, 5, 10, 100])

        # hxq added
        show_false_img = False
        if show_false_img:
            print('>> Save mismatched image tuple...')
            for info in mismatched_info:
                mismatched_img_show_save(info, qimages, images, args, bbxs=bbxs)
    
        if Lw is not None:
            # whiten the vectors
            vecs_lw  = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            mismatched_info = compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])

            # hxq added
            # show_false_img = False
            if show_false_img:
                print('>> Save mismatched image tuple...')
                for info in mismatched_info:
                    mismatched_img_show_save(info, qimages, images, args, bbxs=bbxs)
        
        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time()-start)))
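
Several of these scripts learn a discriminative whitening with whitenlearn on a training set and then apply it to database and query descriptors with whitenapply. A rough, self-contained sketch of the apply step, assuming Lw holds a mean vector m and a projection matrix P as in the snippets above (the repository's whitenapply may additionally truncate dimensions or differ in normalization):

import numpy as np

def apply_whitening(X, m, P):
    # hypothetical stand-in for whitenapply: center, project, re-normalize
    Xw = np.dot(P, X - m)
    return Xw / (np.linalg.norm(Xw, axis=0, keepdims=True) + 1e-12)

# toy example: 4-dim descriptors for 3 images
X = np.random.default_rng(1).normal(size=(4, 3))
m = X.mean(axis=1, keepdims=True)
P = np.eye(4)  # identity projection just to exercise the code path
print(apply_whitening(X, m, P).shape)  # (4, 3)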
Example #19
0
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for scene in args.scenes.split(','):
        if scene not in datasets_names:
            raise ValueError('Unsupported or unknown scene: {}!'.format(scene))

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network
    # pretrained networks (downloaded automatically)
    print(">> Loading network:\n>>>> '{}'".format(args.network))
    state = load_url(PRETRAINED[args.network],
                     model_dir=os.path.join(get_data_root(), 'networks'))
    # parsing net params from meta
    # architecture, pooling, mean, std required
    # the rest have default values in case they are missing
    net_params = {}
    net_params['architecture'] = state['meta']['architecture']
    net_params['pooling'] = state['meta']['pooling']
    net_params['local_whitening'] = state['meta'].get('local_whitening', False)
    net_params['regional'] = state['meta'].get('regional', False)
    net_params['whitening'] = state['meta'].get('whitening', False)
    net_params['mean'] = state['meta']['mean']
    net_params['std'] = state['meta']['std']
    net_params['pretrained'] = False
    # network initialization
    net = init_network(net_params)
    net.load_state_dict(state['state_dict'])

    print(">>>> loaded network: ")
    print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    print(">>>> Evaluating scales: {}".format(ms))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # evaluate on test datasets
    scenes = args.scenes.split(',')
    for scene in scenes:
        start = time.time()

        print('>> {}: Extracting...'.format(scene))
        img_path = osp.join(args.data_path, scene, "images")
        images = [
            osp.join(img_path, fname) for fname in os.listdir(img_path)
            if fname[-3:].lower() in ['jpg', 'png']
        ]

        # extract vectors
        vecs = extract_vectors(net, images, args.image_size, transform, ms=ms)

        print('>> {}: Evaluating...'.format(scene))

        # convert to numpy
        vecs = vecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, vecs)
        ranks = np.argsort(-scores, axis=0)

        images = [img.split('/')[-1] for img in images]
        for top_k in list(eval(args.top_n)):
            pairs = []
            for q_id in range(len(images)):
                img_q = images[q_id]
                pairs_per_q = [
                    " ".join([img_q, images[db_id]])
                    for db_id in list(ranks[1:top_k + 1, q_id])
                ]
                pairs += pairs_per_q
            with open(
                    osp.join(args.data_path, scene,
                             "image_pairs_" + str(top_k) + ".txt"), "w") as f:
                for pair in pairs:
                    f.write(pair + "\n")

        print('>> {}: elapsed time: {}'.format(scene,
                                               htime(time.time() - start)))
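
Example #19 turns the similarity ranking into text files of image pairs for downstream matching. A compact sketch of that pair-building loop with a toy ranks matrix (file names and top_k are placeholders):

import numpy as np

images = ["a.jpg", "b.jpg", "c.jpg", "d.jpg"]
# toy ranks: ranks[:, q] lists database indices for query q, best first
ranks = np.argsort(-np.random.default_rng(2).random((4, 4)), axis=0)

top_k = 2
pairs = []
for q_id, img_q in enumerate(images):
    # skip rank 0, which is the query image itself in self-retrieval
    for db_id in ranks[1:top_k + 1, q_id]:
        pairs.append(f"{img_q} {images[db_id]}")
print("\n".join(pairs))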
Example #20
0
def main():
    args = parser.parse_args()

    # check if the test datasets are downloaded
    # and download them if they are not
    download_train(get_data_root())
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        net = load_network(args.network_path)

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        net = load_offtheshelf(args.network_offtheshelf)

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if args.multiscale:
        ms = [1, 1./math.sqrt(2), 1./2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()
    # set up the transform
    normalize = transforms.Normalize(
        mean=net.meta['mean'],
        std=net.meta['std']
    )
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:

            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))

            if args.multiscale:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']

        else:

            print('>> {}: Learning whitening...'.format(args.whitening))

            if args.whitening == "scores":
                # special logic for scores database
                from score_retrieval.exports import (
                    db,
                    train_images as images,
                )

            else:
                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.test_whiten)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.test_whiten))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

            # extract whitening vectors
            print('>> {}: Extracting...'.format(args.whitening))
            wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

            # learning whitening
            print('>> {}: Learning...'.format(args.whitening))
            wvecs = wvecs.numpy()
            m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
            Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time()-start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        if dataset == "scores":
            # Special added logic to handle loading our score dataset
            from score_retrieval.exports import (
                images,
                qimages,
                gnd,
            )

            print('>> {}: database images...'.format(dataset))
            vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net, qimages, args.image_size, transform, ms=ms, msp=msp)

        else:
            # extract ground truth
            cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
            gnd = cfg['gnd']

            # prepare config structure for the test dataset
            images = [cfg['im_fname'](cfg,i) for i in range(cfg['n'])]
            qimages = [cfg['qim_fname'](cfg,i) for i in range(cfg['nq'])]
            bbxs = [tuple(gnd[i]['bbx']) for i in range(cfg['nq'])]

            # extract database and query vectors
            print('>> {}: database images...'.format(dataset))
            vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)

        # validation
        print(">> {}: gnd stats: {}, {}, {}".format(
            dataset,
            len(gnd),
            [len(x["ok"]) for x in gnd[10:]],
            [len(x["junk"]) for x in gnd[10:]],
        ))
        print(">> {}: image stats: {}, {}".format(dataset, len(images), len(qimages)))
        assert len(gnd) == len(qimages), (len(gnd), len(qimages))

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()
        print(">> {}: qvecs.shape: {}".format(dataset, qvecs.shape))

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        print(">> {}: ranks (shape {}) head: {}".format(dataset, ranks.shape, ranks[10:,10:]))
        print(">> {}: gnd head: {}".format(dataset, gnd[5:]))

        # Compute and print metrics
        compute_acc(ranks, gnd, dataset)
        compute_mrr(ranks, gnd, dataset)
        compute_map_and_print(dataset, ranks, gnd)

        if Lw is not None:
            # whiten the vectors
            vecs_lw  = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_acc(ranks, gnd, dataset + " + whiten")
            compute_mrr(ranks, gnd, dataset + " + whiten")
            compute_map_and_print(dataset + " + whiten", ranks, gnd)

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time()-start)))
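
Example #20 reports compute_acc and compute_mrr alongside mAP; their implementations are not included here, but mean reciprocal rank over a gnd-style ground truth can be sketched as follows (a hypothetical helper, assuming gnd[i]['ok'] lists the positive database indices for query i):

import numpy as np

def mean_reciprocal_rank(ranks, gnd):
    # ranks[:, i] is the ranked list of database indices for query i
    rrs = []
    for i in range(ranks.shape[1]):
        ok = set(gnd[i]["ok"])
        hit = next((r for r, idx in enumerate(ranks[:, i]) if idx in ok), None)
        rrs.append(0.0 if hit is None else 1.0 / (hit + 1))
    return float(np.mean(rrs))

gnd = [{"ok": [2]}, {"ok": [0, 3]}]
ranks = np.array([[1, 0], [2, 3], [0, 1], [3, 2]])
print(mean_reciprocal_rank(ranks, gnd))  # query 0 hits at rank 2, query 1 at rank 1 -> 0.75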
Example #21
0
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError(
                'Unsupported or unknown dataset: {}!'.format(dataset))

    # check if the test datasets are downloaded
    # and download them if they are not
    #download_train(get_data_root())
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:

        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(),
                                                    'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest have default values in case they are missing
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get(
            'local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:

        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(
            args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta[
            'regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:

            print('>> {}: Whitening is precomputed, loading it...'.format(
                args.whitening))

            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']

        else:

            # if we evaluate a network loaded from a path, save/load the whitening
            # so it is not recomputed every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(
                    args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(
                    args.whitening))
                Lw = torch.load(whiten_fn)

            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train',
                                       args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root,
                                     '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [
                    cid2filename(db['cids'][i], ims_root)
                    for i in range(len(db['cids']))
                ]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net,
                                        images,
                                        args.image_size,
                                        transform,
                                        ms=ms,
                                        msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(
                        args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening,
                                               htime(time.time() - start)))

    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()
        """
        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg,i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg,i) for i in range(cfg['nq'])]
        try:
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        except:
            bbxs = None  # for holidaysmanrot and copydays

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)
        
        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        print (vecs.shape)
        print (qvecs.shape)

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        print (scores.shape)

        # to save scores (single query)
        # oxford
        #f = 'oxf_single.npy'
        # paris
        #f = 'par_single.npy'
        # roxford
        #f = 'roxf_single.npy'
        # rparis
        f = 'rpar_single.npy'

        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg['gnd'])
    
        if Lw is not None:
            # whiten the vectors
            vecs_lw  = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            # save
            np.save(f, scores)

            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])
        """

        ############################################################
        # Test
        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]

        print(qimages)

        # to load scores
        # oxford
        #f = 'oxf_single.npy'
        #f = 'oxf_mq_avg.npy'
        #f = 'oxf_mq_max.npy'
        #f = 'oxf_sc_imf.npy'

        # paris
        #f = 'par_single.npy'
        #f = 'par_mq_avg.npy'
        #f = 'par_mq_max.npy'
        f = 'par_sc_imf.npy'

        # roxford
        #f = 'roxf_single.npy'
        #f = 'roxf_mq_avg.npy'
        #f = 'roxf_mq_max.npy'
        #f = 'roxf_sc_imf.npy'

        # rparis
        #f = 'rpar_single.npy'
        #f = 'rpar_mq_avg.npy'
        #f = 'rpar_mq_max.npy'
        #f = 'rpar_sc_imf.npy'

        # load
        scores = np.load(f)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))
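
Example #21 alternates between computing a score matrix and reloading one saved in an earlier run (the commented *.npy file names). A small sketch of that save/re-rank round trip with NumPy (the file name is a placeholder):

import numpy as np

scores = np.random.default_rng(3).random((5, 2))   # (n_db, n_q)
np.save("scores_example.npy", scores)               # persist once

loaded = np.load("scores_example.npy")              # reuse in a later run
ranks = np.argsort(-loaded, axis=0)
print(np.allclose(scores, loaded), ranks.shape)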
Example #22
0
def main():
    #def process(network_path, datasets='oxford5k,paris6k', whitening=None, image_size=1024, multiscale = '[1]', query=None):
    args = parser.parse_args()
    #args.query = None
    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError(
                'Unsupported or unknown dataset: {}!'.format(dataset))

    # check if the test datasets are downloaded
    # and download them if they are not
    #download_train(get_data_root())
    #download_test(get_data_root())

    # setting up the visible GPU
    #os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:

        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(),
                                                    'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest have default values in case they are missing
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get(
            'local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta[
            'regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    #net.cuda()
    #net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:

            print('>> {}: Whitening is precomputed, loading it...'.format(
                args.whitening))

            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']

        else:

            # if we evaluate a network loaded from a path, save/load the whitening
            # so it is not recomputed every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(
                    args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None
            print(whiten_fn)
            return
            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(
                    args.whitening))
                Lw = torch.load(whiten_fn)

            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train',
                                       args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root,
                                     '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [
                    cid2filename(db['cids'][i], ims_root)
                    for i in range(len(db['cids']))
                ]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net,
                                        images,
                                        args.image_size,
                                        transform,
                                        ms=ms,
                                        msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(
                        args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening,
                                               htime(time.time() - start)))

    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    # query type

    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))

        #for i in cfg: print(i)
        #print(cfg['gnd'][0]['bbx'])
        #return

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        feas_dir = os.path.join(cfg['dir_data'], 'features')
        if not os.path.isdir(feas_dir):
            os.mkdir(feas_dir)
        feas_sv = os.path.join(
            feas_dir, dataset + '_' + args.network_path + '_features.pkl')
        if not os.path.isfile(feas_sv):
            images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
            vecs = extract_vectors(net,
                                   images,
                                   args.image_size,
                                   transform,
                                   ms=ms,
                                   msp=msp)
            with open(feas_sv, 'wb') as f:
                pickle.dump(vecs, f)
        else:
            with open(feas_sv, 'rb') as f:
                vecs = pickle.load(f)

        print('>> {}: query images...'.format(dataset))
        if args.query is not None:
            qimages = [args.query]
            qvecs = extract_vectors(net,
                                    qimages,
                                    args.image_size,
                                    transform,
                                    ms=ms,
                                    msp=msp)
        else:
            qfeas_dir = feas_dir
            qfeas_sv = os.path.join(
                qfeas_dir,
                dataset + '_' + args.network_path + '_qfeatures.pkl')
            if not os.path.isfile(qfeas_sv):
                qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
                try:
                    bbxs = [
                        tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])
                    ]
                except:
                    bbxs = None
                qvecs = extract_vectors(net,
                                        qimages,
                                        args.image_size,
                                        transform,
                                        bbxs=bbxs,
                                        ms=ms,
                                        msp=msp)
                with open(qfeas_sv, 'wb') as f:
                    pickle.dump(qvecs, f)
            else:
                with open(qfeas_sv, 'rb') as f:
                    qvecs = pickle.load(f)

        print('>> {}: Evaluating...'.format(dataset))
        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()
        #qvecs = qvecs[:, 0].reshape(-1, 1)
        #args.query = True
        # search, rank, and print
        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranksw = np.argsort(-scores, axis=0)
            if args.query is None:
                #compute_map_and_print(dataset + ' + whiten', ranksw, cfg['gnd'])
                compute_map_and_print1(dataset + ' + whiten', ranksw,
                                       cfg['gnd'])

                scores = np.dot(vecs.T, qvecs)
                ranks = np.argsort(-scores, axis=0)
                # compute_map_and_print(dataset, ranks, cfg['gnd'])
                compute_map_and_print1(dataset, ranks, cfg['gnd'])
            else:
                a = []
                for i in ranksw:
                    a.append(
                        os.path.join(cfg['dir_images'], cfg['imlist'][i[0]]) +
                        cfg['ext'])
                print(a[:10])
                result = cfg['dir_data'] + '_result'
                with open(result + '.pkl', 'wb') as f:
                    pickle.dump(a[:10], f)
        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))
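
Example #22 caches extracted descriptors on disk so repeated runs skip extraction. The pattern reduces to a compute-or-load helper; a minimal sketch (the compute function and cache path are placeholders):

import os
import pickle

def cached(path, compute):
    # load a pickled result if present, otherwise compute and store it
    if os.path.isfile(path):
        with open(path, "rb") as f:
            return pickle.load(f)
    result = compute()
    with open(path, "wb") as f:
        pickle.dump(result, f)
    return result

vecs = cached("demo_features.pkl", lambda: [0.1, 0.2, 0.3])
print(vecs)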
Example #23
0
def test(datasets, net, wandb_enabled=False, epoch=-1):
    
    global global_step

    print('>> Evaluating network on test datasets...')

    # for testing we use image size of max 1024
    image_size = 1024

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()
    # set up the transform
    normalize = transforms.Normalize(
        mean=net.meta['mean'],
        std=net.meta['std']
    )
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])

    # compute whitening
    if args.test_whiten:
        start = time.time()

        print('>> {}: Learning whitening...'.format(args.test_whiten))

        # loading db
        db_root = os.path.join(get_data_root(), 'train', args.test_whiten)
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.test_whiten))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(args.test_whiten))
        wvecs = extract_vectors(net, images, image_size, transform)  # implemented with torch.no_grad
        
        # learning whitening 
        print('>> {}: Learning...'.format(args.test_whiten))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.test_whiten, htime(time.time()-start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.test_datasets.split(',')
    for dataset in datasets: 
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg,i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg,i) for i in range(cfg['nq'])]
        bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        
        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, image_size, transform)  # implemented with torch.no_grad
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, image_size, transform, bbxs)  # implemented with torch.no_grad
        
        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg['gnd'], wandb_enabled=wandb_enabled, epoch=epoch, global_step=global_step)
    
        if Lw is not None:
            # whiten the vectors
            vecs_lw  = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'], wandb_enabled=wandb_enabled, epoch=epoch, global_step=global_step)
        
        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time()-start)))
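
Many of these snippets branch on net.meta['pooling'] == 'gem' and read msp from net.pool.p, the learnable exponent of generalized-mean (GeM) pooling. A minimal sketch of GeM over a feature map, assuming a (B, C, H, W) tensor (the repository's pooling layer may clamp or normalize slightly differently):

import torch

def gem(x, p=3.0, eps=1e-6):
    # generalized-mean pooling: ((1/HW) * sum x^p) ** (1/p), per channel
    return x.clamp(min=eps).pow(p).mean(dim=(-2, -1)).pow(1.0 / p)

feats = torch.rand(1, 4, 7, 7)
print(gem(feats).shape)  # torch.Size([1, 4])

Example #24
0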
        if len(param['ms']) > 1 and net.meta['pooling'] == 'gem' \
                and not net.meta['regional'] and not net.meta['whitening']:
            msp = net.pool.p.item()
            print(">> Set-up multiscale:")
            print(">>>> ms: {}".format(param['ms']))
            print(">>>> msp: {}".format(msp))
        else:
            msp = 1
            print(">> Set-up multiscale:")
            print(">>>> ms: {}".format(param['ms']))
            print(">>>> msp: {}".format(msp))

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net,
                               images,
                               image_size,
                               transform,
                               ms=param['ms'],
                               msp=msp)
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                image_size,
                                transform,
                                bbxs=bbxs,
                                ms=param['ms'],
                                msp=msp)
        print('>> {}: Evaluating...'.format(dataset))
        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()
        # search, rank, and print
Example #25
0
def call_benchmark(
    # must pass one of images or paths
    images=None,
    paths=None,
    **kwargs,
):
    """Run the given network on the given data and return vectors for it."""
    # load params
    params = default_params.copy()
    params.update(kwargs)

    network = params["network"]
    offtheshelf = params["offtheshelf"]
    image_size = params["image_size"]
    gpu = params["gpu"]
    multiscale = params["multiscale"]
    whitening = params["whitening"]

    net_key = (network, offtheshelf, gpu)

    if net_key in LOADED_NETWORKS:
        net = LOADED_NETWORKS[net_key]

    else:
        # load network
        if offtheshelf:
            net = load_offtheshelf(network)
        else:
            net = load_network(network)

        # moving network to gpu and eval mode
        if gpu:
            net.cuda()
        net.eval()

        # store network in memo dict
        LOADED_NETWORKS[net_key] = net

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if multiscale:
        ms = [1, 1 / np.sqrt(2), 1 / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # set up the transform
    normalize = transforms.Normalize(
        mean=net.meta['mean'],
        std=net.meta['std'],
    )
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    # setting up whitening
    if whitening is not None:
        if 'Lw' in net.meta and whitening in net.meta['Lw']:
            if multiscale:
                Lw = net.meta['Lw'][whitening]['ms']
            else:
                Lw = net.meta['Lw'][whitening]['ss']
        elif whitening == "scores":
            whiten_key = (network, offtheshelf, image_size, multiscale)
            Lw = get_scores_whitening(whiten_key,
                                      net,
                                      transform,
                                      ms,
                                      msp,
                                      image_size,
                                      setup_network=False,
                                      gpu=gpu)
        else:
            raise ValueError(
                "invalid whitening {} (valid whitenings: {})".format(
                    whitening, list(net.meta['Lw'].keys())))

    # process the given data
    if images is not None:
        images = np.asarray(images)
        print("images.shape =", images.shape)
        vecs = vectors_from_images(net,
                                   images,
                                   transform,
                                   ms=ms,
                                   msp=msp,
                                   setup_network=False,
                                   gpu=gpu)
    else:
        vecs = extract_vectors(net,
                               paths,
                               image_size,
                               transform,
                               ms=ms,
                               msp=msp,
                               setup_network=False,
                               gpu=gpu)

    # convert to numpy
    vecs = vecs.numpy()

    # apply whitening
    if whitening is not None:
        vecs = whitenapply(vecs, Lw['m'], Lw['P'])

    # take transpose
    vecs = vecs.T
    print("vecs.shape =", vecs.shape)
    return vecs
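
call_benchmark keeps already-initialized networks in the module-level LOADED_NETWORKS dict keyed by (network, offtheshelf, gpu), so repeated calls skip reloading. The same memoization pattern in isolation (the loader and key here are placeholders):

_CACHE = {}

def get_or_load(key, loader):
    # return a cached object for key, loading it only on the first request
    if key not in _CACHE:
        _CACHE[key] = loader()
    return _CACHE[key]

net_a = get_or_load(("resnet101-gem", False), lambda: object())
net_b = get_or_load(("resnet101-gem", False), lambda: object())
print(net_a is net_b)  # True: the second call reuses the cached instance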
Example #26
0
def main():
    args = parser.parse_args()

    # check if the test datasets are downloaded
    # and download them if they are not
    download_train(get_data_root())
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        state = torch.load(args.network_path)
        net = init_network(model=state['meta']['architecture'],
                           pooling=state['meta']['pooling'],
                           whitening=state['meta']['whitening'],
                           mean=state['meta']['mean'],
                           std=state['meta']['std'],
                           pretrained=False)
        net.load_state_dict(state['state_dict'])
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        offtheshelf = args.network_offtheshelf.split('-')
        if len(offtheshelf) == 3:
            if offtheshelf[2] == 'whiten':
                offtheshelf_whiten = True
            else:
                raise (RuntimeError(
                    "Incorrect format of the off-the-shelf network. Examples: resnet101-gem | resnet101-gem-whiten"
                ))
        else:
            offtheshelf_whiten = False
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(
            args.network_offtheshelf))
        net = init_network(model=offtheshelf[0],
                           pooling=offtheshelf[1],
                           whitening=offtheshelf_whiten)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if args.multiscale:
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()
    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        print('>> {}: Learning whitening...'.format(args.whitening))

        # loading db
        db_root = os.path.join(get_data_root(), 'train', args.whitening)
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [
            cid2filename(db['cids'][i], ims_root)
            for i in range(len(db['cids']))
        ]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(args.whitening))
        wvecs = extract_vectors(net,
                                images,
                                args.image_size,
                                transform,
                                ms=ms,
                                msp=msp)

        # learning whitening
        print('>> {}: Learning...'.format(args.whitening))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.whitening,
                                               htime(time.time() - start)))
    else:
        Lw = None

    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        if dataset == 'reco':
            images, qimages = landmark_recognition_dataset()
            bbxs = [None for x in qimages]

        elif dataset == 'retr':
            images, _ = landmark_retrieval_dataset()
            qimages = []
            bbxs = [None for x in qimages]

        else:
            # prepare config structure for the test dataset
            cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
            images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
            qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

        with open('%s_fnames.pkl' % dataset, 'wb') as f:
            pickle.dump([images, qimages], f)

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net,
                               images,
                               args.image_size,
                               transform,
                               ms=ms,
                               msp=msp)
        vecs = vecs.numpy()
        print('>> saving')
        np.save('{}_vecs.npy'.format(dataset), vecs)

        if len(qimages) > 0:
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net,
                                    qimages,
                                    args.image_size,
                                    transform,
                                    bbxs=bbxs,
                                    ms=ms,
                                    msp=msp)
            qvecs = qvecs.numpy()
            np.save('{}_qvecs.npy'.format(dataset), qvecs)

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # TODO

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))
Example #27
0
def main():
    args = parser.parse_args()

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network
    # pretrained networks (downloaded automatically)
    print(">> Loading network:\n>>>> '{}'".format(args.network))
    state = load_url(PRETRAINED[args.network],
                     model_dir=os.path.join(get_data_root(), 'networks'))
    # parsing net params from meta
    # architecture, pooling, mean, std required
    # the rest have default values in case they are missing
    net_params = {}
    net_params['architecture'] = state['meta']['architecture']
    net_params['pooling'] = state['meta']['pooling']
    net_params['local_whitening'] = state['meta'].get('local_whitening', False)
    net_params['regional'] = state['meta'].get('regional', False)
    net_params['whitening'] = state['meta'].get('whitening', False)
    net_params['mean'] = state['meta']['mean']
    net_params['std'] = state['meta']['std']
    net_params['pretrained'] = False
    # network initialization
    net = init_network(net_params)
    net.load_state_dict(state['state_dict'])

    print(">>>> loaded network: ")
    print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    print(">>>> Evaluating scales: {}".format(ms))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # evaluate on test datasets
    start = time.time()

    print('>> Extracting...')

    img_path = osp.join(args.data_path, "images", "images_upright")
    fnames_db = [
        osp.join(path, fname)
        for path, _, files in os.walk(osp.join(img_path, "db"))
        for fname in files if fname[-3:].lower() in ["jpg", "png"]
    ]

    fnames_q = [
        osp.join(path, fname)
        for path, _, files in os.walk(osp.join(img_path, "query"))
        for fname in files if fname[-3:].lower() in ["jpg", "png"]
    ]

    # extract vectors
    vecs_db = extract_vectors(net,
                              fnames_db,
                              args.image_size,
                              transform,
                              ms=ms)
    vecs_q = extract_vectors(net, fnames_q, args.image_size, transform, ms=ms)

    print('>> Evaluating...')

    # convert to numpy
    vecs_db = vecs_db.numpy()
    vecs_q = vecs_q.numpy()

    # search, rank, and print
    scores_db = np.dot(vecs_db.T, vecs_db)
    scores_q = np.dot(vecs_db.T, vecs_q)
    ranks_db = np.argsort(-scores_db, axis=0)
    ranks_q = np.argsort(-scores_q, axis=0)

    print(ranks_q.shape, ranks_db.shape)

    images_db = [fname_db[len(img_path) + 1:] for fname_db in fnames_db]
    images_q = [fname_q[len(img_path) + 1:] for fname_q in fnames_q]
    pairs_db = []
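    # for every database image, keep its top_n nearest database neighbours
    # (rank 0 is the image itself, hence the 1:top_n+1 slice)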
    for q_id in range(len(images_db)):
        img_q = images_db[q_id]
        pairs_per_q = [
            " ".join([img_q, images_db[db_id]])
            for db_id in list(ranks_db[1:args.top_n + 1, q_id])
        ]
        pairs_db += pairs_per_q

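    # for every query image, keep its top_n nearest database images
    # (note: the same 1:top_n+1 slice also skips the single closest database hit)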
    for q_id in range(len(images_q)):
        img_q = images_q[q_id]
        pairs_per_q = [
            " ".join([img_q, images_db[db_id]])
            for db_id in list(ranks_q[1:args.top_n + 1, q_id])
        ]
        pairs_db += pairs_per_q

    with open(
            osp.join(args.data_path,
                     "image_pairs_day_night_" + str(args.top_n) + ".txt"),
            "w") as f:
        for pair in pairs_db:
            f.write(pair + "\n")

    print("Done")
예제 #28
0
def main():
    args = parser.parse_args()
    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    result_dir = 'retrieval_results'
    if args.network_path is not None:
        result_dir = os.path.join(result_dir, args.network_path)
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(),
                                                    'networks'))
        else:
            state = torch.load(args.network_path)
        net = init_network(model=state['meta']['architecture'],
                           pooling=state['meta']['pooling'],
                           whitening=state['meta']['whitening'],
                           mean=state['meta']['mean'],
                           std=state['meta']['std'],
                           pretrained=False)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        result_dir = os.path.join(result_dir, args.network_offtheshelf)
        offtheshelf = args.network_offtheshelf.split('-')
        if len(offtheshelf) == 3:
            if offtheshelf[2] == 'whiten':
                offtheshelf_whiten = True
            else:
                raise RuntimeError(
                    "Incorrect format of the off-the-shelf network. Examples: resnet101-gem | resnet101-gem-whiten"
                )
        else:
            offtheshelf_whiten = False
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(
            args.network_offtheshelf))
        net = init_network(model=offtheshelf[0],
                           pooling=offtheshelf[1],
                           whitening=offtheshelf_whiten)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if args.multiscale:
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]
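            # reuse the learned GeM pooling exponent p when aggregating the multi-scale descriptors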

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()
        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:

            print('>> {}: Whitening is precomputed, loading it...'.format(
                args.whitening))

            if args.multiscale:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            # TODO: save the learned whitening
            print('>> {}: Learning whitening...'.format(args.whitening))

            # loading db
            db_root = os.path.join(get_data_root(), 'train', args.whitening)
            ims_root = os.path.join(db_root, 'ims')
            db_fn = os.path.join(db_root,
                                 '{}-whiten.pkl'.format(args.whitening))
            with open(db_fn, 'rb') as f:
                db = pickle.load(f)
            images = [
                cid2filename(db['cids'][i], ims_root)
                for i in range(len(db['cids']))
            ]

            # extract whitening vectors
            print('>> {}: Extracting...'.format(args.whitening))
            wvecs = extract_vectors(net,
                                    images,
                                    args.image_size,
                                    transform,
                                    ms=ms,
                                    msp=msp)

            # learning whitening
            print('>> {}: Learning...'.format(args.whitening))
            wvecs = wvecs.numpy()
            m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
            Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.whitening,
                                               htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    data_root = args.data_root
    datasets = datasets_names[args.dataset]
    result_dict = {}
    for dataset in datasets:
        start = time.time()
        result_dict[dataset] = {}
        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        images = get_imlist(data_root, dataset, args.train_txt)
        qimages = get_imlist(data_root, dataset, args.query_txt)

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net,
                               images,
                               args.image_size,
                               transform,
                               root=os.path.join(data_root, dataset),
                               ms=ms,
                               msp=msp)
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                args.image_size,
                                transform,
                                root=os.path.join(data_root, dataset),
                                ms=ms,
                                msp=msp)
        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()
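        # rank the database against itself ('train') and against the queries ('test')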
        scores, ranks = cal_ranks(vecs, vecs, Lw)
        result_dict[dataset]['train'] = {'scores': scores, 'ranks': ranks}
        scores, ranks = cal_ranks(vecs, qvecs, Lw)
        result_dict[dataset]['test'] = {'scores': scores, 'ranks': ranks}
        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))

    # Save retrieval results
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    result_file = os.path.join(result_dir, args.outfile)
    np.save(result_file, result_dict)
    print('Save retrieval results to {}'.format(result_file))
예제 #29
0
파일: train.py 프로젝트: tmcortes/BELoss
def testOxfordParisHolidays(net, eConfig):

    #datasets = eConfig['test-datasets'].split(',')
    #results = []
    #
    #for dataset in datasets:
    #    results.append((dataset, np.random.rand(1)[0]))
    #
    #return results

    print('>> Evaluating network on test datasets...')

    # for testing we use image size of max 1024
    image_size = 1024

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if (eConfig['multiscale']):
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()
    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if eConfig['whitening']:

        start = time.time()

        print('>> {}: Learning whitening...'.format(eConfig['test-whiten']))

        # loading db
        db_root = os.path.join(get_data_root(), 'train',
                               eConfig['test-whiten'])
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root,
                             '{}-whiten.pkl'.format(eConfig['test-whiten']))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [
            cid2filename(db['cids'][i], ims_root)
            for i in range(len(db['cids']))
        ]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(eConfig['test-whiten']))
        wvecs = extract_vectors(net,
                                images,
                                image_size,
                                transform,
                                ms=ms,
                                msp=msp)

        # learning whitening
        print('>> {}: Learning...'.format(eConfig['test-whiten']))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(eConfig['test-whiten'],
                                               htime(time.time() - start)))

    else:
        Lw = None

    # evaluate on test datasets
    datasets = eConfig['test-datasets'].split(',')
    results = []

    for dataset in datasets:
        start = time.time()

        if (dataset != 'holidays' and dataset != 'rholidays'):
            print('>> {}: Extracting...'.format(dataset))

            # prepare config structure for the test dataset
            cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
            images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
            qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

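            # oxford105k / paris106k extend the database with the 100k distractor images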
            if (dataset == 'oxford105k' or dataset == 'paris106k'):
                images.extend(cfg['distractors'])

            # extract database and query vectors
            print('>> {}: database images...'.format(dataset))
            vecs = extract_vectors(net,
                                   images,
                                   image_size,
                                   transform,
                                   ms=ms,
                                   msp=msp)
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net,
                                    qimages,
                                    image_size,
                                    transform,
                                    bbxs,
                                    ms=ms,
                                    msp=msp)
            print('>> {}: Evaluating...'.format(dataset))

            # convert to numpy
            vecs = vecs.numpy()
            qvecs = qvecs.numpy()

            # search, rank, and print
            scores = np.dot(vecs.T, qvecs)
            ranks = np.argsort(-scores, axis=0)
            results.append(
                compute_map_and_print(
                    dataset +
                    ('+ multiscale' if eConfig['multiscale'] else ''), ranks,
                    cfg['gnd']))

            if Lw is not None:
                # whiten the vectors
                vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
                qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

                # search, rank, and print
                scores = np.dot(vecs_lw.T, qvecs_lw)
                ranks = np.argsort(-scores, axis=0)
                results.append(
                    compute_map_and_print(dataset + ' + whiten', ranks,
                                          cfg['gnd']))

        else:
            results.append(testHolidays(net, eConfig, dataset, Lw))

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))

    return results
예제 #30
0
def test(net,
         data_root,
         data_splits,
         gt_root,
         epoch,
         pass_thres=8,
         knn=10,
         query_key='val',
         db_key='train',
         log=None):
    print('>> Evaluating network on test datasets...')

    # for testing we use image size of max 1024
    image_size = 1024

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # Data loading code
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    # TODO: Whitening
    rank_data = {}
    for dataset in data_splits:
        print('>> Evaluate model on {}'.format(dataset))
        start = time.time()
        dbims = data_splits[dataset][db_key]
        qims = data_splits[dataset][query_key]
        dbvecs = extract_vectors(net,
                                 dbims,
                                 image_size,
                                 transform,
                                 root=os.path.join(data_root, dataset))
        qvecs = extract_vectors(net,
                                qims,
                                image_size,
                                transform,
                                root=os.path.join(data_root, dataset))
        print(
            '>> Extracted database images: {} query images: {} time: {:.2f} gpu usage: {:.2f}%'
            .format(dbvecs.size(), qvecs.size(),
                    time.time() - start, get_gpu_mem_usage()))

        # Retrieval
        dbvecs = dbvecs.numpy()
        qvecs = qvecs.numpy()
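        # rank database images for every query by descriptor similarity (no whitening yet)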
        scores, ranks = cal_ranks(dbvecs, qvecs, Lw=None)
        rank_data[dataset] = ranks
    print('>> Total elapsed time for retrieval: {:.2f}'.format(time.time() -
                                                               start))

    # Calculate accuracy with gt_score
    start = time.time()
    avg_percent, avg_sim = eval_retrieval(gt_root,
                                          rank_data,
                                          data_splits,
                                          pass_thres,
                                          knn,
                                          query_key=query_key,
                                          db_key=db_key)
    print('>> Total elapsed time for evaluation: {:.2f}'.format(time.time() -
                                                                start))
    lprint(
        'Test Avg percent: {}, avg similarity {}'.format(avg_percent, avg_sim),
        log)