Example #1
    def __call__(self, network, device, logger):
        stopwatch = StopWatch()

        # extract database and query vectors
        print('>> {}: database images...'.format(self.dataset))
        vecs = extract_vectors(network, self.images, self.image_size, self.transforms, device=device)
        print('>> {}: query images...'.format(self.dataset))
        if self.images == self.qimages and set(self.bbxs) == {None}:
            qvecs = vecs.clone()
        else:
            qvecs = extract_vectors(network, self.qimages, self.image_size, self.transforms, device=device, bbxs=self.bbxs)
        stopwatch.lap("extract_descriptors")

        print('>> {}: Evaluating...'.format(self.dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
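        # (the dot product is cosine similarity, assuming extract_vectors returns
        # L2-normalized descriptors; ranks[i, q] is the database index at rank i
        # for query q)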
        averages, scores = compute_map_and_print(self.dataset, ranks, self.gnd)
        stopwatch.lap("compute_score")

        first_score = next(iter(scores.values()))
        logger(None, len(first_score), "dataset", stopwatch.reset(), "scalar/time")
        logger(None, len(first_score), "score_avg", averages, "scalar/score")

        assert len({len(x) for x in scores.values()}) == 1
        for i, _ in enumerate(first_score):
            logger(i, len(first_score), "score", {x: scores[x][i] for x in scores}, "scalar/score")
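
A minimal usage sketch for the evaluator above; `DatasetEval` and `console_logger` are hypothetical names (the enclosing class and a logger matching the five-argument signature used above), not part of the original source:

# hypothetical driver for the __call__ evaluator above
evaluator = DatasetEval(dataset='roxford5k')  # assumed constructor filling images/qimages/bbxs/transforms
def console_logger(step, total, name, value, kind):
    print('[{}] {} ({}/{}): {}'.format(kind, name, step, total, value))
evaluator(network, device='cuda', logger=console_logger)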
Example #2
def test(datasets, net, noise, image_size):
    global base
    print(">> Evaluating network on test datasets...")

    net.cuda()
    net.eval()
    normalize = transforms.Normalize(mean=net.meta["mean"],
                                     std=net.meta["std"])

    def add_noise(img):
        # resize the perturbation to the image resolution, add it, clamp to [0, 1]
        n = F.interpolate(noise.unsqueeze(0),
                          mode=MODE,
                          size=tuple(img.shape[-2:]),
                          align_corners=True).squeeze()
        return torch.clamp(img + n, 0, 1)

    transform_base = transforms.Compose([transforms.ToTensor(), normalize])
    transform_query = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(add_noise), normalize])

    if "Lw" in net.meta:
        Lw = net.meta["Lw"]["retrieval-SfM-120k"]["ss"]
    else:
        Lw = None

    # evaluate on test datasets
    attack_result = {}
    for dataset in datasets:
        start = time.time()

        print(">> {}: Extracting...".format(dataset))

        cfg = configdataset(dataset, os.path.join(get_data_root(), "test"))
        images = [cfg["im_fname"](cfg, i) for i in range(cfg["n"])]
        qimages = [cfg["qim_fname"](cfg, i) for i in range(cfg["nq"])]
        bbxs = [tuple(cfg["gnd"][i]["bbx"]) for i in range(cfg["nq"])]

        # extract database and query vectors
        print(">> {}: database images...".format(dataset))
        with torch.no_grad():
            if dataset in base and str(image_size) in base[dataset]:
                vecs = base[dataset][str(image_size)]
            else:
                vecs = extract_vectors(net, images, image_size, transform_base)
                if dataset not in base:
                    base[dataset] = {}
                base[dataset][str(image_size)] = vecs
                fname = args.network_path.replace("/", "_") + ".pkl"
                os.makedirs("base", exist_ok=True)  # make sure the cache dir exists
                with open(f"base/{fname}", "wb") as f:
                    pickle.dump(base, f)
            print(">> {}: query images...".format(dataset))
            qvecs = extract_vectors(net, qimages, image_size, transform_query,
                                    bbxs)

        print(">> {}: Evaluating...".format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # whiten the vectors (skip when no whitening is available in net.meta)
        if Lw is not None:
            vecs_lw = whitenapply(vecs, Lw["m"], Lw["P"])
            qvecs_lw = whitenapply(qvecs, Lw["m"], Lw["P"])
        else:
            vecs_lw, qvecs_lw = vecs, qvecs

        # search, rank, and print
        scores = np.dot(vecs_lw.T, qvecs_lw)
        ranks = np.argsort(-scores, axis=0)
        r = compute_map_and_print(dataset + " + whiten", ranks, cfg["gnd"])
        attack_result[dataset] = r

        print(">> {}: elapsed time: {}".format(dataset,
                                               htime(time.time() - start)))
    return inv_gfr(
        attack_result,
        baseline_result[net.meta["architecture"]][net.meta["pooling"]])
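
A minimal sketch of driving this attack evaluation; the noise shape, the MODE global, and the `base` cache initialization are assumptions, not from the original source:

# hypothetical driver; assumes MODE = 'bilinear' and a loaded `net`
base = {}                                  # descriptor cache filled by test()
noise = torch.zeros(3, 224, 224)           # universal perturbation under test
result = test(['roxford5k'], net, noise, image_size=1024)
print('robustness score:', result)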
Example #3
def test(datasets, net):
    print('>> Evaluating network on test datasets...')

    # for testing we use image size of max 1024
    image_size = 1024

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()
    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.test_whiten:
        start = time.time()

        print('>> {}: Learning whitening...'.format(args.test_whiten))

        # loading db
        db_root = os.path.join(get_data_root(), 'train', args.test_whiten)
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.test_whiten))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [
            cid2filename(db['cids'][i], ims_root)
            for i in range(len(db['cids']))
        ]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(args.test_whiten))
        wvecs = extract_vectors(net,
                                images,
                                image_size,
                                transform,
                                print_freq=10,
                                batchsize=20)  # implemented with torch.no_grad

        # learning whitening
        print('>> {}: Learning...'.format(args.test_whiten))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.test_whiten,
                                               htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        bbxs = None  # query bounding boxes are not used for any dataset here

        print('>> {}: database images...'.format(dataset))
        if args.pool == 'gem':
            ms = [1, 1 / 2**(1 / 2), 1 / 2]
        else:
            ms = [1]
        if (len(ms) > 1 and net.meta['pooling'] == 'gem'
                and not net.meta['regional'] and not net.meta['whitening']):
            msp = net.pool.p.item()
            print(">> Set-up multiscale:")
            print(">>>> ms: {}".format(ms))
            print(">>>> msp: {}".format(msp))
        else:
            msp = 1
        vecs = extract_vectors(net,
                               images,
                               image_size,
                               transform,
                               bbxs,
                               ms=ms,
                               msp=msp,
                               print_freq=1000,
                               batchsize=1)  # implemented with torch.no_grad
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                image_size,
                                transform,
                                bbxs,
                                ms=ms,
                                msp=msp,
                                print_freq=1000,
                                batchsize=1)  # implemented with torch.no_grad

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        if dataset == 'cdvs_test_retrieval':
            compute_map_and_print(dataset, ranks, cfg['gnd_id'])
        else:
            compute_map_and_print(dataset, ranks, cfg['gnd'])

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))
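
For reference, the learned whitening used above is a linear projection followed by L2 re-normalization; a minimal numpy sketch of what `whitenapply` computes, assuming the usual convention (the library implementation may differ in details):

import numpy as np

def whiten_apply_sketch(X, m, P):
    # X: D x N descriptor matrix, m: mean vector, P: whitening projection
    Xw = np.dot(P, X - m)
    # re-normalize columns so dot products remain cosine similarities
    return Xw / (np.linalg.norm(Xw, axis=0, keepdims=True) + 1e-12)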
Example #4
def test(args, config, model, rank=None, world_size=None, **varargs):
    log_debug('Evaluating network on test datasets...')

    # Eval mode
    model.eval()
    data_config = config["dataloader"]

    # Average score
    avg_score = 0.0

    # Evaluate on test datasets
    list_datasets = data_config.getstruct("test_datasets")

    if data_config.get("multi_scale"):
        scales = eval(data_config.get("multi_scale"))
    else:
        scales = [1]

    for dataset in list_datasets:

        start = time.time()

        log_debug('{%s}: Loading Dataset', dataset)

        # Prepare database
        db = ParisOxfordTestDataset(root_dir=path.join(args.data, 'test',
                                                       dataset),
                                    name=dataset)

        batch_size = data_config.getint("test_batch_size")

        with torch.no_grad():
            """ Paris and Oxford are :
                    1 - resized to a ratio of desired max size, after bbx cropping 
                    2 - normalized after that
                    3 - not flipped and not scaled (!! important for evaluation)

            """
            # Prepare query loader
            log_debug('{%s}: Extracting descriptors for query images', dataset)

            query_tf = ISSTestTransform(
                shortest_size=data_config.getint("test_shortest_size"),
                longest_max_size=data_config.getint("test_longest_max_size"),
                random_scale=data_config.getstruct("random_scale"))

            query_data = ISSDataset(root_dir='',
                                    name="query",
                                    images=db['query_names'],
                                    bbx=db['query_bbx'],
                                    transform=query_tf)

            query_sampler = DistributedARBatchSampler(
                data_source=query_data,
                batch_size=data_config.getint("test_batch_size"),
                num_replicas=world_size,
                rank=rank,
                drop_last=False,  # keep the final partial batch so every query gets a descriptor
                shuffle=False)

            query_dl = torch.utils.data.DataLoader(
                query_data,
                batch_sampler=query_sampler,
                collate_fn=iss_collate_fn,
                pin_memory=True,
                num_workers=data_config.getstruct("num_workers"),
                shuffle=False)

            # Extract query vectors
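            # (descriptors are stored column-wise: one output_dim column per image)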
            qvecs = torch.zeros(varargs["output_dim"], len(query_data)).cuda()

            for it, batch in tqdm(enumerate(query_dl), total=len(query_dl)):
                # Upload batch
                batch = {
                    k: batch[k].cuda(device=varargs["device"],
                                     non_blocking=True)
                    for k in INPUTS
                }

                _, pred = model(**batch,
                                scales=scales,
                                do_prediction=True,
                                do_augmentaton=False)

                distributed.barrier()

                qvecs[:,
                      it * batch_size:(it + 1) * batch_size] = pred["ret_pred"]

                del pred

            # Prepare database data loader
            log_debug('{%s}: Extracting descriptors for database images',
                      dataset)

            database_tf = ISSTestTransform(
                shortest_size=data_config.getint("test_shortest_size"),
                longest_max_size=data_config.getint("test_longest_max_size"),
                random_scale=data_config.getstruct("random_scale"))

            database_data = ISSDataset(root_dir='',
                                       name="database",
                                       images=db['img_names'],
                                       transform=database_tf)

            database_sampler = DistributedARBatchSampler(
                data_source=database_data,
                batch_size=data_config.getint("test_batch_size"),
                num_replicas=world_size,
                rank=rank,
                drop_last=False,  # keep the final partial batch so every image gets a descriptor
                shuffle=False)

            database_dl = torch.utils.data.DataLoader(
                database_data,
                batch_sampler=database_sampler,
                collate_fn=iss_collate_fn,
                pin_memory=True,
                num_workers=data_config.getstruct("num_workers"),
                shuffle=False)

            # Extract database vectors
            database_vecs = torch.zeros(varargs["output_dim"],
                                        len(database_data)).cuda()

            for it, batch in tqdm(enumerate(database_dl),
                                  total=len(database_dl)):
                # Upload batch

                batch = {
                    k: batch[k].cuda(device=varargs["device"],
                                     non_blocking=True)
                    for k in INPUTS
                }

                _, pred = model(**batch,
                                scales=scales,
                                do_prediction=True,
                                do_augmentaton=False)

                distributed.barrier()

                database_vecs[:, it * batch_size:(it + 1) *
                              batch_size] = pred["ret_pred"]

                del pred

        # (scores could equivalently be computed on GPU:
        #  scores = torch.mm(database_vecs.t(), qvecs))

        # convert to numpy
        qvecs = qvecs.cpu().numpy()
        database_vecs = database_vecs.cpu().numpy()

        # search, rank, and print
        scores = np.dot(database_vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)

        score = compute_map_and_print(dataset, ranks, db['gnd'], log_info)
        log_info('{%s}: Running time = %s', dataset,
                 htime(time.time() - start))

        avg_score += score["mAP"] / len(list_datasets)  # running mean over datasets

    # report the mean mAP across datasets as the evaluation metric
    log_info('Average score = %s', avg_score)

    return avg_score
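
For context, `compute_map_and_print` reduces to mean average precision over the ranked lists; a minimal sketch of AP for one query, ignoring the junk/easy/hard splits the real ground-truth format uses:

import numpy as np

def average_precision_sketch(rank_column, positives):
    # rank_column: database indices sorted by descending score for one query
    # positives: set of relevant database indices for that query
    hits, ap = 0, 0.0
    for i, idx in enumerate(rank_column):
        if idx in positives:
            hits += 1
            ap += hits / (i + 1.0)  # precision at each recall point
    return ap / max(len(positives), 1)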
Example #5
def main():
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    using_cdvs = float(args.using_cdvs)
    # loading network from path
    if args.network_path is not None:

        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path], model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std are required;
        # the rest have default values in case they are missing
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:

        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta['regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1
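    # msp is the power used when aggregating descriptors over scales in
    # extract_vectors (generalized-mean over scales); msp = 1 means plain averaging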

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(
        mean=net.meta['mean'],
        std=net.meta['std']
    )
    transform = transforms.Compose([
        transforms.ToTensor(),
        normalize
    ])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:

            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))

            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']

        else:

            # if we evaluate networks from path we should save/load whitening
            # not to compute it every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
                Lw = torch.load(whiten_fn)

            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))

    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    result_dir = args.network_path[0:-8]
    epoch_lun = args.network_path[0:-8].split('/')[-1].replace('model_epoch', '')  # epoch id parsed from the checkpoint name
    print(">> Creating directory if it does not exist:\n>> '{}'".format(result_dir))
    os.makedirs(result_dir, exist_ok=True)
    for dataset in datasets:
        start = time.time()
        # search, rank, and print
        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        tuple_bbxs_qimlist = None
        tuple_bbxs_imlist = None
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        # extract database and query vectors

        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=tuple_bbxs_qimlist, ms=ms, msp=msp, batchsize=1)
        qvecs = qvecs.numpy().astype(np.float32)
        np.save(os.path.join(result_dir, "{}_qvecs_ep{}_resize.npy".format(dataset, epoch_lun)), qvecs)
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform, bbxs=tuple_bbxs_imlist, ms=ms, msp=msp, batchsize=1)
        vecs = vecs.numpy().astype(np.float32)
        np.save(os.path.join(result_dir, "{}_vecs_ep{}_resize.npy".format(dataset, epoch_lun)), vecs)
        scores = np.dot(vecs.T, qvecs)
        if using_cdvs != 0:
            # optionally fuse CDVS global-descriptor similarities into the deep scores
            print('>> {}: cdvs global descriptor loading...'.format(dataset))
            qvecs_global = cfg['qimlist_global']
            vecs_global = cfg['imlist_global']
            scores_global = np.dot(vecs_global, qvecs_global.T)
            scores += scores_global * using_cdvs
        ranks = np.argsort(-scores, axis=0)
        if args.ir_remove != '0':
            rank_len = 10
            rank_re = np.loadtxt(os.path.join(result_dir, '{}_ranks_new_relevent.txt'.format(dataset)))
            # MAX_RANK_LEN: side of the square pairwise-relevance matrix, and the cap on rank_len
            MAX_RANK_LEN = int((rank_re.shape[0]) ** 0.5)
            rank_re = rank_re.reshape(MAX_RANK_LEN, MAX_RANK_LEN, rank_re.shape[1])
            for m in range(rank_re.shape[2]):
                for i in range(rank_re.shape[0]):
                    rank_re[i][i][m] = 1.0  # an image is fully relevant to itself
            quanzhong = [1, 0.7, 0.4] + [0.1] * (MAX_RANK_LEN - 3)  # per-position weights ("quanzhong" = weights)
            for m in range(rank_re.shape[2]):
                # with adaptation enabled, derive rank_len from the query scores
                # (rank_re_q), the score threshold q_aer, and the patience cons_n
                if args.ir_adaption:
                    using_local_query = True
                    cons_n = 5
                    q_aer = float(args.ir_adaption)
                    if using_local_query:
                        # use local-feature query scores
                        rank_re_q = np.loadtxt(os.path.join(result_dir, '{}_ranks_new_query.txt'.format(dataset)))
                        query_q = rank_re_q[:, m]
                    else:
                        # fall back to deep-feature scores
                        query_q = scores[ranks[:, m], m]

                    # grow rank_len while scores stay above q_aer, tolerating up
                    # to cons_n low-scoring entries ("jishu" = counter)
                    rank_len = 0
                    jishu = 0
                    for idx in range(min(len(query_q), MAX_RANK_LEN) - cons_n):
                        if jishu < cons_n:
                            if query_q[idx] > q_aer:
                                rank_len = idx + 1
                            else:
                                jishu += 1
                        else:
                            break
                max_dim = min(rank_len, MAX_RANK_LEN)
                print(max_dim)
                if max_dim > 2:
                    # re-rank the top max_dim results: images whose weighted
                    # relevance ("fenshu" = score) clears the threshold stay in
                    # front, the rest ("list_hou" = tail list) are moved behind
                    list2 = []
                    list_hou = []
                    MAX_RANK_LEN2 = max_dim
                    for i in range(MAX_RANK_LEN2):
                        if i < max_dim:
                            fenshu = 0
                            for j in range(max_dim):
                                fenshu += rank_re[min(i, j)][max(i, j)][m] * quanzhong[j]
                            fenshu = fenshu / (max_dim - 1)
                            if fenshu > float(args.ir_remove):
                                list2.append(ranks[i][m])
                            else:
                                list_hou.append(ranks[i][m])
                        else:
                            list2.append(ranks[i][m])
                    ranks[0:MAX_RANK_LEN2, m] = list2 + list_hou
        np.savetxt(os.path.join(result_dir, "{}_ranks.txt".format(dataset)), ranks.astype(int))
        if dataset == 'cdvs_test_retrieval':
            compute_map_and_print(dataset, ranks, cfg['gnd_id'])
        else:
            compute_map_and_print(dataset, ranks, cfg['gnd'])
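
The re-ranking stage above consumes a square pairwise-relevance matrix loaded from '{}_ranks_new_relevent.txt'; its producer is not shown. A purely illustrative sketch of building such a matrix for one query from the saved deep descriptors:

# hypothetical producer of the pairwise matrix the re-ranker expects;
# in the original pipeline this file comes from a separate (unshown) matcher
K = 10                            # top-K results to re-rank
top = vecs[:, ranks[:K, 0]]       # descriptors of query 0's top-K results
rel = np.dot(top.T, top)          # K x K pairwise similarities
np.savetxt('example_pairwise_relevance.txt', rel.reshape(K * K, 1))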