コード例 #1
0
def main():
    """Evaluate PCA-256 + DBA query vectors against the database, then run
    five rounds of average query expansion (AQE) on GPU via faiss and
    report mAP for both the raw and the expanded queries.
    """
    args = parser.parse_args()

    # check if there are unknown datasets
#     pdb.set_trace()
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset))
    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    # NOTE(review): hard-coded vector directory — other variants of this
    # script use '.../model_epoch1/'; confirm this path is the intended one.
    vectore_dir='/root/server/best_model/se101_gem_cap/model_epoch/'
    # extract database and query vectors

    cfg = configdataset("pet", get_data_root())
    print('>>  query images...')
    # Query vectors: PCA-reduced to 256 dims and already DBA-expanded.
    qvecs = np.load(os.path.join(vectore_dir, "pet_qvecs_ep1_resize_pca256_dba.npy"))
    print('>>  database images...')
    vecs = np.load(os.path.join(vectore_dir, "pet_vecs_ep1_resize_pca256.npy"))
    start = time.time()
    # Inner-product ranking of every database vector against every query.
    scores = np.dot(vecs, qvecs.T)
    ranks = np.argsort(-scores, axis=0)
    # NOTE(review): `dataset` is the last value left over from the validation
    # loop above — it is only used here as a label for printing.
    compute_map_and_print(dataset, ranks, cfg['gnd_id'])


#
    print(">> compute scores..")
    res = faiss.StandardGpuResources()
    dimension = vecs.shape[1]

    #     index_flat = faiss.index_factory(dimension,"PCA4096,PQ8 ",faiss.METRIC_INNER_PRODUCT)
    # Exact inner-product (IP) index, moved to GPU 0.
    index_flat = faiss.IndexFlatIP(dimension)
    gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat)
    gpu_index_flat.add(np.ascontiguousarray(vecs))
    top_k = 20
    D, I = gpu_index_flat.search(np.ascontiguousarray(qvecs), top_k)  # actual search (115977, top_k)
    # Average query expansion: replace each query by the mean of itself and
    # its current top_k database neighbours; repeat for 5 rounds, re-searching
    # with the expanded queries between rounds (skipped after the last round).
    qe_qvecs = np.zeros((qvecs.shape), dtype=np.float32)
    for i in range(5):
        for na in range(qe_qvecs.shape[0]):
            qe_qvecs[na, :] = np.vstack((qvecs[na, :][np.newaxis, :], vecs[I[na, :top_k], :])).mean(0)
        if i != 4:
            D, I = gpu_index_flat.search(np.ascontiguousarray(qe_qvecs), top_k)

    # D, I = gpu_index_flat.search(np.ascontiguousarray(qe_qvecs), k)  # actual search
    # Final ranking with the expanded queries.
    scores = np.dot(vecs, qe_qvecs.T)
    ranks = np.argsort(-scores, axis=0)
    compute_map_and_print(dataset, ranks, cfg['gnd_id'])
コード例 #2
0
def main():
    """Fit a 256-dim PCA (faiss) on the database descriptors and project
    both database and query vectors, saving the reduced arrays alongside
    the originals.
    """
    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = "1"
    # evaluate on test datasets
    dataset = "pet"
    cfg = configdataset("pet_show_alldatabase", get_data_root())
    result_dir = 'best_model/se101_gem/model_epoch1/show_result'
    # descriptors are stored (dim, n); transpose to (n, dim) row vectors
    db_vecs = np.load(
        os.path.join(result_dir,
                     "pet_show_alldatabase_vecs_ep1_resize.npy")).T
    query_vecs = np.load(
        os.path.join(result_dir,
                     "pet_show_alldatabase_qvecs_ep1_resize.npy")).T
    input_dim = int(query_vecs.shape[1])
    reduced_dim = 256

    ## PCA method for test
    # train the rotation on the database side only, then apply to both sides
    pca = faiss.PCAMatrix(input_dim, reduced_dim)
    print(input_dim, db_vecs.shape)
    pca.train(np.ascontiguousarray(db_vecs))
    assert pca.is_trained
    query_pca = pca.apply_py(np.ascontiguousarray(query_vecs))
    db_pca = pca.apply_py(np.ascontiguousarray(db_vecs))
    print(query_pca.shape)

    np.save(
        os.path.join(result_dir,
                     "pet_show_alldatabase_vecs_ep1_resize_pca.npy"),
        db_pca)
    np.save(
        os.path.join(result_dir,
                     "pet_show_alldatabase_qvecs_ep1_resize_pca.npy"),
        query_pca)
コード例 #3
0
def main():
    """Evaluate PCA-256 + DBA query vectors on the pet dataset after the
    IR2 re-ranking step, reporting mAP on the re-ranked result.

    Fix: the original computed ``new_ranks = IR2(ranks)`` but then passed
    the *un-re-ranked* ``ranks`` to ``compute_map_and_print`` (and also
    recomputed ``scores`` a second time with no effect), so the IR2 step
    was silently discarded.  The re-ranked output is now actually used.
    """
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset))
    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    vectore_dir = '/root/server/best_model/se101_gem_cap/model_epoch1/'
    # load pre-extracted database and query vectors

    cfg = configdataset("pet", get_data_root())
    print('>>  query images...')
    qvecs = np.load(os.path.join(vectore_dir, "pet_qvecs_ep1_resize_pca256_dba.npy"))
    print('>>  database images...')
    vecs = np.load(os.path.join(vectore_dir, "pet_vecs_ep1_resize_pca256.npy"))
    start = time.time()
    # inner-product ranking of the whole database for every query
    scores = np.dot(vecs, qvecs.T)
    ranks = np.argsort(-scores, axis=0)
    # IR2 re-ranking; previously computed but then ignored
    new_ranks = IR2(ranks)
    compute_map_and_print(dataset, new_ranks, cfg['gnd_id'])
コード例 #4
0
def main():
    """Extract and save database/query descriptors for each requested test
    dataset using a loaded network (fine-tuned checkpoint or off-the-shelf),
    with optional multi-scale GeM pooling.

    A disabled (``if False:``) tail implements CDVS global-score fusion and
    a local-feature-based shortlist re-ranking step; see the review notes
    there before re-enabling it.
    """
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    using_cdvs = float(args.using_cdvs)
    # loading network from path
    if args.network_path is not None:

        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(),
                                                    'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case that is doesnt exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get(
            'local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False
        net_params['use_caption'] = args.use_caption
        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:

        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(
            args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    # NOTE(review): eval() on a CLI string — acceptable for trusted use only.
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta[
            'regional'] and not net.meta['whitening']:
        # reuse the learned GeM exponent as the multi-scale pooling power
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # evaluate on test datasets
    datasets = args.datasets.split(',')

    # results live next to the checkpoint: ".../model_epochN.pth.tar" ->
    # ".../model_epochN/show_result"; epoch_lun is the epoch suffix N
    result_dir = args.network_path[0:-8] + "/show_result"
    epoch_lun = args.network_path[0:-8].split('/')[-1].replace(
        'model_epoch', '')
    print(">> Creating directory if it does not exist:\n>> '{}'".format(
        result_dir))
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    for dataset in datasets:
        start = time.time()
        # search, rank, and print
        print('>> {}: Extracting...'.format(dataset))

        cfg = configdataset(dataset,
                            get_data_root(),
                            use_caption=args.use_caption)
        tuple_bbxs_qimlist = None
        tuple_bbxs_imlist = None
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        # extract database and query vectors
        if args.use_caption:
            # NOTE(review): caption lists are built but never used below
            images_cap = [cfg['im_caption'](cfg, i) for i in range(cfg['n'])]
            qimages_cap = [
                cfg['qim_caption'](cfg, i) for i in range(cfg['nq'])
            ]
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                args.image_size,
                                transform,
                                bbxs=tuple_bbxs_qimlist,
                                ms=ms,
                                msp=msp,
                                batchsize=20)
        qvecs = qvecs.numpy()
        qvecs = qvecs.astype(np.float32)
        np.save(
            os.path.join(result_dir,
                         "{}_qvecs_ep{}_resize.npy".format(dataset,
                                                           epoch_lun)), qvecs)
        #         qvecs = np.load(os.path.join(result_dir, "{}_qvecs_ep{}_resize.npy".format(dataset,epoch_lun)))
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net,
                               images,
                               args.image_size,
                               transform,
                               ms=ms,
                               bbxs=tuple_bbxs_imlist,
                               msp=msp,
                               batchsize=20)
        vecs = vecs.numpy()
        vecs = vecs.astype(np.float32)
        np.save(
            os.path.join(result_dir,
                         "{}_vecs_ep{}_resize.npy".format(dataset, epoch_lun)),
            vecs)
        # Disabled evaluation / re-ranking branch.
        # NOTE(review): if re-enabled, `scores` is read before being defined
        # (both `scores += ...` and `ranks = np.argsort(-scores, ...)`), and
        # the deprecated/removed `np.int` alias is used near the end.
        if False:
            if using_cdvs != 0:
                print(
                    '>> {}: cdvs global descriptor loading...'.format(dataset))
                qvecs_global = cfg['qimlist_global']
                vecs_global = cfg['imlist_global']
                scores_global = np.dot(vecs_global, qvecs_global.T)
                scores += scores_global * using_cdvs
            ranks = np.argsort(-scores, axis=0)
            if args.ir_remove != '0':
                rank_len = 10
                ## threshold ("yuzhi") and consecutive-miss count ("lianxushuliang") used while picking rank_len
                rank_re = np.loadtxt(
                    os.path.join(result_dir,
                                 '{}_ranks_new_relevent.txt'.format(dataset)))

                ## the max value of rank_len
                MAX_RANK_LEN = int((rank_re.shape[0])**0.5)
                rank_re = rank_re.reshape(MAX_RANK_LEN, MAX_RANK_LEN,
                                          rank_re.shape[1])
                # force diagonal (self-similarity) to 1 for every query m
                for m in range(rank_re.shape[2]):
                    for i in range(rank_re.shape[0]):
                        rank_re[i][i][m] = 1.0
                # position weights ("quanzhong"): emphasise the first 3 ranks
                quanzhong = [1, 0.7, 0.4] + [0.1] * (MAX_RANK_LEN - 3)
                for m in range(rank_re.shape[2]):
                    #if adaption, then change the rank_len to a adaption params according to the rank_re_q, q_aer, cons_n
                    if args.ir_adaption:
                        using_local_query = True
                        cons_n = 5
                        q_aer = float(args.ir_adaption)
                        if using_local_query:
                            ## using local feature scores, please don't forget note the query_q belong to deep
                            rank_re_q = np.loadtxt(
                                os.path.join(
                                    result_dir,
                                    '{}_ranks_new_query.txt'.format(dataset)))
                            query_q = rank_re_q[:, m]
                        else:
                            ## using deep feature scores
                            query_q = scores[ranks[:, m], m]

                        # grow rank_len while scores exceed q_aer, tolerating
                        # up to cons_n misses ("jishu" = miss counter)
                        rank_len = 0
                        jishu = 0
                        for idx in range(
                                min(len(query_q), MAX_RANK_LEN) - cons_n):
                            if jishu < cons_n:
                                if query_q[idx] > q_aer:
                                    rank_len = idx + 1
                                else:
                                    jishu += 1
                            else:
                                break
                    max_dim = min(rank_len, MAX_RANK_LEN)
                    print(max_dim)
                    if max_dim > 2:
                        #put the image to the MAX_RANK_LEN2 location if equals max_dim then re rank in the maxdim length
                        list2 = []
                        list_hou = []
                        MAX_RANK_LEN2 = max_dim
                        for i in range(MAX_RANK_LEN2):
                            if i < max_dim:
                                # "fenshu": weighted agreement of image i with
                                # the rest of the shortlist
                                fenshu = 0
                                for j in range(max_dim):
                                    fenshu += rank_re[min(i, j)][max(
                                        i, j)][m] * quanzhong[j]
                                fenshu = fenshu / (max_dim - 1)
                                if fenshu > float(args.ir_remove):
                                    list2.append(ranks[i][m])
                                else:
                                    # "list_hou": demoted (tail) images
                                    list_hou.append(ranks[i][m])
                            else:
                                list2.append(ranks[i][m])
                        ranks[0:MAX_RANK_LEN2, m] = list2 + list_hou


#             np.savetxt(os.path.join(result_dir, "{}_ranks.txt".format(dataset)), ranks.astype(np.int))
            np.savetxt(
                os.path.join('/home/donghuihui/data/CDVS_DATASET/paper_plot/',
                             "{}_our_ranks.txt".format(dataset)),
                ranks.astype(np.int))
            compute_map_and_print(dataset, ranks, cfg['gnd_id'])
コード例 #5
0
    def __init__(self):
        """Set up the retrieval service: load precomputed descriptors, build
        a GPU faiss inner-product index over the database vectors, restore
        the network from its checkpoint, and prepare the image transform and
        the sent2vec sentence-embedding model.
        """
        # pending work queue for this service object
        self.queue=[]
        datasets=["pet_show_alldatabase"]
        dataset="pet_show_alldatabase"
        # NOTE(review): this string begins with 'EXPORT_DIR=' — it looks like
        # a shell assignment pasted into a path; confirm the directory exists.
        vectore_dir='EXPORT_DIR=petModel/pet_se_resnext101_32x4d_gem_contrastive_m0.70/_adam_lr1.0e-06_wd1.0e-04_gid1/_nnum5_qsize4000_psize2000/_bsize5_imsize1024/model_epoch1'
        self.cfg = configdataset(dataset, get_data_root())
        self.images = [self.cfg['im_fname'](self.cfg, i) for i in range(self.cfg['n'])]
        self.qimages = [self.cfg['qim_fname'](self.cfg, i) for i in range(self.cfg['nq'])]
        self.name_cluster = self.cfg['nameTolable']
        print('>>  query images...')
        qvecs = np.vstack([np.load(os.path.join(vectore_dir, "{}_qvecs_ep1_resize.npy".format(dataset))).astype('float32')for dataset in datasets])
        print('>>  database images...')
        self.vecs = np.vstack([np.load(os.path.join(vectore_dir, "{}_vecs_ep1_resize.npy".format(dataset))).astype('float32')for dataset in datasets])# can be swapped for the post-DBA vectors

        print(">> compute scores..")
        # descriptors are stored (dim, n); transpose to row vectors.
        self.vecs = self.vecs.transpose(1, 0)# (1093759, 2048); if using post-DBA data, no transpose is needed here
        qvecs = qvecs.transpose(1, 0)# (115977, 2048)
        res = faiss.StandardGpuResources()
        dimension=self.vecs.shape[1]
        #     index_flat = faiss.index_factory(dimension,"PCA4096,PQ8 ",faiss.METRIC_INNER_PRODUCT)
        # exact inner-product index on GPU 0
        index_flat = faiss.IndexFlatIP(dimension)
        self.gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat)
        self.gpu_index_flat.add(np.ascontiguousarray(self.vecs))
        k=100

        # NOTE(review): threshold and the k=100 above are set but not used
        # in this constructor — presumably consumed by other methods.
        threshold = 0.09
        D, I = self.gpu_index_flat.search(np.ascontiguousarray(qvecs), 1)
        network_path = "best_model/se101_gem_cap/model_epoch1.pth.tar"

        multiscale="[1, 1/2**(1/2), 1/2]"
        state = torch.load(network_path)
        # rebuild the network from checkpoint metadata
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False
        net_params['use_caption'] = True
        self.net = init_network(net_params)
        self.net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            self.net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(self.net.meta_repr())
        # multi-scale setup: reuse the learned GeM exponent when applicable
        self.ms = list(eval(multiscale))
        if len(self.ms) > 1 and self.net.meta['pooling'] == 'gem' and not self.net.meta['regional'] and not self.net.meta['whitening']:
            self.msp = self.net.pool.p.item()
            print(">> Set-up multiscale:")
            print(">>>> ms: {}".format(self.ms))
            print(">>>> msp: {}".format(self.msp))
        else:
            self.msp = 1
        normalize = transforms.Normalize(
            mean=self.net.meta['mean'],
            std=self.net.meta['std']
        )
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            normalize
        ])

        # sentence-embedding model for caption features
        self.sequenceVevModel = sent2vec.Sent2vecModel()
        self.sequenceVevModel.load_model('sent2vec/wiki_unigrams.bin')
コード例 #6
0
ファイル: DBA.py プロジェクト: FrozenIII/PetsRetrieval
def main():
    """Query expansion over the database (DBA-style): replace each query
    descriptor by the mean of itself and its top-20 faiss inner-product
    neighbours from the database, then save the expanded vectors.
    """
    args = parser.parse_args()

    # reject unknown dataset names early
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError(
                'Unsupported or unknown dataset: {}!'.format(dataset))
    expansion_m = args.query_expansion
    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    liner_s = args.linear_s
    # evaluate on test datasets
    datasets = args.datasets.split(',')
    vectore_dir = 'best_model/se101_gem/model_epoch1/show_result'
    # pre-extracted, PCA-reduced database and query vectors
    nets = args.trained_network.split(',')

    cfg = configdataset("pet", get_data_root())

    print('>>  query images...')
    qvecs = np.load(
        os.path.join(vectore_dir,
                     "pet_show_alldatabase_qvecs_ep1_resize_pca.npy"))
    print('>>  database images...')
    vecs = np.load(
        os.path.join(vectore_dir,
                     "pet_show_alldatabase_vecs_ep1_resize_pca.npy"))
    start = time.time()

    print(vecs.shape, qvecs.shape)
    print(">> compute scores..")
    # exact inner-product index on GPU 0
    gpu_resources = faiss.StandardGpuResources()
    flat_index = faiss.IndexFlatIP(vecs.shape[1])
    searcher = faiss.index_cpu_to_gpu(gpu_resources, 0, flat_index)
    searcher.add(np.ascontiguousarray(vecs))

    top_k = 20
    D, I = searcher.search(np.ascontiguousarray(qvecs),
                           top_k)  # actual search (115977, top_k)

    # expanded query = mean of the query itself and its top_k neighbours
    qe_qvecs = np.zeros(qvecs.shape, dtype=np.float32)
    for row in tqdm(range(qe_qvecs.shape[0])):
        neighbourhood = np.vstack(
            (qvecs[row, :][np.newaxis, :], vecs[I[row, :top_k], :]))
        qe_qvecs[row, :] = neighbourhood.mean(0)

    np.save(
        os.path.join(vectore_dir,
                     "pet_show_alldatabase_qvecs_ep1_resize_dba.npy"),
        qe_qvecs)

    print('>> time: {}'.format(htime(time.time() - start)))
コード例 #7
0
def test(datasets, net):
    """Evaluate *net* on the configured test datasets: optionally learn a
    whitening transform, extract database/query descriptors (multi-scale
    when GeM pooling is used), and report mAP with and without whitening.

    NOTE(review): the `datasets` parameter is never used — it is shadowed
    below by `args.test_datasets.split(',')`; confirm whether callers rely
    on the argument or on the global args.
    """
    print('>> Evaluating network on test datasets...')

    # for testing we use image size of max 1024
    image_size = 1024

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()
    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'],
                                     std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.test_whiten:
        start = time.time()

        print('>> {}: Learning whitening...'.format(args.test_whiten))

        # loading db
        db_root = os.path.join(get_data_root(), 'train', args.test_whiten)
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.test_whiten))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [
            cid2filename(db['cids'][i], ims_root)
            for i in range(len(db['cids']))
        ]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(args.test_whiten))
        wvecs = extract_vectors(net,
                                images,
                                image_size,
                                transform,
                                print_freq=10,
                                batchsize=20)  # implemented with torch.no_grad

        # learning whitening
        print('>> {}: Learning...'.format(args.test_whiten))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.test_whiten,
                                               htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.test_datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))
        cfg = configdataset(dataset, get_data_root())
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        bbxs = None

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        # multi-scale only for GeM pooling; single scale otherwise
        if args.pool == 'gem':
            ms = [1, 1 / 2**(1 / 2), 1 / 2]
        else:
            ms = [1]
        if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta[
                'regional'] and not net.meta['whitening']:
            # reuse the learned GeM exponent as the pooling power
            msp = net.pool.p.item()
            print(">> Set-up multiscale:")
            print(">>>> ms: {}".format(ms))
            print(">>>> msp: {}".format(msp))
        else:
            msp = 1
        vecs = extract_vectors(net,
                               images,
                               image_size,
                               transform,
                               bbxs,
                               ms=ms,
                               msp=msp,
                               print_freq=1000,
                               batchsize=20)  # implemented with torch.no_grad
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net,
                                qimages,
                                image_size,
                                transform,
                                bbxs,
                                ms=ms,
                                msp=msp,
                                print_freq=1000,
                                batchsize=20)  # implemented with torch.no_grad

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        # vectors here are (dim, n), hence vecs.T
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg['gnd_id'])

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            # NOTE(review): this branch uses cfg['gnd'] while the branch
            # above uses cfg['gnd_id'] — confirm which key the config
            # actually provides; one of the two may be a typo.
            compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])

        print('>> {}: elapsed time: {}'.format(dataset,
                                               htime(time.time() - start)))