def evaluation(qf, q_pids, q_camids, gf, g_pids, g_camids, hamming=False, qf_0=None, gf_0=None):

    m, n = len(qf), len(gf)
    if qf_0 is not None and gf_0 is not None:
        qf1 = torch.from_numpy(qf_0)
        gf1 = torch.from_numpy(gf_0)


    qf0 = torch.from_numpy(qf)
    gf0 = torch.from_numpy(gf)


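    # Pairwise squared Euclidean distance via the expansion ||q - g||^2 = ||q||^2 + ||g||^2 - 2*q.g;
    # the in-place addmm_ below uses the legacy positional (beta, alpha, mat1, mat2) signature.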
    distmat = torch.pow(qf0, 2).sum(dim=1, keepdim=True).expand(m, n) + torch.pow(gf0, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf0, gf0.t())
    distmat = distmat.numpy()/100

    if qf_0 is not None and gf_0 is not None:

        if hamming:
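            # Hamming distance between the binary codes, computed with scipy's cdist
            # and rescaled by a fixed factor.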
            import scipy.spatial.distance as distt
            distmat1 = distt.cdist(qf1.numpy(), gf1.numpy(), 'hamming')/171
        else:
            distmat1 = torch.pow(qf1, 2).sum(dim=1, keepdim=True).expand(m, n) + torch.pow(gf1, 2).sum(dim=1,keepdim=True).expand(n, m).t()
            distmat1.addmm_(1, -2, qf1, gf1.t())
            distmat1 = distmat1.numpy()/250


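        # Sweep the fusion weight a from 0.0 to 1.0 in steps of 0.1 and evaluate
        # distmat_1 = (1 - a) * Euclidean + a * auxiliary distance at each setting.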
        for i in range(0,11):
            a = 0.1*i
            b = 1-a
            distmat_1 = b * distmat + a * distmat1
            print("Computing CMC and mAP (a={:.1f}, b={:.1f})".format(a, b))
            cmc, mAP, indices = evaluate(distmat_1, q_pids, g_pids, q_camids, g_camids)
            ranks = [1, 5, 10, 20, 30, 50, 100, 200]
            print_evaluation(cmc, mAP, ranks)
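The same pairwise squared-Euclidean construction recurs in every example below: ||q - g||^2 is expanded as ||q||^2 + ||g||^2 - 2*q.g, and the cross term is folded in with the legacy positional addmm_ call. A minimal stand-alone sketch of that computation, assuming qf and gf are 2-D float tensors of shape (m, d) and (n, d), written with the keyword form of addmm_ instead of the deprecated positional one (the helper name pairwise_sq_euclidean is just for illustration):

import torch

def pairwise_sq_euclidean(qf, gf):
    # ||q - g||^2 = ||q||^2 + ||g||^2 - 2*q.g, computed for every query/gallery pair
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # keyword equivalent of addmm_(1, -2, qf, gf.t())
    return distmat

# torch.cdist(qf, gf) ** 2 yields the same matrix up to floating-point error.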
Example #2
    def eval_result(gr, qr):

        gf, g_pids, g_camids = gr
        qf, q_pids, q_camids = qr

        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
            torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf, gf.t())
        distmat = distmat.numpy()

        print("Computing CMC and mAP")
        cmc, mAP = evaluate(distmat,
                            q_pids,
                            g_pids,
                            q_camids,
                            g_camids,
                            use_metric_cuhk03=args.use_metric_cuhk03)

        print("Results ----------")
        print("mAP: {:.2%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
        print("------------------")

        return [mAP, *cmc]
Example #3
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, args.test_batch_size * args.seq_len))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.numpy()

    print('Computing CMC and mAP')
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print('Results ----------')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in ranks:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
    print('------------------')

    if return_distmat:
        return distmat
    return cmc[0]
Example #5
print("Python time: {}".format(pytime))
print("Cython time: {}".format(cytime))
print("Cython is {} times faster than python\n".format(pytime / cytime))

print("=> Check precision")
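# Build a small synthetic query/gallery split (random distances, person IDs and camera
# IDs) so the pure-Python and Cython implementations of evaluate() can be compared.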

num_q = 10
num_g = 200
max_rank = 5
distmat = np.random.rand(num_q, num_g) * 20
q_pids = np.random.randint(0, num_q, size=num_q)
g_pids = np.random.randint(0, num_g, size=num_g)
q_camids = np.random.randint(0, 5, size=num_q)
g_camids = np.random.randint(0, 5, size=num_g)

cmc, mAP = evaluate(distmat,
                    q_pids,
                    g_pids,
                    q_camids,
                    g_camids,
                    max_rank,
                    use_cython=False)
print("Python:\nmAP = {} \ncmc = {}\n".format(mAP, cmc))
cmc, mAP = evaluate(distmat,
                    q_pids,
                    g_pids,
                    q_camids,
                    g_camids,
                    max_rank,
                    use_cython=True)
print("Cython:\nmAP = {} \ncmc = {}\n".format(mAP, cmc))
Example #6
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    flip_eval = args.flip_eval

    if flip_eval:
        print('# Using Flip Eval')

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids, q_paths = [], [], [], []

        if flip_eval:
            enumerator = enumerate(zip(queryloader[0], queryloader[1]))
        else:
            enumerator = enumerate(queryloader[0])

        for batch_idx, package in enumerator:
            end = time.time()

            if flip_eval:
                (imgs0, pids, camids, paths), (imgs1, _, _, _) = package
                if use_gpu:
                    imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
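                # Flip-eval: average the features of the original image and its
                # horizontally flipped copy before matching.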
                features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
                # print(features.size())
            else:
                (imgs, pids, camids, paths) = package
                if use_gpu:
                    imgs = imgs.cuda()

                features = model(imgs)[0]

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
            q_paths.extend(paths)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids, g_paths = [], [], [], []
        if flip_eval:
            enumerator = enumerate(zip(galleryloader[0], galleryloader[1]))
        else:
            enumerator = enumerate(galleryloader[0])

        for batch_idx, package in enumerator:
            end = time.time()

            if flip_eval:
                (imgs0, pids, camids, paths), (imgs1, _, _, _) = package
                if use_gpu:
                    imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
                features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
                # print(features.size())
            else:
                (imgs, pids, camids, paths) = package
                if use_gpu:
                    imgs = imgs.cuda()

                features = model(imgs)[0]

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
            g_paths.extend(paths)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

        if os.environ.get('save_feat'):
            import scipy.io as io
            io.savemat(
                os.environ.get('save_feat'), {
                    'q': qf.data.numpy(),
                    'g': gf.data.numpy(),
                    'qt': q_pids,
                    'gt': g_pids
                })
            # return

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch_size))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.numpy()

    if os.environ.get('distmat'):
        import scipy.io as io
        io.savemat(os.environ.get('distmat'), {
            'distmat': distmat,
            'qp': q_paths,
            'gp': g_paths
        })

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
Example #7
File: train.py  Project: zhanziwei/PAMTRI
def test(model,
         keyptaware,
         multitask,
         queryloader,
         galleryloader,
         use_gpu,
         vcolor2label,
         vtype2label,
         ranks=range(1, 51),
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf = []
        q_vids = []
        q_camids = []
        q_vcolors = []
        q_vtypes = []
        pred_q_vcolors = []
        pred_q_vtypes = []
        for batch_idx, (imgs, vids, camids, vcolors, vtypes,
                        vkeypts) in enumerate(queryloader):
            if use_gpu:
                if keyptaware:
                    imgs, vkeypts = imgs.cuda(), vkeypts.cuda()
                else:
                    imgs = imgs.cuda()

            end = time.time()

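            # The model's output signature depends on its configuration: keypoint-aware
            # variants take the keypoints as a second input, and multitask variants
            # additionally predict vehicle colour and type.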
            if keyptaware and multitask:
                output_vids, output_vcolors, output_vtypes, features = model(
                    imgs, vkeypts)
            elif keyptaware:
                output_vids, features = model(imgs, vkeypts)
            elif multitask:
                output_vids, output_vcolors, output_vtypes, features = model(
                    imgs)
            else:
                output_vids, features = model(imgs)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_vids.extend(vids)
            q_camids.extend(camids)
            if multitask:
                q_vcolors.extend(vcolors)
                q_vtypes.extend(vtypes)
                pred_q_vcolors.extend(output_vcolors.cpu().numpy())
                pred_q_vtypes.extend(output_vtypes.cpu().numpy())
        qf = torch.cat(qf, 0)
        q_vids = np.asarray(q_vids)
        q_camids = np.asarray(q_camids)
        if multitask:
            q_vcolors = np.asarray(q_vcolors)
            q_vtypes = np.asarray(q_vtypes)
            pred_q_vcolors = np.asarray(pred_q_vcolors)
            pred_q_vtypes = np.asarray(pred_q_vtypes)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf = []
        g_vids = []
        g_camids = []
        g_vcolors = []
        g_vtypes = []
        pred_g_vcolors = []
        pred_g_vtypes = []
        for batch_idx, (imgs, vids, camids, vcolors, vtypes,
                        vkeypts) in enumerate(galleryloader):
            if use_gpu:
                if keyptaware:
                    imgs, vkeypts = imgs.cuda(), vkeypts.cuda()
                else:
                    imgs = imgs.cuda()

            end = time.time()

            if keyptaware and multitask:
                output_vids, output_vcolors, output_vtypes, features = model(
                    imgs, vkeypts)
            elif keyptaware:
                output_vids, features = model(imgs, vkeypts)
            elif multitask:
                output_vids, output_vcolors, output_vtypes, features = model(
                    imgs)
            else:
                output_vids, features = model(imgs)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_vids.extend(vids)
            g_camids.extend(camids)
            if multitask:
                g_vcolors.extend(vcolors)
                g_vtypes.extend(vtypes)
                pred_g_vcolors.extend(output_vcolors.cpu().numpy())
                pred_g_vtypes.extend(output_vtypes.cpu().numpy())
        gf = torch.cat(gf, 0)
        g_vids = np.asarray(g_vids)
        g_camids = np.asarray(g_camids)
        if multitask:
            g_vcolors = np.asarray(g_vcolors)
            g_vtypes = np.asarray(g_vtypes)
            pred_g_vcolors = np.asarray(pred_g_vcolors)
            pred_g_vtypes = np.asarray(pred_g_vtypes)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_vids, g_vids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if multitask:
        print("Compute attribute classification accuracy")

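        # Map the raw colour/type annotations through the label dictionaries, then count
        # mismatches against the argmax predictions; accuracy is
        # 1 - errors / (num_query + num_gallery).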
        for q in range(q_vcolors.size):
            q_vcolors[q] = vcolor2label[q_vcolors[q]]
        for g in range(g_vcolors.size):
            g_vcolors[g] = vcolor2label[g_vcolors[g]]
        q_vcolor_errors = np.argmax(pred_q_vcolors, axis=1) - q_vcolors
        g_vcolor_errors = np.argmax(pred_g_vcolors, axis=1) - g_vcolors
        vcolor_error_num = np.count_nonzero(
            q_vcolor_errors) + np.count_nonzero(g_vcolor_errors)
        vcolor_accuracy = 1.0 - (float(vcolor_error_num) /
                                 float(distmat.shape[0] + distmat.shape[1]))
        print("Color classification accuracy: {:.2%}".format(vcolor_accuracy))

        for q in range(q_vtypes.size):
            q_vtypes[q] = vtype2label[q_vtypes[q]]
        for g in range(g_vtypes.size):
            g_vtypes[g] = vtype2label[g_vtypes[g]]
        q_vtype_errors = np.argmax(pred_q_vtypes, axis=1) - q_vtypes
        g_vtype_errors = np.argmax(pred_g_vtypes, axis=1) - g_vtypes
        vtype_error_num = np.count_nonzero(q_vtype_errors) + np.count_nonzero(
            g_vtype_errors)
        vtype_accuracy = 1.0 - (float(vtype_error_num) /
                                float(distmat.shape[0] + distmat.shape[1]))
        print("Type classification accuracy: {:.2%}".format(vtype_accuracy))

        print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
Example #8
def test(model,
         model_decoder,
         queryloader,
         galleryloader,
         use_gpu,
         epoch,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()
    model_decoder.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, img_paths,
                        imgs_texture) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, feat_texture, x_down1, x_down2, x_down3 = model(imgs)

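            # The decoder reconstructs a texture image from the encoder's multi-scale
            # features; the first image of the batch is de-normalized and written to disk.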
            recon_texture, x_sim1, x_sim2, x_sim3, x_sim4 = model_decoder(
                feat_texture, x_down1, x_down2, x_down3)
            out = recon_texture.data.cpu().numpy()[0].squeeze()
            out = out.transpose((1, 2, 0))
            out = (out / 2.0 + 0.5) * 255.
            out = out.astype(np.uint8)
            print(
                'finish: ',
                os.path.join(
                    args.save_dir,
                    img_paths[0].split('images_labeled/')[-1].split('.jpg')[0]
                    + '_ep_' + str(epoch) + '.jpg'))
            cv2.imwrite(
                os.path.join(
                    args.save_dir,
                    img_paths[0].split('images_labeled/')[-1].split('.jpg')[0]
                    + '_ep_' + str(epoch) + '.jpg'), out)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _,
                        imgs_texture) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, feat_texture, x_down1, x_down2, x_down3 = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch_size))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, qf, gf.t())
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
def test(model_plt,
         model_vecl,
         queryloader_plt,
         queryloader_vecl,
         galleryloader_plt,
         galleryloader_vecl,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):

    if use_plt:
        model_plt.eval()
        with torch.no_grad():
            qf_plt, q_pids_plt, q_camids_plt, q_names_plt = [], [], [], []
            for batch_idx, (imgs, pids, camids,
                            names) in enumerate(queryloader_plt[0]):
                if use_gpu: imgs = imgs.cuda()

                features = model_plt(imgs)
                features = features.data.cpu()

                qf_plt.append(features)
                q_pids_plt.extend(pids)
                q_camids_plt.extend(camids)
                q_names_plt.extend(names)

            qf_plt = torch.cat(qf_plt, 0)
            # qf_flip_plt = torch.cat(qf_flip_plt, 0)
            # qf_plt = (qf_plt + qf_flip_plt) / 2.
            q_pids_plt = np.asarray(q_pids_plt)
            q_camids_plt = np.asarray(q_camids_plt)
            q_names_plt = np.asarray(q_names_plt)
            print("\nPlate model: extracted features for query set, obtained {}-by-{} matrix".\
                  format(qf_plt.size(0), qf_plt.size(1)))

            gf_plt, g_pids_plt, g_camids_plt, g_names_plt = [], [], [], []
            for batch_idx, (imgs, pids, camids,
                            names) in enumerate(galleryloader_plt[0]):
                if use_gpu: imgs = imgs.cuda()

                features = model_plt(imgs)
                features = features.data.cpu()

                gf_plt.append(features)
                g_pids_plt.extend(pids)
                g_camids_plt.extend(camids)
                g_names_plt.extend(names)

            gf_plt = torch.cat(gf_plt, 0)
            # gf_flip_plt = torch.cat(gf_flip_plt, 0)
            # gf_plt = (gf_plt + gf_flip_plt) / 2.
            g_pids_plt = np.asarray(g_pids_plt)
            g_camids_plt = np.asarray(g_camids_plt)
            g_names_plt = np.asarray(g_names_plt)
            print(
                "Plate model: extracted features for gallery set, obtained {}-by-{} matrix"
                .format(gf_plt.size(0), gf_plt.size(1)))

    if not only_use_plt:
        model_vecl.eval()
        with torch.no_grad():
            qf_vecl, q_pids_vecl, q_camids_vecl, q_groups_vecl, q_names_vecl = [], [], [], [], []
            for batch_idx, (imgs, pids, goplbs, _, camids, _,
                            names) in enumerate(queryloader_vecl[0]):
                if use_gpu: imgs = imgs.cuda()

                features = model_vecl(imgs)
                features = features.data.cpu()

                qf_vecl.append(features)
                q_pids_vecl.extend(pids)
                q_camids_vecl.extend(camids)
                q_groups_vecl.extend(goplbs)
                q_names_vecl.extend(names)

            # qf_flip_vecl = []
            # for imgs, _, _, _ in queryloader_vecl[1]:
            #     features = model_vecl(imgs).data.cpu()
            #     features = torch.div(features, features.norm(dim=1, keepdim=True))
            #     qf_flip_vecl.append(features)

            qf_vecl = torch.cat(qf_vecl, 0)
            # qf_flip_vecl = torch.cat(qf_flip_vecl, 0)
            # qf_vecl = (qf_vecl + qf_flip_vecl) / 2.
            q_pids_vecl = np.asarray(q_pids_vecl)
            q_camids_vecl = np.asarray(q_camids_vecl)
            q_groups_vecl = np.asarray(q_groups_vecl)
            q_names_vecl = np.asarray(q_names_vecl)
            print("Vehicle model: extracted features for query set, obtained {}-by-{} matrix".\
                  format(qf_vecl.size(0), qf_vecl.size(1)))

            gf_vecl, g_pids_vecl, g_camids_vecl, g_groups_vecl, g_names_vecl = [], [], [], [], []
            for batch_idx, (imgs, pids, goplbs, _, camids, _,
                            names) in enumerate(galleryloader_vecl[0]):
                if use_gpu: imgs = imgs.cuda()

                features = model_vecl(imgs)
                features = features.data.cpu()

                gf_vecl.append(features)
                g_pids_vecl.extend(pids)
                g_camids_vecl.extend(camids)
                g_groups_vecl.extend(goplbs)
                g_names_vecl.extend(names)

            # gf_flip_vecl = []
            # for imgs, _, _, _ in galleryloader_vecl[1]:
            #     features = model_vecl(imgs).data.cpu()
            #     features = torch.div(features, features.norm(dim=1, keepdim=True))
            #     gf_flip_vecl.append(features)

            gf_vecl = torch.cat(gf_vecl, 0)
            # gf_flip_vecl = torch.cat(gf_flip_vecl, 0)
            # gf_vecl = (gf_vecl + gf_flip_vecl) / 2.
            g_pids_vecl = np.asarray(g_pids_vecl)
            g_camids_vecl = np.asarray(g_camids_vecl)
            g_groups_vecl = np.asarray(g_groups_vecl)
            g_names_vecl = np.asarray(g_names_vecl)
            print("Vehicle model: extracted features for gallery set, obtained {}-by-{} matrix".\
                  format(gf_vecl.size(0), gf_vecl.size(1)))

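    # Fuse vehicle and licence-plate features: plate features (scaled by k_plt) are
    # concatenated to the vehicle features whenever a plate crop exists for the image,
    # zero-padded otherwise; the plate half of a query-gallery pair is masked out
    # unless both sides have a plate.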
    def _distmat_wplt_calc(qf_vecl, gf_vecl, qf_plt, gf_plt):
        qf_vecl, gf_vecl = np.array(qf_vecl), np.array(gf_vecl)
        qf_plt, gf_plt = np.array(qf_plt), np.array(gf_plt)

        # q_incl_pos, q_excl_pos, g_incl_pos, g_excl_pos = [], [], [], []
        q_plt_flg, g_plt_flg = [], []
        for i, name in enumerate(q_names_vecl):
            if name in q_names_plt:
                # q_incl_pos.append(i)
                q_plt_flg.append(1)
            else:
                # q_excl_pos.append(i)
                q_plt_flg.append(0)

        for i, name in enumerate(g_names_vecl):
            if name in g_names_plt:
                # g_incl_pos.append(i)
                g_plt_flg.append(1)
            else:
                # g_excl_pos.append(i)
                g_plt_flg.append(0)

        q_plt_flg = np.array(q_plt_flg)[:, np.newaxis]
        g_plt_flg = np.array(g_plt_flg)[np.newaxis, :]

        all_plt_flg = q_plt_flg * g_plt_flg

        qf_new = np.zeros((len(qf_vecl), 2048))
        gf_new = np.zeros((len(gf_vecl), 2048))
        for i, ft in enumerate(qf_vecl):
            if q_names_vecl[i] in q_names_plt:
                qf_plt_add = k_plt * np.squeeze(
                    qf_plt[q_names_plt == q_names_vecl[i]], 0)
                qf_new[i] = np.concatenate((qf_vecl[i], qf_plt_add))
                # qf_new[i] = np.concatenate((np.zeros(1024), qf_plt_add))
                qf_new[i] = qf_new[i] / np.linalg.norm(qf_new[i])
            else:
                qf_new[i] = np.concatenate((qf_vecl[i], np.zeros(1024)))
        for i, ft in enumerate(gf_vecl):
            if g_names_vecl[i] in g_names_plt:
                gf_plt_add = k_plt * np.squeeze(
                    gf_plt[g_names_plt == g_names_vecl[i]], 0)
                gf_new[i] = np.concatenate((gf_vecl[i], gf_plt_add))
                # gf_new[i] = np.concatenate((np.zeros(1024), gf_plt_add))
                gf_new[i] = gf_new[i] / np.linalg.norm(gf_new[i])
            else:
                gf_new[i] = np.concatenate((gf_vecl[i], np.zeros(1024)))

        distmat = np.zeros((len(qf_new), len(gf_new)))
        for i in range(len(qf_new)):
            if (i + 1) % 400 == 0:
                print("Processed {:.2f}%...".format(i / len(qf_new) * 100))
            for j in range(len(gf_new)):
                cur_qf = qf_new[i].copy()
                cur_gf = gf_new[j].copy()
                if all_plt_flg[i, j] == 0:
                    cur_qf[1024:] = 0
                    cur_gf[1024:] = 0
                distmat[i, j] = np.linalg.norm(cur_qf - cur_gf)

        return distmat

    def _distmat_noplt_calc():
        m, n = qf_vecl.size(0), gf_vecl.size(0)

        distmat = torch.pow(qf_vecl, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf_vecl, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf_vecl, gf_vecl.t())
        distmat = distmat.numpy()
        return distmat

    def _distmat_only_plt():
        m, n = qf_plt.size(0), gf_plt.size(0)

        distmat = torch.pow(qf_plt, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf_plt, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf_plt, gf_plt.t())
        distmat = distmat.numpy()
        return distmat

    start_time = time.time()
    print("\nStart calculating distmat.")
    if only_use_plt:
        distmat = _distmat_only_plt()
        q_pids = q_pids_plt
        g_pids = g_pids_plt
        q_camids = q_camids_plt
        g_camids = g_camids_plt
    else:
        q_pids = q_pids_vecl
        g_pids = g_pids_vecl
        q_camids = q_camids_vecl
        g_camids = g_camids_vecl
        if use_plt:
            print("Calculating distmat with plates.")
            distmat = _distmat_wplt_calc(qf_vecl, gf_vecl, qf_plt, gf_plt)
        else:
            print("Calculating distmat without plates.")
            distmat = _distmat_noplt_calc()
        print("Distmat calculation done.\n")

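    # Group-based re-ranking: for every query, the gallery candidates are split by
    # group label (1-4), each group is sorted by distance, and the groups are then
    # interleaved rank position by rank position to build the final match list.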
    def _kmeans_rerank():
        all_rk_idxes_ori = np.argsort(distmat, axis=1)
        all_rk_idxes = all_rk_idxes_ori.copy()
        num_q, num_g = distmat.shape
        matches = []
        for q_idx in range(num_q):
            order = all_rk_idxes[q_idx]
            group_label = g_groups_vecl.copy()
            group_label = group_label[order]
            q_pid = q_pids_vecl[q_idx]
            q_camid = q_camids_vecl[q_idx]
            remove = (g_pids_vecl[order] == q_pid) & (g_camids_vecl[order]
                                                      == q_camid)
            keep = np.invert(remove)

            dstmt = distmat[q_idx].copy()
            rk_idxes = all_rk_idxes[q_idx].copy()
            match = (g_pids_vecl[rk_idxes] == q_pid).astype(np.int32)
            rk_idxes = rk_idxes[keep]
            group_label = group_label[keep]
            match = match[keep]

            top_rk_match = match
            # top_rk_match = match[:top_rerk_num]
            top_rk_idxes = rk_idxes
            # top_rk_idxes = rk_idxes[:top_rerk_num]
            group_label = group_label
            # group_label = group_label[:top_rerk_num]
            top_rk_dstmt = dstmt
            # top_rk_dstmt = dstmt[top_rk_idxes]
            top_rk_gf = gf_vecl[top_rk_idxes]

            # all_clsts, _ = KMeans(top_rk_gf, num_clusters)
            all_clsts = []
            for i in range(1, 5):
                all_clsts.append(np.where(group_label == i)[0])
            lens = [len(clsts) for clsts in all_clsts]
            max_len = max(lens)

            # all_grp_rk_idxes = np.zeros((num_clusters, max_len))
            # all_grp_rk_dstmt = np.zeros((num_clusters, max_len))
            all_grp_rk_idxes = np.zeros((num_clusters, max_len))
            all_grp_rk_dstmt = np.zeros((num_clusters, max_len))
            all_grp_match = np.zeros((num_clusters, max_len))

            rerk_idxes = []
            rerk_match = []
            for i, clsts in enumerate(all_clsts):
                grp_rk_idxes = top_rk_idxes[clsts]
                grp_rk_dstmt = top_rk_dstmt[clsts]
                grp_match = top_rk_match[clsts]
                rlt_rk_idxes = np.argsort(grp_rk_dstmt)
                abs_rk_idxes = grp_rk_idxes[rlt_rk_idxes]
                grp_rk_dstmt = grp_rk_dstmt[rlt_rk_idxes]
                grp_match = grp_match[rlt_rk_idxes]

                all_grp_rk_idxes[i, :len(grp_rk_idxes)] = abs_rk_idxes
                all_grp_rk_idxes[i, len(grp_rk_idxes):] = np.inf
                all_grp_rk_dstmt[i, :len(grp_rk_dstmt)] = grp_rk_dstmt
                all_grp_rk_dstmt[i, len(grp_rk_dstmt):] = np.inf
                all_grp_match[i, :len(grp_match)] = grp_match
                all_grp_match[i, len(grp_match):] = np.inf

            for i in range(max_len):
                smln_idxes = all_grp_rk_idxes[:, i]
                smln_dstmt = all_grp_rk_dstmt[:, i]
                smln_match = all_grp_match[:, i]
                rlt_rk_idxes = np.argsort(smln_dstmt)
                abs_rk_idxes = smln_idxes[rlt_rk_idxes]
                abs_rk_match = smln_match[rlt_rk_idxes]
                for idx in abs_rk_idxes:
                    if idx != np.inf:
                        rerk_idxes.append(idx)
                for mth in abs_rk_match:
                    if mth != np.inf:
                        rerk_match.append(mth)

            # all_rk_idxes[q_idx][:top_rerk_num] = np.array(rerk_idxes).astype(np.int32)
            # rk_idxes[:top_rerk_num] = np.array(rerk_idxes).astype(np.int32)
            # rk_idxes = np.array(rerk_idxes).astype(np.int32)
            # match[:top_rerk_num] = np.array(rerk_match).astype(np.int32)
            match = np.array(rerk_match).astype(np.int32)
            matches.append(match)

        return matches

    if vecl_plt_kmeans:
        print("Start reranking using kmeans.")
        matches = _kmeans_rerank()
        print("Rerank done.\n")
    else:
        matches = None

    print("Start computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03,
                        use_cython=False,
                        matches=matches)
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Evaluate test data time (h:m:s): {}.".format(elapsed))
    print("Test data results ----------")
    print("temAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("teRank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
Example #10
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         args,
         writer,
         epoch,
         ranks=[1, 5, 10, 20],
         return_distmat=False,
         tsne_clusters=3):

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        q_imgPath = []
        for batch_idx, (input) in enumerate(queryloader):
            if not args.draw_tsne:
                imgs, pids, camids = input
            else:
                imgs, pids, camids, img_path = input
                q_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        q_imgPath = np.asarray(q_imgPath)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        g_imgPath = []
        end = time.time()
        for batch_idx, (input) in enumerate(galleryloader):
            if not args.draw_tsne:
                imgs, pids, camids = input
            else:
                imgs, pids, camids, img_path = input
                g_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        g_imgPath = np.asarray(g_imgPath)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

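    # Three matching options: ECN (expanded cross neighbourhood) ranking, plain squared
    # Euclidean distance, or cosine distance; the latter two can optionally be refined
    # with k-reciprocal re-ranking.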
    if args.use_ecn:
        distmat = (ECN_custom(qf,
                              gf,
                              k=25,
                              t=3,
                              q=8,
                              method='rankdist',
                              use_cosine=args.use_cosine)).transpose()
    elif not args.use_cosine:
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf, gf.t())
        distmat = distmat.numpy()

        if args.re_ranking:
            distmat_q_q = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
                      torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
            distmat_q_q.addmm_(1, -2, qf, qf.t())
            distmat_q_q = distmat_q_q.numpy()

            distmat_g_g = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
                      torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
            distmat_g_g.addmm_(1, -2, gf, gf.t())
            distmat_g_g = distmat_g_g.numpy()

            distmat = re_ranking(distmat,
                                 distmat_q_q,
                                 distmat_g_g,
                                 k1=20,
                                 k2=6,
                                 lambda_value=0.3)

    else:
        m, n = qf.size(0), gf.size(0)
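        # Cosine distance: L2-normalize the features, then compute 1 - q.g as
        # ones(m, n) - qf_norm @ gf_norm.T via the legacy torch.addmm signature.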
        qf_norm = qf / qf.norm(dim=1)[:, None]
        gf_norm = gf / gf.norm(dim=1)[:, None]
        distmat = torch.addmm(1, torch.ones((m, n)), -1, qf_norm,
                              gf_norm.transpose(0, 1))
        distmat = distmat.numpy()

        if args.re_ranking:
            distmat_q_q = torch.addmm(1, torch.ones((m, m)), -1, qf_norm,
                                      qf_norm.transpose(0, 1))
            distmat_q_q = distmat_q_q.numpy()

            distmat_g_g = torch.addmm(1, torch.ones((n, n)), -1, gf_norm,
                                      gf_norm.transpose(0, 1))
            distmat_g_g = distmat_g_g.numpy()

            distmat = re_ranking(distmat,
                                 distmat_q_q,
                                 distmat_g_g,
                                 k1=20,
                                 k2=6,
                                 lambda_value=0.3)

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if args.draw_tsne:
        drawTSNE(qf, gf, q_pids, g_pids, q_camids, g_camids, q_imgPath,
                 g_imgPath, tsne_clusters, args.save_dir)
    if return_distmat:
        return distmat

    if writer != None:
        writer.add_scalars('Testing',
                           dict(rank_1=cmc[0], rank_5=cmc[4], mAP=mAP),
                           epoch + 1)
    return cmc[0]
Example #11
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, caps) in enumerate(queryloader):
            if use_gpu:
                imgs, caps = imgs.cuda(), caps.cuda()

            end = time.time()
            imgs_batch = imgs

            _, features = model(imgs_batch)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)

        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))
        sys.stdout.flush()

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, caps) in enumerate(galleryloader):
            if use_gpu:
                imgs, caps = imgs.cuda(), caps.cuda()

            end = time.time()
            imgs_batch = imgs

            _, features = model(imgs_batch)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))
        sys.stdout.flush()

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    sys.stdout.flush()
    if args.rerank:
        print('re-ranking')
        distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.1)
    else:
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf, gf.t())
        distmat = distmat.numpy()

    print("Computing CMC and mAP")
    sys.stdout.flush()

    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)
    sio.savemat('dismat.mat', {'dismat': distmat})
    sio.savemat('g_pids.mat', {'g_pids': g_pids})
    sio.savemat('q_pids.mat', {'q_pids': q_pids})
    sio.savemat('g_camids.mat', {'g_camids': g_camids})
    sio.savemat('q_camids.mat', {'q_camids': q_camids})

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print(cmc)
    print("------------------")
    sys.stdout.flush()

    if return_distmat:
        return distmat

    return cmc[0]
Example #12
def test_vib(model,
             queryloader,
             galleryloader,
             use_gpu,
             args,
             writer,
             epoch,
             ranks=[1, 5, 10, 20],
             return_distmat=False,
             use_cosine=False,
             draw_tsne=False,
             tsne_clusters=3):

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf_stat, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf_stat.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf_stat = torch.cat(qf_stat, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf_stat.size(0), qf_stat.size(1)))

        gf_stat, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf_stat.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf_stat = torch.cat(gf_stat, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf_stat.size(0), gf_stat.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    # pdb.set_trace()  # leftover debugging breakpoint

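    # qf_stat / gf_stat hold the distribution statistics produced by the VIB head;
    # reparametrize_n draws stochastic embeddings from them. The query embedding is
    # averaged over sampling_count draws, and each gallery draw votes into score_board.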
    m, n = qf_stat.size(0), gf_stat.size(0)
    score_board = torch.zeros((m, n, n), dtype=torch.int16)
    qf = torch.zeros(m, 512)  #,torch.zeros(m,512)
    for _ in range(args.sampling_count):
        qf_sample = model.reparametrize_n(qf_stat[:, 0], qf_stat[:, 1])
        qf = qf + qf_sample

    qf = qf / args.sampling_count

    for _ in range(args.sampling_count):

        #qf = model.reparametrize_n(qf_stat[:,0],qf_stat[:,1])
        gf = model.reparametrize_n(gf_stat[:, 0], gf_stat[:, 1])
        if not use_cosine:
            distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                      torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
            distmat.addmm_(1, -2, qf, gf.t())
            distmat = distmat.numpy()
        else:
            qf_norm = qf / qf.norm(dim=1)[:, None]
            gf_norm = gf / gf.norm(dim=1)[:, None]
            distmat = torch.addmm(1, torch.ones((m, n)), -1, qf_norm,
                                  gf_norm.transpose(0, 1))
            distmat = distmat.numpy()

        indices = np.argsort(distmat, axis=1)

        for indx in range(m):
            score_board[indx, indices[indx], list(range(n))] += 1

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if draw_tsne:
        drawTSNE(qf, gf, q_pids, g_pids, q_camids, g_camids, tsne_clusters,
                 args.save_dir)
    if return_distmat:
        return distmat

    if writer != None:
        writer.add_scalars('Testing',
                           dict(rank_1=cmc[0], rank_5=cmc[4], mAP=mAP),
                           epoch + 1)
    return cmc[0]
Example #13
def test(model, queryloader, galleryloader, use_gpu, args, writer, epoch, ranks=[1, 5, 10, 20], return_distmat=False, use_cosine=False, draw_tsne=False, tsne_clusters=3):

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        qf_std = []
        q_imgPath = []
        for batch_idx, (input) in enumerate(queryloader):
            if not args.draw_tsne:
                imgs, pids, camids = input
            else:
                imgs, pids, camids,img_path = input
                q_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features,std = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            std = std.data.cpu()
            qf.append(features)
            qf_std.append(std)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        qf_std = torch.cat(qf_std, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        q_imgPath = np.asarray(q_imgPath)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        #gf_std = []
        g_imgPath = []
        end = time.time()
        for batch_idx, (input) in enumerate(galleryloader):
            if not args.draw_tsne:
                imgs, pids, camids = input
            else:
                imgs, pids, camids,img_path = input
                g_imgPath.extend(img_path)
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features,_ = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            #std = std.data.cpu()
            gf.append(features)
            #gf_std.append(std)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        #gf_std = torch.cat(gf_std, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        g_imgPath = np.asarray(g_imgPath)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))
    m, n = qf.size(0), gf.size(0)

    if args.use_ecn:
        distmat = (ECN(qf.numpy(), gf.numpy(), k=25, t=3, q=8, method='rankdist')).transpose()
    elif args.mahalanobis:
        print("Using STD for Mahalanobis distance")
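        # Diagonal-covariance approximation: both the query and the gallery features are
        # rescaled by that query's predicted std before the usual squared-Euclidean
        # expansion is applied row by row.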
        distmat = torch.zeros((m,n))
        # #pdb.set_trace()
        # qf = qf.data.numpy()
        # gf= gf.data.numpy()
        # qf_std = qf_std.data.numpy()
        # for q_indx in range(int(m)):
        #     distmat[q_indx]= pw(np.expand_dims(qf[q_indx],axis=0),gf,metric='mahalanobis',n_jobs=8, VI=(np.eye(qf_std[q_indx].shape[0])*(1/qf_std[q_indx]).transpose()))
        #     print(q_indx)
        # pdb.set_trace()
        qf = qf / qf_std
        for q_indx in range(int(m)):
            gf_norm = gf * 1/qf_std[q_indx]
            distmat[q_indx] = torch.pow(qf[q_indx], 2).sum(dim=0, keepdim=True).expand(n) + \
                      torch.pow(gf_norm, 2).sum(dim=1, keepdim=True).squeeze()
            distmat[q_indx].unsqueeze(0).addmm_(1, -2, qf[q_indx].unsqueeze(0), gf_norm.t())
        distmat = distmat.numpy()
    elif not (use_cosine or args.use_cosine):

        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf, gf.t())
        distmat = distmat.numpy()

        if args.re_ranking:
            distmat_q_q = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
                      torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
            distmat_q_q.addmm_(1, -2, qf, qf.t())
            distmat_q_q = distmat_q_q.numpy()

            distmat_g_g = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
                      torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
            distmat_g_g.addmm_(1, -2, gf, gf.t())
            distmat_g_g = distmat_g_g.numpy()

            print("Normal Re-Ranking")
            distmat = re_ranking(distmat, distmat_q_q, distmat_g_g, k1=20, k2=6, lambda_value=0.3)
    else:
        qf_norm = qf/qf.norm(dim=1)[:,None]
        gf_norm = gf/gf.norm(dim=1)[:,None]
        distmat = torch.addmm(1,torch.ones((m,n)),-1,qf_norm,gf_norm.transpose(0,1))
        distmat = distmat.numpy()

        if args.re_ranking:
            distmat_q_q = torch.addmm(1,torch.ones((m,m)),-1,qf_norm,qf_norm.transpose(0,1))
            distmat_q_q = distmat_q_q.numpy()

            distmat_g_g = torch.addmm(1,torch.ones((n,n)),-1,gf_norm,gf_norm.transpose(0,1))
            distmat_g_g = distmat_g_g.numpy()

            print("Re-Ranking with Cosine")
            distmat = re_ranking(distmat, distmat_q_q, distmat_g_g, k1=20, k2=6, lambda_value=0.3)

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    print("------------------")

    if draw_tsne:
        drawTSNE(qf,gf,q_pids, g_pids, q_camids, g_camids,q_imgPath, g_imgPath,tsne_clusters,args.save_dir)
    if return_distmat:
        return distmat


    if writer != None:
        writer.add_scalars(
          'Testing',
          dict(rank_1=cmc[0],
               rank_5 = cmc[4],
               mAP=mAP),
          epoch + 1)
    return cmc[0]
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False,
         epoch=0):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, caps,
                        caps_raw) in enumerate(queryloader):
            if use_gpu:
                imgs, caps = imgs.cuda(), caps.cuda()

            end = time.time()

            features = model(imgs, caps)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, caps,
                        _) in enumerate(galleryloader):
            if use_gpu:
                imgs, caps = imgs.cuda(), caps.cuda()

            end = time.time()

            features = model(imgs, caps)

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    if args.rerank:
        print('re-ranking (Euclidean distance)')
        distmat = re_ranking(qf,
                             gf,
                             k1=reranking_k1,
                             k2=reranking_k2,
                             lambda_value=reranking_lambda)

    else:
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf, gf.t())
        distmat = distmat.numpy()

    if args.evaluate:
        sio.savemat(mat_path + 'dismat.mat', {'dismat': distmat})
        sio.savemat(mat_path + 'g_pids.mat', {'g_pids': g_pids})
        sio.savemat(mat_path + 'q_pids.mat', {'q_pids': q_pids})
        sio.savemat(mat_path + 'g_camids.mat', {'g_camids': g_camids})
        sio.savemat(mat_path + 'q_camids.mat', {'q_camids': q_camids})

    print("Computing CMC and mAP")
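    # market1501 and dukemtmcreid are scored with the standard evaluate(); cuhk03 uses
    # eval_map_cmc with its own separate-camera / single-gallery-shot protocol flags.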
    if args.dataset == 'market1501':
        cmc, mAP = evaluate(distmat,
                            q_pids,
                            g_pids,
                            q_camids,
                            g_camids,
                            use_metric_cuhk03=False)
    elif args.dataset == 'cuhk03':
        mAP, cmc = eval_map_cmc(distmat,
                                q_ids=q_pids,
                                g_ids=g_pids,
                                q_cams=q_camids,
                                g_cams=g_camids,
                                separate_camera_set=separate_camera_set,
                                single_gallery_shot=single_gallery_shot,
                                first_match_break=first_match_break,
                                topk=20)
    elif args.dataset == 'dukemtmcreid':
        cmc, mAP = evaluate(distmat,
                            q_pids,
                            g_pids,
                            q_camids,
                            g_camids,
                            use_metric_cuhk03=False)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print(cmc)
    print("------------------")

    if return_distmat:
        return distmat

    return cmc[0]
Example #15
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()
    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids, q_paths = [], [], [], []
        for batch_idx, (imgs, pids, camids, paths) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
            q_paths.extend(paths)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        q_paths = np.asarray(q_paths)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

        gf, g_pids, g_camids, g_paths = [], [], [], []
        for batch_idx, (imgs, pids, camids, paths) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
            g_paths.extend(paths)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        g_paths = np.asarray(g_paths)

        # TSNE analysis
        # tsne_show(data=np.array(gf), labels=g_pids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    print("Start compute euclidean distmat.")
    if args.loss_type in euclidean_distance_loss:
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(1, -2, qf, gf.t())
        distmat = distmat.numpy()
    print("Compute euclidean distmat done.")
    print("distmat shape:", distmat.shape)
    # result = {'query_f': qf.numpy(),
    #           'query_cam': q_camids, 'query_label': q_pids, 'quim_path': q_paths,
    #           'gallery_f': gf.numpy(),
    #           'gallery_cam': g_camids, 'gallery_label': g_pids, 'gaim_path': g_paths}
    # scipy.io.savemat(os.path.join(args.save_dir, 'features_' + str(60) + '.mat'), result)
    # dist_mat_dict = {'dist_mat': distmat}
    # scipy.io.savemat(os.path.join(args.save_dir, 'features_' + str(60) + '_dist.mat'), dist_mat_dict)
    print("Start computing CMC and mAP")
    start_time = time.time()
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03,
                        use_cython=False)

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Evaluate test data time (h:m:s): {}.".format(elapsed))
    print("Test data results ----------")
    print("temAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("teRank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]