Code Example #1
File: eval_LFW.py Project: haitian01/facenet-keras
def test(test_loader, model):
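    # Embed the two images of each LFW pair with the Keras model and collect
    # per-pair L2 distances; `log_interval`, `batch_size`, `evaluate` and
    # `plot_roc` are module-level names in the source project.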
    labels, distances = [], []

    for batch_idx, (data_a, data_p,
                    label) in enumerate(test_loader.generate()):
        if len(label) == 0:
            break
        out_a, out_p = model.predict(data_a), model.predict(data_p)
        dists = np.linalg.norm(out_a - out_p, axis=1)

        distances.append(dists)
        labels.append(label)

        if batch_idx % log_interval == 0:
            print('Test Epoch: [{}/{} ({:.0f}%)]'.format(
                batch_idx * batch_size + len(data_a),
                len(test_loader.validation_images),
                100. * (batch_idx * batch_size + len(data_a)) /
                len(test_loader.validation_images)))

    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist for dist in distances for subdist in dist])
    tpr, fpr, accuracy, val, val_std, far, best_thresholds = evaluate(
        distances, labels)
    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Best_thresholds: %2.5f' % best_thresholds)
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    plot_roc(fpr, tpr, figure_name="./model_data/roc_test.png")
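For context, `evaluate` is the project's LFW verification helper. A minimal, hypothetical sketch of its core computation (the accuracy at the best distance threshold; the real helper additionally returns per-fold accuracies and the VAL/FAR statistics printed above):

import numpy as np

def accuracy_at_best_threshold(distances, labels, num_thresholds=400):
    # A pair is predicted "same person" when its embedding distance falls
    # below the threshold; sweep thresholds and keep the best accuracy.
    thresholds = np.linspace(0.0, 4.0, num_thresholds)
    accuracies = [np.mean((distances < t) == labels) for t in thresholds]
    best = int(np.argmax(accuracies))
    return accuracies[best], thresholds[best]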
Code Example #2
def test(test_loader, model):
    # switch to evaluate mode
    model.eval()
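    # Same LFW pair evaluation in PyTorch; `cuda`, `log_interval` and
    # `batch_size` are globals in the source project, and the Variable(...)
    # wrappers below are no-ops on PyTorch >= 0.4.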

    labels, distances = [], []

    pbar = tqdm(enumerate(test_loader))
    for batch_idx, (data_a, data_p, label) in pbar:
        with torch.no_grad():
            data_a, data_p = data_a.type(torch.FloatTensor), data_p.type(torch.FloatTensor)
            if cuda:
                data_a, data_p = data_a.cuda(), data_p.cuda()
            data_a, data_p, label = Variable(data_a), \
                                    Variable(data_p), Variable(label)
            out_a, out_p = model(data_a), model(data_p)
            dists = torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))

        distances.append(dists.data.cpu().numpy())
        labels.append(label.data.cpu().numpy())
        if batch_idx % log_interval == 0:
            pbar.set_description('Test Epoch: [{}/{} ({:.0f}%)]'.format(
                batch_idx * batch_size, len(test_loader.dataset),
                100. * batch_idx / len(test_loader)))

    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist for dist in distances for subdist in dist])
    tpr, fpr, accuracy, val, val_std, far, best_thresholds = evaluate(
        distances, labels)
    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Best_thresholds: %2.5f' % best_thresholds)
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    plot_roc(fpr, tpr, figure_name="roc_test.png")
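Since `Variable` has been a no-op since PyTorch 0.4, the distance step above can also be written with a built-in. A minimal sketch, using stand-in tensors rather than the project's data:

import torch
import torch.nn.functional as F

out_a, out_p = torch.randn(4, 128), torch.randn(4, 128)  # stand-in embeddings
dists = F.pairwise_distance(out_a, out_p)  # sqrt(sum((a - p)**2, dim=1)), up to a tiny eps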
Code Example #3
def test(vid_model, img_model, queryloader, galleryloader, use_gpu):
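    # Image-to-video re-ID evaluation: query features come from the image
    # model, gallery features from the video model via the project's
    # extract_vid_feature helper.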
    since = time.time()
    vid_model.eval()
    img_model.eval()

    print("Extract features")
    qf, q_pids, q_camids = [], [], []
    for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
        if use_gpu:
            imgs = imgs.cuda()
        feat = img_model(imgs).data.cpu()
        qf.append(feat)
        q_pids.extend(pids)
        q_camids.extend(camids)
    qf = torch.cat(qf, 0)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)
    print("Extracted features for query set, obtained {} matrix".format(
        qf.shape))

    gf, g_pids, g_camids = [], [], []
    for batch_idx, (vids, pids, camids) in enumerate(galleryloader):
        gf.append(extract_vid_feature(vid_model, vids, use_gpu).squeeze())
        g_pids.extend(pids)
        g_camids.extend(camids)
    gf = torch.stack(gf)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)
    print("Extracted features for gallery set, obtained {} matrix".format(
        gf.shape))

    time_elapsed = time.time() - since
    print('Extracting features complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    print("Computing distance matrix")
    m, n = qf.size(0), gf.size(0)
    distmat = torch.zeros((m, n))

    q_norm = torch.norm(qf, p=2, dim=1, keepdim=True)
    g_norm = torch.norm(gf, p=2, dim=1, keepdim=True)
    qf = qf.div(q_norm.expand_as(qf))
    gf = gf.div(g_norm.expand_as(gf))
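    # after L2 normalization, the negative inner product is a cosine distance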
    for i in range(m):
        distmat[i] = -torch.mm(qf[i:i + 1], gf.t())
    distmat = distmat.numpy()

    print('image to video')
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
    print(
        'top1:{:.2%} top5:{:.2%} top10:{:.2%} top20:{:.2%} mAP:{:.2%}'.format(
            cmc[0], cmc[4], cmc[9], cmc[19], mAP))

    return cmc[0]
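The row-by-row loop above can also be written as a single matrix product. A minimal vectorized sketch with stand-in features:

import torch
import torch.nn.functional as F

qf = F.normalize(torch.randn(5, 128), dim=1)  # stand-in query features
gf = F.normalize(torch.randn(7, 128), dim=1)  # stand-in gallery features
distmat = (-torch.mm(qf, gf.t())).numpy()     # negative cosine similarity, shape (5, 7)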
Code Example #4
    def on_epoch_end(self, epoch, logs=None):
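        # End-of-epoch LFW check inside a Keras training callback; the model's
        # second output (index [1]) is used as the embedding.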
        labels, distances = [], []
        print("正在进行LFW数据集测试")

        for _, (data_a, data_p,
                label) in enumerate(self.test_loader.generate()):
            out_a, out_p = self.model.predict(data_a)[1], self.model.predict(
                data_p)[1]
            dists = np.linalg.norm(out_a - out_p, axis=1)

            distances.append(dists)
            labels.append(label)

        labels = np.array([sublabel for label in labels for sublabel in label])
        distances = np.array(
            [subdist for dist in distances for subdist in dist])
        _, _, accuracy, _, _, _, _ = evaluate(distances, labels)
        print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
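A hypothetical usage sketch, assuming this on_epoch_end lives on a keras.callbacks.Callback subclass (called LFWEvalCallback here) that stores the LFW pair loader as self.test_loader:

model.fit(train_generator, epochs=10, callbacks=[LFWEvalCallback(lfw_loader)])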
Code Example #5
def atest(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
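    # Video re-ID evaluation with part (head/body/leg) attention maps; each
    # batch holds a single tracklet (hence assert b == 1) whose n clips are
    # embedded and mean-pooled into one feature vector.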
    with torch.no_grad():
        model.eval()
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, head_map, body_map,
                        leg_map) in enumerate(queryloader):
            if batch_idx % 100 == 0:
                print("current query:" + str(batch_idx))
            if use_gpu:
                imgs = imgs.cuda()
            b, n, s, c, h, w = imgs.size()
            assert (b == 1)
            imgs = imgs.view(b * n, s, c, h, w)
            features = model(imgs, head_map, body_map, leg_map)
            features = features.view(n, -1)
            features = torch.mean(features, 0)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)

        qf = torch.stack(qf)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        print('gallery num:' + str(len(galleryloader)))

        for batch_idx, (imgs, pids, camids, head_map, body_map,
                        leg_map) in enumerate(galleryloader):
            if batch_idx % 100 == 0:
                print("current gallery:" + str(batch_idx))
            if use_gpu:
                imgs = imgs.cuda()
            # imgs = Variable(imgs, volatile=True)
            b, n, s, c, h, w = imgs.size()
            imgs = imgs.view(b * n, s, c, h, w)
            assert (b == 1)
            features = model(imgs, head_map, body_map, leg_map)
            features = features.view(n, -1)
            features = torch.mean(features, 0)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.stack(gf)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))
        print("Computing distance matrix")

        m, n = qf.size(0), gf.size(0)
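        # squared Euclidean distances via ||q||^2 + ||g||^2 - 2*q.g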
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()

        print("Computing CMC and mAP")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

        print("Results ----------")
        print("mAP: {:.1%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")

    return cmc[0]
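The expand/addmm_ pattern above computes squared Euclidean distances; torch.cdist produces the same matrix (it returns unsquared distances, hence the .pow(2)). A minimal sketch with stand-in features:

import torch

qf, gf = torch.randn(5, 2048), torch.randn(7, 2048)  # stand-in feature matrices
distmat = torch.cdist(qf, gf, p=2).pow(2).numpy()    # squared Euclidean, shape (5, 7)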
Code Example #6
File: train.py Project: victor-gui/TKP
def test(vid_model, img_model, queryloader, galleryloader, queryimgloader,
         galleryimgloader, use_gpu):
    since = time.time()
    vid_model.eval()
    img_model.eval()
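    # TKP-style cross-modal evaluation: video and image features are extracted
    # separately, then compared in all four query/gallery combinations below
    # (image-to-image, video-to-video, video-to-image, image-to-video).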

    print("Extract video features")
    vid_qf, vid_q_pids, vid_q_camids = [], [], []
    for batch_idx, (vids, pids, camids) in enumerate(queryloader):
        # if (batch_idx+1)%1000==0 or (batch_idx+1)%len(queryloader)==0:
        #     print("{}/{}".format(batch_idx+1, len(queryloader)))
        if use_gpu:
            vids = vids.cuda()
        feat = vid_model(vids)
        feat = feat.mean(1)
        feat = feat.data.cpu()

        vid_qf.append(feat)
        vid_q_pids.extend(pids)
        vid_q_camids.extend(camids)
    vid_qf = torch.cat(vid_qf, 0)
    vid_q_pids = np.asarray(vid_q_pids)
    vid_q_camids = np.asarray(vid_q_camids)
    print("Extracted features for query set, obtained {} matrix".format(
        vid_qf.shape))

    vid_gf, vid_g_pids, vid_g_camids = [], [], []
    for batch_idx, (vids, pids, camids) in enumerate(galleryloader):
        # if (batch_idx + 1) % 1000==0 or (batch_idx+1)%len(galleryloader)==0:
        #     print("{}/{}".format(batch_idx+1, len(galleryloader)))
        if use_gpu:
            vids = vids.cuda()
        feat = vid_model(vids)
        feat = feat.mean(1)
        feat = feat.data.cpu()

        vid_gf.append(feat)
        vid_g_pids.extend(pids)
        vid_g_camids.extend(camids)
    vid_gf = torch.cat(vid_gf, 0)
    vid_g_pids = np.asarray(vid_g_pids)
    vid_g_camids = np.asarray(vid_g_camids)

    if args.dataset == 'mars':
        # gallery set must contain query set, otherwise 140 query imgs will not have ground truth.
        vid_gf = torch.cat((vid_qf, vid_gf), 0)
        vid_g_pids = np.append(vid_q_pids, vid_g_pids)
        vid_g_camids = np.append(vid_q_camids, vid_g_camids)
    print("Extracted features for gallery set, obtained {} matrix".format(
        vid_gf.shape))

    print("Extract image features")
    img_qf, img_q_pids, img_q_camids = [], [], []
    for batch_idx, (imgs, pids, camids) in enumerate(queryimgloader):
        if use_gpu:
            imgs = imgs.cuda()
        feat = img_model(imgs).data.cpu()
        img_qf.append(feat)
        img_q_pids.extend(pids)
        img_q_camids.extend(camids)
    img_qf = torch.cat(img_qf, 0)
    img_q_pids = np.asarray(img_q_pids)
    img_q_camids = np.asarray(img_q_camids)
    print("Extracted features for query set, obtained {} matrix".format(
        img_qf.shape))

    img_gf, img_g_pids, img_g_camids = [], [], []
    for batch_idx, (imgs, pids, camids) in enumerate(galleryimgloader):
        if use_gpu:
            imgs = imgs.cuda()
        feat = img_model(imgs).data.cpu()
        img_gf.append(feat)
        img_g_pids.extend(pids)
        img_g_camids.extend(camids)
    img_gf = torch.cat(img_gf, 0)
    img_g_pids = np.asarray(img_g_pids)
    img_g_camids = np.asarray(img_g_camids)

    if args.dataset == 'mars':
        # gallery set must contain query set, otherwise 140 query imgs will not have ground truth.
        img_gf = torch.cat((img_qf, img_gf), 0)
        img_g_pids = np.append(img_q_pids, img_g_pids)
        img_g_camids = np.append(img_q_camids, img_g_camids)
    print("Extracted features for gallery set, obtained {} matrix".format(
        img_gf.shape))

    time_elapsed = time.time() - since
    print('Extracting features complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    print("Computing distance matrix")
    m, n = vid_qf.size(0), vid_gf.size(0)
    img_distmat = torch.zeros((m, n))
    vid_distmat = torch.zeros((m, n))
    i2v_distmat = torch.zeros((m, n))
    v2i_distmat = torch.zeros((m, n))

    img_q_norm = torch.norm(img_qf, p=2, dim=1, keepdim=True)
    img_g_norm = torch.norm(img_gf, p=2, dim=1, keepdim=True)
    vid_q_norm = torch.norm(vid_qf, p=2, dim=1, keepdim=True)
    vid_g_norm = torch.norm(vid_gf, p=2, dim=1, keepdim=True)
    img_qf = img_qf.div(img_q_norm.expand_as(img_qf))
    img_gf = img_gf.div(img_g_norm.expand_as(img_gf))
    vid_qf = vid_qf.div(vid_q_norm.expand_as(vid_qf))
    vid_gf = vid_gf.div(vid_g_norm.expand_as(vid_gf))

    for i in range(m):
        img_distmat[i] = -torch.mm(img_qf[i:i + 1], img_gf.t())
        vid_distmat[i] = -torch.mm(vid_qf[i:i + 1], vid_gf.t())
        i2v_distmat[i] = -torch.mm(img_qf[i:i + 1], vid_gf.t())
        v2i_distmat[i] = -torch.mm(vid_qf[i:i + 1], img_gf.t())

    img_distmat = img_distmat.numpy()
    vid_distmat = vid_distmat.numpy()
    i2v_distmat = i2v_distmat.numpy()
    v2i_distmat = v2i_distmat.numpy()

    print('image to image')
    cmc, mAP = evaluate(img_distmat, img_q_pids, img_g_pids, img_q_camids,
                        img_g_camids)
    print('top1:{:.2%} top5:{:.2%} top10:{:.2%} mAP:{:.2%}'.format(
        cmc[0], cmc[4], cmc[9], mAP))

    print('video to video')
    cmc, mAP = evaluate(vid_distmat, vid_q_pids, vid_g_pids, vid_q_camids,
                        vid_g_camids)
    print('top1:{:.2%} top5:{:.2%} top10:{:.2%} mAP:{:.2%}'.format(
        cmc[0], cmc[4], cmc[9], mAP))

    print('video to image')
    cmc, mAP = evaluate(v2i_distmat, vid_q_pids, img_g_pids, vid_q_camids,
                        img_g_camids)
    print('top1:{:.2%} top5:{:.2%} top10:{:.2%} mAP:{:.2%}'.format(
        cmc[0], cmc[4], cmc[9], mAP))

    print('image to video')
    cmc, mAP = evaluate(i2v_distmat, img_q_pids, vid_g_pids, img_q_camids,
                        vid_g_camids)
    print('top1:{:.2%} top5:{:.2%} top10:{:.2%} mAP:{:.2%}'.format(
        cmc[0], cmc[4], cmc[9], mAP))

    return cmc[0]
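For unit-normalized features the negative inner product used above is rank-equivalent to squared Euclidean distance, since ||q - g||^2 = 2 - 2*(q . g). A quick check:

import torch
import torch.nn.functional as F

q = F.normalize(torch.randn(128), dim=0)
g = F.normalize(torch.randn(128), dim=0)
print(torch.allclose(((q - g) ** 2).sum(), 2 - 2 * q.dot(g)))  # True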
Code Example #7
File: train.py Project: zhuMingXu/facenet-pytorch-1
def fit_ont_epoch(model, loss, epoch, epoch_size, gen, val_epoch_size, gen_val,
                  Epoch, test_loader, cuda):
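    # `net`, `optimizer`, `Batch_size` and `loss_history` come from module
    # scope in the source project; `loss` is the triplet-loss callable.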
    total_triple_loss = 0
    total_CE_loss = 0
    total_accuracy = 0

    val_total_triple_loss = 0
    val_total_CE_loss = 0
    val_total_accuracy = 0

    net.train()
    with tqdm(total=epoch_size,
              desc=f'Epoch {epoch + 1}/{Epoch}',
              postfix=dict,
              mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_size:
                break
            images, labels = batch
            with torch.no_grad():
                if cuda:
                    images = Variable(
                        torch.from_numpy(images).type(
                            torch.FloatTensor)).cuda()
                    labels = Variable(torch.from_numpy(labels).long()).cuda()
                else:
                    images = Variable(
                        torch.from_numpy(images).type(torch.FloatTensor))
                    labels = Variable(torch.from_numpy(labels).long())

            optimizer.zero_grad()
            before_normalize, outputs1 = model.forward_feature(images)
            outputs2 = model.forward_classifier(before_normalize)

            _triplet_loss = loss(outputs1, Batch_size)
            _CE_loss = nn.NLLLoss()(F.log_softmax(outputs2, dim=-1), labels)
            _loss = _triplet_loss + _CE_loss

            _loss.backward()
            optimizer.step()

            with torch.no_grad():
                accuracy = torch.mean(
                    (torch.argmax(F.softmax(outputs2, dim=-1),
                                  dim=-1) == labels).type(torch.FloatTensor))

            total_accuracy += accuracy.item()
            total_triple_loss += _triplet_loss.item()
            total_CE_loss += _CE_loss.item()

            pbar.set_postfix(
                **{
                    'total_triple_loss': total_triple_loss / (iteration + 1),
                    'total_CE_loss': total_CE_loss / (iteration + 1),
                    'accuracy': total_accuracy / (iteration + 1),
                    'lr': get_lr(optimizer)
                })
            pbar.update(1)

    net.eval()
    print('Start Validation')
    with tqdm(total=val_epoch_size,
              desc=f'Epoch {epoch + 1}/{Epoch}',
              postfix=dict,
              mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen_val):
            if iteration >= val_epoch_size:
                break
            images, labels = batch
            with torch.no_grad():
                if cuda:
                    images = Variable(
                        torch.from_numpy(images).type(
                            torch.FloatTensor)).cuda()
                    labels = Variable(torch.from_numpy(labels).long()).cuda()
                else:
                    images = Variable(
                        torch.from_numpy(images).type(torch.FloatTensor))
                    labels = Variable(torch.from_numpy(labels).long())

                optimizer.zero_grad()
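                # harmless leftover from the training loop; no backward pass
                # runs during validation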
                before_normalize, outputs1 = model.forward_feature(images)
                outputs2 = model.forward_classifier(before_normalize)

                _triplet_loss = loss(outputs1, Batch_size)
                _CE_loss = nn.NLLLoss()(F.log_softmax(outputs2, dim=-1),
                                        labels)
                _loss = _triplet_loss + _CE_loss

                accuracy = torch.mean(
                    (torch.argmax(F.softmax(outputs2, dim=-1),
                                  dim=-1) == labels).type(torch.FloatTensor))

                val_total_accuracy += accuracy.item()
                val_total_triple_loss += _triplet_loss.item()
                val_total_CE_loss += _CE_loss.item()

            pbar.set_postfix(
                **{
                    'val_total_triple_loss':
                    val_total_triple_loss / (iteration + 1),
                    'val_total_CE_loss':
                    val_total_CE_loss / (iteration + 1),
                    'val_accuracy':
                    val_total_accuracy / (iteration + 1),
                    'lr':
                    get_lr(optimizer)
                })
            pbar.update(1)

    print("开始进行LFW数据集的验证。")
    labels, distances = [], []
    for _, (data_a, data_p, label) in enumerate(test_loader):
        with torch.no_grad():
            data_a, data_p = data_a.type(torch.FloatTensor), data_p.type(
                torch.FloatTensor)
            if cuda:
                data_a, data_p = data_a.cuda(), data_p.cuda()
            data_a, data_p, label = Variable(data_a), Variable(
                data_p), Variable(label)
            out_a, out_p = model(data_a), model(data_p)
            dists = torch.sqrt(torch.sum((out_a - out_p)**2, 1))
        distances.append(dists.data.cpu().numpy())
        labels.append(label.data.cpu().numpy())

    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist for dist in distances for subdist in dist])
    _, _, accuracy, _, _, _, _ = evaluate(distances, labels)
    print('LFW_Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))

    loss_history.append_loss(
        np.mean(accuracy),
        (total_triple_loss + total_CE_loss) / epoch_size,
        (val_total_triple_loss + val_total_CE_loss) / val_epoch_size)
    print('Finish Validation')
    print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
    print('Total Loss: %.4f' % ((total_triple_loss + total_CE_loss) /
                                epoch_size))
    print('Saving state, iter:', str(epoch + 1))
    torch.save(
        model.state_dict(),
        'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth' %
        ((epoch + 1), (total_triple_loss + total_CE_loss) / epoch_size,
         (val_total_triple_loss + val_total_CE_loss) / val_epoch_size))

    return (val_total_triple_loss + val_total_CE_loss) / val_epoch_size
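Incidentally, nn.NLLLoss on top of F.log_softmax, as used for _CE_loss above, is exactly F.cross_entropy on the raw logits. A quick check:

import torch
import torch.nn as nn
import torch.nn.functional as F

logits, targets = torch.randn(4, 10), torch.randint(0, 10, (4,))
a = nn.NLLLoss()(F.log_softmax(logits, dim=-1), targets)
b = F.cross_entropy(logits, targets)
print(torch.allclose(a, b))  # True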