Example #1
def test(test_loader, model, epoch):
    # switch to evaluate mode
    model.eval()

    labels, distances = [], []

    pbar = tqdm(enumerate(test_loader))
    for batch_idx, (data_a, data_p, label) in pbar:
        if args.cuda:
            data_a, data_p = data_a.cuda(), data_p.cuda()
        # volatile=True is the legacy (pre-0.4) PyTorch way of disabling
        # autograd at eval time; on current PyTorch, wrap the loop body in
        # `with torch.no_grad():` instead
        data_a, data_p, label = Variable(data_a, volatile=True), \
                                Variable(data_p, volatile=True), Variable(label)

        # compute output
        out_a, out_p = model(data_a), model(data_p)
        dists = l2_dist.forward(
            out_a, out_p
        )  #torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))  # euclidean distance
        distances.append(dists.data.cpu().numpy())
        labels.append(label.data.cpu().numpy())

        if batch_idx % args.log_interval == 0:
            pbar.set_description('Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                epoch, batch_idx * len(data_a), len(test_loader.dataset),
                100. * batch_idx / len(test_loader)))

    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array(
        [subdist[0] for dist in distances for subdist in dist])

    tpr, fpr, accuracy, val, val_std, far = evaluate(distances, labels)
    print('\33[91mTest set: Accuracy: {:.8f}\n\33[0m'.format(
        np.mean(accuracy)))
    logger.log_value('Test Accuracy', np.mean(accuracy))

    plot_roc(fpr, tpr, figure_name="roc_test_epoch_{}.png".format(epoch))
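The snippet assumes module-level helpers (l2_dist, logger, plot_roc) defined elsewhere in the source repo. Judging by the commented-out formula, l2_dist computes a row-wise Euclidean distance; a minimal stand-in under that assumption:

import torch.nn as nn

# hypothetical stand-in for the repo's l2_dist helper; PairwiseDistance
# returns row-wise Euclidean distances, matching the commented-out
# torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))
l2_dist = nn.PairwiseDistance(p=2)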
Example #2
def own_train(train_loader, model, triploss, optimizer, epoch, data_size):
    model.train()
    labels, distances = [], []
    triplet_loss_sum = 0.0

    for batch_idx, (data_a, data_p, data_n, label_p,
                    label_n) in enumerate(train_loader):
        anc_img, pos_img, neg_img = data_a.to(device), data_p.to(
            device), data_n.to(device)
        with torch.set_grad_enabled(True):
            anc_embed, pos_embed, neg_embed = model(anc_img), model(
                pos_img), model(neg_img)
            pos_dist = l2_dist.forward(anc_embed, pos_embed)
            neg_dist = l2_dist.forward(anc_embed, neg_embed)
            # boolean mask of triplets that violate the margin (hard triplets);
            # np.where returns a tuple, so check its first element's length
            hard_mask = (neg_dist - pos_dist < args.margin).cpu().numpy().flatten()
            hard_triplets = np.where(hard_mask == 1)
            if len(hard_triplets[0]) == 0:
                continue
            anc_hard_embed = anc_embed[hard_triplets]
            pos_hard_embed = pos_embed[hard_triplets]
            neg_hard_embed = neg_embed[hard_triplets]

            anc_hard_img = anc_img[hard_triplets]
            pos_hard_img = pos_img[hard_triplets]
            neg_hard_img = neg_img[hard_triplets]

            model.forward_classifier(anc_hard_img)
            model.forward_classifier(pos_hard_img)
            model.forward_classifier(neg_hard_img)

            triplet_loss = triploss.forward(anc_hard_embed, pos_hard_embed,
                                            neg_hard_embed)
            logger.log_value('triplet_loss', triplet_loss)
            optimizer.zero_grad()
            triplet_loss.backward()
            optimizer.step()

            adjust_learning_rate(optimizer)

            distances.append(pos_dist.data.cpu().numpy())
            labels.append(np.ones(pos_dist.size(0)))

            distances.append(neg_dist.data.cpu().numpy())
            labels.append(np.zeros(neg_dist.size(0)))

            triplet_loss_sum += triplet_loss.item()

    avg_triplet_loss = triplet_loss_sum / data_size['train']
    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist for dist in distances for subdist in dist])

    tpr, fpr, accuracy, val, val_std, far = evaluate(distances, labels)

    print(' {} set - Triplet Loss   = {:.8f}'.format('train',
                                                     avg_triplet_loss))
    print(' {} set - Accuracy       = {:.8f}'.format('train',
                                                     np.mean(accuracy)))
    logger.log_value('Train Accuracy', np.mean(accuracy))
    plot_roc(fpr, tpr, figure_name="roc_train_epoch_{}.png".format(epoch))
    torch.save({
        'epoch': epoch + 1,
        'state_dict': model.state_dict()
    }, '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))
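adjust_learning_rate is called above but not defined in the snippet; a minimal sketch, assuming a simple multiplicative decay on every call (the actual schedule lives elsewhere in the repo):

def adjust_learning_rate(optimizer, decay=0.95):
    # hypothetical sketch: scale every parameter group's learning rate
    # by a fixed factor each time the function is called
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] * decay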
Example #3
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # keyword form; the positional (1, -2, ...) form is deprecated
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
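The distmat expression above is the standard expansion ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q . g, which avoids materializing pairwise differences. On recent PyTorch the same squared-distance matrix can be written more directly; CMC and mAP are unaffected because the square root is monotone:

# equivalent squared-Euclidean distance matrix on recent PyTorch
distmat = torch.cdist(qf, gf, p=2) ** 2
distmat = distmat.numpy()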
Example #4
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=[1, 5, 10, 20]):
    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)
            features = model(imgs)
            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()
            b, s, c, h, w = imgs.size()
            imgs = imgs.view(b * s, c, h, w)
            features = model(imgs)
            features = features.view(b, s, -1)
            if pool == 'avg':
                features = torch.mean(features, 1)
            else:
                features, _ = torch.max(features, 1)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("Computing distance matrix")

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
Example #5
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    batch_time = AverageMeter()
    
    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    print("------------------")

    return cmc[0]
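AverageMeter is the small bookkeeping helper popularized by the official PyTorch ImageNet example; a minimal version consistent with the update/avg usage above:

class AverageMeter(object):
    """Tracks the latest value, running sum, count, and average."""

    def __init__(self):
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count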
Example #6
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=[1, 5, 10, 20]):
    model.eval()

    qf, q_pids, q_camids = [], [], []
    features = []
    for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
        # print(batch_idx)
        if use_gpu:
            imgs = imgs.cuda()
        with torch.no_grad():
            imgs = Variable(imgs)
        # b=1, n=number of clips, s=16
        b, n, s, c, h, w = imgs.size()
        assert (b == 1)
        imgs = imgs.view(b * n, s, c, h, w)
        # mid = int(b*n/2)
        # l1 = imgs[0:mid]
        # l2 = imgs[mid:]
        # feature1 = model(l1)
        # feature2 = model(l2)
        if args.dataset == 'mars':
            maxratio = 1 / 4
        else:
            maxratio = 3 / 4
        for index, img in enumerate(imgs[0:int(b * n * maxratio)]):
            img = img.unsqueeze(0)
            feature = model(img)
            if index == 0:
                features = feature
            else:
                # without this else branch the first clip's feature was
                # concatenated twice before averaging
                features = torch.cat((features, feature), 0)

        # features = model(imgs)
        # features = features.view(n, -1)
        features = torch.mean(features, 0)
        features = features.data.cpu()
        qf.append(features)
        q_pids.extend(pids)
        q_camids.extend(camids)
    qf = torch.stack(qf)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)

    print("Extracted features for query set, obtained {}-by-{} matrix".format(
        qf.size(0), qf.size(1)))

    gf, g_pids, g_camids = [], [], []
    features = []
    for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
        if use_gpu:
            imgs = imgs.cuda()
        with torch.no_grad():
            imgs = Variable(imgs)
        b, n, s, c, h, w = imgs.size()
        imgs = imgs.view(b * n, s, c, h, w)
        assert (b == 1)
        features = model(imgs)
        features = features.view(n, -1)
        if pool == 'avg':
            features = torch.mean(features, 0)
        else:
            features, _ = torch.max(features, 0)
        features = features.data.cpu()
        gf.append(features)
        g_pids.extend(pids)
        g_camids.extend(camids)
    gf = torch.stack(gf)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)

    print(
        "Extracted features for gallery set, obtained {}-by-{} matrix".format(
            gf.size(0), gf.size(1)))
    print("Computing distance matrix")

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(
        m, n) + torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
Example #7
        # NOTE: this snippet begins mid-function; the opening of the print
        # below was cut off and is reconstructed by analogy with the
        # else-branch further down
        print('Results on Dev: Prec ' + str(precision) + ' Recall ' +
              str(recall) + ' f1 ' + str(f1))

        predictions = model.predict(x_features)
        precision, recall, threshold, f1 = evaluation_on_balanced(
            predictions, y_labels, is_speaking_labels, threshold)
        print('Balanced test results: Prec ' + str(precision) + ' Recall ' +
              str(recall) + ' f1 ' + str(f1))
        print('Threshold ' + str(threshold))
    else:
        dev_preds = model.predict(x_dev)
        precision, recall, threshold, f1 = eval_metrics.threshold_evaluate(
            dev_preds, y_dev, False)
        print('Results on Dev: Prec ' + str(precision) + ' Recall ' +
              str(recall) + ' f1 ' + str(f1) + ' threshold ' + str(threshold))
        predictions = model.predict(x_features)
        precision, recall, f1 = eval_metrics.evaluate(predictions, y_labels,
                                                      threshold)
        print('Results: Prec ' + str(precision) + ' Recall ' + str(recall) +
              ' f1 ' + str(f1))
        precision, recall, f1 = eval_metrics.random_baseline(y_labels)
        print('Random baseline: Prec ' + str(precision) + ' Recall ' +
              str(recall) + ' f1 ' + str(f1))
        precision, recall, f1 = eval_metrics.speech_only_baseline(
            y_labels, is_speaking_labels)
        print('Speech only baseline: Prec ' + str(precision) + ' Recall ' +
              str(recall) + ' f1 ' + str(f1))
        precision, recall, f1 = eval_metrics.content_words_baseline(
            y_labels, is_speaking_labels, is_function_word_labels)
        print('Content word baseline: Prec ' + str(precision) + ' Recall ' +
              str(recall) + ' f1 ' + str(f1))
        failure_analysis(y_labels, predictions, test_audio_files, test_times)
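The eval_metrics helpers are not included in the snippet. As a rough stand-in, the thresholded precision/recall/F1 that eval_metrics.evaluate appears to return can be computed with scikit-learn; the function name and threshold semantics here are assumptions:

from sklearn.metrics import precision_recall_fscore_support

def evaluate_at_threshold(predictions, y_labels, threshold):
    # hypothetical stand-in: binarize the scores at `threshold`, then score
    preds = (predictions >= threshold).astype(int)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_labels, preds, average='binary')
    return precision, recall, f1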
Example #8
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         use_salience=False,
         use_parsing=False,
         save_dir="",
         epoch=-1,
         save_rank=False,
         use_re_ranking=False):
    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        qimgs = []
        for batch_idx, tuple_i in enumerate(queryloader):
            if use_salience and not use_parsing:
                imgs, pids, camids, salience_imgs, qimg = tuple_i
            elif not use_salience and use_parsing:
                imgs, pids, camids, parsing_imgs, qimg = tuple_i
            elif use_salience and use_parsing:
                imgs, pids, camids, salience_imgs, parsing_imgs, qimg = tuple_i
            else:
                imgs, pids, camids, qimg = tuple_i

            if use_gpu:
                imgs = imgs.cuda()

            if use_salience and not use_parsing:
                features = model(imgs, salience_masks=salience_imgs)
            elif not use_salience and use_parsing:
                features = model(imgs, parsing_masks=parsing_imgs)
            elif use_salience and use_parsing:
                features = model(imgs,
                                 salience_masks=salience_imgs,
                                 parsing_masks=parsing_imgs)
            else:
                features = model(imgs)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
            qimgs.extend(qimg)

        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        qimgs = np.asarray(qimgs)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        gimgs = []
        for batch_idx, tuple_i in enumerate(galleryloader):
            if use_salience and not use_parsing:
                imgs, pids, camids, salience_imgs, gimg = tuple_i
            elif not use_salience and use_parsing:
                imgs, pids, camids, parsing_imgs, gimg = tuple_i
            elif use_salience and use_parsing:
                imgs, pids, camids, salience_imgs, parsing_imgs, gimg = tuple_i
            else:
                imgs, pids, camids, gimg = tuple_i

            if use_gpu:
                imgs = imgs.cuda()

            if use_salience and not use_parsing:
                features = model(imgs, salience_masks=salience_imgs)
            elif not use_salience and use_parsing:
                features = model(imgs, parsing_masks=parsing_imgs)
            elif use_salience and use_parsing:
                features = model(imgs,
                                 salience_masks=salience_imgs,
                                 parsing_masks=parsing_imgs)
            else:
                features = model(imgs)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
            gimgs.extend(gimg)

        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        gimgs = np.asarray(gimgs)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("Computing distance matrix")

    if use_re_ranking:
        qg_distmat = get_distance_matrix(qf, gf)
        gg_distmat = get_distance_matrix(gf, gf)
        qq_distmat = get_distance_matrix(qf, qf)

        print("Re-ranking feature maps")
        distmat = re_ranking(qg_distmat, qq_distmat, gg_distmat)
    else:
        distmat = get_distance_matrix(qf, gf)

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03,
                        query_img_paths=qimgs,
                        gallery_img_paths=gimgs,
                        save_dir=save_dir,
                        epoch=epoch,
                        save_rank=save_rank)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
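get_distance_matrix is referenced above but not defined in the snippet; a sketch consistent with the pairwise squared-Euclidean computation used by the other examples:

def get_distance_matrix(x, y):
    # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x . y, computed without
    # materializing the m-by-n-by-d difference tensor
    m, n = x.size(0), y.size(0)
    distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(x, y.t(), beta=1, alpha=-2)
    return distmat.numpy()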
Example #9
def test(epoch, model, queryloader, galleryloader, use_gpu=True, ranks=[1, 5, 10, 20], summary=None):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = extract_feature(imgs, model)
            # features = features/torch.norm(features,p=2,dim=1,keepdim=True)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            
            end = time.time()
            features = extract_feature(imgs, model)
            # features = features/torch.norm(features,p=2,dim=1,keepdim=True)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        if args.save_fea:
            print("Saving result.mat to {}".format(args.save_dir))
            result = {'gallery_f': gf.numpy(), 'gallery_label': g_pids,
                      'gallery_cam': g_camids, 'query_f': qf.numpy(),
                      'query_label': q_pids, 'query_cam': q_camids}
            sio.savemat(os.path.join(args.save_dir, 'dp_result.mat'), result)
            return

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
    
    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)

    print("----------Results-----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    #print("---- Start Reranking... ----")
    #rerank_cmc, rerank_mAP = rerank_main(qf,q_camids,q_pids,gf,g_camids,g_pids)
    #print("Rerank_mAP: {:.1%}".format(rerank_mAP))
    #print("Rerank_CMC curve")
    #for r in ranks:
    #    print("Rank-{:<3}: {:.1%}".format(r, rerank_cmc[r-1]))
    print("----------------------------")
    if summary is not None:
        summary.add_scalars('rank result',
                            {'rank1': round(cmc[0], 3) * 100,
                             'rank5': round(cmc[1], 3) * 100,
                             'mAP': round(mAP, 3) * 100}, epoch)
    return cmc[0] #rerank_cmc[0]
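extract_feature is not shown in the snippet; given the commented-out normalization line, it is presumably little more than a forward pass. A minimal stand-in under that assumption:

def extract_feature(imgs, model):
    # hypothetical stand-in: plain forward pass; the commented-out line in
    # the snippet suggests optional L2 normalization of the output
    return model(imgs)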
Example #10
        # compute output
        out_a, out_p = model(data_a), model(data_p)
        dists = l2_dist.forward(out_a, out_p)  # torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))  # euclidean distance
        distances.append(dists.data.cpu().numpy())
        labels.append(label.data.cpu().numpy())

        if batch_idx % args.log_interval == 0:
            pbar.set_description('Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                epoch, batch_idx * len(data_a), len(test_loader.dataset),
                100. * batch_idx / len(test_loader)))

    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist[0] for dist in distances for subdist in dist])

    tpr, fpr, accuracy, val, val_std, far = evaluate(distances, labels)
    print('\33[91mTest set: Accuracy: {:.8f}\n\33[0m'.format(np.mean(accuracy)))
    logger.log_value('Test Accuracy', np.mean(accuracy))

    plot_roc(fpr, tpr, figure_name="roc_test_epoch_{}.png".format(epoch))

def testaccuracy(test_loader, model, epoch):
    # switch to evaluate mode
    model.eval()
    pbar = tqdm(enumerate(test_loader))
    prec = []
    correct = 0
    for batch_idx, (data, label) in pbar:
        data_v = Variable(data.cuda())
        target_value = Variable(label)
Example #11
def test_gcn_person_batch(model,
                          queryloader,
                          querygalleryloader,
                          galleryloader,
                          pool,
                          use_gpu,
                          ranks=[1, 5, 10, 20]):
    model.eval()

    g_bs = 16

    q_pids, q_pids_p, q_camids, q_camids_p = [], [], [], []
    g_pids, g_pids_p, g_camids, g_camids_p = [], [], [], []
    for batch_idx, (_, gids, pimgs, pids, camids, _) in enumerate(queryloader):
        q_pids.extend(gids)
        q_pids_p.extend(pids)
        q_camids.extend(camids)
        q_camids_p.extend([camids] * len(pids))
    #print(camids)
    q_pids = np.asarray(q_pids)
    q_pids_p = np.asarray(q_pids_p)
    q_pids_p = np.squeeze(q_pids_p)
    q_camids = np.asarray(q_camids)
    q_camids_p = np.asarray(q_camids_p)
    max_qcam = camids + 1
    print(q_pids.shape, q_pids_p.shape, q_camids.shape, q_camids_p.shape)

    for batch_idx, (_, gids, pimgs, pids, camids,
                    _) in enumerate(querygalleryloader):
        g_pids.extend(gids)
        #print(gids, pids, camids)
        tmp_pids = []
        for j in range(g_bs):
            tmp_pids.append([])
            for i in range(len(pids)):
                tmp_pids[j].append(pids[i][j])
        # tmp_pids -> list g_bs * 5
        for i in range(g_bs):
            g_pids_p.extend(tmp_pids[i])
            #print(camids)
            #print(camids[i].item())
            g_camids.extend([camids[i]])
            g_camids_p.extend([camids[i]] * len(tmp_pids[i]))
        #g_camids_p.extend([camids]* len(pids))
    #g_pids = np.asarray(g_pids)
    #g_pids_p = np.asarray(g_pids_p)
    #g_camids = np.asarray(g_camids)
    #g_camids_p = np.asarray(g_camids_p)
    #print(g_pids.shape, g_pids_p.shape, g_camids.shape, g_camids_p.shape)

    for batch_idx, (_, gids, pimgs, pids, camids,
                    _) in enumerate(galleryloader):
        g_pids.extend(gids)
        #print(gids, pids, camids)
        tmp_pids = []
        for j in range(g_bs):
            tmp_pids.append([])
            for i in range(len(pids)):
                tmp_pids[j].append(pids[i][j])
        # tmp_pids -> list g_bs * 5
        for i in range(g_bs):
            g_pids_p.extend(tmp_pids[i])
            #print(camids)
            #print(camids[i].item())
            g_camids.extend([camids[i]])
            g_camids_p.extend([camids[i] + max_qcam] * len(tmp_pids[i]))
        #g_camids_p.extend([camids]* len(pids))
    g_pids = np.asarray(g_pids)
    g_pids_p = np.asarray(g_pids_p)
    g_camids = np.asarray(g_camids)
    g_camids_p = np.asarray(g_camids_p)
    print(g_pids.shape, g_pids_p.shape, g_camids.shape, g_camids_p.shape)

    m, n = q_pids.shape[0], g_pids.shape[0]
    distmat = torch.zeros((m, n))

    m, n = q_pids_p.shape[0], g_pids_p.shape[0]
    distmat_p = torch.zeros((m, n))
    p_start = 0
    p_end = 0

    with torch.no_grad():
        for batch_idx, (_, gids, pimgs, pids, camids,
                        lenp) in enumerate(queryloader):
            #if batch_idx < 1720:
            #    continue
            start_time = time.time()
            if use_gpu:
                pimgs = pimgs.cuda()
            pimgs = Variable(pimgs)
            # b=1, n=number of clips, s=16
            b, s, c, h, w = pimgs.size()
            #pimgs = pimgs.permute(1, 0, 2, 3, 4)
            assert (b == 1)
            pimgs = pimgs.repeat(g_bs, 1, 1, 1, 1)
            pimgs = pimgs.view(g_bs * s, c, h, w)
            #pimgs = pimgs.view(s, c, h, w)
            num_nodes = s
            adj = []
            adj0 = torch.ones((lenp, lenp))
            if use_gpu:
                adj0 = adj0.cuda()
                adj0 = Variable(adj0)
                adj0.requires_grad = False
            for aa in range(g_bs):
                adj.append(adj0)
            p_start = batch_idx * s
            p_end = (batch_idx + 1) * s
            #print(p_start, p_end)
            #print(batch_idx, g_bs, s)
            g_start = 0
            g_end = 0

            for batch_idx_g, (_, gids_g, pimgs_g, pids_g, camids_g,
                              lenp_g) in enumerate(querygalleryloader):
                if use_gpu:
                    pimgs_g = pimgs_g.cuda()
                pimgs_g = Variable(pimgs_g)
                # pimgs = pimgs.permute(1, 0, 2, 3, 4)
                b, s, c, h, w = pimgs_g.size()
                pimgs_g = pimgs_g.view(b * s, c, h, w)
                #pimgs_g = pimgs_g.view(s, c, h, w)
                assert (b == g_bs)
                num_nodes = s
                adj_g = []
                for aa in range(g_bs):
                    adj1 = torch.ones((lenp_g[aa], lenp_g[aa]))
                    if use_gpu:
                        adj1 = adj1.cuda()
                    adj1 = Variable(adj1)
                    adj1.requires_grad = False
                    adj_g.append(adj1)
                features1, features2, features_p1, features_p2 = model(
                    pimgs, pimgs_g, adj, adj_g)
                #print(features_p1[0].shape, features_p2[0].shape)
                features_p1 = torch.cat(features_p1, dim=1)
                features_p2 = torch.cat(features_p2, dim=1)
                #print(features_p1.shape)
                dist_p = torch.pow(features_p1, 2).sum(dim=1, keepdim=True) + \
                          torch.pow(features_p2, 2).sum(dim=1, keepdim=True).t()
                dist_p.addmm_(features_p1, features_p2.t(), beta=1, alpha=-2)
                #p_end = p_start + dist_p.shape[0]
                #assert (p_end - p_start) == dist_p.shape[0]
                #print(p_end-p_start, dist_p.shape[0])
                g_end = g_start + dist_p.shape[1]
                #print(dist_p.shape)
                #print(features_p1.shape, features_p2.shape)
                #print(distmat_p[p_start:p_end, g_start:g_end].shape)
                #distmat_p[p_start:p_end, g_start:g_end] = dist_p
                for i in range(g_bs):
                    distmat_p[p_start:p_end, g_start + i * s:g_start +
                              (i + 1) * s] = dist_p[i * s:(i + 1) * s,
                                                    i * s:(i + 1) * s]
                #distmat_p[p_start:p_end, g_start:g_end] = dist_p
                assert (g_end == g_start + (i + 1) * s)
                g_start = g_end
                #print(dist)
                dist = F.pairwise_distance(features1, features2)
                #print(dist.shape)
                distmat[batch_idx,
                        batch_idx_g * g_bs:(batch_idx_g + 1) * g_bs] = dist
                #distmat[batch_idx, batch_idx_g] = dist
            #print(g_end)
            max_batch_idx_g = batch_idx_g + 1
            for batch_idx_g, (_, gids_g, pimgs_g, pids_g, camids_g,
                              lenp_g) in enumerate(galleryloader):
                if use_gpu:
                    pimgs_g = pimgs_g.cuda()
                pimgs_g = Variable(pimgs_g)
                # pimgs = pimgs.permute(1, 0, 2, 3, 4)
                b, s, c, h, w = pimgs_g.size()
                pimgs_g = pimgs_g.view(b * s, c, h, w)
                #pimgs_g = pimgs_g.view(s, c, h, w)
                assert (b == g_bs)
                num_nodes = s
                adj_g = []
                for aa in range(g_bs):
                    adj1 = torch.ones((lenp_g[aa], lenp_g[aa]))
                    if use_gpu:
                        adj1 = adj1.cuda()
                    adj1 = Variable(adj1)
                    adj1.requires_grad = False
                    adj_g.append(adj1)
                features1, features2, features_p1, features_p2 = model(
                    pimgs, pimgs_g, adj, adj_g)
                #print(features_p1[0].shape, features_p2[0].shape)
                features_p1 = torch.cat(features_p1, dim=1)
                features_p2 = torch.cat(features_p2, dim=1)
                #print(features_p1.shape)
                dist_p = torch.pow(features_p1, 2).sum(dim=1, keepdim=True) + \
                          torch.pow(features_p2, 2).sum(dim=1, keepdim=True).t()
                dist_p.addmm_(features_p1, features_p2.t(), beta=1, alpha=-2)
                #p_end = p_start + dist_p.shape[0]
                #assert (p_end - p_start) == dist_p.shape[0]
                #print(p_end-p_start, dist_p.shape[0])
                g_end = g_start + dist_p.shape[1]
                #print(dist_p.shape)
                #print(features_p1.shape, features_p2.shape)
                #print(distmat_p[p_start:p_end, g_start:g_end].shape)
                #distmat_p[p_start:p_end, g_start:g_end] = dist_p
                for i in range(g_bs):
                    distmat_p[p_start:p_end, g_start + i * s:g_start +
                              (i + 1) * s] = dist_p[i * s:(i + 1) * s,
                                                    i * s:(i + 1) * s]
                #distmat_p[p_start:p_end, g_start:g_end] = dist_p
                assert (g_end == g_start + (i + 1) * s)
                g_start = g_end
                #print(dist)
                dist = F.pairwise_distance(features1, features2)
                #print(dist.shape)
                distmat[batch_idx, (max_batch_idx_g + batch_idx_g) *
                        g_bs:(max_batch_idx_g + batch_idx_g + 1) * g_bs] = dist
            #print(g_end)
            #p_start = p_end
            #print(batch_idx)
            end_time = time.time()
            print("image {:04d}, time : {:f}".format(batch_idx,
                                                     end_time - start_time))
    distmat = distmat.numpy()
    distmat_p = distmat_p.numpy()
    #print(distmat)

    print("Computing CMC and mAP")
    print(distmat.shape, q_pids.shape, g_pids.shape, q_camids.shape,
          g_camids.shape)
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
    #cmc_p, mAP_p = evaluate_person(distmat_p, q_pids_p, g_pids_p, q_camids_p, g_camids_p)

    print("Group Reid Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    print(distmat_p.shape, q_pids_p.shape, g_pids_p.shape, q_camids_p.shape,
          g_camids_p.shape)
    cmc_p, mAP_p = evaluate_person(distmat_p, q_pids_p, g_pids_p, q_camids_p,
                                   g_camids_p)
    print("Person Reid Results ----------")
    print("mAP: {:.1%}".format(mAP_p))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc_p[r - 1]))
    print("------------------")

    return cmc[0]
Example #12
def test_gcn(model,
             queryloader,
             galleryloader,
             pool,
             use_gpu,
             ranks=[1, 5, 10, 20]):
    model.eval()

    q_pids, q_camids = [], []
    g_pids, g_camids = [], []

    for batch_idx, (_, gids, pimgs, pids, camids) in enumerate(queryloader):
        q_pids.extend(gids)
        q_camids.extend(camids)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)
    max_qcam = camids + 1

    for batch_idx, (_, gids, pimgs, pids, camids) in enumerate(galleryloader):
        g_pids.extend(gids)
        g_camids.extend(camids + max_qcam)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)

    m, n = q_pids.shape[0], g_pids.shape[0]
    distmat = torch.zeros((m, m + n))

    g_camids = np.concatenate((q_camids, g_camids), axis=0)
    g_pids = np.concatenate((q_pids, g_pids), axis=0)

    with torch.no_grad():
        for batch_idx, (_, gids, pimgs, pids,
                        camids) in enumerate(queryloader):
            if use_gpu:
                pimgs = pimgs.cuda()
            pimgs = Variable(pimgs)
            # b=1, n=number of clips, s=16
            b, s, c, h, w = pimgs.size()
            #pimgs = pimgs.permute(1, 0, 2, 3, 4)
            assert (b == 1)
            pimgs = pimgs.view(s, c, h, w)
            num_nodes = s
            adj = torch.ones((num_nodes, num_nodes))
            if use_gpu:
                adj = adj.cuda()
            adj = Variable(adj)

            for batch_idx_q, (_, gids_q, pimgs_q, pids_q,
                              camids_q) in enumerate(queryloader):
                if use_gpu:
                    pimgs_q = pimgs_q.cuda()
                pimgs_q = Variable(pimgs_q)
                # pimgs = pimgs.permute(1, 0, 2, 3, 4)
                b, s, c, h, w = pimgs_q.size()
                pimgs_q = pimgs_q.view(s, c, h, w)
                assert (b == 1)
                num_nodes = s
                adj_q = torch.ones((num_nodes, num_nodes))
                if use_gpu:
                    adj_q = adj_q.cuda()
                adj_q = Variable(adj_q)
                features1, features2 = model(pimgs, pimgs_q, [adj], [adj_q])
                #dist = torch.pow(features1, 2).sum(dim=1, keepdim=True) + \
                #          torch.pow(features2, 2).sum(dim=1, keepdim=True).t()
                #dist.addmm_(1, -2, features1, features2.t())
                #print(dist)
                dist = F.pairwise_distance(features1, features2)
                #print(dist)
                distmat[batch_idx, batch_idx_q] = dist

            for batch_idx_g, (_, gids_g, pimgs_g, pids_g,
                              camids_g) in enumerate(galleryloader):
                if use_gpu:
                    pimgs_g = pimgs_g.cuda()
                pimgs_g = Variable(pimgs_g)
                # pimgs = pimgs.permute(1, 0, 2, 3, 4)
                b, s, c, h, w = pimgs_g.size()
                pimgs_g = pimgs_g.view(s, c, h, w)
                assert (b == 1)
                num_nodes = s
                adj_g = torch.ones((num_nodes, num_nodes))
                if use_gpu:
                    adj_g = adj_g.cuda()
                adj_g = Variable(adj_g)
                features1, features2 = model(pimgs, pimgs_g, [adj], [adj_g])
                #dist = torch.pow(features1, 2).sum(dim=1, keepdim=True) + \
                #          torch.pow(features2, 2).sum(dim=1, keepdim=True).t()
                #dist.addmm_(1, -2, features1, features2.t())
                #print(dist)
                dist = F.pairwise_distance(features1, features2)
                #print(dist)
                distmat[batch_idx, batch_idx_g + m] = dist
    distmat = distmat.numpy()
    #print(distmat)

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")
    '''
    dist_qq = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m) + \
              torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, m).t()
    dist_qq.addmm_(1, -2, qf, qf.t())
    dist_qq = dist_qq.numpy()

    dist_gg = torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, n).t()
    dist_gg.addmm_(1, -2, gf, gf.t())
    dist_gg = dist_gg.numpy()

    dist_re_rank = re_ranking(distmat, dist_qq, dist_gg)

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(dist_re_rank, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    print("------------------")
    '''

    return cmc[0]
Example #13
def main(args=None):

    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    
    # data generator
    dataset = data_manager.init_imgreid_dataset(
        dataset_path=args.dataset_path, name=args.dataset
    )

    query_generator = DataGenerator(dataset.query[0],
                                    dataset.query[1], 
                                    camids=dataset.query[2], 
                                    batch_size=args.batch_size, 
                                    num_classes=dataset.num_query_pids, 
                                    target_size=(args.height, args.width), 
                                    learn_region=args.learn_region,
                                    shuffle=True,
                                    mode='inference')

    gallery_generator = DataGenerator(dataset.gallery[0],
                                      dataset.gallery[1],
                                      camids=dataset.gallery[2],
                                      batch_size=args.batch_size,
                                      num_classes=dataset.num_gallery_pids,
                                      target_size=(args.height, args.width),
                                      learn_region=args.learn_region,
                                      shuffle=True,
                                      mode='inference')

    model = modellib.HACNN(mode='inference', 
                           num_classes=dataset.num_query_pids, 
                           batch_size=args.batch_size, 
                           learn_region=args.learn_region).model

    load_weights(model, filepath=args.snapshot, by_name=True)
    
    '''
    img_path = '/Users/luke/Documents/ml_datasets/person_re_id/videotag_scene/dataset_7_lite/bounding_box_train/3434_0096.jpg'
    img = image.load_img(img_path, target_size=(args.height, args.width))
    img = image.img_to_array(img)
    
    output = model.predict(np.array([img]), verbose=1)
    print('output', np.array(output).shape)
    print('output', np.argmax(output[0]), np.argmax(output[1]))
    '''
    # evaluate
    qf, q_pids, q_camids = [], [], []
    for index in range(len(query_generator)):
        imgs, pids, camids = query_generator[index]
        # print('query', index)
        features = model.predict(imgs, verbose=0)
        # print('features', features)
        qf.append(features)
        q_pids.extend(pids)
        q_camids.extend(camids)
    qf = np.concatenate(qf, axis=0)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)
    # print(qf.shape)
        
    gf, g_pids, g_camids = [], [], []
    for index in range(len(gallery_generator)):
        # print('gallery', index)
        imgs, pids, camids = gallery_generator[index]
        features = model.predict(imgs, verbose=0)
        # print('features', features)
        gf.append(features)
        g_pids.extend(pids)
        g_camids.extend(camids)
    gf = np.concatenate(gf, axis=0)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)
    
    m, n = qf.shape[0], gf.shape[0]
    '''
    # qf = qf*0.001
    qf_pow = np.square(qf)
    qf_sum = np.sum(qf_pow, axis=1, keepdims=True)
    qf_ext = np.repeat(qf_sum, n, axis=1)

    # gf = gf*0.01
    gf_pow = np.square(gf)
    gf_sum = np.sum(gf_pow, axis=1, keepdims=True)
    gf_ext = np.repeat(gf_sum, m, axis=1)
    gf_ext_t = gf_ext.T
    distmat = qf_ext + gf_ext_t
    distmat = distmat + np.dot(qf, gf.T)*(-2.0)
    '''
    
    print("Compute pairwise euclidean distances")
    qf = np.expand_dims(qf, axis=1)
    qf = np.repeat(qf, n, axis=1)

    gf = np.expand_dims(gf, axis=0)
    gf = np.repeat(gf, m, axis=0)
    
    distmat = np.linalg.norm(qf - gf, axis=2, keepdims=True)
    distmat = np.squeeze(distmat, axis=2)

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, dataset_type=args.dataset)
    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    ranks=[1, 5, 10, 20]
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    



    correct_matched = (g_pids == q_pids[:, np.newaxis]).astype(np.int32)
    correct_dist_aver = np.multiply(distmat, correct_matched)

    wrong_matched = (g_pids != q_pids[:, np.newaxis]).astype(np.int32)
    wrong_dist_aver = np.multiply(distmat, wrong_matched)

    # note: these averages run over the full matrix, zeroed entries included
    print('mean distance of correct matches :', np.average(correct_dist_aver))
    print('mean distance of wrong matches   :', np.average(wrong_dist_aver))

    print("------------------")
Example #14
import numpy as np
from IPython import embed
from eval_metrics import evaluate, plot_roc, plot_acc

targets = np.random.randint(1, 10, (100))
distances = np.random.randn(100)
tpr, fpr, acc, val, val_std, far = evaluate(distances, targets)
embed()
plot_roc(fpr, tpr, figure_name='./test.png')
Example #15
def train_valid(model, optimizer, scheduler, epoch, dataloaders, data_size):

    for phase in ['train', 'valid']:

        labels, distances = [], []
        triplet_loss_sum = 0.0

        if phase == 'train':
            scheduler.step()  # note: PyTorch >= 1.1 expects scheduler.step() after optimizer.step()
            model.train()
        else:
            model.eval()

        for batch_idx, batch_sample in enumerate(dataloaders[phase]):

            #break
            anc_img = batch_sample['anc_img'].to(device)
            pos_img = batch_sample['pos_img'].to(device)
            neg_img = batch_sample['neg_img'].to(device)

            pos_cls = batch_sample['pos_class'].to(device)
            neg_cls = batch_sample['neg_class'].to(device)

            with torch.set_grad_enabled(phase == 'train'):

                # anc_embed, pos_embed and neg_embed are encoding(embedding) of image
                anc_embed, pos_embed, neg_embed = model(anc_img), model(
                    pos_img), model(neg_img)

                # choose the hard negatives only for "training"
                pos_dist = l2_dist.forward(anc_embed, pos_embed)
                neg_dist = l2_dist.forward(anc_embed, neg_embed)

                # boolean mask of triplets that violate the margin
                hard_mask = (neg_dist - pos_dist <
                             args.margin).cpu().numpy().flatten()
                if phase == 'train':
                    hard_triplets = np.where(hard_mask == 1)
                    if len(hard_triplets[0]) == 0:
                        continue
                else:
                    # at validation keep every pair
                    hard_triplets = np.where(hard_mask >= 0)

                anc_hard_embed = anc_embed[hard_triplets].to(device)
                pos_hard_embed = pos_embed[hard_triplets].to(device)
                neg_hard_embed = neg_embed[hard_triplets].to(device)

                anc_hard_img = anc_img[hard_triplets].to(device)
                pos_hard_img = pos_img[hard_triplets].to(device)
                neg_hard_img = neg_img[hard_triplets].to(device)

                pos_hard_cls = pos_cls[hard_triplets].to(device)
                neg_hard_cls = neg_cls[hard_triplets].to(device)

                anc_img_pred = model.forward_classifier(anc_hard_img).to(
                    device)
                pos_img_pred = model.forward_classifier(pos_hard_img).to(
                    device)
                neg_img_pred = model.forward_classifier(neg_hard_img).to(
                    device)

                triplet_loss = TripletLoss(args.margin).forward(
                    anc_hard_embed, pos_hard_embed, neg_hard_embed).to(device)

                if phase == 'train':
                    optimizer.zero_grad()
                    triplet_loss.backward()
                    optimizer.step()

                dists = l2_dist.forward(anc_embed, pos_embed)
                distances.append(dists.data.cpu().numpy())
                labels.append(np.ones(dists.size(0)))

                dists = l2_dist.forward(anc_embed, neg_embed)
                distances.append(dists.data.cpu().numpy())
                labels.append(np.zeros(dists.size(0)))

                triplet_loss_sum += triplet_loss.item()

        avg_triplet_loss = triplet_loss_sum / data_size[phase]
        labels = np.array([sublabel for label in labels for sublabel in label])
        distances = np.array(
            [subdist for dist in distances for subdist in dist])

        tpr, fpr, accuracy, val, val_std, far = evaluate(distances, labels)
        print('  {} set - Triplet Loss       = {:.8f}'.format(
            phase, avg_triplet_loss))
        print('  {} set - Accuracy           = {:.8f}'.format(
            phase, np.mean(accuracy)))

        with open('./log/{}_log_epoch{}.txt'.format(phase, epoch), 'w') as f:
            f.write(
                str(epoch) + '\t' + str(np.mean(accuracy)) + '\t' +
                str(avg_triplet_loss))

        if phase == 'train':
            torch.save({
                'epoch': epoch,
                'state_dict': model.state_dict()
            }, './log/checkpoint_epoch{}.pth'.format(epoch))
        else:
            plot_roc(fpr,
                     tpr,
                     figure_name='./log/roc_valid_epoch_{}.png'.format(epoch))
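TripletLoss is instantiated from args.margin above but not defined in the snippet; assuming the standard margin formulation, torch.nn.TripletMarginLoss is a drop-in sketch:

import torch.nn as nn

class TripletLoss(nn.Module):
    # hypothetical stand-in: standard max(d(a, p) - d(a, n) + margin, 0)
    def __init__(self, margin):
        super(TripletLoss, self).__init__()
        self.loss_fn = nn.TripletMarginLoss(margin=margin)

    def forward(self, anchor, positive, negative):
        return self.loss_fn(anchor, positive, negative)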
Example #16
def evaluation(model,
               args,
               queryloader,
               galleryloader,
               use_gpu,
               ranks=[1, 5, 10, 20]):
    since = time.time()
    model.eval()

    qf, q_pids, q_camids = [], [], []
    for batch_idx, (vids, pids, camids) in enumerate(queryloader):
        if (batch_idx + 1) % 1000 == 0:
            print("{}/{}".format(batch_idx + 1, len(queryloader)))

        qf.append(extract(model, args, vids, use_gpu).squeeze())
        q_pids.extend(pids)
        q_camids.extend(camids)

    qf = torch.stack(qf)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)
    print("Extracted features for query set, obtained {} matrix".format(
        qf.shape))

    gf, g_pids, g_camids = [], [], []
    for batch_idx, (vids, pids, camids) in enumerate(galleryloader):
        if (batch_idx + 1) % 1000 == 0:
            print("{}/{}".format(batch_idx + 1, len(galleryloader)))

        gf.append(extract(model, args, vids, use_gpu).squeeze())
        g_pids.extend(pids)
        g_camids.extend(camids)

    gf = torch.stack(gf)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)

    if 'mars' in args.dataset:
        print('process the dataset mars!')
        # gallery set must contain query set, otherwise 140 query imgs will not have ground truth.
        gf = torch.cat((qf, gf), 0)
        g_pids = np.append(q_pids, g_pids)
        g_camids = np.append(q_camids, g_camids)

    print("Extracted features for gallery set, obtained {} matrix".format(
        gf.shape))

    time_elapsed = time.time() - since
    print('Extracting features complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    print("Computing distance matrix")
    m, n = qf.size(0), gf.size(0)
    distmat = torch.zeros((m, n))

    distmat = -torch.mm(qf, gf.t())
    distmat = distmat.data.cpu()
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    elapsed = round(time.time() - since)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}.".format(elapsed))

    return cmc[0]
Example #17
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    since = time.time()
    batch_time = AverageMeter()

    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            end = time.time()
            if use_gpu:
                imgs = imgs.cuda()

            n, c, h, w = imgs.size()
            features = torch.FloatTensor(n, model.module.feat_dim).zero_()
            for i in range(2):
                if (i == 1):
                    imgs = fliplr(imgs, use_gpu)
                f = model(imgs)
                f = f.data.cpu()
                features = features + f

            batch_time.update(time.time() - end)

            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            end = time.time()
            if use_gpu:
                imgs = imgs.cuda()

            n, c, h, w = imgs.size()
            features = torch.FloatTensor(n, model.module.feat_dim).zero_()
            for i in range(2):
                if (i == 1):
                    imgs = fliplr(imgs, use_gpu)
                f = model(imgs)
                f = f.data.cpu()
                features = features + f

            batch_time.update(time.time() - end)

            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    if args.distance == 'euclidean':
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    else:
        q_norm = torch.norm(qf, p=2, dim=1, keepdim=True)
        g_norm = torch.norm(gf, p=2, dim=1, keepdim=True)
        qf = qf.div(q_norm.expand_as(qf))
        gf = gf.div(g_norm.expand_as(gf))
        distmat = -torch.mm(qf, gf.t())
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
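fliplr is not defined in the snippet; it evidently mirrors the batch horizontally so that each feature averages the original and flipped views. A minimal sketch:

import torch

def fliplr(imgs, use_gpu):
    # flip along the width axis (dim 3 of an NCHW batch); use_gpu is kept
    # only for signature compatibility, since torch.flip stays on-device
    return torch.flip(imgs, dims=[3])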
Example #18
def main():
    parser = argparse.ArgumentParser(description="Tensorflow Contrastive Convolution")
    parser.add_argument('--num_classes', default=10575, type=int,
                        metavar='N', help='number of classes (default: 10575)')
    parser.add_argument('--iters', type=int, default=200000, metavar='N',
                        help='number of iterations to train (default: 200000)')
    args = parser.parse_args()

    dataset = CasiaFaceDataset()
    testset = LFWDataset()

    base_model = ConstractiveFourLayers()
    gen_model = GenModel(512)
    reg_model = Regressor(350*32)
    idreg_model = IdentityRegressor(14*512*3*3, args.num_classes)

    input1 = tf.placeholder(tf.float32, [None, 128, 128, 1])
    input2 = tf.placeholder(tf.float32, [None, 128, 128, 1])
    target = tf.placeholder(tf.float32, [None, 1])
    c1 = tf.placeholder(tf.float32, [None, args.num_classes])
    c2 = tf.placeholder(tf.float32, [None, args.num_classes])

    A_list, B_list, org_kernel_1, org_kernel_2 = compute_contrastive_features(input1, input2, base_model, gen_model)

    reg_1 = reg_model.forward(A_list)
    reg_2 = reg_model.forward(B_list)

    SAB = tf.add(reg_1, reg_2) / 2.0

    hk1 = idreg_model.forward(org_kernel_1)
    hk2 = idreg_model.forward(org_kernel_2)
    # print("target", target)
    # print("SAB", SAB)

    loss1 = tf.losses.sigmoid_cross_entropy(multi_class_labels=target, logits=SAB)

    cross_entropy_1 = tf.reduce_mean(-tf.reduce_sum(c1 * tf.log(hk1), reduction_indices=[1]))
    cross_entropy_2 = tf.reduce_mean(-tf.reduce_sum(c2 * tf.log(hk2), reduction_indices=[1]))
    loss2 = tf.add(cross_entropy_1, cross_entropy_2) * 0.5

    loss = tf.add(loss1, loss2)

    optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        f = open("result.txt", "w")
        for iteration in range(args.iters):

            data_1_batch, data_2_batch, c1_batch, c2_batch, target_batch = dataset.get_batch(batch_size=GLOBAL_BATCH_SIZE)
            # print(target_batch.shape)

            # data_1_cur, data_2_cur, c1_cur, c2_cur, target_cur = sess.run([data_1_batch, data_2_batch, c1_batch, c2_batch, target_batch])
            _, loss_val, loss1_val, loss2_val, reg1_val, reg2_val = sess.run([optimizer, loss, loss1, loss2, reg_1, reg_2],
                feed_dict={input1: data_1_batch, input2: data_2_batch, c1: c1_batch, c2: c2_batch, target: target_batch})

            print("Itera {0} : loss = {1}, loss1 = {2}, loss2 = {3}".format(iteration, loss_val, loss1_val, loss2_val))
            f.write("Itera {0} : loss = {1}, loss1 = {2}, loss2 = {3}\r\n".format(iteration, loss_val, loss1_val, loss2_val))
            f.flush()

            if iteration != 0 and iteration % 100 == 0:
                acc_pool, start_time = [], time.time()
                for i in range(50):
                    test_1_batch, test_2_batch, label_batch = testset.get_batch(batch_size=GLOBAL_BATCH_SIZE)

                #     test_1_cur, test_2_cur, label_cur = sess.run([data_1_batch, data_2_batch, label_batch])
                    # out1_a, out1_b, k1, k2 = sess.run(compute_contrastive_features(test_1_batch, test_2_batch, base_model, gen_model))
                    SAB_val, reg1_val, reg2_val = sess.run([SAB, reg_1, reg_2], feed_dict={input1: test_1_batch, input2: test_2_batch})
                    # print("SAB", SAB_val)
                    # print("1v", reg1_val)
                    # print("2v", reg2_val)
                    dists = np.array(SAB_val).reshape((-1, 1))
                    # print(dists)
                    labels = np.array(label_batch).reshape((-1, 1))
                    # print(labels)
                    accuracy = evaluate(1.0 - dists, labels)

                    acc_pool.append(np.mean(accuracy))
                print("Acc(%.2f)"%(time.time()-start_time), np.mean(acc_pool), acc_pool)
                f.write("Acc" + str(np.mean(acc_pool)) + str(acc_pool) + str("\r\n"))
                f.flush()
        f.close()
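The c1 and c2 placeholders above expect one-hot label matrices of shape [batch, num_classes]. A minimal sketch of that encoding, assuming get_batch yields integer class labels (the exact return format of CasiaFaceDataset is not shown):

import numpy as np

def one_hot(labels, num_classes):
    # Build the float32 one-hot matrix fed to the c1/c2 placeholders.
    out = np.zeros((len(labels), num_classes), dtype=np.float32)
    out[np.arange(len(labels)), labels] = 1.0
    return out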
Example #19
    def test(self, queryloader, galleryloader, args, ranks=[1, 5, 10, 20]):
        self.brain.eval()
        self.feature_extractor.eval()

        buffer_file = '../scratch/' + args.dataset + '_test_features.pth'
        if not os.path.exists(buffer_file):
            # if a buffer file with saved features already exists, it is
            # loaded in the else branch below; otherwise extract them here
            qindividualf, qmeanf, q_pids, q_camids = [], [], [], []
            print('extracting query feats')
            for batch_idx, (imgs, pids,
                            camids) in enumerate(tqdm(queryloader)):
                if not args.use_cpu:
                    imgs = imgs.cuda()

                with torch.no_grad():
                    # b=1, n=number of clips, s=16
                    b, n, s, c, h, w = imgs.size()
                    assert (b == 1)
                    imgs = imgs.view(b * n, s, c, h, w)
                    individual_features = get_features(self.feature_extractor,
                                                       imgs,
                                                       args.test_num_tracks)
                    mean_features = torch.mean(individual_features, 0)

                    individual_features = individual_features.data.cpu()
                    mean_features = mean_features.data.cpu()

                    qindividualf.append(individual_features)
                    qmeanf.append(mean_features)
                    q_pids.extend(pids)
                    q_camids.extend(camids)
                    torch.cuda.empty_cache()

            qmeanf = torch.stack(qmeanf)
            q_pids = np.asarray(q_pids)
            q_camids = np.asarray(q_camids)

            print("Extracted features for query set, obtained {}-by-{} matrix".
                  format(qmeanf.size(0), qmeanf.size(1)))

            gindividualf, gmeanf, g_pids, g_camids = [], [], [], []
            print('extracting gallery feats')
            for batch_idx, (imgs, pids,
                            camids) in enumerate(tqdm(galleryloader)):
                if not args.use_cpu:
                    imgs = imgs.cuda()

                with torch.no_grad():
                    b, n, s, c, h, w = imgs.size()
                    imgs = imgs.view(b * n, s, c, h, w)
                    assert (b == 1)
                    # handle chunked data
                    individual_features = get_features(self.feature_extractor,
                                                       imgs,
                                                       args.test_num_tracks)
                    mean_features = torch.mean(individual_features, 0)
                    torch.cuda.empty_cache()

                individual_features = individual_features.data.cpu()
                mean_features = mean_features.data.cpu()

                gindividualf.append(individual_features)
                gmeanf.append(mean_features)
                g_pids.extend(pids)
                g_camids.extend(camids)

            gmeanf = torch.stack(gmeanf)
            g_pids = np.asarray(g_pids)
            g_camids = np.asarray(g_camids)

            print(
                "Extracted features for gallery set, obtained {}-by-{} matrix".
                format(gmeanf.size(0), gmeanf.size(1)))
            torch.save(
                {
                    'query': {
                        'meanft': qmeanf,
                        'individualft': qindividualf,
                        'pids': q_pids,
                        'camids': q_camids
                    },
                    'gallery': {
                        'meanft': gmeanf,
                        'individualft': gindividualf,
                        'pids': g_pids,
                        'camids': g_camids
                    }
                }, buffer_file)

        else:
            # load the buffer file
            print('loading and extraction information/features from file',
                  buffer_file)
            buffer = torch.load(buffer_file)
            qmeanf = buffer['query']['meanft']
            qindividualf = buffer['query']['individualft']
            q_camids = buffer['query']['camids']
            q_pids = buffer['query']['pids']
            gmeanf = buffer['gallery']['meanft']
            gindividualf = buffer['gallery']['individualft']
            g_camids = buffer['gallery']['camids']
            g_pids = buffer['gallery']['pids']

        print("Computing distance matrix for allframes evaluation (baseline)")
        m, n = qmeanf.size(0), gmeanf.size(0)
        distmat = torch.pow(qmeanf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                torch.pow(gmeanf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        distmat.addmm_(qmeanf, gmeanf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()
        print("Computing CMC and mAP")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
        print("Results ----------")
        print("mAP: {:.1%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")

        print('Computing distance matrix from DQN network')
        distmat = torch.zeros(m, n)
        instance_rewards = torch.zeros(m, n)
        comparisons = torch.zeros(m, n)
        print(m, n)
        for i, qid in tqdm(enumerate(q_pids)):
            q_features = self.select_random_features(qindividualf[i],
                                                     args.rl_seq_len)
            for j, gid in enumerate(g_pids):
                # print(qindividualf[i].shape, gindividualf[j].shape)
                g_features = self.select_random_features(
                    gindividualf[j], args.rl_seq_len)

                if not args.use_cpu:
                    q_features = q_features.cuda()
                    g_features = g_features.cuda()

                # print(q_features.shape, g_features.shape)
                self.env = Environment({
                    'features': q_features,
                    'id': qid
                }, {
                    'features': g_features,
                    'id': gid
                }, args.rp)
                _, reward, iters, q_vals = self.play_one_episode(is_test=True)
                instance_rewards[i, j] = reward
                comparisons[i, j] = iters
                distmat[i, j] = (q_vals[:, 1] - q_vals[:, 0]).item()
                # break

        print("Computing CMC and mAP (+ve distmat)")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
        print("Results ----------")
        print("mAP: {:.1%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")

        print("Computing CMC and mAP (-ve distmat)")
        cmc, mAP = evaluate(-distmat, q_pids, g_pids, q_camids, g_camids)
        print("Results ----------")
        print("mAP: {:.1%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")

        print('average rewards', instance_rewards.mean().item())
        print('average comparison', comparisons.mean().item())

        return cmc[0]
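select_random_features is not defined in this snippet. A hypothetical sketch, assuming it subsamples a tracklet's per-clip feature matrix down to args.rl_seq_len rows for one RL episode:

import torch

def select_random_features(features, seq_len):
    # Sample seq_len rows uniformly (with replacement), so tracklets
    # shorter than seq_len still yield a full-length episode.
    idx = torch.randint(0, features.size(0), (seq_len,))
    return features[idx]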
Example #20
    def train():
        model = FaceNetModel(embedding_size=args.embedding_size,
                             num_classes=args.num_classes).to(device)
        optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
        scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
        if args.start_epoch != 0:
            checkpoint = torch.load(
                './log/checkpoint_epoch{}.pth'.format(args.start_epoch - 1))
            model.load_state_dict(checkpoint['state_dict'])

        train_loss = np.zeros(args.num_epochs + args.start_epoch)
        train_accuracy = np.zeros(args.num_epochs + args.start_epoch)

        for epoch in range(args.start_epoch,
                           args.num_epochs + args.start_epoch):
            print(80 * '-')
            print('Epoch [{}/{}]'.format(
                epoch, args.num_epochs + args.start_epoch - 1))

            data_transforms = {
                'train':
                transforms.Compose([
                    transforms.ToPILImage(),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                         std=[0.5, 0.5, 0.5])
                ]),
                'valid':
                transforms.Compose([
                    transforms.ToPILImage(),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                         std=[0.5, 0.5, 0.5])
                ])
            }

            face_dataset = {
                'train':
                TripletFaceDataset(root_dir=args.train_root_dir,
                                   csv_name=args.train_csv_name,
                                   num_triplets=args.num_train_triplets,
                                   transform=data_transforms['train']),
                'valid':
                TripletFaceDataset(root_dir=args.valid_root_dir,
                                   csv_name=args.valid_csv_name,
                                   num_triplets=args.num_valid_triplets,
                                   transform=data_transforms['valid'])
            }
            dataloaders = {
                x: torch.utils.data.DataLoader(face_dataset[x],
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.num_workers)
                for x in ['train', 'valid']
            }

            data_size = {x: len(face_dataset[x]) for x in ['train', 'valid']}

            for phase in ['train', 'valid']:
                labels, distances = [], []
                triplet_loss_sum = 0.0
                if phase == 'train':
                    scheduler.step()
                    model.train()
                else:
                    model.eval()

                for batch_idx, batch_sample in enumerate(dataloaders[phase]):

                    anc_img = batch_sample['anc_img'].to(device)
                    pos_img = batch_sample['pos_img'].to(device)
                    neg_img = batch_sample['neg_img'].to(device)

                    #                    print(anc_img.shape)

                    pos_cls = batch_sample['pos_class'].to(device)
                    neg_cls = batch_sample['neg_class'].to(device)

                    with torch.set_grad_enabled(phase == 'train'):

                        # anc_embed, pos_embed and neg_embed are embedding of image
                        anc_embed, pos_embed, neg_embed = model(
                            anc_img), model(pos_img), model(neg_img)
                        #                        print(anc_embed.shape)

                        # choose the hard negatives only for "training"
                        pos_dist = l2_dist.forward(anc_embed, pos_embed)
                        neg_dist = l2_dist.forward(anc_embed, neg_embed)

                        all = (neg_dist - pos_dist <
                               args.margin).cpu().numpy().flatten()
                        if phase == 'train':
                            hard_triplets = np.where(all == 1)
                            if len(hard_triplets[0]) == 0:
                                continue
                        else:
                            hard_triplets = np.where(all >= 0)

                        anc_hard_embed = anc_embed[hard_triplets].to(device)
                        pos_hard_embed = pos_embed[hard_triplets].to(device)
                        neg_hard_embed = neg_embed[hard_triplets].to(device)

                        anc_hard_img = anc_img[hard_triplets].to(device)
                        pos_hard_img = pos_img[hard_triplets].to(device)
                        neg_hard_img = neg_img[hard_triplets].to(device)

                        pos_hard_cls = pos_cls[hard_triplets].to(device)
                        neg_hard_cls = neg_cls[hard_triplets].to(device)

                        anc_img_pred = model.forward_classifier(
                            anc_hard_img).to(device)
                        pos_img_pred = model.forward_classifier(
                            pos_hard_img).to(device)
                        neg_img_pred = model.forward_classifier(
                            neg_hard_img).to(device)

                        triplet_loss = TripletLoss(args.margin).forward(
                            anc_hard_embed, pos_hard_embed,
                            neg_hard_embed).to(device)

                        if phase == 'train':
                            optimizer.zero_grad()
                            triplet_loss.backward()
                            optimizer.step()

                        dists = l2_dist.forward(anc_embed, pos_embed)
                        distances.append(dists.data.cpu().numpy())
                        labels.append(np.ones(dists.size(0)))

                        dists = l2_dist.forward(anc_embed, neg_embed)
                        distances.append(dists.data.cpu().numpy())
                        labels.append(np.zeros(dists.size(0)))

                        triplet_loss_sum += triplet_loss.item()

                    torch.cuda.empty_cache()
                avg_triplet_loss = triplet_loss_sum / data_size[phase]
                labels = np.array(
                    [sublabel for label in labels for sublabel in label])
                distances = np.array(
                    [subdist for dist in distances for subdist in dist])

                tpr, fpr, accuracy, val, val_std, far = evaluate(
                    distances, labels)
                print('  {} set - Triplet Loss   = {:.8f}'.format(
                    phase, avg_triplet_loss))
                print('  {} set - Accuracy       = {:.8f}'.format(
                    phase, np.mean(accuracy)))

                with open('./log/{}_log.txt'.format(phase), 'a') as f:
                    f.write(
                        str(epoch) + '\t' + str(np.mean(accuracy)) + '\t' +
                        str(avg_triplet_loss))
                    f.write("\n")

                if phase == 'train':
                    torch.save(
                        {
                            'epoch': epoch,
                            'state_dict': model.state_dict()
                        }, 'log/checkpoint_epoch{}.pth'.format(epoch))
                    train_loss[epoch] = avg_triplet_loss
                if phase == 'valid':
                    train_accuracy[epoch] = np.mean(accuracy)

        print(80 * '-')
        torch.save(model, 'model.pkl')
        return train_loss, train_accuracy
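TripletLoss is imported from elsewhere. A minimal sketch of the margin-based loss this loop assumes, using squared Euclidean distances and a hinge at the margin:

import torch
import torch.nn.functional as F

class TripletLoss(torch.nn.Module):
    def __init__(self, margin):
        super().__init__()
        self.margin = margin

    def forward(self, anchor, positive, negative):
        # max(0, ||a - p||^2 - ||a - n||^2 + margin), averaged over the batch
        pos_dist = (anchor - positive).pow(2).sum(1)
        neg_dist = (anchor - negative).pow(2).sum(1)
        return F.relu(pos_dist - neg_dist + self.margin).mean()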
Example #21
def train(train_loader, model, optimizer, epoch):
    # switch to train mode
    model.train()

    labels, distances = [], []

    pbar = tqdm(enumerate(train_loader))
    for batch_idx, (data_a, data_p, data_n, label_p, label_n) in pbar:
        #print("on training{}".format(epoch))
        if args.cuda:
            data_a, data_p, data_n = data_a.cuda(), data_p.cuda(), data_n.cuda()

        # compute output
        out_a, out_p, out_n = model(data_a), model(data_p), model(data_n)


        if epoch > args.min_softmax_epoch:
            triplet_loss = TripletMarginLoss(args.margin).forward(out_a, out_p, out_n)
            loss = triplet_loss
            # compute gradient and update weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            logger.log_value('selected_triplet_loss', triplet_loss.data.item()).step()
            #logger.log_value('selected_cross_entropy_loss', cross_entropy_loss.data.item()).step()
            logger.log_value('selected_total_loss', loss.data.item()).step()

            if batch_idx % args.log_interval == 0:
                pbar.set_description(
                    'Train Epoch: {:3d} [{:8d}/{:8d} ({:3.0f}%)]\tLoss: {:.6f}'.format(
                        # epoch, batch_idx * len(data_a), len(train_loader.dataset),
                        epoch, batch_idx * len(data_a), len(train_loader) * len(data_a),
                        100. * batch_idx / len(train_loader),
                        loss.data.item()))


            dists = distance(out_a, out_n, args.distance)
            distances.append(dists.data.cpu().numpy())
            labels.append(np.zeros(dists.size(0)))


            dists = distance(out_a, out_p, args.distance)
            distances.append(dists.data.cpu().numpy())
            labels.append(np.ones(dists.size(0)))



        else:
            # Choose the hard negatives
            d_p = distance(out_a, out_p, args.distance)
            d_n = distance(out_a, out_n, args.distance)
            all = (d_n - d_p < args.margin).cpu().data.numpy().flatten()

            # log loss value for mini batch.
            total_correct = np.where(all == 0)
            logger.log_value('Minibatch Train Accuracy', len(total_correct[0]))

            total_dist = (d_n - d_p).cpu().data.numpy().flatten()
            logger.log_value('Minibatch Train distance', np.mean(total_dist))

            hard_triplets = np.where(all == 1)
            if len(hard_triplets[0]) == 0:
                continue

            if args.cuda:
                out_selected_a = Variable(torch.from_numpy(out_a.cpu().data.numpy()[hard_triplets]).cuda())
                out_selected_p = Variable(torch.from_numpy(out_p.cpu().data.numpy()[hard_triplets]).cuda())
                out_selected_n = Variable(torch.from_numpy(out_n.cpu().data.numpy()[hard_triplets]).cuda())

                selected_data_a = Variable(torch.from_numpy(data_a.cpu().data.numpy()[hard_triplets]).cuda())
                selected_data_p = Variable(torch.from_numpy(data_p.cpu().data.numpy()[hard_triplets]).cuda())
                selected_data_n = Variable(torch.from_numpy(data_n.cpu().data.numpy()[hard_triplets]).cuda())
            else:
                out_selected_a = Variable(torch.from_numpy(out_a.data.numpy()[hard_triplets]))
                out_selected_p = Variable(torch.from_numpy(out_p.data.numpy()[hard_triplets]))
                out_selected_n = Variable(torch.from_numpy(out_n.data.numpy()[hard_triplets]))

                selected_data_a = Variable(torch.from_numpy(data_a.data.numpy()[hard_triplets]))
                selected_data_p = Variable(torch.from_numpy(data_p.data.numpy()[hard_triplets]))
                selected_data_n = Variable(torch.from_numpy(data_n.data.numpy()[hard_triplets]))


            selected_label_p = torch.from_numpy(label_p.cpu().numpy()[hard_triplets])
            selected_label_n = torch.from_numpy(label_n.cpu().numpy()[hard_triplets])
            triplet_loss = TripletMarginLoss(args.margin).forward(out_selected_a, out_selected_p, out_selected_n)

            cls_a = model.forward_classifier(selected_data_a)
            cls_p = model.forward_classifier(selected_data_p)
            cls_n = model.forward_classifier(selected_data_n)

            criterion = nn.CrossEntropyLoss()
            predicted_labels = torch.cat([cls_a,cls_p,cls_n])
            if args.cuda:
                true_labels = torch.cat([Variable(selected_label_p.cuda()),Variable(selected_label_p.cuda()),Variable(selected_label_n.cuda())])

                cross_entropy_loss = criterion(predicted_labels.cuda(),true_labels.cuda())
            else:
                true_labels = torch.cat([Variable(selected_label_p),Variable(selected_label_p),Variable(selected_label_n)])

                cross_entropy_loss = criterion(predicted_labels,true_labels)

            loss = cross_entropy_loss + triplet_loss * args.loss_ratio
            # compute gradient and update weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()


            # log loss value for hard selected sample
            logger.log_value('selected_triplet_loss', triplet_loss.data.item()).step()
            logger.log_value('selected_cross_entropy_loss', cross_entropy_loss.data.item()).step()
            logger.log_value('selected_total_loss', loss.data.item()).step()
            if batch_idx % args.log_interval == 0:
                pbar.set_description(
                    'Train Epoch: {:3d} [{:8d}/{:8d} ({:3.0f}%)]\tLoss: {:.6f} \t Number of Selected Triplets: {:4d}'.format(
                        # epoch, batch_idx * len(data_a), len(train_loader.dataset),
                        epoch, batch_idx * len(data_a), len(train_loader) * len(data_a),
                        100. * batch_idx / len(train_loader),
                        loss.data.item(), len(hard_triplets[0])))


            dists = distance(out_selected_a, out_selected_n, args.distance)
            distances.append(dists.data.cpu().numpy())
            labels.append(np.zeros(dists.size(0)))


            dists = distance(out_selected_a, out_selected_p, args.distance)
            distances.append(dists.data.cpu().numpy())
            labels.append(np.ones(dists.size(0)))


    # accuracy is computed on the hard selected samples only, not on all samples
    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist for dist in distances for subdist in dist])

    tpr, fpr, accuracy = evaluate(distances, labels)
    print('\33[91mTrain set: Accuracy: {:.8f}\n\33[0m'.format(np.mean(accuracy)))
    logger.log_value('Train Accuracy', np.mean(accuracy))

    # do checkpointing
    torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()},
               '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))
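The distance helper used above dispatches on args.distance. A sketch under the assumption that it supports the 'euclidean' default plus a cosine variant, returning one distance per row:

import torch
import torch.nn.functional as F

def distance(x, y, metric='euclidean'):
    # Smaller values mean more similar pairs for both metrics.
    if metric == 'euclidean':
        return (x - y).pow(2).sum(1)
    return 1.0 - F.cosine_similarity(x, y, dim=1)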
Example #22
def train_valid(model, optimizer, scheduler, epoch, dataloaders, data_size,
                start_time):

    for phase in ['train', 'valid']:
        if args.pure_validation and phase == 'train':
            continue
        if args.pure_training and phase == 'valid':
            continue
        labels, distances = [], []
        triplet_loss_sum = 0.0

        if phase == 'train':
            scheduler.step()
            model.train()
        else:
            model.eval()

        #for batch_idx in range(0, data_size[phase], 1):
        for batch_idx, batch_sample in enumerate(dataloaders[phase]):
            #print("batch_idx:", batch_idx)
            try:
                #batch_sample = dataloaders[phase][batch_idx]
                if 'exception' not in batch_sample:
                    anc_img = batch_sample['anc_img'].to(device)
                    pos_img = batch_sample['pos_img'].to(device)
                    neg_img = batch_sample['neg_img'].to(device)

                    pos_cls = batch_sample['pos_class'].to(device)
                    neg_cls = batch_sample['neg_class'].to(device)

                    with torch.set_grad_enabled(phase == 'train'):

                        # anc_embed, pos_embed and neg_embed are encoding(embedding) of image
                        anc_embed, pos_embed, neg_embed = model(
                            anc_img), model(pos_img), model(neg_img)
                        #for i in anc_embed:
                        #    print(i.item())
                        if args.num_valid_triplets <= 100:
                            anc_embed_cpu = anc_embed.cpu()
                            pos_embed_cpu = pos_embed.cpu()
                            neg_embed_cpu = neg_embed.cpu()
                            pos_cls_cpu = pos_cls.cpu()
                            neg_cls_cpu = neg_cls.cpu()
                            pd.DataFrame([t.numpy() for t in anc_embed_cpu]).to_csv(
                                "./embeddings.csv", mode='a', header=None)
                            pd.DataFrame([t.numpy() for t in pos_embed_cpu]).to_csv(
                                "./embeddings.csv", mode='a', header=None)
                            pd.DataFrame([t.numpy() for t in neg_embed_cpu]).to_csv(
                                "./embeddings.csv", mode='a', header=None)
                            pd.DataFrame({
                                'type': "anc",
                                'id': batch_sample['anc_id'],
                                'class': pos_cls_cpu,
                                'train_set': args.train_csv_name.split('.')[0],
                                'val_set': args.valid_csv_name.split('.')[0]
                            }).to_csv("./embeddings_info.csv", mode='a', header=None)
                            pd.DataFrame({
                                'type': "pos",
                                'id': batch_sample['pos_id'],
                                'class': pos_cls_cpu,
                                'train_set': args.train_csv_name.split('.')[0],
                                'val_set': args.valid_csv_name.split('.')[0]
                            }).to_csv("./embeddings_info.csv", mode='a', header=None)
                            pd.DataFrame({
                                'type': "neg",
                                'id': batch_sample['neg_id'],
                                'class': neg_cls_cpu,
                                'train_set': args.train_csv_name.split('.')[0],
                                'val_set': args.valid_csv_name.split('.')[0]
                            }).to_csv("./embeddings_info.csv", mode='a', header=None)

                        #print([t.size() for t in anc_embed])
                        # choose the hard negatives only for "training"
                        pos_dist = l2_dist.forward(anc_embed, pos_embed)
                        neg_dist = l2_dist.forward(anc_embed, neg_embed)

                        all = (neg_dist - pos_dist <
                               args.margin).cpu().numpy().flatten()
                        if phase == 'train':
                            hard_triplets = np.where(all == 1)
                            if len(hard_triplets[0]) == 0:
                                continue
                        else:
                            hard_triplets = np.where(all >= 0)

                        anc_hard_embed = anc_embed[hard_triplets].to(device)
                        pos_hard_embed = pos_embed[hard_triplets].to(device)
                        neg_hard_embed = neg_embed[hard_triplets].to(device)

                        anc_hard_img = anc_img[hard_triplets].to(device)
                        pos_hard_img = pos_img[hard_triplets].to(device)
                        neg_hard_img = neg_img[hard_triplets].to(device)

                        pos_hard_cls = pos_cls[hard_triplets].to(device)
                        neg_hard_cls = neg_cls[hard_triplets].to(device)

                        anc_img_pred = model.forward_classifier(
                            anc_hard_img).to(device)
                        pos_img_pred = model.forward_classifier(
                            pos_hard_img).to(device)
                        neg_img_pred = model.forward_classifier(
                            neg_hard_img).to(device)

                        triplet_loss = TripletLoss(args.margin).forward(
                            anc_hard_embed, pos_hard_embed,
                            neg_hard_embed).to(device)

                        if phase == 'train':
                            optimizer.zero_grad()
                            triplet_loss.backward()
                            optimizer.step()

                        dists = l2_dist.forward(anc_embed, pos_embed)
                        distances.append(dists.data.cpu().numpy())
                        labels.append(np.ones(dists.size(0)))

                        dists = l2_dist.forward(anc_embed, neg_embed)
                        distances.append(dists.data.cpu().numpy())
                        labels.append(np.zeros(dists.size(0)))

                        triplet_loss_sum += triplet_loss.item()
            except Exception:
                #traceback.print_exc()
                print("traceback: ", traceback.format_exc())
                print("something went wrong with batch_idx: ", batch_idx,
                      ", batch_sample:", batch_sample, ", neg_img size: ",
                      batch_sample['neg_img'].shape, ", pos_img size: ",
                      batch_sample['pos_img'].shape, ", anc_img size: ",
                      batch_sample['anc_img'].shape)

        avg_triplet_loss = triplet_loss_sum / data_size[phase]
        labels = np.array([sublabel for label in labels for sublabel in label])
        distances = np.array(
            [subdist for dist in distances for subdist in dist])

        nrof_pairs = min(len(labels), len(distances))
        if nrof_pairs >= 10:
            tpr, fpr, accuracy, val, val_std, far = evaluate(distances, labels)
            print('  {} set - Triplet Loss       = {:.8f}'.format(
                phase, avg_triplet_loss))
            print('  {} set - Accuracy           = {:.8f}'.format(
                phase, np.mean(accuracy)))
            duration = time.time() - start_time

            with open('{}/{}_log_epoch{}.txt'.format(log_dir, phase, epoch),
                      'w') as f:
                f.write(
                    str(epoch) + '\t' + str(np.mean(accuracy)) + '\t' +
                    str(avg_triplet_loss) + '\t' + str(duration))

            if phase == 'train':
                torch.save({
                    'epoch': epoch,
                    'state_dict': model.state_dict()
                }, '{}/checkpoint_epoch{}.pth'.format(save_dir, epoch))
            else:
                plot_roc(fpr,
                         tpr,
                         figure_name='{}/roc_valid_epoch_{}.png'.format(
                             log_dir, epoch))
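The repeated to_csv(mode='a', header=None) calls in this example never write column names, which makes the dumps hard to reload. A small convenience wrapper (not part of the original) that writes the header exactly once:

import os
import pandas as pd

def append_csv(df, path):
    # Emit the header only when the file is first created.
    df.to_csv(path, mode='a', header=not os.path.exists(path), index=False)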
Example #23
def test(model, queryloader, galleryloader, pool, use_gpu, ranks=[1, 5, 10, 20]):
    with torch.no_grad():
        model.eval()

        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in tqdm(enumerate(queryloader), total=len(queryloader)):
            if use_gpu:
                imgs = imgs.cuda()
            # imgs = Variable(imgs, volatile=True)
            # b=1, n=number of clips, s=16
            b, n, s, c, h, w = imgs.size()
            assert (b == 1)
            imgs = imgs.view(b * n, s, c, h, w)
            features = model(imgs)
            features = features.view(n, -1)
            features = torch.mean(features, 0)
            features = features.data.cpu().numpy()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
            if batch_idx % 20 == 0:
                gc.collect()
        qf = np.asarray(qf, dtype=np.float32)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        gc.collect()
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.shape[0], qf.shape[1]))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in tqdm(enumerate(galleryloader), total=len(galleryloader)):
            if use_gpu:
                imgs = imgs.cuda()
            # imgs = Variable(imgs, volatile=True)
            b, n, s, c, h, w = imgs.size()
            imgs = imgs.view(b * n, s, c, h, w)
            assert (b == 1)
            features = model(imgs)
            features = features.view(n, -1)
            if pool == 'avg':
                features = torch.mean(features, 0)
            else:
                features, _ = torch.max(features, 0)
            features = features.data.cpu().numpy()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
            if batch_idx % 20 == 0:
                gc.collect()

        gf = np.asarray(gf, dtype=np.float32)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        gc.collect()
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.shape[0], gf.shape[1]))
        print("Computing distance matrix")

        m, n = qf.shape[0], gf.shape[0]
        distmat = np.tile(np.sum(np.power(qf, 2), axis=1, keepdims=True), (1, n)) + \
                  np.tile(np.sum(np.power(gf, 2), axis=1, keepdims=True), (1, m)).T
        distmat -= 2 * blas.sgemm(1, qf, gf.T)


        print("Computing CMC and mAP")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

        print("Results ----------")
        print("mAP: {:.1%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")

        return cmc[0]
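The tile/sgemm computation above expands the identity ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q . g^T. An equivalent pure-NumPy sketch without the SciPy BLAS dependency:

import numpy as np

def euclidean_distmat(qf, gf):
    # Squared Euclidean distances between every query/gallery pair, shape (m, n).
    sq_q = np.sum(qf ** 2, axis=1, keepdims=True)    # (m, 1)
    sq_g = np.sum(gf ** 2, axis=1, keepdims=True).T  # (1, n)
    return sq_q + sq_g - 2.0 * qf @ gf.T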
Example #24
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=[1, 5, 10, 20]):
    model.eval()

    qf, qf_d, q_pids, q_camids = [], [], [], []
    for batch_idx, (imgs, imgs_depths, pids, camids) in enumerate(queryloader):
        if use_gpu:
            imgs = imgs.cuda()
            imgs_depths = imgs_depths.cuda()
        imgs = Variable(imgs, volatile=True)
        imgs_depths = Variable(imgs_depths, volatile=True)
        # b=1, n=number of clips, s=16
        b, n, s, c, h, w = imgs.size()
        bd, nd, sd, cd, hd, wd = imgs_depths.size()
        assert (b == 1)
        assert (bd == 1)
        imgs = imgs.view(b * n, s, c, h, w)
        imgs_depths = imgs_depths.view(bd * nd, sd, cd, hd, wd)
        features = model(imgs)
        features = features.view(n, -1)
        features = torch.mean(features, 0)
        features = features.data.cpu()
        features_d = model(imgs_depths)
        features_d = features_d.view(nd, -1)
        features_d = torch.mean(features_d, 0)
        features_d = features_d.data.cpu()
        qf.append(features)
        qf_d.append(features_d)
        q_pids.extend(pids)
        q_camids.extend(camids)
    qf = torch.stack(qf)
    qf_d = torch.stack(qf_d)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)

    print("Extracted features for query set, obtained {}-by-{} matrix".format(
        qf.size(0), qf.size(1)))

    gf, gf_d, g_pids, g_camids = [], [], [], []
    for batch_idx, (imgs, imgs_depths, pids,
                    camids) in enumerate(galleryloader):
        if use_gpu:
            imgs = imgs.cuda()
            imgs_depths = imgs_depths.cuda()
        imgs = Variable(imgs, volatile=True)
        imgs_depths = Variable(imgs_depths, volatile=True)
        b, n, s, c, h, w = imgs.size()
        bd, nd, sd, cd, hd, wd = imgs_depths.size()
        imgs = imgs.view(b * n, s, c, h, w)
        imgs_depths = imgs_depths.view(bd * nd, sd, cd, hd, wd)
        assert (b == 1)
        assert (bd == 1)
        features = model(imgs)
        features = features.view(n, -1)
        features_d = model(imgs_depths)
        features_d = features_d.view(nd, -1)

        if pool == 'avg':
            features = torch.mean(features, 0)
            features_d = torch.mean(features_d, 0)
        else:
            features, _ = torch.max(features, 0)
            features_d, _ = torch.max(features_d, 0)
        features = features.data.cpu()
        features_d = features_d.data.cpu()
        gf.append(features)
        gf_d.append(features_d)
        g_pids.extend(pids)
        g_camids.extend(camids)
    gf = torch.stack(gf)
    gf_d = torch.stack(gf_d)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)

    print(
        "Extracted features for gallery set, obtained {}-by-{} matrix".format(
            gf.size(0), gf.size(1)))
    print("Computing distance matrix")

    m, n, md, nd = qf.size(0), gf.size(0), qf_d.size(0), gf_d.size(
        0)  # put depth back
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()
    distmat_d = torch.pow(qf_d, 2).sum(dim=1, keepdim=True).expand(md, nd) + \
        torch.pow(gf_d, 2).sum(dim=1, keepdim=True).expand(nd, md).t()
    distmat_d.addmm_(qf_d, gf_d.t(), beta=1, alpha=-2)
    distmat_d = distmat_d.numpy()
    distmat_tot = (distmat + distmat_d) / 2

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat_tot, q_pids, g_pids, q_camids,
                        g_camids)  # put distmat_tot back

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
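This example fuses the RGB and depth distance matrices with a fixed 50/50 average. A sketch of a weighted variant (an illustration, not part of the original) that min-max normalizes each matrix first so the two modalities sit on a comparable scale:

import numpy as np

def fuse_distmats(d_rgb, d_depth, alpha=0.5):
    # alpha weights the RGB stream; 1 - alpha weights the depth stream.
    d_rgb = (d_rgb - d_rgb.min()) / (np.ptp(d_rgb) + 1e-12)
    d_depth = (d_depth - d_depth.min()) / (np.ptp(d_depth) + 1e-12)
    return alpha * d_rgb + (1.0 - alpha) * d_depth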
Example #25
def test(model,
         classifier_model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=[1, 5, 10, 20]):
    model.eval()
    classifier_model.eval()

    qf, q_pids, q_camids = [], [], []
    tot_part = args.part1 + args.part2
    for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
        if use_gpu:
            imgs = imgs[:, :40, :, :, :, :]
            imgs = imgs.cuda()
        with torch.no_grad():
            #imgs = Variable(imgs)
            # b=1, n=number of clips, s=16
            b, n, s, c, h, w = imgs.size()
            assert (b == 1)
            features = []

            m = min(n, 40)

            for i in range(m):

                b, parts1, parts2 = model(imgs[:, i, :, :, :, :])
                features.append(classifier_model(b, parts1, parts2, 1, s))

            features = torch.cat(features, 0)
            features = features.data.cpu()
            features = features.view(m, 1024, tot_part + 1)

            fnorm = torch.norm(features, p=2, dim=1,
                               keepdim=True) * np.sqrt(tot_part + 1)
            features = features.div(fnorm.expand_as(features))
            features = features.view(features.size(0), -1)

            features = torch.mean(features, 0)

            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
    qf = torch.stack(qf)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)

    print("Extracted features for query set, obtained {}-by-{} matrix".format(
        qf.size(0), qf.size(1)))

    gf, g_pids, g_camids = [], [], []
    for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):

        if use_gpu:
            imgs = imgs[:, :80, :, :, :, :]
            imgs = imgs.cuda()
        with torch.no_grad():
            imgs = Variable(imgs)
            b, n, s, c, h, w = imgs.size()

            features = []
            for i in range(n):
                b, parts1, parts2 = model(imgs[:, i, :, :, :, :])
                features.append(classifier_model(b, parts1, parts2, 1, s))

            features = torch.cat(features, 0)
            features = features.view(n, 1024, tot_part + 1)
            fnorm = torch.norm(features, p=2, dim=1,
                               keepdim=True) * np.sqrt(tot_part + 1)
            features = features.div(fnorm.expand_as(features))
            features = features.view(features.size(0), -1)

            if pool == 'avg':
                features = torch.mean(features, 0)
            else:
                features, _ = torch.max(features, 0)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
            torch.cuda.empty_cache()
    gf = torch.stack(gf)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)

    print(
        "Extracted features for gallery set, obtained {}-by-{} matrix".format(
            gf.size(0), gf.size(1)))
    print("Computing distance matrix")

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")
    # re-ranking
    from person_re_ranking.python_version.re_ranking_feature import re_ranking
    rerank_distmat = re_ranking(qf.numpy(),
                                gf.numpy(),
                                k1=20,
                                k2=6,
                                lambda_value=0.3)
    print("Computing CMC and mAP for re-ranking")
    cmc, mAP = evaluate(rerank_distmat, q_pids, g_pids, q_camids, g_camids)
    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
Example #26
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    since = time.time()
    model.eval()

    qf, q_pids, q_camids = [], [], []
    for batch_idx, (vids, pids, camids, img_paths) in enumerate(queryloader):
        if use_gpu:
            vids = vids.cuda()
        feat = model(vids) #[b, c * n]

        feat_list = torch.split(feat, [bn.num_features for bn in model.module.bn], dim=1)
        norm_feat_list = []
        for i, f in enumerate(feat_list):
            f = model.module.bn[i](f) #[bs, c]
            f = F.normalize(f, p=2, dim=1, eps=1e-12)
            norm_feat_list.append(f)
        feat = torch.cat(norm_feat_list, 1)

        qf.append(feat)
        q_pids.extend(pids)
        q_camids.extend(camids)

    qf = torch.cat(qf, 0)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)
    print("Extracted features for query set, obtained {} matrix".format(qf.shape))

    gf, g_pids, g_camids = [], [], []
    for batch_idx, (vids, pids, camids, img_paths) in enumerate(galleryloader):
        if use_gpu:
            vids = vids.cuda()
        feat = model(vids)

        feat_list = torch.split(feat, [bn.num_features for bn in model.module.bn], dim=1)
        norm_feat_list = []
        for i, f in enumerate(feat_list):
            f = model.module.bn[i](f) #[bs, c]
            f = F.normalize(f, p=2, dim=1, eps=1e-12)
            norm_feat_list.append(f)
        feat = torch.cat(norm_feat_list, 1)

        gf.append(feat)
        g_pids.extend(pids)
        g_camids.extend(camids)

    gf = torch.cat(gf, 0)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)

    if args.dataset == 'mars':
        # gallery set must contain query set, otherwise 140 query imgs will not have ground truth.
        gf = torch.cat((qf, gf), 0)
        g_pids = np.append(q_pids, g_pids)
        g_camids = np.append(q_camids, g_camids)

    print("Extracted features for gallery set, obtained {} matrix".format(gf.shape))

    time_elapsed = time.time() - since
    print('Extracting features complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))

    print("Computing distance matrix")

    distmat = - torch.mm(qf, gf.t())
    distmat = distmat.data.cpu()
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP, rank5s = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r-1]))
    print("------------------")
    number_wrong_rank1 = 0
    for q_idx in range(len(q_pids)):
        if g_pids[rank5s[q_idx][0]] == q_pids[q_idx]:
            continue  # skip correct rank-1 matches
        #print("Wrong rank1 query {}; gallery {}".format(q_pids[q_idx], g_pids[rank5s[q_idx]]))
        number_wrong_rank1 += 1
    print("Wrong rank-1 rate: {:.2%}".format(number_wrong_rank1 * 1.0 / len(rank5s)))
    return cmc[0]
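Because each feature block above is batch-normalized and L2-normalized, ranking by the negative inner product is equivalent to ranking by Euclidean distance: for unit vectors, ||q - g||^2 = 2 - 2 * q . g. The same distance as a standalone helper:

import torch
import torch.nn.functional as F

def cosine_distmat(qf, gf):
    # Negative cosine similarity; smaller means more similar.
    qf = F.normalize(qf, p=2, dim=1)
    gf = F.normalize(gf, p=2, dim=1)
    return -torch.mm(qf, gf.t())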
Example #27
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    model.eval()

    qf, q_pids, q_camids = [], [], []
    print('extracting query feats')
    for batch_idx, (imgs, pids, camids) in enumerate(tqdm(queryloader)):
        if use_gpu:
            imgs = imgs.cuda()

        with torch.no_grad():
            #imgs = Variable(imgs, volatile=True)
            # b=1, n=number of clips, s=16
            b, n, s, c, h, w = imgs.size()
            assert(b == 1)
            imgs = imgs.view(b*n, s, c, h, w)
            features = get_features(model, imgs, args.test_num_tracks)
            features = torch.mean(features, 0)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
            torch.cuda.empty_cache()

    qf = torch.stack(qf)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)

    print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

    gf, g_pids, g_camids = [], [], []
    print('extracting gallery feats')
    for batch_idx, (imgs, pids, camids) in enumerate(tqdm(galleryloader)):
        if use_gpu:
            imgs = imgs.cuda()

        with torch.no_grad():
            #imgs = Variable(imgs, volatile=True)
            b, n, s, c, h, w = imgs.size()
            imgs = imgs.view(b*n, s, c, h, w)
            assert(b == 1)
            # handle chunked data
            features = get_features(model, imgs, args.test_num_tracks)
            features = torch.mean(features, 0)
            torch.cuda.empty_cache()

        features = features.data.cpu()
        gf.append(features)
        g_pids.extend(pids)
        g_camids.extend(camids)
    gf = torch.stack(gf)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)

    print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
    print("Computing distance matrix")

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    print("------------------")

    # re-ranking
    from person_re_ranking.python_version.re_ranking_feature import re_ranking
    rerank_distmat = re_ranking(
        qf.numpy(), gf.numpy(), k1=20, k2=6, lambda_value=0.3)
    print("Computing CMC and mAP for re-ranking")
    cmc, mAP = evaluate(rerank_distmat, q_pids, g_pids, q_camids, g_camids)
    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    print("------------------")

    return cmc[0], mAP
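get_features is not shown; the "# handle chunked data" comment suggests it batches long tracklets. A hypothetical sketch that runs at most test_num_tracks clips through the model at a time to bound GPU memory:

import torch

def get_features(model, imgs, test_num_tracks):
    # Split along the clip dimension, encode chunk by chunk, re-concatenate.
    feats = [model(chunk) for chunk in torch.split(imgs, test_num_tracks)]
    return torch.cat(feats, 0)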
Example #28
def train(train_loader, model, optimizer, epoch):
    # switch to train mode
    model.train()

    pbar = tqdm(enumerate(train_loader))
    labels, distances = [], []

    for batch_idx, (data_a, data_p, data_n, label_p, label_n) in pbar:

        data_a, data_p, data_n = data_a.cuda(), data_p.cuda(), data_n.cuda()
        data_a, data_p, data_n = Variable(data_a), Variable(data_p), \
                                 Variable(data_n)

        # compute output
        out_a, out_p, out_n = model(data_a), model(data_p), model(data_n)

        # Choose the hard negatives
        d_p = l2_dist.forward(out_a, out_p)
        d_n = l2_dist.forward(out_a, out_n)
        all = (d_n - d_p < args.margin).cpu().data.numpy().flatten()
        hard_triplets = np.where(all == 1)
        if len(hard_triplets[0]) == 0:
            continue
        out_selected_a = Variable(torch.from_numpy(out_a.cpu().data.numpy()[hard_triplets]).cuda())
        out_selected_p = Variable(torch.from_numpy(out_p.cpu().data.numpy()[hard_triplets]).cuda())
        out_selected_n = Variable(torch.from_numpy(out_n.cpu().data.numpy()[hard_triplets]).cuda())

        selected_data_a = Variable(torch.from_numpy(data_a.cpu().data.numpy()[hard_triplets]).cuda())
        selected_data_p = Variable(torch.from_numpy(data_p.cpu().data.numpy()[hard_triplets]).cuda())
        selected_data_n = Variable(torch.from_numpy(data_n.cpu().data.numpy()[hard_triplets]).cuda())

        selected_label_p = torch.from_numpy(label_p.cpu().numpy()[hard_triplets])
        selected_label_n = torch.from_numpy(label_n.cpu().numpy()[hard_triplets])
        triplet_loss = TripletMarginLoss(args.margin).forward(out_selected_a, out_selected_p, out_selected_n)

        cls_a = model.forward_classifier(selected_data_a)
        cls_p = model.forward_classifier(selected_data_p)
        cls_n = model.forward_classifier(selected_data_n)

        criterion = nn.CrossEntropyLoss()
        predicted_labels = torch.cat([cls_a, cls_p, cls_n])
        true_labels = torch.cat(
            [Variable(selected_label_p.cuda()), Variable(selected_label_p.cuda()), Variable(selected_label_n.cuda())])

        cross_entropy_loss = criterion(predicted_labels.cuda(), true_labels.cuda())

        loss = cross_entropy_loss + triplet_loss
        # compute gradient and update weights
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # update the optimizer learning rate
        adjust_learning_rate(optimizer)

        # log loss value
        logger.log_value('triplet_loss', triplet_loss.item()).step()
        logger.log_value('cross_entropy_loss', cross_entropy_loss.item()).step()
        logger.log_value('total_loss', loss.item()).step()
        if batch_idx % args.log_interval == 0:
            pbar.set_description(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} \t # of Selected Triplets: {}'.format(
                    epoch, batch_idx * len(data_a), len(train_loader.dataset),
                           100. * batch_idx / len(train_loader),
                    loss.item(), len(hard_triplets[0])))

        dists = l2_dist.forward(out_selected_a,
                                out_selected_n)  # torch.sqrt(torch.sum((out_a - out_n) ** 2, 1))  # euclidean distance
        distances.append(dists.data.cpu().numpy())
        labels.append(np.zeros(dists.size(0)))

        dists = l2_dist.forward(out_selected_a,
                                out_selected_p)  # torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))  # euclidean distance
        distances.append(dists.data.cpu().numpy())
        labels.append(np.ones(dists.size(0)))

    labels = np.array([sublabel for label in labels for sublabel in label])
    distances = np.array([subdist[0] for dist in distances for subdist in dist])

    tpr, fpr, accuracy, val, val_std, far = evaluate(distances, labels)
    print('\33[91mTrain set: Accuracy: {:.8f}\n\33[0m'.format(np.mean(accuracy)))
    logger.log_value('Train Accuracy', np.mean(accuracy))

    plot_roc(fpr, tpr, figure_name="roc_train_epoch_{}.png".format(epoch))

    # do checkpointing
    torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict()},
               '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))
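The checkpoint written above stores only 'epoch' and 'state_dict'. A minimal sketch of resuming from it:

import torch

def resume(model, path):
    # Restore weights and return the epoch to continue from.
    ckpt = torch.load(path, map_location='cpu')
    model.load_state_dict(ckpt['state_dict'])
    return ckpt['epoch']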
Example #29
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            # print('features', features.size(), features)
            qf.append(features)
            # print('qf', len(qf))
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    # cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        dataset_type=args.dataset)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
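The distmat computation above relies on the expansion ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q . g; a minimal sanity-check sketch against torch.cdist, with random features and arbitrarily chosen shapes:

import torch

qf = torch.randn(4, 128)   # 4 query embeddings
gf = torch.randn(6, 128)   # 6 gallery embeddings

# Expanded form, as in the snippet above.
m, n = qf.size(0), gf.size(0)
distmat = qf.pow(2).sum(dim=1, keepdim=True).expand(m, n) + \
          gf.pow(2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)

# Direct form: squared pairwise Euclidean distances.
reference = torch.cdist(qf, gf, p=2).pow(2)
assert torch.allclose(distmat, reference, atol=1e-5)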
Example #30
0
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    batch_time = AverageMeter()
    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu: imgs = imgs.cuda()
            b, n, s, c, h, w = imgs.size()  # (batch, clips, seq_len, channels, H, W)
            assert b == 1  # evaluation uses one tracklet per batch
            imgs = imgs.view(b * n, s, c, h, w)

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.view(n, -1)
            features = torch.mean(features, 0)
            features = features.cpu()
            qf.append(features.numpy())
            q_pids.extend(pids.numpy())
            q_camids.extend(camids.numpy())
        qf = np.stack(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.shape[0], qf.shape[1]))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()
            b, n, s, c, h, w = imgs.size()  # (batch, clips, seq_len, channels, H, W)
            assert b == 1  # evaluation uses one tracklet per batch
            imgs = imgs.view(b * n, s, c, h, w)

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.view(n, -1)
            features = torch.mean(features, 0)
            features = features.cpu()
            gf.append(features.numpy())
            g_pids.extend(pids.numpy())
            g_camids.extend(camids.numpy())
        gf = np.stack(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.shape[0], gf.shape[1]))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch * args.seq_len))

    m, n = qf.shape[0], gf.shape[0]
    distmat = np.tile((qf**2).sum(axis=1, keepdims=True),(1, n)) + \
              np.tile((gf**2).sum(axis=1, keepdims=True),(1, m)).transpose()
    distmat = distmat - 2 * np.dot(qf, gf.transpose())

    print("Computing CMC and mAP")
    cmc, mAP, mINP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)

    print("Results ----------")
    print("mINP: {:.1%} mAP: {:.1%} CMC curve {:.1%} {:.1%} {:.1%} {:.1%}".
          format(mINP, mAP, cmc[1 - 1], cmc[5 - 1], cmc[10 - 1], cmc[20 - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
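For reference, the rank-k accuracies printed above can be computed from distmat with a few lines of NumPy; a minimal sketch that ignores the camera-ID filtering a full re-ID evaluate() performs:

import numpy as np

def simple_cmc(distmat, q_pids, g_pids, max_rank=20):
    # Sort the gallery by ascending distance for each query, then check
    # at which rank the first correct identity appears.
    indices = np.argsort(distmat, axis=1)
    matches = (g_pids[indices] == q_pids[:, np.newaxis])
    cmc = matches[:, :max_rank].cumsum(axis=1)
    cmc = (cmc > 0).astype(np.float32)
    return cmc.mean(axis=0)  # cmc[k - 1] is the rank-k accuracy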
Example #31
0
def train_valid(model, optimizer, triploss, scheduler, epoch, dataloaders, data_size):
    for phase in ['train', 'valid']:

        labels, distances = [], []
        triplet_loss_sum = 0.0

        if phase == 'train':
            scheduler.step()  # note: PyTorch >= 1.1 expects scheduler.step() after optimizer.step()
            if scheduler.last_epoch % scheduler.step_size == 0:
                print("LR decayed to:", ', '.join(map(str, scheduler.get_lr())))
            model.train()
        else:
            model.eval()

        for batch_idx, batch_sample in enumerate(dataloaders[phase]):

            anc_img = batch_sample['anc_img'].to(device)
            pos_img = batch_sample['pos_img'].to(device)
            neg_img = batch_sample['neg_img'].to(device)

            # pos_cls = batch_sample['pos_class'].to(device)
            # neg_cls = batch_sample['neg_class'].to(device)

            with torch.set_grad_enabled(phase == 'train'):

                # anc_embed, pos_embed, and neg_embed are the embeddings of the three images
                anc_embed, pos_embed, neg_embed = model(anc_img), model(pos_img), model(neg_img)

                # select the semi-hard negatives only during training
                pos_dist = l2_dist.forward(anc_embed, pos_embed)
                neg_dist = l2_dist.forward(anc_embed, neg_embed)

                # Mask of triplets that violate the margin; avoid shadowing the builtin all().
                violations = (neg_dist - pos_dist < args.margin).cpu().numpy().flatten()
                if phase == 'train':
                    hard_triplets = np.where(violations == 1)
                    if len(hard_triplets[0]) == 0:
                        continue
                else:
                    hard_triplets = np.where(violations >= 0)  # validation keeps every triplet

                anc_hard_embed = anc_embed[hard_triplets]
                pos_hard_embed = pos_embed[hard_triplets]
                neg_hard_embed = neg_embed[hard_triplets]

                anc_hard_img = anc_img[hard_triplets]
                pos_hard_img = pos_img[hard_triplets]
                neg_hard_img = neg_img[hard_triplets]

                # pos_hard_cls = pos_cls[hard_triplets]
                # neg_hard_cls = neg_cls[hard_triplets]

                model.module.forward_classifier(anc_hard_img)
                model.module.forward_classifier(pos_hard_img)
                model.module.forward_classifier(neg_hard_img)

                triplet_loss = triploss.forward(anc_hard_embed, pos_hard_embed, neg_hard_embed)

                if phase == 'train':
                    optimizer.zero_grad()
                    triplet_loss.backward()
                    optimizer.step()

                distances.append(pos_dist.data.cpu().numpy())
                labels.append(np.ones(pos_dist.size(0)))

                distances.append(neg_dist.data.cpu().numpy())
                labels.append(np.zeros(neg_dist.size(0)))

                triplet_loss_sum += triplet_loss.item()

        avg_triplet_loss = triplet_loss_sum / data_size[phase]
        labels = np.array([sublabel for label in labels for sublabel in label])
        distances = np.array([subdist for dist in distances for subdist in dist])

        tpr, fpr, accuracy, val, val_std, far = evaluate(distances, labels)
        print('  {} set - Triplet Loss       = {:.8f}'.format(phase, avg_triplet_loss))
        print('  {} set - Accuracy           = {:.8f}'.format(phase, np.mean(accuracy)))

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")  # avoid shadowing the time module
        lr = '_'.join(map(str, scheduler.get_lr()))
        layers = '+'.join(args.unfreeze.split(','))
        write_csv(f'log/{phase}.csv', [timestamp, epoch, np.mean(accuracy), avg_triplet_loss, layers, args.batch_size, lr])

        if phase == 'valid':
            save_last_checkpoint({'epoch': epoch,
                                  'state_dict': model.module.state_dict(),
                                  'optimizer_state': optimizer.state_dict(),
                                  'accuracy': np.mean(accuracy),
                                  'loss': avg_triplet_loss
                                  })
            save_if_best({'epoch': epoch,
                          'state_dict': model.module.state_dict(),
                          'optimizer_state': optimizer.state_dict(),
                          'accuracy': np.mean(accuracy),
                          'loss': avg_triplet_loss
                          }, np.mean(accuracy))
        else:
            # This branch runs for the train phase, so name the ROC figure accordingly.
            plot_roc(fpr, tpr, figure_name='./log/roc_train_epoch_{}.png'.format(epoch))
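A driver loop for this routine might look as follows; everything beyond the call signature is an assumption (build_model, the DataParallel wrapping implied by model.module, args.epochs, and the StepLR/TripletMarginLoss choices are illustrative placeholders):

import torch
import torch.nn as nn
from torch.optim.lr_scheduler import StepLR

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# train_valid calls model.module.*, so the model is assumed to be DataParallel-wrapped.
model = nn.DataParallel(build_model()).to(device)   # build_model() is a placeholder
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
triploss = nn.TripletMarginLoss(margin=args.margin)
scheduler = StepLR(optimizer, step_size=10, gamma=0.1)

for epoch in range(args.epochs):
    # dataloaders and data_size: dicts keyed by 'train' and 'valid', as train_valid expects
    train_valid(model, optimizer, triploss, scheduler, epoch, dataloaders, data_size)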