Example #1
0
def test(test_loader, model, epoch):
    """Evaluate `model` on `test_loader` and report FPR at 95% recall.

    Args:
        test_loader: DataLoader yielding (anchor, positive, label) batches.
        model: network mapping a patch batch to descriptor embeddings.
        epoch: current epoch number (progress display only).
    """
    # switch to evaluate mode (freezes dropout / batch-norm statistics)
    model.eval()

    labels, distances = [], []

    pbar = tqdm(enumerate(test_loader))
    # torch.no_grad() replaces the deprecated Variable(..., volatile=True):
    # no autograd graph is built during evaluation.
    with torch.no_grad():
        for batch_idx, (data_a, data_p, label) in pbar:
            if args.cuda:
                data_a, data_p = data_a.cuda(), data_p.cuda()

            # compute embeddings and their pairwise Euclidean distance
            out_a, out_p = model(data_a), model(data_p)
            dists = torch.sqrt(torch.sum((out_a - out_p)**2,
                                         1))  # euclidean distance
            distances.append(dists.data.cpu().numpy())
            labels.append(label.data.cpu().numpy())

            if batch_idx % args.log_interval == 0:
                pbar.set_description('Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                    epoch, batch_idx * len(data_a), len(test_loader.dataset),
                    100. * batch_idx / len(test_loader)))

    # measure accuracy (FPR95): false-positive rate at 95% recall
    num_tests = test_loader.dataset.matches.size(0)
    labels = np.vstack(labels).reshape(num_tests)
    distances = np.vstack(distances).reshape(num_tests)

    fpr95 = ErrorRateAt95Recall(labels, distances)
    print('\33[91mTest set: Accuracy(FPR95): {:.8f}\n\33[0m'.format(fpr95))

    logger.log_value('fpr95', fpr95)
def test(test_loader, model, epoch, logger, logger_test_name):
    """Evaluate `model` on `test_loader` and log FPR at 95% recall.

    Args:
        test_loader: DataLoader yielding (anchor, positive, label) batches.
        model: network mapping a patch batch to descriptor embeddings.
        epoch: current epoch number (progress display only).
        logger: metrics logger with a `log_value(name, value)` method.
        logger_test_name: prefix identifying this test split in logs.
    """
    # switch to evaluate mode (freezes dropout / batch-norm statistics)
    model.eval()

    labels, distances = [], []

    pbar = tqdm(enumerate(test_loader))
    # torch.no_grad() replaces the deprecated Variable(..., volatile=True):
    # no autograd graph is built during evaluation.
    with torch.no_grad():
        for batch_idx, (data_a, data_p, label) in pbar:

            if args.cuda:
                data_a, data_p = data_a.cuda(), data_p.cuda()

            out_a = model(data_a)
            out_p = model(data_p)
            dists = torch.sqrt(torch.sum((out_a - out_p)**2,
                                         1))  # euclidean distance
            distances.append(dists.data.cpu().numpy().reshape(-1, 1))
            labels.append(label.data.cpu().numpy().reshape(-1, 1))

            if batch_idx % args.log_interval == 0:
                pbar.set_description(logger_test_name +
                                     ' Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                                         epoch, batch_idx * len(data_a),
                                         len(test_loader.dataset), 100. *
                                         batch_idx / len(test_loader)))

    num_tests = test_loader.dataset.matches.size(0)
    labels = np.vstack(labels).reshape(num_tests)
    distances = np.vstack(distances).reshape(num_tests)

    # ErrorRateAt95Recall expects similarity scores: invert distances,
    # with a small epsilon to avoid division by zero.
    fpr95 = ErrorRateAt95Recall(labels, 1.0 / (distances + 1e-8))
    print('\33[91mTest set: Accuracy(FPR95): {:.8f}\n\33[0m'.format(fpr95))

    if (args.enable_logging):
        logger.log_value(logger_test_name + ' fpr95', fpr95)
    return
Example #3
0
def test(test_loader, model, epoch, logger, logger_test_name):
    """Evaluate `model`; report FPR95 to stdout, TensorBoard and a CSV log.

    Args:
        test_loader: DataLoader yielding (anchor, positive, label) batches.
        model: network mapping a patch batch to descriptor embeddings.
        epoch: current epoch number, used as the TensorBoard global step.
        logger: metrics logger with a `log_value(name, value)` method.
        logger_test_name: prefix identifying this test split in logs.
    """
    # switch to evaluate mode (freezes dropout / batch-norm statistics)
    model.eval()

    labels, distances = [], []

    pbar = tqdm(enumerate(test_loader))
    # torch.no_grad() replaces the deprecated Variable(..., volatile=True):
    # no autograd graph is built during evaluation.
    with torch.no_grad():
        for batch_idx, (data_a, data_p, label) in pbar:

            if args.cuda:
                data_a, data_p = data_a.cuda(), data_p.cuda()

            out_a = model(data_a)
            out_p = model(data_p)
            dists = torch.sqrt(torch.sum((out_a - out_p)**2,
                                         1))  # euclidean distance
            distances.append(dists.data.cpu().numpy().reshape(-1, 1))
            labels.append(label.data.cpu().numpy().reshape(-1, 1))

            if batch_idx % args.log_interval == 0:
                pbar.set_description(logger_test_name +
                                     ' Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                                         epoch, batch_idx * len(data_a),
                                         len(test_loader.dataset), 100. *
                                         batch_idx / len(test_loader)))

    num_tests = test_loader.dataset.matches.size(0)
    labels = np.vstack(labels).reshape(num_tests)
    distances = np.vstack(distances).reshape(num_tests)

    # ErrorRateAt95Recall expects similarity scores: invert distances,
    # with a small epsilon to avoid division by zero.
    fpr95 = ErrorRateAt95Recall(labels, 1.0 / (distances + 1e-8))
    print('\33[91mTest set: Accuracy(FPR95): {:.8f}\n\33[0m'.format(fpr95))
    tr_writer.add_scalar("fpr95", fpr95, global_step=epoch)
    # context manager guarantees the log file is closed even if savetxt raises
    with open(fpr95_log, "a+") as f:
        np.savetxt(f, [epoch, fpr95], delimiter=',')

    if (args.enable_logging):
        logger.log_value(logger_test_name + ' fpr95', fpr95)
    return
Example #4
0
def test(test_loader, model, epoch, logger, logger_test_name):
    """Evaluate `model` on `test_loader` and log FPR at 95% recall.

    Args:
        test_loader: DataLoader yielding (anchor, positive, label) batches,
            where label is 1 for a matching pair and 0 otherwise.
        model: network mapping a patch batch to descriptor embeddings.
        epoch: current epoch number (display and log text only).
        logger: logger with a `log_string(tag, message)` method.
        logger_test_name: name identifying this test split in logs.
    """
    # switch to evaluate mode (freezes dropout / batch-norm statistics)
    model.eval()

    labels, distances = [], []

    # data_anchor(img_idx), data_positive(img_idx), match_or_not(0/1)
    pbar = tqdm(enumerate(test_loader))
    # torch.no_grad() replaces the deprecated Variable(..., volatile=True):
    # no autograd graph is built during evaluation.
    with torch.no_grad():
        for batch_idx, (data_a, data_p, label) in pbar:

            if args.cuda:
                data_a, data_p = data_a.cuda(), data_p.cuda()

            out_a, out_p = model(data_a), model(data_p)
            dists = torch.sqrt(torch.sum((out_a - out_p)**2,
                                         1))  # euclidean distance
            distances.append(dists.data.cpu().numpy().reshape(-1, 1))
            labels.append(label.data.cpu().numpy().reshape(-1, 1))

            if batch_idx % args.log_interval == 0:
                pbar.set_description(logger_test_name +
                                     ' Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                                         epoch, batch_idx * len(data_a),
                                         len(test_loader.dataset), 100. *
                                         batch_idx / len(test_loader)))

    num_tests = test_loader.dataset.matches.size(0)
    labels = np.vstack(labels).reshape(num_tests)
    distances = np.vstack(distances).reshape(num_tests)

    # ErrorRateAt95Recall expects similarity scores: invert distances,
    # with a small epsilon to avoid division by zero.
    fpr95 = ErrorRateAt95Recall(labels, 1.0 / (distances + 1e-8))
    print('Test set: Accuracy(FPR95): {:.8f}\n'.format(fpr95))

    if (args.enable_logging):
        logger.log_string(
            'logs', 'Test Epoch {}/{}: Accuracy(FPR95)@{}: {:.8f}'.format(
                epoch, args.start_epoch + args.epochs, logger_test_name,
                fpr95))
    return
Example #5
0
    def test(self, test_loader, model, epoch, logger, logger_test_name):
        """Evaluate `model` on `test_loader` and log FPR at 95% recall."""
        print("Testing model")
        # evaluation mode: freeze dropout / batch-norm statistics
        model.eval()

        all_labels = []
        all_dists = []

        progress = tqdm(enumerate(test_loader))
        for step, (anchor, positive, match) in progress:

            if self.args.cuda:
                anchor, positive = anchor.cuda(), positive.cuda()

            # forward passes run without building an autograd graph
            with torch.no_grad():
                anchor, positive, match = Variable(anchor), \
                                          Variable(positive), Variable(match)
                emb_a = model(anchor)
                emb_p = model(positive)
            # Euclidean distance between the two embeddings
            batch_dists = torch.sqrt(torch.sum((emb_a - emb_p) ** 2, 1))
            all_dists.append(batch_dists.data.cpu().numpy().reshape(-1, 1))
            all_labels.append(match.data.cpu().numpy().reshape(-1, 1))

            if step % self.args.log_interval == 0:
                progress.set_description(
                    logger_test_name + ' Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                        epoch, step * len(anchor), len(test_loader.dataset),
                        100. * step / len(test_loader)))

        num_tests = test_loader.dataset.matches.size(0)
        flat_labels = np.vstack(all_labels).reshape(num_tests)
        flat_dists = np.vstack(all_dists).reshape(num_tests)

        # similarity = inverse distance; epsilon guards against divide-by-zero
        fpr95 = ErrorRateAt95Recall(flat_labels, 1.0 / (flat_dists + 1e-8))
        print('\33[91mTest set: Accuracy(FPR95): {:.8f}\n\33[0m'.format(fpr95))

        if (self.args.enable_logging):
            logger.log_value(logger_test_name + ' fpr95', fpr95)
        return
Example #6
0
def test(test_loader, model, epoch, logger, logger_test_name):
    """Evaluate `model`, log FPR95, and save a distance-histogram figure.

    Args:
        test_loader: DataLoader yielding (anchor, positive, label) batches,
            where label is 1 for a matching pair and 0 otherwise.
        model: network mapping a patch batch to descriptor embeddings.
        epoch: current epoch number (used in the histogram filename).
        logger: metrics logger with a `log_value(name, value)` method.
        logger_test_name: name of this test split (unused here; kept for
            signature compatibility with the other test() variants).
    """
    # switch to evaluate mode (freezes dropout / batch-norm statistics)
    model.eval()

    labels, distances = [], []

    pbar = tqdm(enumerate(test_loader))
    # torch.no_grad() replaces the deprecated Variable(..., volatile=True):
    # no autograd graph is built during evaluation.
    with torch.no_grad():
        for batch_idx, (data_a, data_p, label) in pbar:
            if args.cuda:
                data_a, data_p = data_a.cuda(), data_p.cuda()

            out_a, out_p = model(data_a), model(data_p)
            dists = torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))  # euclidean distance
            distances.append(dists.data.cpu().numpy().reshape(-1, 1))
            labels.append(label.data.cpu().numpy().reshape(-1, 1))

    num_tests = test_loader.dataset.matches.size(0)
    labels = np.vstack(labels).reshape(num_tests)
    distances = np.vstack(distances).reshape(num_tests)

    fpr95 = ErrorRateAt95Recall(labels, distances)
    print('\33[91mTest set: Accuracy(FPR95): {:.8f}\n\33[0m'.format(fpr95))
    if (args.enable_logging):
        logger.log_value('fpr95', fpr95)

    # Histogram of matched vs. non-matched pair distances.
    # makedirs(exist_ok=True) replaces the bare try/except around os.stat/mkdir.
    os.makedirs('../histogram_map/' + suffix, exist_ok=True)

    bins = np.linspace(0, 2, 100)
    pos_dists = distances[labels == 1]
    neg_dists = distances[labels == 0]
    plt.hist(pos_dists, bins, alpha=0.5, label='Matched Pairs')
    plt.hist(neg_dists, bins, alpha=0.5, label='Non-Matched Pairs')
    plt.legend(loc='upper left')
    plt.xlim(0, 2)
    plt.ylim(0, 2e4)
    plt.xlabel('l2')
    plt.ylabel('#Pairs')
    plt.savefig('../histogram_map/{}/iter_{}.png'.format(suffix, epoch),
                bbox_inches='tight')
    plt.clf()

    # Fraction of true matches with distance below 0.3 (threshold is heuristic)
    good_match_ratio = np.sum((distances < 0.3) * labels) / np.sum(labels == 1)
    print('Good match ratio for test: {}'.format(good_match_ratio))
    if (args.enable_logging):
        logger.log_value('good_match_ratio', good_match_ratio)

    # Fraction of non-matches correctly pushed beyond distance 0.8
    good_mismatch_ratio = np.sum((distances > 0.8) * (1 - labels)) / np.sum(labels == 0)
    print('Good mismatch ratio for test: {}'.format(good_mismatch_ratio))
    if (args.enable_logging):
        logger.log_value('good_mismatch_ratio', good_mismatch_ratio)

    # Fraction of non-matches confusingly close (distance below 0.4)
    bad_mismatch_ratio = np.sum((distances < 0.4) * (1 - labels)) / np.sum(labels == 0)
    print('Bad mismatch ratio for test: {}'.format(bad_mismatch_ratio))
    if (args.enable_logging):
        logger.log_value('bad_mismatch_ratio', bad_mismatch_ratio)

    return