Example #1
def test(**kwargs):
    import glob
    import os  # needed for os.path.getmtime below
    opt.parse(kwargs)  # apply overrides before opt.model is read
    pths = glob.glob('checkpoints/%s/*.pth' % opt.model)
    pths.sort(key=os.path.getmtime, reverse=True)  # newest checkpoint first
    print(pths)
    # Model
    opt.load_model_path = pths[0]
    model = getattr(models, opt.model)().eval()
    assert os.path.exists(opt.load_model_path)
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu: model.cuda()
    model.train(False)
    # Data
    #result_name = '../../model/se-resnet/test_se_resnet50'
    test_data = myData(
        filelists=opt.test_filelists,
        image_size=opt.image_size,
        #transform =data_transforms['val'],
        transform=None,
        scale=opt.cropscale,
        test=True,
        data_source='none')

    #	test_data = myData(root = opt.test_roo,datatxt='test.txt',
    #				test = True,transform = data_transforms['test'])
    test_loader = DataLoader(dataset=test_data,
                             batch_size=opt.batch_size // 2,
                             shuffle=False)
    #test_loader =DataLoader(dataset = test_data,batch_size = opt.batch_size//2,shuffle =True)

    result_list = []

    label_list = []

    for step, batch in enumerate(tqdm(test_loader, desc='test', unit='batch')):
        data, label, image_path = batch
        with torch.no_grad():
            if opt.use_gpu:
                data = data.cuda()
            outputs = model(data)
            outputs = torch.softmax(outputs, dim=-1)
            preds = outputs.to('cpu').numpy()
            for i in range(preds.shape[0]):
                result_list.append(preds[i, 1])
                label_list.append(label[i])
    metric = roc.cal_metric(label_list, result_list)
    eer = metric[0]
    tprs = metric[1]
    auc = metric[2]
    xy_dic = metric[3]
    pickle.dump(xy_dic, open('result/xy.pickle', 'wb'))
    line = 'EER: {:.6f} TPR(1.0%): {:.6f} TPR(.5%): {:.6f} AUC: {:.8f}'.format(
        eer, tprs["TPR(1.%)"], tprs["TPR(.5%)"], auc)
    with open('result/test.txt', 'a') as f:  # avoid leaking the file handle
        print(line, file=f)
    print(line)
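All of these examples funnel per-sample bona-fide probabilities and labels into roc.cal_metric, whose implementation is not shown. The sketch below is a minimal, assumed reconstruction built on sklearn's ROC utilities, returning (eer, tprs, auc, xy_dic) with the dictionary keys that Example #1 reads; the name cal_metric_sketch and the key strings are illustrative assumptions.

import numpy as np
from sklearn.metrics import roc_curve, auc as sk_auc

def cal_metric_sketch(labels, scores):
    # ROC curve over bona-fide probabilities (label 1 = bona fide)
    fpr, tpr, _ = roc_curve(labels, scores, pos_label=1)
    # EER: the operating point where FPR equals FNR (= 1 - TPR)
    idx = np.nanargmin(np.abs(fpr - (1 - tpr)))
    eer = (fpr[idx] + (1 - tpr[idx])) / 2
    # TPR at fixed FPR targets, interpolated along the ROC curve
    tprs = {
        "TPR(1.%)": float(np.interp(0.01, fpr, tpr)),
        "TPR(.5%)": float(np.interp(0.005, fpr, tpr)),
    }
    xy_dic = {'x': fpr, 'y': tpr}  # what Example #1 pickles for plotting
    return eer, tprs, sk_auc(fpr, tpr), xy_dic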
Example #2
def val(model, dataloader, data_len):
    # Put the model in evaluation mode
    criterion = FocalLoss(2)
    model.train(False)
    running_loss = 0
    running_corrects = 0
    confusion_matrix = meter.ConfusionMeter(2)
    result_list = []

    label_list = []
    for ii, data in enumerate(tqdm(dataloader, desc='Val %s On Anti-spoofing' % opt.model, unit='batch')):
        input, label = data
        val_input = Variable(input)
        val_label = Variable(label)
        if opt.use_gpu:
            val_input = val_input.cuda()
            val_label = val_label.cuda()
        with torch.no_grad():  # run the whole forward pass without tracking gradients
            score = model(val_input)
            loss = criterion(score, val_label)
        _, preds = torch.max(score, 1)
        # confusion_matrix.add(score.data.squeeze(), val_label)
        running_loss += loss.item() * val_input.size(0)
        running_corrects += torch.sum(preds == val_label.data)

        outputs = torch.softmax(score, dim=-1)
        preds = outputs.to('cpu').detach().numpy()
        for i_batch in range(preds.shape[0]):
            result_list.append(preds[i_batch, 1])
            label_list.append(label[i_batch])
    # Restore the model to training mode
    model.train(True)

    metric = roc.cal_metric(label_list, result_list)
    # cm_value = confusion_matrix.value()
    val_loss = running_loss / data_len
    val_accuracy = running_corrects.double() / float(data_len)
    return val_loss, val_accuracy, metric
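FocalLoss here is a project-local class, not part of torch.nn. Below is a minimal sketch of a two-class focal loss consistent with how Example #2 calls it (criterion(score, val_label) on raw logits). Whether the constructor argument 2 is the class count or the focusing parameter gamma is not shown, so both are exposed as parameters; the class name is illustrative.

import torch.nn as nn
import torch.nn.functional as F

class FocalLossSketch(nn.Module):
    def __init__(self, num_classes=2, gamma=2.0):
        super().__init__()
        self.num_classes = num_classes
        self.gamma = gamma

    def forward(self, logits, targets):
        log_probs = F.log_softmax(logits, dim=-1)
        probs = log_probs.exp()
        # log-probability and probability of each sample's true class
        target_log_p = log_probs.gather(1, targets.unsqueeze(1)).squeeze(1)
        target_p = probs.gather(1, targets.unsqueeze(1)).squeeze(1)
        # down-weight easy examples by (1 - p_t)^gamma
        return (-(1.0 - target_p) ** self.gamma * target_log_p).mean()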
Example #3
File: test.py Project: wgqtmac/cvprw2020
                FN += 1
            elif labels[i] == 0 and preds[i] == 1:
                FP += 1

        outputs = torch.softmax(outputs, dim=-1)
        preds_prob = outputs.to('cpu').detach().numpy()
        labels = labels.to('cpu').detach().numpy()
        for i_batch in range(preds.shape[0]):
            result_list.append(preds_prob[i_batch, 1])
            label_list.append(labels[i_batch])

    TP_rate = float(TP / (TP + FN))
    TN_rate = float(TN / (TN + FP))

    HTER = 1 - (TP_rate + TN_rate) / 2
    metric = roc.cal_metric(label_list, result_list, True)

    print('Test set: Accuracy: {:.4f}, Auc: {:.4f}, HTER: {:.4f}'.format(
        correct.float() / len(test_loader.dataset), metric[0], HTER))
    print()
    # _, pred = output.topk(1, 1, largest=True, sorted=True)

    # label = label.view(label.size(0), -1).expand_as(pred)
    # correct = pred.eq(label).float()
    #
    # # #compute top 5
    # # correct_5 += correct[:, :5].sum()
    #
    # #compute top1
    # correct_1 += correct[:, :1].sum()
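Example #3 (and Example #6 below) derive HTER from manual TP/TN/FP/FN counts. As a cross-check, HTER written as 1 - (TPR + TNR) / 2 is algebraically the mean of the two error rates; a minimal helper with the hypothetical name hter:

def hter(tp, tn, fp, fn):
    fpr = fp / (tn + fp)  # attacks accepted as bona fide
    fnr = fn / (tp + fn)  # bona fide rejected as attack
    return (fpr + fnr) / 2  # == 1 - (tpr + tnr) / 2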
Example #4
def validate(val_loader, model, criterion, epoch):
    global time_stp
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    result_list = []
    label_list = []
    predicted_list = []

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        for i, (input, target, depth_dirs) in enumerate(val_loader):
            with torch.no_grad():  # redundant: the loop already runs under no_grad above
                input_var = []
                for _input in input:
                    input_var.append(Variable(_input).float().to(device))
                target_var = Variable(target).long().to(device)

                # compute output
                output = model(input_var)
                loss = criterion(output, target_var)

                # measure accuracy and record loss
                prec1, prec2 = accuracy(output.data, target_var, topk=(1, 2))
                losses.update(loss.data, input[0].size(0))
                top1.update(prec1[0], input[0].size(0))

                soft_output = torch.softmax(output, dim=-1)
                preds = soft_output.to('cpu').detach().numpy()
                label = target.to('cpu').detach().numpy()
                _, predicted = torch.max(soft_output.data, 1)
                predicted = predicted.to('cpu').detach().numpy()

                for i_batch in range(preds.shape[0]):
                    result_list.append(preds[i_batch, 1])
                    label_list.append(label[i_batch])
                    predicted_list.append(predicted[i_batch])
                    if args.val_save:
                        sub_path = 'submission/{}_{}_{}_{}_{}_{}_submission.txt'.format(
                            sub, modal, time_stp, args.arch, epoch,
                            'test' if args.phase_test else 'dev')
                        depth_dir = depth_dirs[i_batch].replace('D:\\py_workspace\\data\\test_cut\\', '')
                        # open per write and close promptly instead of leaking handles
                        with open(sub_path, 'a+') as f:
                            f.write(depth_dir + ' ' + str(preds[i_batch, 1]) + '\n')

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if i % args.print_freq == 0:
                    line = 'Test: [{0}/{1}]\t' \
                           'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                           'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                           'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(i, len(val_loader), batch_time=batch_time,
                                                                             loss=losses, top1=top1)

                    with open('logs/{}_{}_{}_{}.log'.format(sub, modal, time_stp, args.arch), 'a+') as flog:
                        flog.write('{}\n'.format(line))
                        print(line)

    culc = confusion_matrix(label_list, predicted_list).ravel()
    matrix_edge = int(math.sqrt(len(culc)))  # side length of the (square) confusion matrix
    print(culc)
    # take the 2x2 sub-block (tn, fp / fn, tp) even if extra classes appear
    tn, fp, fn, tp = np.hstack((culc[:2], culc[matrix_edge:matrix_edge + 2]))
    apcer = fp / (tn + fp)
    npcer = fn / (fn + tp)
    acer = (apcer + npcer) / 2
    metric = roc.cal_metric(label_list, result_list)
    eer = metric[0]
    tprs = metric[1]
    auc = metric[2]
    xy_dic = metric[3]
    #     tpr1 = tprs['TPR@FPR=10E-2']
    #     logger.info('eer: {}\t'
    #                 'tpr1: {}\t'
    #                 'auc: {}\t'
    #                 'acer: {}\t'
    #                 'accuracy: {top1.avg:.3f} ({top1.avg:.3f})'
    #           .format(eer,tpr1,auc,acer,top1=top1))
    #     pickle.dump(xy_dic, open('xys/xy_{}_{}_{}.pickle'.format(time_stp, args.arch,epoch),'wb'))
    with open('logs/val_result_{}_{}_{}.txt'.format(sub, time_stp, args.arch), 'a+') as f_result:
        result_line = ('epoch: {} EER: {:.6f} TPR@FPR=10E-2: {:.6f} TPR@FPR=10E-3: {:.6f} '
                       'APCER: {:.6f} NPCER: {:.6f} AUC: {:.8f} Acc: {:.3f} '
                       'TN: {} FP: {} FN: {} TP: {} ACER: {:.8f}').format(
            epoch, eer, tprs["TPR@FPR=10E-2"], tprs["TPR@FPR=10E-3"],
            apcer, npcer, auc, top1.avg, tn, fp, fn, tp, acer)
        f_result.write('{}\n'.format(result_line))
        print(result_line)
    return top1.avg
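The block above computes the standard presentation-attack metrics from the confusion matrix. A compact restatement, assuming the binary convention used throughout these examples (label 0 = attack, label 1 = bona fide, preds[:, 1] = bona-fide probability); the helper name pad_metrics is illustrative:

from sklearn.metrics import confusion_matrix

def pad_metrics(label_list, predicted_list):
    tn, fp, fn, tp = confusion_matrix(label_list, predicted_list).ravel()
    apcer = fp / (tn + fp)   # attack samples classified as bona fide
    npcer = fn / (fn + tp)   # bona-fide samples classified as attack
    acer = (apcer + npcer) / 2
    return apcer, npcer, acer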
Example #5
def validate(val_loader, model, criterion, epoch):
    global time_stp
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    result_list = []
    label_list = []
    predicted_list = []
    subject_name_list = ['start']
    subject_preds = []

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        for i, (input, target, dirs) in enumerate(val_loader):
            with torch.no_grad():  # redundant: the loop already runs under no_grad above
                input_var = Variable(input).float().to(device)
                target_var = Variable(target).long().to(device)

                # compute output
                output = model(input_var)
                loss = criterion(output, target_var)

                # measure accuracy and record loss
                prec1, prec2 = accuracy(output.data, target_var, topk=(1, 2))
                losses.update(loss.data, input.size(0))
                top1.update(prec1[0], input.size(0))

                soft_output = torch.softmax(output, dim=-1)
                preds = soft_output.to('cpu').detach().numpy()
                label = target.to('cpu').detach().numpy()
                _, predicted = torch.max(soft_output.data, 1)
                predicted = predicted.to('cpu').detach().numpy()

                for i_batch in range(preds.shape[0]):
                    result_list.append(preds[i_batch, 1])
                    label_list.append(label[i_batch])
                    predicted_list.append(predicted[i_batch])
                    subject = dirs[i_batch].split('/')[-3]
                    prefix = dirs[i_batch].split('/')[-4]
                    pre_subject = subject_name_list[-1]
                    if subject not in subject_name_list:
                        subject_name_list.append(subject)
                    if i == len(
                            val_loader) - 1 and i_batch == preds.shape[0] - 1:
                        subject_name_list.append('end')
                    subject_preds.append(preds[i_batch, 1])
                    if args.val_save and pre_subject != 'start' and pre_subject != subject_name_list[
                            -1]:
                        mean_pred = np.mean(subject_preds[:-1])
                        # Treat mean predictions between 0.4 and 0.9 as fake:
                        # shift them down by 0.4 so they score as attacks
                        if 0.4 < mean_pred < 0.9:
                            mean_pred = mean_pred - 0.4
                        subject_preds = [preds[i_batch, 1]]
                        if 'phase1' in args.data_root:
                            txt = 'dev'
                        else:
                            txt = 'test'
                        sub_path = ('submission/{}_{}_{}@{}_submission_'.format(
                            time_stp, args.arch, args.mode,
                            args.sub_prot_test) + txt + '.txt')
                        #                             dir_ = dirs[i_batch].replace('/home/zp/dataset/CASIA-CeFA/phase1/','').replace(subject_name_list[-1],pre_subject)
                        dir_ = prefix + '/' + pre_subject
                        if 'test' in dir_:
                            img_dirs = glob(
                                os.path.join(args.data_root + dir_,
                                             'profile/*.jpg'))
                            if len(img_dirs) < 10:
                                # too few frames for this subject: fall back to a random low score
                                mean_pred = np.random.uniform(0, 0.3)
                        # open per write and close promptly instead of leaking handles
                        with open(sub_path, 'a+') as f:
                            f.write(dir_ + ' ' + str(mean_pred) + '\n')
                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if i % args.print_freq == 0:
                    line = 'Test: [{0}/{1}]\t' \
                           'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                           'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                           'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(i, len(val_loader), batch_time=batch_time,
                                    loss=losses, top1=top1)

                    with open(
                            'logs/{}_{}_{}_@{}.log'.format(
                                time_stp, args.arch, args.mode,
                                args.sub_prot_val), 'a+') as flog:
                        flog.write('{}\n'.format(line))
                        print(line)
    print(len(label_list), len(predicted_list),
          confusion_matrix(label_list, predicted_list).ravel())
    tn, fp, fn, tp = confusion_matrix(label_list, predicted_list).ravel()
    apcer = fp / (tn + fp)
    npcer = fn / (fn + tp)
    acer = (apcer + npcer) / 2
    metric = roc.cal_metric(label_list, result_list)
    eer = metric[0]
    tprs = metric[1]
    auc = metric[2]
    xy_dic = metric[3]
    #     tpr1 = tprs['TPR@FPR=10E-2']
    #     logger.info('eer: {}\t'
    #                 'tpr1: {}\t'
    #                 'auc: {}\t'
    #                 'acer: {}\t'
    #                 'accuracy: {top1.avg:.3f} ({top1.avg:.3f})'
    #           .format(eer,tpr1,auc,acer,top1=top1))
    #     pickle.dump(xy_dic, open('xys/xy_{}_{}_{}.pickle'.format(time_stp, args.arch,epoch),'wb'))
    with open(
            'logs/val_result_{}_{}_{}_@{}.txt'.format(time_stp, args.arch,
                                                      args.mode,
                                                      args.sub_prot_val),
            'a+') as f_result:
        result_line = ('epoch: {} EER: {:.6f} TPR@FPR=10E-2: {:.6f} TPR@FPR=10E-3: {:.6f} '
                       'APCER: {:.6f} NPCER: {:.6f} AUC: {:.8f} Acc: {:.3f} '
                       'TN: {} FP: {} FN: {} TP: {} ACER: {:.8f}').format(
            epoch, eer, tprs["TPR@FPR=10E-2"], tprs["TPR@FPR=10E-3"],
            apcer, npcer, auc, top1.avg, tn, fp, fn, tp, acer)
        f_result.write('{}\n'.format(result_line))
        print(result_line)
    return top1.avg
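The subject bookkeeping in this variant (subject_name_list, subject_preds) implements per-subject score averaging: frame scores are grouped by video directory and one mean score is submitted per subject. A simpler offline sketch of the same idea, with an illustrative helper name and the parts[-4]/parts[-3] path convention assumed from the code above:

from collections import defaultdict
import numpy as np

def aggregate_by_subject(dirs, scores):
    by_subject = defaultdict(list)
    for d, s in zip(dirs, scores):
        parts = d.split('/')
        by_subject[parts[-4] + '/' + parts[-3]].append(s)
    # one mean bona-fide score per prefix/subject directory
    return {subj: float(np.mean(v)) for subj, v in by_subject.items()}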
Example #6
def eval_training(epoch):
    net.eval()

    test_loss = 0.0 # cost function error
    correct = 0.0

    result_list = []
    label_list = []
    TP = 0.
    TN = 0.
    FP = 0.
    FN = 0.

    for (images, labels) in test_loader:
        images = Variable(images)
        labels = Variable(labels)

        images = images.cuda()
        labels = labels.cuda()

        outputs = net(images)
        loss = loss_function(outputs, labels)
        # loss = CB_loss(labels, outputs, samples_per_cls, 2, 'softmax', 0.9999, 2.0)
        test_loss += loss.item()
        _, preds = outputs.max(1)
        correct += preds.eq(labels).sum()

        for i in range(len(preds)):
            if labels[i] == 1 and preds[i] == 1:
                TP += 1
            elif labels[i] == 0 and preds[i] == 0:
                TN += 1
            elif labels[i] == 1 and preds[i] == 0:
                FN += 1
            elif labels[i] == 0 and preds[i] == 1:
                FP += 1

        outputs = torch.softmax(outputs, dim=-1)
        preds_prob = outputs.to('cpu').detach().numpy()
        labels = labels.to('cpu').detach().numpy()
        for i_batch in range(preds.shape[0]):
            result_list.append(preds_prob[i_batch, 1])
            label_list.append(labels[i_batch])

    TP_rate = float(TP / (TP + FN))
    TN_rate = float(TN / (TN + FP))

    HTER = 1 - (TP_rate + TN_rate) / 2
    metric = roc.cal_metric(label_list, result_list, False)

    # print('Test set: Average loss: {:.4f}, Accuracy: {:.4f}, Auc: {:.4f}, HTER: {:.4f}'.format(
    #     test_loss / len(test_loader.dataset),
    #     correct.float() / len(test_loader.dataset),
    #     metric[2], HTER
    # ))
    log.write('Test set: Average loss: {:.4f}, Accuracy: {:.4f}, Auc: {:.4f}, HTER: {:.4f}'.format(
        test_loss / len(test_loader.dataset),
        correct.float() / len(test_loader.dataset),
        metric[2], HTER
    ))
    print()

    #add informations to tensorboard
    writer.add_scalar('Test/Average loss', test_loss / len(test_loader.dataset), epoch)
    writer.add_scalar('Test/Accuracy', correct.float() / len(test_loader.dataset), epoch)

    return correct.float() / len(test_loader.dataset)
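The per-sample counting loop in Example #6 can be vectorized with boolean masks; a sketch assuming preds and labels are 1-D tensors of 0/1 class indices on the same device:

def count_confusion(preds, labels):
    tp = ((labels == 1) & (preds == 1)).sum().item()
    tn = ((labels == 0) & (preds == 0)).sum().item()
    fn = ((labels == 1) & (preds == 0)).sum().item()
    fp = ((labels == 0) & (preds == 1)).sum().item()
    return tp, tn, fp, fn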
Example #7
File: train.py Project: skJack/challange
def validate(val_loader, model, criterion, epoch):
    global time_stp
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    result_list = []
    label_list = []
    predicted_list = []

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        for i, data in enumerate(val_loader):
            with torch.no_grad():  # redundant: the loop already runs under no_grad above
                (rgb_img, depth_img, ir_img, hsv_img,
                 YCbCr_img, label, dirs) = data
                rgb_var = Variable(rgb_img).float().to(device)
                depth_var = Variable(depth_img).float().to(device)
                ir_var = Variable(ir_img).float().to(device)
                hsv_img_var = Variable(hsv_img).float().to(device)
                YCbCr_img_var = Variable(YCbCr_img).float().to(device)
                target_var = Variable(label).long().to(device)

                # compute output
                output = model(rgb_var, depth_var, ir_var, hsv_img_var,
                               YCbCr_img_var, args.weight_list)

                loss = criterion(output, target_var)

                # measure accuracy and record loss
                prec1, prec2 = accuracy(output.data, target_var, topk=(1, 2))
                losses.update(loss.data, rgb_img.size(0))
                top1.update(prec1[0], rgb_img.size(0))

                soft_output = torch.softmax(output, dim=-1)
                preds = soft_output.to('cpu').detach().numpy()
                label = label.to('cpu').detach().numpy()
                _, predicted = torch.max(soft_output.data, 1)
                predicted = predicted.to('cpu').detach().numpy()

                for i_batch in range(preds.shape[0]):
                    result_list.append(preds[i_batch, 1])
                    label_list.append(label[i_batch])
                    predicted_list.append(predicted[i_batch])
                    if args.val_save:
                        sub_path = 'submission/{}_{}_{}_submission.txt'.format(
                            time_stp, args.arch, epoch)
                        rgb_dir = dirs[i_batch].replace(
                            os.getcwd() + '/data/', '')
                        depth_dir = rgb_dir.replace('profile', 'depth')
                        ir_dir = rgb_dir.replace('profile', 'ir')
                        # open per write and close promptly instead of leaking handles
                        with open(sub_path, 'a+') as f:
                            f.write(rgb_dir + ' ' + depth_dir + ' ' + ir_dir +
                                    ' ' + str(preds[i_batch, 1]) + '\n')

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if i % args.print_freq == 0:
                    line = 'Test: [{0}/{1}]\t' \
                           'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                           'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
                           'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(i, len(val_loader), batch_time=batch_time,
                                    loss=losses, top1=top1)

                    logger.Print(line)
    tn, fp, fn, tp = confusion_matrix(label_list, predicted_list).ravel()
    apcer = fp / (tn + fp)
    npcer = fn / (fn + tp)
    acer = (apcer + npcer) / 2
    metric = roc.cal_metric(label_list, result_list)
    eer = metric[0]
    tprs = metric[1]
    auc = metric[2]
    xy_dic = metric[3]
    with open(log_path + '/val_result_{}_{}.txt'.format(time_stp, args.arch),
              'a+') as f_result:
        result_line = ('epoch: {} EER: {:.6f} TPR@FPR=10E-2: {:.6f} TPR@FPR=10E-3: {:.6f} '
                       'APCER: {:.6f} NPCER: {:.6f} AUC: {:.8f} Acc: {:.3f} '
                       'TN: {} FP: {} FN: {} TP: {} ACER: {:.8f}').format(
            epoch, eer, tprs["TPR@FPR=10E-2"], tprs["TPR@FPR=10E-3"],
            apcer, npcer, auc, top1.avg, tn, fp, fn, tp, acer)
        f_result.write('{}\n'.format(result_line))
        logger.Print(result_line)
    return top1.avg
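Example #7's model consumes five modalities plus a weight list in a single forward call. The actual architecture in skJack/challange is not shown; the sketch below is one assumed way such a signature can be realized, with a per-modality backbone and weighted feature fusion (backbone_fn, feat_dim, and fusion-by-weighted-sum are all assumptions, not the project's design).

import torch.nn as nn

class FusionSketch(nn.Module):
    def __init__(self, backbone_fn, feat_dim, num_classes=2):
        super().__init__()
        # one feature extractor per modality: rgb, depth, ir, hsv, YCbCr
        self.streams = nn.ModuleList([backbone_fn() for _ in range(5)])
        self.fc = nn.Linear(feat_dim, num_classes)

    def forward(self, rgb, depth, ir, hsv, ycbcr, weight_list):
        feats = [net(x) for net, x in
                 zip(self.streams, (rgb, depth, ir, hsv, ycbcr))]
        # weighted sum of the per-stream feature vectors
        fused = sum(w * f for w, f in zip(weight_list, feats))
        return self.fc(fused)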