Example no. 1
def test_model(model, S_MAX):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    N_TST = 500
    NS = np.linspace(10, 1000, 100)
    ys = np.zeros((len(NS), 4))
    p = 0.001
    model.set_p(p=p)
    model = model.to(device)
    model.eval()
    
    for i, N_MC in enumerate(NS):
        N_MC = int(N_MC)
        tst_data = myData(N=N_TST, s_max=S_MAX)
        X_tst, Y_tst, b0s_tst, b1s_tst, ss_tst = tst_data.get_data()
        
        # draw N_MC stochastic forward passes (MC dropout)
        MC_SAMPLES = [model(Variable(torch.FloatTensor(Y_tst)).to(device)) for _ in range(N_MC)]
        b0_model = torch.stack([tup[0] for tup in MC_SAMPLES]).view(N_MC, Y_tst.shape[0]).cpu().data.numpy()
        b1_model = torch.stack([tup[1] for tup in MC_SAMPLES]).view(N_MC, Y_tst.shape[0]).cpu().data.numpy()
        
        dropout_cov = np.array([calc_cov(b0_tmp-b0s_tst, b1_tmp-b1s_tst) for b0_tmp, b1_tmp in zip(b0_model, b1_model)])
        dropout_cov = np.mean(dropout_cov, 0)
        del MC_SAMPLES, b0_model, b1_model
        ys[i,0] = dropout_cov[0,0]
        ys[i,1] = dropout_cov[0,1]
        ys[i,2] = dropout_cov[1,0]
        ys[i,3] = dropout_cov[1,1]
    return NS, ys
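
The calc_cov helper used throughout these examples is never shown in this file. A minimal sketch, assuming it returns the empirical 2x2 covariance of two 1-D residual vectors (which matches the cov[0, 0] .. cov[1, 1] indexing above):

import numpy as np

# Hypothetical sketch of the project-local calc_cov helper (assumption:
# it returns the 2x2 empirical covariance of two 1-D samples).
def calc_cov(x, y):
    return np.cov(np.stack([x, y]))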
Example no. 2
def test_model(model, S_MAX):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    N_TST = 500
    N_MC = 50
    
    model = model.to(device)
    model.eval()
    
    tst_data_1 = myData(N=N_TST, s_max=S_MAX)
    X_tst_1, Y_tst_1, b0s_tst_1, b1s_tst_1, ss_tst_1 = tst_data_1.get_data()
    
    tst_data_2 = myData(N=N_TST, s_max=S_MAX)
    X_tst, Y_tst, b0s_tst, b1s_tst, ss_tst = tst_data_2.get_data()
    
    print_dropout(model)
    # test model on tst_data_1
    b0s_pred_1, b1s_pred_1, _ = model(Variable(torch.FloatTensor(Y_tst_1)).to(device))
    b0s_pred_1 = b0s_pred_1.cpu().data.numpy().flatten()
    b1s_pred_1 = b1s_pred_1.cpu().data.numpy().flatten()
    
#    pylab.figure()
#    pylab.scatter(b0s_tst_1, b0s_pred_1)
#    pylab.figure()
#    pylab.scatter(b1s_tst_1, b1s_pred_1)
#    pylab.show()
    
    pred_1_cov = calc_cov(b0s_pred_1-b0s_tst_1, b1s_pred_1-b1s_tst_1)
    print(pred_1_cov)
    #ps = np.logspace(np.log10(.00005), np.log10(.5), 50)
    #ps = np.linspace(.00005, .25, 50)
    ps = np.linspace(.000005, .15, 300)
    ys = np.zeros_like(ps)
    
    for i, p in enumerate(ps):
        model.set_p(p=p)
        model = model.to(device)
    
        MC_SAMPLES = [model(Variable(torch.FloatTensor(Y_tst)).to(device)) for _ in range(N_MC)]
        b0_model = torch.stack([tup[0] for tup in MC_SAMPLES]).view(N_MC, Y_tst.shape[0]).cpu().data.numpy()
        b1_model = torch.stack([tup[1] for tup in MC_SAMPLES]).view(N_MC, Y_tst.shape[0]).cpu().data.numpy()
        
        # per-test-point covariance across the N_MC stochastic passes
        dropout_cov = np.array([calc_cov(b0_tmp, b1_tmp) for b0_tmp, b1_tmp in zip(b0_model.T, b1_model.T)])
        dropout_cov = np.mean(dropout_cov, 0)
        # Frobenius norm of the difference between the two covariance estimates
        ys[i] = np.sqrt(np.sum((dropout_cov - pred_1_cov).flatten()**2))
        del MC_SAMPLES, b0_model, b1_model, dropout_cov
    return ps, ys
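
A minimal driver for the sweep above, with myNet and S_MAX assumed from Example no. 9; the plotting calls mirror the pylab usage elsewhere in this file:

# Hypothetical driver (myNet and S_MAX assumed from Example no. 9):
# plot the covariance mismatch against the dropout rate p.
model = myNet()
ps, ys = test_model(model, S_MAX=0.25)
pylab.figure()
pylab.plot(ps, ys)
pylab.xlabel('dropout rate p')
pylab.ylabel('Frobenius norm of covariance mismatch')
pylab.show()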
Example no. 3
def test(**kwargs):
    import glob
    opt.parse(kwargs)  # update config from kwargs before opt.model is used
    pths = glob.glob('checkpoints/%s/*.pth' % (opt.model))
    pths.sort(key=os.path.getmtime, reverse=True)
    print(pths)
    # model: load the most recent checkpoint
    opt.load_model_path = pths[0]
    model = getattr(models, opt.model)().eval()
    assert os.path.exists(opt.load_model_path)
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu: model.cuda()
    model.train(False)
    # data
    #result_name = '../../model/se-resnet/test_se_resnet50'
    test_data = myData(
        filelists=opt.test_filelists,
        image_size=opt.image_size,
        #transform =data_transforms['val'],
        transform=None,
        scale=opt.cropscale,
        test=True,
        data_source='none')

    #	test_data = myData(root = opt.test_roo,datatxt='test.txt',
    #				test = True,transform = data_transforms['test'])
    test_loader = DataLoader(dataset=test_data,
                             batch_size=opt.batch_size // 2,
                             shuffle=False)
    #test_loader =DataLoader(dataset = test_data,batch_size = opt.batch_size//2,shuffle =True)

    result_list = []

    label_list = []

    for step, batch in enumerate(tqdm(test_loader, desc='test', unit='batch')):
        data, label, image_path = batch
        with torch.no_grad():
            if opt.use_gpu:
                data = data.cuda()
            outputs = model(data)
            outputs = torch.softmax(outputs, dim=-1)
            preds = outputs.to('cpu').numpy()
            for i in range(preds.shape[0]):
                result_list.append(preds[i, 1])
                label_list.append(label[i].item())
    eer, tprs, auc, xy_dic = roc.cal_metric(label_list, result_list)
    with open('result/xy.pickle', 'wb') as f:
        pickle.dump(xy_dic, f)
    line = 'EER: {:.6f} TPR(1.0%): {:.6f} TPR(.5%): {:.6f} AUC: {:.8f}'.format(
        eer, tprs["TPR(1.%)"], tprs["TPR(.5%)"], auc)
    with open('result/test.txt', 'a') as f:
        print(line, file=f)  # append to the result log as well as stdout
    print(line)
Example no. 4
def test_covariances(model, S_MAX):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    N_TST = 500
    N_MC = 100

    model = model.to(device)
    model.eval()

    tst_data_1 = myData(N=N_TST, s_max=S_MAX)
    X_tst_1, Y_tst_1, b0s_tst_1, b1s_tst_1, ss_tst_1 = tst_data_1.get_data()

    tst_data_2 = myData(N=N_TST, s_max=S_MAX)
    X_tst, Y_tst, b0s_tst, b1s_tst, ss_tst = tst_data_2.get_data()

    print_dropout(model)

    # test model on tst_data_1
    b0s_pred_1, b1s_pred_1, _ = model(
        Variable(torch.FloatTensor(Y_tst_1)).to(device))
    b0s_pred_1 = b0s_pred_1.cpu().data.numpy().flatten()
    b1s_pred_1 = b1s_pred_1.cpu().data.numpy().flatten()
    # calculate covariance on prediction
    pred_1_cov = calc_cov(b0s_pred_1 - b0s_tst_1, b1s_pred_1 - b1s_tst_1)
    print(pred_1_cov)

    MC_SAMPLES = [
        model(Variable(torch.FloatTensor(Y_tst)).to(device))
        for _ in range(N_MC)
    ]
    b0_model = torch.stack([tup[0] for tup in MC_SAMPLES]).view(
        N_MC, Y_tst.shape[0]).cpu().data.numpy()
    b1_model = torch.stack([tup[1] for tup in MC_SAMPLES]).view(
        N_MC, Y_tst.shape[0]).cpu().data.numpy()

    dropout_cov = np.array([
        calc_cov(b0_tmp, b1_tmp)
        for b0_tmp, b1_tmp in zip(b0_model.T, b1_model.T)
    ])
    dropout_cov = np.mean(dropout_cov, 0)
    # calculate frobenius norm for the difference
    fnorm = np.sqrt(np.sum((dropout_cov - pred_1_cov).flatten()**2))
    return pred_1_cov, dropout_cov, fnorm
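
A hypothetical call site for test_covariances, assuming the module-level setup (model, device, S_MAX) of Example no. 9:

# Hypothetical usage (myNet, device and S_MAX assumed from Example no. 9):
# compare the prediction-error covariance with the MC-dropout covariance.
model = myNet().to(device)
pred_cov, mc_cov, fnorm = test_covariances(model, S_MAX=0.25)
print('prediction-error covariance:\n', pred_cov)
print('MC-dropout covariance:\n', mc_cov)
print('Frobenius norm of the difference: {:.4g}'.format(fnorm))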
Example no. 5
def test(**kwargs):
    import glob
    opt.parse(kwargs)  # update config from kwargs before opt.model is used
    pths = glob.glob('checkpoints/%s/*.pth' % (opt.model))
    pths.sort(key=os.path.getmtime, reverse=True)
    print(pths)
    # model: load the most recent checkpoint
    opt.load_model_path = pths[0]
    model = getattr(models, opt.model)().eval()
    assert os.path.exists(opt.load_model_path)
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu: model.cuda()
    model.train(False)
    # data
    #result_name = '../../model/se-resnet/test_se_resnet50'
    test_data = myData(root=opt.test_root,
                       datatxt='test.txt',
                       test=True,
                       transform=data_transforms['test'])
    test_loader = DataLoader(dataset=test_data,
                             batch_size=opt.batch_size,
                             shuffle=False)

    result_list = []

    for step, batch in enumerate(tqdm(test_loader, desc='test', unit='batch')):
        data, name = batch
        with torch.no_grad():
            if opt.use_gpu:
                data = data.cuda()
            outputs = model(data)
            _, preds = torch.max(outputs, 1)
            preds = preds.to("cpu").numpy()
            for i in range(len(name)):
                result_dict = {}
                result_dict["image_id"] = name[i]
                result_dict["disease_class"] = preds[i]
                result_list.append(result_dict)
    with open('checkpoints/' + opt.model + '/' + opt.result_name + '.json',
              'w') as outfile:
        json.dump(result_list, outfile, ensure_ascii=False)
        outfile.write('\n')
Example no. 6
def test_model(model, S_MAX):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    NS = np.linspace(10, 1000, 100)
    ys = np.zeros((len(NS), 4))
    model = model.to(device)  # move the model to the same device as the inputs
    model.eval()
    for i, N_TST in enumerate(NS):
        tst_data_1 = myData(N=int(N_TST), s_max=S_MAX)
        X_tst_1, Y_tst_1, b0s_tst_1, b1s_tst_1, ss_tst_1 = tst_data_1.get_data()
        # test model on tst_data_1
        b0s_pred_1, b1s_pred_1, _ = model(
            Variable(torch.FloatTensor(Y_tst_1)).to(device))
        b0s_pred_1 = b0s_pred_1.cpu().data.numpy().flatten()
        b1s_pred_1 = b1s_pred_1.cpu().data.numpy().flatten()

        pred_1_cov = calc_cov(b0s_pred_1 - b0s_tst_1, b1s_pred_1 - b1s_tst_1)
        ys[i, 0] = pred_1_cov[0, 0]
        ys[i, 1] = pred_1_cov[0, 1]
        ys[i, 2] = pred_1_cov[1, 0]
        ys[i, 3] = pred_1_cov[1, 1]
    return NS, ys
Example no. 7
def visualize(**kwargs):
    # update configuration from command-line arguments
    opt.parse(kwargs)
    vis = Visualizer(opt.env)
    # step1: model
    model = getattr(models, opt.model)()
    '''
    model_ft = torchvision.models.vgg16_bn(pretrained = True)
    pretrained_dict = model_ft.state_dict()
    model_dict = model.state_dict()
    # drop keys in pretrained_dict that are not in model_dict
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    '''
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu:
        model.cuda()
        summary(model, (3, 224, 224))
    print(opt)
    # step2: data
    train_data = myData(
        filelists=opt.train_filelists,
        #transform = data_transforms['train'],
        scale=opt.cropscale,
        transform=None,
        test=False,
        data_source='none')
    val_data = myData(
        filelists=opt.test_filelists,
        #transform =data_transforms['val'],
        transform=None,
        scale=opt.cropscale,
        test=False,
        data_source='none')

    train_loader = DataLoader(dataset=train_data, batch_size=1, shuffle=False)
    val_loader = DataLoader(dataset=val_data, batch_size=1, shuffle=False)

    dataloaders = {'train': train_loader, 'val': val_loader}
    dataset_sizes = {'train': len(train_data), 'val': len(val_data)}

    imgshape = 64
    imgwidth_num = 32

    def vis_grid(loader, outputjpg, imgshape, imgwidth_num):
        # renamed from `vis` to avoid shadowing the Visualizer instance above
        print(len(loader))
        showtmp = np.zeros((imgshape, imgshape, 3), dtype=np.uint8)
        showall = None
        # blank tiles needed to pad the final row of the grid
        lastnum = imgwidth_num - len(loader) % imgwidth_num
        for step, batch in enumerate(
                tqdm(loader,
                     desc='Visual Cropface On Anti-spoofing',
                     unit='batch')):
            inputs, labels = batch
            inputs = inputs.numpy().squeeze()
            inputs = np.transpose(inputs, (1, 2, 0))
            inputs = np.uint8(inputs)
            inputs = cv2.resize(inputs, (imgshape, imgshape))
            if step % imgwidth_num == 0:
                if showall is not None:
                    showall = np.vstack([showall, showtmp])
                elif step > 0:
                    showall = showtmp
                #print(showtmp.shape)
                showtmp = inputs
            else:
                showtmp = np.hstack([showtmp, inputs])
        #print(showtmp.shape)
        for i in range(lastnum):
            showtmp = np.hstack(
                [showtmp,
                 np.zeros((imgshape, imgshape, 3), dtype=np.uint8)])
        #print(showtmp.shape)
        showall = showtmp if showall is None else np.vstack([showall, showtmp])

        cv2.imwrite(outputjpg, showall)

    vis_grid(train_loader, 'data/showcropface_train.jpg', imgshape, imgwidth_num)
    vis_grid(val_loader, 'data/showcropface_val.jpg', imgshape, imgwidth_num)
Example no. 8
def train(**kwargs):
    # update configuration from command-line arguments
    opt.parse(kwargs)
    # vis = Visualizer(opt.env)
    # step1: model
    model = getattr(mymodels, opt.model)()

    '''
    model_ft = torchvision.models.vgg16_bn(pretrained = True)
    pretrained_dict = model_ft.state_dict()
    model_dict = model.state_dict()
    # drop keys in pretrained_dict that are not in model_dict
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    '''
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu:
        model.cuda()
        summary(model, (3, 224, 224))
    print(opt)
    # step2: data
    train_data = myData(
        filelists=opt.train_filelists,
        # transform = data_transforms['train'],
        scale=opt.cropscale,
        transform=None,
        test=False,
        data_source='none')

    val_data = myData(
        filelists=opt.test_filelists,
        # transform =data_transforms['val'],
        transform=None,
        scale=opt.cropscale,
        test=False, data_source='none')
    train_loader = DataLoader(dataset=train_data,
                              batch_size=opt.batch_size, shuffle=True)
    print(train_loader)
    val_loader = DataLoader(dataset=val_data,
                            batch_size=opt.batch_size, shuffle=False)

    dataloaders = {'train': train_loader, 'val': val_loader}
    dataset_sizes = {'train': len(train_data), 'val': len(val_data)}

    # step3: loss function and optimizer
    criterion = FocalLoss(2)
    # criterion = torch.nn.CrossEntropyLoss()
    lr = opt.lr
    # optimizer = torch.optim.Adam(model.parameters(),
    #                       lr = lr,
    #                       weight_decay = opt.weight_decay)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=opt.lr,
                                momentum=0.5,
                                weight_decay=opt.weight_decay)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                           step_size=opt.lr_stepsize, gamma=0.5)
    # halve the learning rate every opt.lr_stepsize epochs
    # step4: metrics: smoothed losses and the confusion matrix

    confusion_matrix = meter.ConfusionMeter(2)
    train_loss = meter.AverageValueMeter()  # added for visualization
    val_loss = meter.AverageValueMeter()
    train_acc = meter.AverageValueMeter()  # added for visualization
    val_acc = meter.AverageValueMeter()
    previous_loss = 1e100
    best_tpr = 0.0
    best_tpr_epoch = 0
    # training loop
    for epoch in range(opt.max_epoch):
        print('Epoch {}/{}'.format(epoch, opt.max_epoch - 1))
        print('-' * 10)
        train_loss.reset()
        train_acc.reset()
        running_loss = 0.0
        running_corrects = 0
        for step, batch in enumerate(tqdm(train_loader, desc='Train %s On Anti-spoofing' % (opt.model), unit='batch')):
            inputs, labels = batch
            if opt.use_gpu:
                inputs = Variable(inputs.cuda())
                labels = Variable(labels.cuda())
            else:
                inputs = Variable(inputs)
                labels = Variable(labels)
            optimizer.zero_grad()  # zero the parameter gradients
            with torch.set_grad_enabled(True):
                outputs = model(inputs)
                # print(outputs.shape)
                _, preds = torch.max(outputs, 1)

                loss0 = criterion(outputs, labels)
                loss = loss0
                loss.backward()  # backward of gradient
                optimizer.step()  # strategy to drop
                # if step % 20 == 0:
                #     print('epoch:%d/%d step:%d/%d loss: %.4f loss0: %.4f' % (
                #         epoch, opt.max_epoch, step, len(train_loader),
                #         loss.item(), loss0.item()))
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
            '''
            if step % opt.print_freq == opt.print_freq - 1:
                vis.plot('loss', train_loss.value()[0])

                # enter debug mode if needed
                if os.path.exists(opt.debug_file):
                    import ipdb
                    ipdb.set_trace()
            '''
        exp_lr_scheduler.step()  # step the LR schedule after the optimizer steps (PyTorch >= 1.1 order)
        epoch_loss = running_loss / dataset_sizes['train']
        epoch_acc = running_corrects.double() / float(dataset_sizes['train'])
        print('Train Loss: {:.8f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
        train_loss.add(epoch_loss)
        train_acc.add(epoch_acc)

        val_loss.reset()
        val_acc.reset()
        val_cm, v_loss, v_accuracy, metric = val(model, val_loader, dataset_sizes['val'])
        print('Val Loss: {:.8f} Acc: {:.4f}'.format(v_loss, v_accuracy))
        val_loss.add(v_loss)
        val_acc.add(v_accuracy)

        eer, tprs, auc, xy_dic = metric
        tpr1 = tprs['TPR(1.%)']

        # vis.plot_many_stack({'train_loss':train_loss.value()[0],\
        # 				'val_loss':val_loss.value()[0]},win_name ="Loss")
        # vis.plot_many_stack({'train_acc':train_acc.value()[0],\
        # 				'val_acc':val_acc.value()[0]},win_name = 'Acc')
        # vis.log("epoch:{epoch},lr:{lr},\
        # 		train_loss:{train_loss},train_acc:{train_acc},\
        # 		val_loss:{val_loss},val_acc:{val_acc},\
        # 		train_cm:{train_cm},val_cm:{val_cm}"
        # .format(
        # 		   epoch = epoch,
        # 		   train_loss = train_loss.value()[0],
        # 		   train_acc = train_acc.value()[0],
        # 		   val_loss = val_loss.value()[0],
        # 		   val_acc = val_acc.value()[0],
        # 		   train_cm=str(confusion_matrix.value()),
        # 		   val_cm = str(val_cm.value()),
        # 		   lr=lr))
        '''
        if v_loss > previous_loss:
            lr = lr * opt.lr_decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        '''
        # vis.plot_many_stack({'lr':lr},win_name ='lr')
        previous_loss = val_loss.value()[0]
        if tpr1 > best_tpr:
            best_tpr = tpr1
            best_tpr_epoch = epoch
            # best_model_wts = model.state_dict()
        os.makedirs(os.path.join('checkpoints', opt.model), exist_ok=True)
        model.save(name='checkpoints/' + opt.model + '/' + str(epoch) + '.pth')
        # print('Epoch: {:d} Val Loss: {:.8f} Acc: {:.4f}'.format(epoch, v_loss, v_accuracy), file=open('result/val.txt', 'a'))
        log_line = ('Epoch: {:d} Val Loss: {:.8f} Acc: {:.4f} EER: {:.6f} '
                    'TPR(1.0%): {:.6f} TPR(.5%): {:.6f} AUC: {:.8f}').format(
                        epoch, v_loss, v_accuracy, eer, tprs["TPR(1.%)"],
                        tprs["TPR(.5%)"], auc)
        with open('D:\\dingding\\xiazai\\test\\val.txt', 'a') as f:
            print(log_line, file=f)
        print(log_line)
    # model.load_state_dict(best_model_wts)
    print('Best val Epoch: {}, Best val TPR: {:.4f}'.format(best_tpr_epoch, best_tpr))
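
FocalLoss(2) above is project-local and not shown; whether its constructor argument is the focusing parameter gamma or the class count is an assumption here. A minimal sketch treating it as gamma:

# Hypothetical sketch of the project-local FocalLoss; assumes the
# constructor argument is the focusing parameter gamma.
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    def __init__(self, gamma=2.0):
        super().__init__()
        self.gamma = gamma

    def forward(self, logits, targets):
        logp = F.log_softmax(logits, dim=-1)
        logp_t = logp.gather(1, targets.unsqueeze(1)).squeeze(1)
        p_t = logp_t.exp()
        # (1 - p_t)^gamma down-weights easy, confidently classified examples
        return (-(1.0 - p_t) ** self.gamma * logp_t).mean()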
Example no. 9
import numpy as np
import torch
import pylab

from model import myNet
from data import myData

np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
N = 10000

N_EPOCHS = 1000

S_MAX = 0.25

training_data = myData(N=N, s_max=S_MAX)
X, Y, b0s, b1s, ss = training_data.get_data()


def print_dropout(model):
    print('')
    print('Model parameters:')
    for module in model.modules():
        if hasattr(module, 'p_logit'):
            print('drop rate = {:1.1e}'.format(
                torch.sigmoid(module.p_logit).cpu().data.numpy().flatten()[0]))
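
print_dropout above looks for modules that expose a learnable p_logit, as in Concrete Dropout (Gal et al., 2017). A minimal sketch of such a layer, under the assumption that the set_p used in the other examples simply overwrites this logit:

# Hypothetical dropout layer matching what print_dropout (and set_p in
# the other examples) expect: the drop rate is stored as a learnable logit.
class LearnableDropout(torch.nn.Module):
    def __init__(self, init_p=0.1):
        super().__init__()
        logit = np.log(init_p / (1.0 - init_p))
        self.p_logit = torch.nn.Parameter(torch.tensor(float(logit)))

    def set_p(self, p):
        # overwrite the learned rate, as the sweep examples assume
        with torch.no_grad():
            self.p_logit.fill_(np.log(p / (1.0 - p)))

    def forward(self, x):
        p = torch.sigmoid(self.p_logit)
        # plain Bernoulli dropout at the learned rate (the relaxed/concrete
        # mask of the original method is omitted for brevity)
        mask = torch.bernoulli(torch.full_like(x, 1.0 - float(p)))
        return x * mask / (1.0 - p)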


def plot_pred_vs_gt(gt, pred_lsqm, pred_model, param, title=''):
    _, (ax1, ax2) = pylab.subplots(1, 2, sharex=True, sharey=True)
    ax1.scatter(gt, pred_lsqm, alpha=0.2)