Example #1
def train_test():

    print("test:\n ")

    #model = CDCNpp(basic_conv=Conv2d_cd, theta=0.7)
    model = CDCNpp(basic_conv=Conv2d_cd, theta=args.theta)
Example #2
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = CDCNpp()
    model.load_state_dict(torch.load(args.model_path))
    model.eval()
    model = model.to(device)

    image = cv2.resize(cv2.imread(args.image_path), (256, 256))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # normalize into [-1, 1]
    image = (image - 127.5) / 128
    # convert to tensor
    image = transforms.functional.to_tensor(image)
    image.unsqueeze_(0)
    image = image.to(device, dtype=torch.float)
    # Get outputs
    depth_map, embedding, cv_block1, cv_block2, cv_block3, input_image = model(
        image)
    # Save feature maps if you want
    FeatureMap2Heatmap(input_image, cv_block1, cv_block2, cv_block3, depth_map)
    liveness_score = torch.sum(depth_map)
    depth_map = depth_map.cpu().detach().numpy()
    print(f"Liveness Score: {liveness_score:.2f} Threshold = 148.90")
    prediction = "Real" if liveness_score > 148.90 else "Fake"
    print(f"Prediction: {prediction}")
Example #3
def train_test():
    # GPU  & log file  -->   if use DataParallel, please comment this command
    #os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % (args.gpu)

    isExists = os.path.exists(args.log)
    if not isExists:
        os.makedirs(args.log)
    log_file = open(args.log + '/' + args.log + '_log_P1.txt', 'w')
    
    echo_batches = args.echo_batches

    print("Oulu-NPU, P1:\n ")

    log_file.write('Oulu-NPU, P1:\n ')
    log_file.flush()

    # load the network, load the pre-trained model in UCF101?
    finetune = args.finetune
    if finetune == True:
        print('finetune!\n')
        log_file.write('finetune!\n')
        log_file.flush()
            
        model = CDCN()
        #model = model.cuda()
        model = model.to(device[0])
        model = nn.DataParallel(model, device_ids=device, output_device=device[0])
        model.load_state_dict(torch.load('xxx.pkl'))

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
        

    else:
        print('train from scratch!\n')
        log_file.write('train from scratch!\n')
        log_file.flush()


        
        #model = CDCN(basic_conv=Conv2d_cd, theta=0.7)
        model = CDCNpp(basic_conv=Conv2d_cd, theta=0.7)

        model = model.cuda()
        #model = model.to(device[0])
        #model = nn.DataParallel(model, device_ids=device, output_device=device[0])

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
def build_network(cfg):
    """ Build the network based on the cfg
    Args:
        cfg (dict): a dict of configuration
    Returns:
        network (nn.Module) 
    """
    network = None

    if cfg['model']['base'] == 'CDCNpp':
        network = CDCNpp()
    elif cfg['model']['base'] == 'CDCN':
        network = CDCN()
    else:
        raise NotImplementedError

    return network
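
A quick usage sketch for build_network; only cfg['model']['base'] is read, so a minimal config dict is enough:

cfg = {'model': {'base': 'CDCNpp'}}   # or 'CDCN'
network = build_network(cfg)
print(type(network).__name__)         # -> CDCNpp
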
def train_test():
    # GPU  & log file  -->   if use DataParallel, please comment this command
    os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % (args.gpu)

    isExists = os.path.exists(args.log)
    if not isExists:
        os.makedirs(args.log)
    log_file = open(args.log + '/' + args.log + '_log.txt', 'w')

    echo_batches = args.echo_batches

    print("Oulu-NPU, P1:\n ")

    log_file.write('Oulu-NPU, P1:\n ')
    log_file.flush()

    # load the network, load the pre-trained model in UCF101?
    finetune = args.finetune
    if finetune == True:
        print('finetune!\n')
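        # NOTE: in this example the finetune branch only logs; model, optimizer
        # and scheduler are constructed in the else branch below.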

    else:
        print('train from scratch!\n')
        log_file.write('train from scratch!\n')
        log_file.flush()

        #model = CDCNpp( basic_conv=Conv2d_cd, theta=0.7)
        model = CDCNpp(basic_conv=Conv2d_cd, theta=args.theta)
        #model = CDCN( basic_conv=Conv2d_cd, theta=args.theta)
        #model = CDCNpp1( basic_conv=Conv2d_cd, theta=args.theta)

        model = model.cuda()

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=args.step_size,
                                              gamma=args.gamma)

    print(model)

    criterion_absolute_loss = nn.MSELoss().cuda()
    criterion_contrastive_loss = Contrast_depth_loss().cuda()

    ACER_save = 1.0

    MSELoss_list = []

    Contrast_depth_loss_list = []
    accuracy_list = []
    accuracy1_list = []
    total_loss_list = []
    for epoch in range(args.epochs):  # loop over the dataset multiple times
        # meanLoss = []

        scheduler.step()
        if (epoch + 1) % args.step_size == 0:
            lr *= args.gamma

        loss_absolute = AvgrageMeter()
        loss_contra = AvgrageMeter()
        loss_total = AvgrageMeter()

        model.train()

        # load random 16-frame clip data every epoch

        train_data = Spoofing_train("/home/chang/dataset/oulu/train_face1/",
                                    transform=transforms.Compose([
                                        RandomErasing(),
                                        RandomHorizontalFlip(),
                                        ToTensor(),
                                        Cutout(),
                                        Normaliztion()
                                    ]))
        #/home/chang/dataset/oulu/train_face1/
        #train_data = Spoofing_train("../../../../trainset", transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
        dataloader_train = DataLoader(train_data,
                                      batch_size=args.batchsize,
                                      shuffle=True,
                                      num_workers=4)

        for i, sample_batched in enumerate(dataloader_train):
            # get the inputs
            inputs, binary_mask, spoof_label = sample_batched['image_x'].cuda(
            ), sample_batched['binary_mask'].cuda(
            ), sample_batched['spoofing_label'].cuda()
            # inputs, binary_mask, spoof_label = sample_batched['image_x'], sample_batched['binary_mask'], sample_batched['spoofing_label']

            optimizer.zero_grad()

            # forward + backward + optimize
            map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                inputs)

            #pdb.set_trace()
            #pdb.set_trace()
            absolute_loss = criterion_absolute_loss(map_x, binary_mask)

            contrastive_loss = criterion_contrastive_loss(map_x, binary_mask)

            loss = absolute_loss + contrastive_loss

            #loss.update(loss.item(), n)
            #total_loss_list.append(loss.item())

            loss.backward()

            optimizer.step()

            n = inputs.size(0)
            loss_absolute.update(absolute_loss.data, n)
            loss_contra.update(contrastive_loss.data, n)
            loss_total.update(loss.data, n)
            #total_loss.append(loss.item())
            torch.cuda.empty_cache()
            # if i > 1:
            #     break
            # print(np.mean(meanLoss))

            if i % echo_batches == echo_batches - 1:  # print every echo_batches mini-batches

                #         # visualization
                #         #FeatureMap2Heatmap(x_input, x_Block1, x_Block2, x_Block3, map_x)

                #         # log written
                print(
                    'epoch:%d, mini-batch:%3d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f'
                    %
                    (epoch + 1, i + 1, lr, loss_absolute.avg, loss_contra.avg))

        #     #break

        # # whole epoch average
        print(
            'epoch:%d, Train:  Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f, loss= %.4f \n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg, loss_total.avg))
        MSELoss_list.append(loss_absolute.avg)
        Contrast_depth_loss_list.append(loss_contra.avg)
        total_loss_list.append(loss_total.avg)

        log_file.write(
            'epoch:%d, Train: Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.write('loss= %.4f \n' % loss_total.avg)
        log_file.flush()

        threshold = 0.5

        # epoch_test = 1
        # if epoch>25 and epoch % 5 == 0:
        if True:
            model.eval()
            meanAcT = []
            meanAcF = []
            with torch.no_grad():

                #rootDir = "D:/dataset/oulu/oulu/trainset/"
                val_data = Spoofing_test("/home/chang/dataset/oulu/dev_face1/",
                                         transform=transforms.Compose(
                                             [Normaliztion(),
                                              ToTensor()]))
                #/home/chang/dataset/oulu/dev_face1/
                #val_data = Spoofing_train("../../../../devset", transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
                # val_data = Spoofing_valtest(image_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                test_ba = 1
                dataloader_val = DataLoader(val_data,
                                            batch_size=test_ba,
                                            shuffle=False,
                                            num_workers=4)

                # map_score_list = []

                num = 0
                for i, sample_batched in enumerate(dataloader_val):
                    #             # get the inputs
                    inputs, binary_mask, spoof_label = sample_batched[
                        'image_x'].cuda(), sample_batched['binary_mask'].cuda(
                        ), sample_batched['spoofing_label'].cuda()
                    # inputs = sample_batched['image_x'].cuda()
                    # binary_mask = sample_batched['binary_mask'].cuda()

                    optimizer.zero_grad()
                    map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs)
                    # map_x shape: batch,N,N
                    #pre_label = 0

                    for ba in range(test_ba):
                        pre_label = 0
                        map_score = torch.sum(map_x[ba]) / (32 * 32)
                        if map_score >= threshold:
                            pre_label = 1
                        if pre_label == spoof_label:
                            num += 1
                        if spoof_label != 1:
                            meanAcF.append(1 - map_score.item())
                        else:
                            meanAcT.append(map_score.item())
                        # print(spoof_label,map_score)

                    torch.cuda.empty_cache()

            with torch.no_grad():

                #rootDir = "D:/dataset/oulu/oulu/trainset/"
                val_data = Spoofing_test1("/home/chang/dataset/B_face1/",
                                          transform=transforms.Compose(
                                              [ToTensor(),
                                               Normaliztion()]))
                #val_data = Spoofing_train("../../../../devset", transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
                # val_data = Spoofing_valtest(image_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                test_ba = 1
                dataloader_val1 = DataLoader(val_data,
                                             batch_size=test_ba,
                                             shuffle=False,
                                             num_workers=4)

                # map_score_list = []

                num1 = 0
                for i, sample_batched in enumerate(dataloader_val1):
                    #             # get the inputs
                    inputs, binary_mask, spoof_label = sample_batched[
                        'image_x'].cuda(), sample_batched['binary_mask'].cuda(
                        ), sample_batched['spoofing_label'].cuda()
                    # inputs = sample_batched['image_x'].cuda()
                    # binary_mask = sample_batched['binary_mask'].cuda()

                    optimizer.zero_grad()
                    map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs)
                    # map_x shape: batch,N,N
                    #pre_label = 0

                    for ba in range(test_ba):
                        pre_label = 0
                        map_score = torch.sum(map_x[ba]) / (32 * 32)
                        if map_score >= threshold:
                            pre_label = 1
                        if pre_label == spoof_label:
                            num1 += 1

                        # print(spoof_label,map_score)

                    torch.cuda.empty_cache()

        #     # save the model until the next improvement
            print("TP", np.mean(meanAcT))
            print("TN", np.mean(meanAcF))
            meanAcT = np.array(meanAcT)
            meanAcF = np.array(meanAcF)
            TP = len(meanAcT[meanAcT > threshold])
            TN = len(meanAcF[meanAcF > 1 - threshold])
            acc = (TP + TN) / (len(meanAcF) + len(meanAcT))
            accuracy = num / len(dataloader_val)
            print("ACC", acc, ":T ", TP, ":", len(meanAcT), " F ", TN, ":",
                  len(meanAcF))
            print("ACCURACY:", accuracy)
            accuracy_list.append(accuracy)

            accuracy1 = num1 / len(dataloader_val1)
            accuracy1_list.append(accuracy1)
            print("ACCURACY1:", accuracy1)

            log_file.write('val: TP= %.4f,:%.4f TN= %.4f,:%.4f ACC=%.4f \n' %
                           (TP, len(meanAcT), TN, len(meanAcF), acc))
            log_file.write('ACCURACY = %.4f \n' % accuracy)
            log_file.write('ACCURACY1 = %.4f \n' % accuracy1)

            #print(args.log+'/'+args.log)
            torch.save(model.state_dict(),
                       args.log + '/' + args.log + '_%d.pkl' % (epoch + 1))
            # break

    print('Finished Training')
    log_file.close()

    plt.plot(MSELoss_list, label="MSELoss")
    plt.plot(Contrast_depth_loss_list, label="depth_loss")
    plt.plot(total_loss_list, label="total_loss")
    plt.title("loss")
    plt.legend()
    plt.show()

    plt.plot(accuracy_list, label="accuracy")
    plt.plot(accuracy1_list, label="accuracy1")
    #plt.plot(total_loss_list, label = "total_loss")
    plt.title("accuracy")
    plt.legend()
    plt.show()
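
The loops above track losses with an AvgrageMeter that is not included in these excerpts. A minimal sketch matching the update(val, n) / .avg interface used here (an assumed implementation, not copied from the repository):

class AvgrageMeter(object):
    """Running average over a stream of (value, count) updates."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.avg = 0.0
        self.sum = 0.0
        self.cnt = 0

    def update(self, val, n=1):
        # val is a per-batch mean; weight it by the batch size n
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt
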
Example #6
def train_test():
    # GPU  & log file  -->   if use DataParallel, please comment this command
    #os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % (args.gpu)

    if args.exp_name == 'None':
        args.exp_name = time.strftime("%m-%d %H:%M:%S", time.localtime())

    args.log = 'exp/{}/{}'.format(args.exp_name, args.log)

    isExists = os.path.exists(args.log)
    if not isExists:
        os.makedirs(args.log)
    log_file = open(args.log + '_log_P1.txt', 'w')

    writer = SummaryWriter(args.log + '/runs/')

    echo_batches = args.echo_batches

    print(args)
    log_file.write(str(args) + '\n')
    log_file.flush()

    print("Oulu-NPU, P1:\n ")

    log_file.write('Oulu-NPU, P1:\n ')
    log_file.flush()

    # load the network, load the pre-trained model in UCF101?
    is_load_model = args.is_load_model
    if is_load_model == True:
        print('loading model...\n')
        print(args.model_path)
        log_file.write('loading model...\n')
        log_file.write(args.model_path)
        log_file.flush()

        model = CDCNpp(basic_conv=Conv2d_cd, theta=0.7)

        model = nn.DataParallel(model)
        model = model.cuda()
        model.load_state_dict(torch.load(args.model_path))

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        milestones = [
            int(step.strip()) for step in args.lr_drop_step.split(' ')
        ]
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=milestones,
                                                   gamma=args.gamma)

    else:
        print('train from scratch!\n')
        log_file.write('train from scratch!\n')
        log_file.flush()

        #model = CDCN(basic_conv=Conv2d_cd, theta=0.7)
        model = CDCNpp(basic_conv=Conv2d_cd, theta=0.7)
        model = nn.DataParallel(model)
        model = model.cuda()

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        milestones = [
            int(step.strip()) for step in args.lr_drop_step.split(' ')
        ]
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=milestones,
                                                   gamma=args.gamma)

    # print(model)
    # log_file.write(str(model))
    # log_file.flush()

    criterion_absolute_loss = nn.MSELoss().cuda()
    criterion_contrastive_loss = Contrast_depth_loss().cuda()

    #bandpass_filter_numpy = build_bandpass_filter_numpy(30, 30)  # fs, order  # 61, 64

    ACER_save = 1.0

    iteration = 0
    for epoch in range(args.epochs):  # loop over the dataset multiple times
        scheduler.step()

        if epoch < args.start_epochs:
            continue

        lr = scheduler.get_lr()[0]

        loss_absolute = AvgrageMeter()
        loss_contra = AvgrageMeter()

        ###########################################
        '''                train             '''
        ###########################################
        model.train()

        # load random 16-frame clip data every epoch
        # train_data = Spoofing_train(train_list, train_image_dir, map_dir, transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
        train_data = Fas_train(train_list,
                               transform=transforms.Compose([
                                   RandomErasing(),
                                   RandomHorizontalFlip(),
                                   ToTensor(),
                                   Cutout(),
                                   Normaliztion()
                               ]))
        dataloader_train = DataLoader(train_data,
                                      batch_size=args.batchsize,
                                      shuffle=True,
                                      num_workers=4)
        print('train_set read done!')

        for i, sample_batched in enumerate(dataloader_train):
            iteration += 1

            # get the inputs
            inputs, map_label, spoof_label = sample_batched['image_x'].cuda(
            ), sample_batched['map_x'].cuda(
            ), sample_batched['spoofing_label'].cuda()

            optimizer.zero_grad()

            # forward + backward + optimize
            map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                inputs)

            absolute_loss = criterion_absolute_loss(map_x, map_label)
            contrastive_loss = criterion_contrastive_loss(map_x, map_label)

            loss = absolute_loss + contrastive_loss
            #loss =  absolute_loss

            loss.backward()

            optimizer.step()

            n = inputs.size(0)
            loss_absolute.update(absolute_loss.data, n)
            loss_contra.update(contrastive_loss.data, n)

            if i % echo_batches == echo_batches - 1:  # print every echo_batches mini-batches

                # visualization
                FeatureMap2Heatmap(x_input, x_Block1, x_Block2, x_Block3,
                                   map_x)

                # log written
                print(
                    'epoch:%d, mini-batch:%3d/%4d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f'
                    % (epoch + 1, i + 1, len(dataloader_train), lr,
                       loss_absolute.avg, loss_contra.avg))
                log_file.write(
                    'epoch:%d, mini-batch:%3d/%4d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n'
                    % (epoch + 1, i + 1, len(dataloader_train), lr,
                       loss_absolute.avg, loss_contra.avg))
                log_file.flush()

            writer.add_scalar('loss', loss_absolute.avg + loss_contra.avg,
                              iteration)
            writer.add_scalar('lr', lr, iteration)

        # whole epoch average
        print(
            'epoch:%d, Train:  Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f\n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.write(
            'epoch:%d, Train: Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.flush()

        # #### validation/test
        # if epoch <300:
        #      epoch_test = 300
        # else:
        #     epoch_test = 5
        # #epoch_test = 1
        if epoch % args.epoch_test == args.epoch_test - 1:  # test every args.epoch_test epochs

            model.eval()

            with torch.no_grad():
                ###########################################
                '''                val             '''
                ###########################################
                # val for threshold
                # val_data = Spoofing_valtest(val_list, val_image_dir, val_map_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                val_data = Fas_valtest(val_list,
                                       transform=transforms.Compose([
                                           Normaliztion_valtest(),
                                           ToTensor_valtest()
                                       ]),
                                       mode='val')
                dataloader_val = DataLoader(val_data,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=8)

                map_score_list = []
                print('start to validate...')
                for i, sample_batched in enumerate(dataloader_val):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    val_maps = sample_batched['val_map_x'].cuda(
                    )  # binary map from PRNet

                    optimizer.zero_grad()

                    #pdb.set_trace()
                    map_score = 0.0
                    for frame_t in range(inputs.shape[1]):
                        map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                            inputs[:, frame_t, :, :, :])

                        score_norm = torch.sum(map_x) / torch.sum(
                            val_maps[:, frame_t, :, :])
                        map_score += score_norm
                    map_score = map_score / inputs.shape[1]

                    map_score_list.append('{} {}\n'.format(
                        map_score, spoof_label[0][0]))
                    #pdb.set_trace()

                    if i % (len(dataloader_val) // 5) == 0:
                        # visualization
                        FeatureMap2Heatmap(x_input, x_Block1, x_Block2,
                                           x_Block3, map_x)
                        # log written
                        print('val ==> epoch:%d, mini-batch:%3d/%4d...' %
                              (epoch + 1, i + 1, len(dataloader_val)))
                        log_file.write(
                            'val ==> epoch:%d, mini-batch:%3d/%4d...' %
                            (epoch + 1, i + 1, len(dataloader_val)))
                        log_file.flush()

                map_score_val_filename = args.log + '_map_score_val.txt'
                with open(map_score_val_filename, 'w') as file:
                    file.writelines(map_score_list)

                ###########################################
                '''                test                '''
                ##########################################
                # test for ACC
                test_data = Fas_valtest(test_list,
                                        transform=transforms.Compose([
                                            Normaliztion_valtest(),
                                            ToTensor_valtest()
                                        ]),
                                        mode='test')
                dataloader_test = DataLoader(test_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=4)

                map_score_list = []
                print('start to test...')
                for i, sample_batched in enumerate(dataloader_test):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    test_maps = sample_batched['val_map_x'].cuda(
                    )  # binary map from PRNet

                    optimizer.zero_grad()

                    map_score = 0.0
                    for frame_t in range(inputs.shape[1]):
                        map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                            inputs[:, frame_t, :, :, :])

                        score_norm = torch.sum(map_x) / torch.sum(
                            test_maps[:, frame_t, :, :])
                        map_score += score_norm
                    map_score = map_score / inputs.shape[1]

                    map_score_list.append('{} {}\n'.format(
                        map_score, spoof_label[0][0]))

                    if i % (len(dataloader_test) // 5) == 0:
                        # visualization
                        FeatureMap2Heatmap(x_input, x_Block1, x_Block2,
                                           x_Block3, map_x)
                        # log written
                        print('test ==> epoch:%d, mini-batch:%3d/%4d...' %
                              (epoch + 1, i + 1, len(dataloader_test)))
                        log_file.write(
                            'test ==> epoch:%d, mini-batch:%3d/%4d...' %
                            (epoch + 1, i + 1, len(dataloader_test)))
                        log_file.flush()

                map_score_test_filename = args.log + '_map_score_test.txt'
                with open(map_score_test_filename, 'w') as file:
                    file.writelines(map_score_list)

                #############################################################
                #       performance measurement both val and test
                #############################################################
                val_threshold, test_threshold, val_ACC, val_ACER, test_ACC, test_APCER, test_BPCER, test_ACER, test_ACER_test_threshold = performances(
                    map_score_val_filename, map_score_test_filename)

                print(
                    'epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))
                log_file.write(
                    '\n epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f \n'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))

                print(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                log_file.write(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f \n'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                log_file.flush()

                writer.add_scalar('val_ACER', val_ACER, iteration)
                writer.add_scalar('test_ACER', test_ACER, iteration)

            # save the model until the next improvement
            print("saving model to {}".format(args.log +
                                              '_%d.pkl'.format(epoch + 1)))
            log_file.write("saving model to {}".format(args.log + '_%d.pkl' %
                                                       (epoch + 1)))
            log_file.flush()
            torch.save(model.state_dict(), args.log + '_%d.pkl' % (epoch + 1))

    print('Finished Training')
    writer.close()
    log_file.close()
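
For reference, the ACER reported by performances() is conventionally the mean of APCER and BPCER. An illustrative helper (not taken from the repository):

def acer(apcer, bpcer):
    # Average Classification Error Rate: mean of attack-side and bona fide-side error rates
    return (apcer + bpcer) / 2.0
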
Example #7
def train_test():
    # GPU  & log file  -->   if use DataParallel, please comment this command
    #os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % (args.gpu)

    isExists = os.path.exists(args.log)
    if not isExists:
        os.makedirs(args.log)
    log_file = open(args.log + '/' + args.log + f'_log_{args.protocol}.txt',
                    'w')

    echo_batches = args.echo_batches

    print(f"SIW, {args.protocol}:\n ")

    log_file.write(f"SIW, {args.protocol}:\n ")
    log_file.flush()

    # load the network, load the pre-trained model in UCF101?
    finetune = args.finetune
    if finetune == True:
        print('finetune!\n')
        log_file.write('finetune!\n')
        log_file.flush()

        model = CDCNpp()
        #model = model.cuda()
        model = model.to(device[0])
        model = nn.DataParallel(model,
                                device_ids=device,
                                output_device=device[0])
        model.load_state_dict(torch.load('xxx.pkl'))

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=args.step_size,
                                              gamma=args.gamma)

    else:
        print('train from scratch!\n')
        log_file.write('train from scratch!\n')
        log_file.flush()

        #model = CDCN(basic_conv=Conv2d_cd, theta=0.7)
        model = CDCNpp(basic_conv=Conv2d_cd, theta=0.7)

        model = model.cuda()
        #model = model.to(device[0])
        #model = nn.DataParallel(model, device_ids=device, output_device=device[0])

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=args.step_size,
                                              gamma=args.gamma)

    print(model)

    criterion_absolute_loss = nn.MSELoss().cuda()
    criterion_contrastive_loss = Contrast_depth_loss().cuda()

    #bandpass_filter_numpy = build_bandpass_filter_numpy(30, 30)  # fs, order  # 61, 64

    ACER_save = 1.0

    for epoch in range(args.epochs):  # loop over the dataset multiple times
        scheduler.step()
        if (epoch + 1) % args.step_size == 0:
            lr *= args.gamma

        loss_absolute = AvgrageMeter()
        loss_contra = AvgrageMeter()
        #top5 = utils.AvgrageMeter()

        ###########################################
        '''                train             '''
        ###########################################
        model.train()

        # load random 16-frame clip data every epoch
        #train_data = Spoofing_train(train_list, train_image_dir, map_dir, transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
        train_data = SiwDataset(
            "train",
            dir_path="/storage/alperen/sodecsapp/datasets/SiW/lists",
            protocol=args.protocol,
            transform=transforms.Compose([
                RandomErasing(),
                RandomHorizontalFlip(),
                ToTensor(),
                Cutout(),
                Normaliztion()
            ]))
        #train_data = SodecDataset(dataset_type="train",dir_path="dataset_with_margin",protocol=args.protocol, transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Normaliztion()]))
        dataloader_train = DataLoader(train_data,
                                      batch_size=args.batchsize,
                                      shuffle=True,
                                      num_workers=4)

        for i, sample_batched in enumerate(dataloader_train):
            # get the inputs
            inputs, map_label, spoof_label = sample_batched['image_x'].cuda(
            ), sample_batched['map_x'].cuda(
            ), sample_batched['spoofing_label'].cuda()

            optimizer.zero_grad()

            #pdb.set_trace()

            # forward + backward + optimize
            map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                inputs)

            absolute_loss = criterion_absolute_loss(map_x, map_label)
            contrastive_loss = criterion_contrastive_loss(map_x, map_label)

            loss = absolute_loss + contrastive_loss
            #loss =  absolute_loss

            loss.backward()

            optimizer.step()

            n = inputs.size(0)
            loss_absolute.update(absolute_loss.data, n)
            loss_contra.update(contrastive_loss.data, n)

            if i % echo_batches == echo_batches - 1:  # print every echo_batches mini-batches

                # visualization
                FeatureMap2Heatmap(x_input, x_Block1, x_Block2, x_Block3,
                                   map_x)

                # log written
                print(
                    'epoch:%d, mini-batch:%3d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f'
                    %
                    (epoch + 1, i + 1, lr, loss_absolute.avg, loss_contra.avg))
                #log_file.write('epoch:%d, mini-batch:%3d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n' % (epoch + 1, i + 1, lr, loss_absolute.avg, loss_contra.avg))
                #log_file.flush()

            #break

        # whole epoch average
        print(
            'epoch:%d, Train:  Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f\n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.write(
            'epoch:%d, Train: Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.flush()

        #### validation/test
        """
        if epoch <300:
             epoch_test = 300   
        else:
            epoch_test = 20   
        """
        epoch_test = 1
        if epoch % epoch_test == epoch_test - 1:  # test every epoch_test epochs
            model.eval()

            with torch.no_grad():
                ###########################################
                '''                val             '''
                ###########################################
                # val for threshold
                #val_data = Spoofing_valtest(val_list, val_image_dir, val_map_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                val_data = SiwDataset(
                    "dev",
                    dir_path="/storage/alperen/sodecsapp/datasets/SiW/lists",
                    protocol=args.protocol,
                    transform=transforms.Compose(
                        [Normaliztion_valtest(),
                         ToTensor_valtest()]))
                #val_data = SodecDataset(dataset_type="test",dir_path="dataset_with_margin",protocol=args.protocol,transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                dataloader_val = DataLoader(val_data,
                                            batch_size=args.batchsize,
                                            shuffle=False,
                                            num_workers=4)

                map_score_list = []

                for i, sample_batched in enumerate(dataloader_val):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    val_maps = sample_batched['map_x'].cuda(
                    )  # binary map from PRNet

                    optimizer.zero_grad()

                    #pdb.set_trace()
                    map_score = 0.0
                    map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs)
                    score_norm = torch.sum(map_x, (1, 2))

                    for j, score in enumerate(score_norm):
                        map_score_list.append('{} {}\n'.format(
                            score.item(), spoof_label[j].item()))

                    #pdb.set_trace()
                map_score_val_filename = args.log + '/' + args.protocol + '_map_score_val.txt'
                with open(map_score_val_filename, 'w') as file:
                    file.writelines(map_score_list)

                ###########################################
                '''                test             '''
                ##########################################
                # test for ACC
                #test_data = Spoofing_valtest(test_list, test_image_dir, test_map_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                test_data = SiwDataset(
                    "eval",
                    dir_path="/storage/alperen/sodecsapp/datasets/SiW/lists",
                    protocol=args.protocol,
                    transform=transforms.Compose(
                        [Normaliztion_valtest(),
                         ToTensor_valtest()]))
                #test_data = SodecDataset(dataset_type="test",dir_path="dataset_with_margin",protocol=args.protocol,transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                dataloader_test = DataLoader(test_data,
                                             batch_size=args.batchsize,
                                             shuffle=False,
                                             num_workers=4)

                map_score_list = []

                for i, sample_batched in enumerate(dataloader_test):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    test_maps = sample_batched['map_x'].cuda(
                    )  # binary map from PRNet

                    optimizer.zero_grad()

                    #pdb.set_trace()
                    map_score = 0.0
                    map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs)
                    score_norm = torch.sum(map_x, (1, 2))

                    for j, score in enumerate(score_norm):
                        map_score_list.append('{} {}\n'.format(
                            score.item(), spoof_label[j].item()))

                map_score_test_filename = args.log + '/' + args.protocol + '_map_score_test.txt'
                with open(map_score_test_filename, 'w') as file:
                    file.writelines(map_score_list)

                #############################################################
                #       performance measurement both val and test
                #############################################################
                val_threshold, test_threshold, val_ACC, val_ACER, test_ACC, test_APCER, test_BPCER, test_ACER, test_ACER_test_threshold = performances(
                    map_score_val_filename, map_score_test_filename)

                print(
                    'epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))
                log_file.write(
                    '\n epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f \n'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))

                print(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                #print('epoch:%d, Test:  test_threshold= %.4f, test_ACER_test_threshold= %.4f\n' % (epoch + 1, test_threshold, test_ACER_test_threshold))
                log_file.write(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f \n'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                #log_file.write('epoch:%d, Test:  test_threshold= %.4f, test_ACER_test_threshold= %.4f \n\n' % (epoch + 1, test_threshold, test_ACER_test_threshold))
                log_file.flush()

        if epoch > 0:
            #save the model until the next improvement
            torch.save(model.state_dict(),
                       args.log + '/' + args.log + '_%d.pkl' % (epoch + 1))

    print('Finished Training')
    log_file.close()
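
The *_map_score_val.txt / *_map_score_test.txt files written above hold one "<map_score> <spoofing_label>" pair per line. A hypothetical helper (names are illustrative) showing how such a file could be scored at a fixed threshold:

def accuracy_at_threshold(score_file, threshold):
    """Read '<score> <label>' lines, as written above, and compute accuracy."""
    correct = total = 0
    with open(score_file) as f:
        for line in f:
            score, label = line.split()
            pred = 1 if float(score) >= threshold else 0
            correct += int(pred == int(float(label)))
            total += 1
    return correct / total if total else 0.0
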
Example #8
                #if (img.split(".")[0]).split("(")[0] != 'zheng ':
                    #num_right += 1
                continue
                # plot_image(image)
            else:
                face_img = recognize.crop_faces(image)[0]
                save_path = save_folder + '/' + str(img)
                face_img.save(save_path)

    # num_directly_N = num_right
    # print("num_directly_TN: %d" % num_directly_N)
    # print("num_failed: %d" % num_failed)

    test_face_path = save_folder + "/"

    model = CDCNpp(basic_conv=Conv2d_cd, theta=args.theta)
    # model = CDCN( basic_conv=Conv2d_cd, theta=args.theta)
    # model = CDCNpp1( basic_conv=Conv2d_cd, theta=args.theta)

    model.load_state_dict(torch.load('load_model.pkl'))
    model = model.cuda()

    # print(model)
    model.eval()

    # meanAcT = []
    # meanAcF = []
    with torch.no_grad():

        val_data = Spoofing_test1(test_face_path,
                                   transform=transforms.Compose([Normaliztion(), ToTensor()]))