Example #1
def aggregation(data_train, data_test, args, clientIDs, model, optimizer):
    mean_train_acc, mean_train_loss = AvgrageMeter(), AvgrageMeter()
    mean_test_acc, mean_test_loss = AvgrageMeter(), AvgrageMeter()

    initial_weights = copy.deepcopy(model.state_dict())

    num_examples = []
    weight_dict_list = []

    for clientID in clientIDs:
        model.load_state_dict(initial_weights)
        model, train_acc, train_loss, test_acc_list, test_loss_list = lstm_train(
            data_train[clientID], data_test[clientID], args, model, optimizer,
            1)
        num_examples.append(len(data_train[clientID]))
        # save a copy of this client's updated weights
        weight_dict_list.append(copy.deepcopy(model.state_dict()))
        mean_train_acc.update(train_acc, 1)
        mean_train_loss.update(train_loss, 1)
        mean_test_acc.update(test_acc_list[-1], 1)
        mean_test_loss.update(test_loss_list[-1], 1)

    # meta-learning update: interpolate between the initial weights and the averaged client weights
    for key in weight_dict_list[0].keys():
        for model_id in range(1, len(weight_dict_list)):
            weight_dict_list[0][key].add_(weight_dict_list[model_id][key])
        weight_dict_list[0][key].mul_(args.global_lr / len(clientIDs)).add_(
            (1 - args.global_lr) * initial_weights[key])

    return weight_dict_list[
        0], mean_train_acc.avg, mean_train_loss.avg, mean_test_acc.avg, mean_test_loss.avg
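Every example on this page relies on a small running-average helper named AvgrageMeter (note the spelling) that exposes update(val, n), avg, and, in some examples, reset() and an optional name argument. The class itself is never shown here; the sketch below is a minimal reconstruction inferred from how the examples call it, and the real implementation may differ.

class AvgrageMeter:
    """Minimal running-average meter (hypothetical sketch)."""

    def __init__(self, name=''):
        self.name = name  # some examples pass a label such as 'acc' or 'loss'
        self.reset()

    def reset(self):
        self.sum = 0.0
        self.cnt = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # accumulate a value observed over n samples and refresh the running mean
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt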
Example #2
def aggregation(data_train, data_test, args, clientIDs, model, optimizer):
    mean_train_acc, mean_train_loss = AvgrageMeter(), AvgrageMeter()
    mean_test_acc, mean_test_loss = AvgrageMeter(), AvgrageMeter()

    initial_weights = copy.deepcopy(model.state_dict())

    num_examples = []
    weight_dict_list = []

    for clientID in clientIDs:
        model.load_state_dict(initial_weights)
        model, train_acc, train_loss, test_acc_list, test_loss_list = client_update(
            data_train[clientID], data_test[clientID], args, model, optimizer,
            args.train_epochs)
        num_examples.append(len(data_train[clientID]))
        # save a copy of this client's updated weights
        weight_dict_list.append(copy.deepcopy(model.state_dict()))
        mean_train_acc.update(train_acc, 1)
        mean_train_loss.update(train_loss, 1)
        mean_test_acc.update(test_acc_list[-1], 1)
        mean_test_loss.update(test_loss_list[-1], 1)

    # FedAvg: average client weights, weighted by each client's number of training examples
    for key in weight_dict_list[0].keys():
        weight_dict_list[0][key] *= num_examples[0]
        for model_id in range(1, len(weight_dict_list)):
            weight_dict_list[0][key].add_(weight_dict_list[model_id][key] *
                                          num_examples[model_id])
        weight_dict_list[0][key].div_(np.sum(num_examples))

    return weight_dict_list[
        0], mean_train_acc.avg, mean_train_loss.avg, mean_test_acc.avg, mean_test_loss.avg
Example #3
def train(epoch, epochs, train_loader, device, model, criterion, optimizer,
          scheduler, tensorboard_path):
    model.train()
    top1 = AvgrageMeter()
    model = model.to(device)
    train_loss = 0.0
    for i, data in enumerate(train_loader, 0):  # enumerate starting at index 0
        inputs, labels, batch_seq_len = data[0].to(device), data[1].to(
            device), data[2]
        # reset gradients left over from the previous batch
        optimizer.zero_grad()
        outputs, hidden = model(inputs, batch_seq_len)

        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        _, pred = outputs.topk(1)
        prec1, prec2 = accuracy(outputs, labels, topk=(1, 2))
        n = inputs.size(0)
        top1.update(prec1.item(), n)
        train_loss += loss.item()
        postfix = {
            'train_loss': '%.6f' % (train_loss / (i + 1)),
            'train_acc': '%.6f' % top1.avg
        }
        train_loader.set_postfix(log=postfix)

        # TensorBoard curves
        if not os.path.exists(tensorboard_path):
            os.mkdir(tensorboard_path)
        writer = SummaryWriter(tensorboard_path)
        writer.add_scalar('Train/Loss', loss.item(), epoch)
        writer.add_scalar('Train/Accuracy', top1.avg, epoch)
        writer.flush()
    scheduler.step()
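This example (and the validate function in Example #5) constructs a new SummaryWriter inside the batch loop on every iteration. A common alternative is to create the writer once and reuse it; the snippet below is only an illustrative refactor sketch, with make_writer being a hypothetical helper name.

import os
from torch.utils.tensorboard import SummaryWriter

def make_writer(tensorboard_path):
    # create the log directory once and return a single reusable writer
    os.makedirs(tensorboard_path, exist_ok=True)
    return SummaryWriter(tensorboard_path)

# writer = make_writer(tensorboard_path)   # before the training loop
# writer.add_scalar('Train/Loss', loss.item(), epoch)
# writer.close()                           # after training finishes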
Example #4
def test(validate_loader, device, model, criterion):
    val_acc = 0.0
    model = model.to(device)
    model.eval()
    confuse_meter = ConfuseMeter()
    with torch.no_grad():  # no gradient updates during evaluation
        val_top1 = AvgrageMeter()
        validate_loader = tqdm(validate_loader)
        validate_loss = 0.0
        for i, data in enumerate(validate_loader, 0):  # enumerate starting at index 0
            inputs, labels, batch_seq_len = data[0].to(device), data[1].to(
                device), data[2]
            #         inputs,labels = data[0],data[1]
            outputs, _ = model(inputs, batch_seq_len)
            #             loss = criterion(outputs, labels)

            prec1, prec2 = accuracy(outputs, labels, topk=(1, 2))
            n = inputs.size(0)
            val_top1.update(prec1.item(), n)
            confuse_meter.update(outputs, labels)
            #             validate_loss += loss.item()
            postfix = {
                'test_acc': '%.6f' % val_top1.avg,
                'confuse_acc': '%.6f' % confuse_meter.acc
            }
            validate_loader.set_postfix(log=postfix)
        val_acc = val_top1.avg
    return confuse_meter
Example #5
def validate(epoch, validate_loader, device, model, criterion,
             tensorboard_path):
    val_acc = 0.0
    model = model.to(device)
    model.eval()
    with torch.no_grad():  # no gradient updates during evaluation
        val_top1 = AvgrageMeter()
        validate_loader = tqdm(validate_loader)
        validate_loss = 0.0
        for i, data in enumerate(validate_loader, 0):  # enumerate starting at index 0
            inputs, labels, batch_seq_len = data[0].to(device), data[1].to(
                device), data[2]
            #         inputs,labels = data[0],data[1]
            outputs, _ = model(inputs, batch_seq_len)
            loss = criterion(outputs, labels)

            prec1, prec2 = accuracy(outputs, labels, topk=(1, 2))
            n = inputs.size(0)
            val_top1.update(prec1.item(), n)
            validate_loss += loss.item()
            postfix = {
                'validate_loss': '%.6f' % (validate_loss / (i + 1)),
                'validate_acc': '%.6f' % val_top1.avg
            }
            validate_loader.set_postfix(log=postfix)

            # TensorBoard curves
            if not os.path.exists(tensorboard_path):
                os.mkdir(tensorboard_path)
            writer = SummaryWriter(tensorboard_path)
            writer.add_scalar('Validate/Loss', loss.item(), epoch)
            writer.add_scalar('Validate/Accuracy', val_top1.avg, epoch)
            writer.flush()
        val_acc = val_top1.avg
    return val_acc
Example #6
    def update_MLP(self):
        all_archs = torch.zeros(self.max, 6).cuda()
        all_target = torch.zeros(self.max).cuda()
        self.MLP.train()
        for i, structure_father in enumerate(self.group):
            all_archs[i][:] = torch.tensor([
                item for sublist in structure_father.structure
                for item in sublist
            ])[:]
            all_target[i] = structure_father.loss

        indx = all_target < 15
        all_archs = all_archs[indx, :]
        all_target = all_target[indx]
        epoch = 20
        objs = AvgrageMeter()
        batch_size = 32

        for i in range(epoch):
            start = (batch_size * i) % all_archs.size(0)
            end = start + batch_size
            archs = all_archs[start:end]
            target = all_target[start:end]
            output = self.MLP(archs)
            loss = self.criterion(output, target)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            n = archs.size(0)
            objs.update(loss.item(), n)

        logInfo = 'MLP: loss = {:.6f},\t'.format(objs.avg)
        logging.info(logInfo)
Example #7
    def __init__(self,
                 network,
                 w_lr=0.01,
                 w_mom=0.9,
                 w_wd=1e-4,
                 t_lr=0.001,
                 t_wd=3e-3,
                 t_beta=(0.5, 0.999),
                 init_temperature=5.0,
                 temperature_decay=0.965,
                 logger=logging,
                 lr_scheduler={'T_max': 200},
                 gpus=[0],
                 save_theta_prefix='',
                 save_tb_log=''):
        assert isinstance(network, FBNet)
        network.apply(weights_init)
        network = network.train().cuda()
        if isinstance(gpus, str):
            gpus = [int(i) for i in gpus.strip().split(',')]
        # network = DataParallel(network, gpus)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        network.to(device)
        self.gpus = gpus
        self._mod = network
        theta_params = network.theta
        mod_params = network.parameters()
        self.theta = theta_params
        self.w = mod_params
        self._tem_decay = temperature_decay
        self.temp = init_temperature
        self.logger = logger
        self.tensorboard = Tensorboard('logs/' + save_tb_log)
        self.save_theta_prefix = save_theta_prefix

        self._acc_avg = AvgrageMeter('acc')
        self._ce_avg = AvgrageMeter('ce')
        self._lat_avg = AvgrageMeter('lat')
        self._loss_avg = AvgrageMeter('loss')
        self._ener_avg = AvgrageMeter('ener')

        self.w_opt = torch.optim.SGD(mod_params,
                                     w_lr,
                                     momentum=w_mom,
                                     weight_decay=w_wd)

        self.w_sche = CosineDecayLR(self.w_opt, **lr_scheduler)

        self.t_opt = torch.optim.Adam(theta_params,
                                      lr=t_lr,
                                      betas=t_beta,
                                      weight_decay=t_wd)
Example #8
    def train_fn(self, optimizer, criterion, loader, device, train=True):
        """
        Training method
        :param optimizer: optimization algorithm
        :criterion: loss function
        :param loader: data loader for either training or testing set
        :param device: torch device
        :param train: boolean to indicate if training or test set is used
        :return: (accuracy, loss) on the data
        """
        score = AvgrageMeter()
        objs = AvgrageMeter()
        self.train()

        t = tqdm(loader)
        for images, labels in t:
            images = images.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            logits = self(images)
            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()

            acc, _ = accuracy(logits, labels, topk=(1, 5))
            n = images.size(0)
            objs.update(loss.item(), n)
            score.update(acc.item(), n)

            t.set_description('(=> Training) Loss: {:.4f}'.format(objs.avg))

        return score.avg, objs.avg
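Several of these train/eval functions pair AvgrageMeter with an accuracy(output, target, topk=...) helper that returns one precision value (in percent) per requested k. That helper is also not defined on this page; the following is a plausible sketch of the usual top-k implementation, offered only as an assumption about what the examples expect.

import torch

def accuracy(output, target, topk=(1,)):
    """Top-k accuracy in percent for each k in topk (sketch of the usual helper)."""
    maxk = max(topk)
    batch_size = target.size(0)

    # indices of the maxk highest-scoring classes per sample, shape (maxk, batch)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res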
Example #9
    def eval_fn(self, loader, device, train=False, confusion_m = False, criterion = None):
        """
        Evaluation method
        :param loader: data loader for either training or testing set
        :param device: torch device
        :param train: boolean to indicate if training or test set is used
        :return: accuracy on the data
        """
        objs = AvgrageMeter()
        score = AvgrageMeter()
        self.eval()
        with torch.no_grad():
            for step, (images, labels) in enumerate(loader):
                images = images.to(device)
                labels = labels.to(device)
                outputs = self(images)

                acc, _ = accuracy(outputs, labels, topk=(1, 5))
                score.update(acc.item(), images.size(0))

                if criterion:
                    loss = criterion(outputs, labels)
                    objs.update(loss.item(), images.size(0))

                if step % self.report_freq == 0:
                    logging.info('Evaluation | step: %d | accuracy: %f' % (step, score.avg))   

        return score.avg, objs.avg
Example #10
    def eval_fn(self, loader, device, train=False, confusion_m = False, criterion = None):
        """
        Evaluation method
        :param loader: data loader for either training or testing set
        :param device: torch device
        :param train: boolean to indicate if training or test set is used
        :return: accuracy on the data
        """
        objs = AvgrageMeter()
        score = AvgrageMeter()
        self.eval()

        t = tqdm(loader)
        with torch.no_grad():
            for images, labels in t:
                images = images.to(device)
                labels = labels.to(device)

                outputs = self(images)
                acc, _ = accuracy(outputs, labels, topk=(1, 5))
                score.update(acc.item(), images.size(0))

                if criterion:
                    loss = criterion(outputs, labels)
                    objs.update(loss.data, images.size(0))

                if confusion_m:
                    # Plot confusion matrix
                    plot_confusion_matrix(labels.cpu(), outputs.topk(1, 1, True, True)[1].cpu(), normalize=True, title='Confusion matrix')

                t.set_description('(=> Test) Score: {:.4f}'.format(score.avg))

        return score.avg, objs.avg
Example #11
    def __init__(self,
                 network,
                 w_lr=0.01,
                 w_mom=0.9,
                 w_wd=1e-4,
                 t_lr=0.001,
                 t_wd=3e-3,
                 t_beta=(0.5, 0.999),
                 init_temperature=5.0,
                 temperature_decay=0.965,
                 logger=logging,
                 lr_scheduler={'T_max': 200},
                 gpus=[0],
                 save_theta_prefix='',
                 theta_result_path='./theta-result',
                 checkpoints_path='./checkpoints'):
        assert isinstance(network, FBNet)
        network.apply(weights_init)
        network = network.train().cuda()
        if isinstance(gpus, str):
            gpus = [int(i) for i in gpus.strip().split(',')]
        network = DataParallel(network, gpus)
        self.gpus = gpus
        self._mod = network
        theta_params = network.theta
        mod_params = network.parameters()
        self.theta = theta_params
        self.w = mod_params
        self._tem_decay = temperature_decay
        self.temp = init_temperature
        self.logger = logger
        self.save_theta_prefix = save_theta_prefix
        if not os.path.exists(theta_result_path):
            os.makedirs(theta_result_path)
        self.theta_result_path = theta_result_path
        if not os.path.exists(checkpoints_path):
            os.makedirs(checkpoints_path)
        self.checkpoints_path = checkpoints_path

        self._acc_avg = AvgrageMeter('acc')
        self._ce_avg = AvgrageMeter('ce')
        self._lat_avg = AvgrageMeter('lat')
        self._loss_avg = AvgrageMeter('loss')

        self.w_opt = torch.optim.SGD(mod_params,
                                     w_lr,
                                     momentum=w_mom,
                                     weight_decay=w_wd)

        self.w_sche = CosineDecayLR(self.w_opt, **lr_scheduler)

        self.t_opt = torch.optim.Adam(theta_params,
                                      lr=t_lr,
                                      betas=t_beta,
                                      weight_decay=t_wd)
Example #12
def validate(model, device, args, *, all_iters=None, arch_loader=None):
    assert arch_loader is not None

    objs = AvgrageMeter()
    top1 = AvgrageMeter()
    top5 = AvgrageMeter()

    loss_function = args.loss_function
    val_dataloader = args.val_dataloader

    model.eval()
    # model.apply(bn_calibration_init)

    max_val_iters = 0
    t1 = time.time()

    result_dict = {}

    arch_dict = arch_loader.get_arch_dict()

    base_model = mutableResNet20(10).cuda()

    with torch.no_grad():
        for key, value in arch_dict.items():  # for each candidate architecture
            max_val_iters += 1
            # print('\r ', key, ' iter:', max_val_iters, end='')

            for data, target in val_dataloader:  # one pass over the validation set
                target = target.type(torch.LongTensor)
                data, target = data.to(device), target.to(device)

                output = model(data, value["arch"])

                prec1, prec5 = accuracy(output, target, topk=(1, 5))

                print("acc1: ", prec1.item())
                n = data.size(0)

                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)

            tmp_dict = {}
            tmp_dict['arch'] = value['arch']
            tmp_dict['acc'] = top1.avg

            result_dict[key] = tmp_dict

    with open("acc_result.json", "w") as f:
        json.dump(result_dict, f)
Example #13
    def __init__(self,
                 network,
                 w_lr=0.01,
                 w_mom=0.9,
                 w_wd=1e-4,
                 init_temperature=5.0,
                 temperature_decay=0.965,
                 target_lat=0.0,
                 eta=0.1,
                 lmd=2.0,
                 logger=logging,
                 lr_scheduler={'T_max': 200},
                 gpus=[0],
                 model_save_path="",
                 save_theta_prefix=''):
        assert isinstance(network, EDNetV2)
        network.apply(weights_init)
        network = network.train().cuda()
        if isinstance(gpus, str):
            gpus = [int(i) for i in gpus.strip().split(',')]
        network = DataParallel(network, gpus)
        self.gpus = gpus
        self._mod = network
        mod_params = network.parameters()
        self._tem_decay = temperature_decay
        self.temp = init_temperature
        self.logger = logger
        self.save_theta_prefix = save_theta_prefix
        self.eta = eta
        self.target_lat = target_lat
        self.lmd = lmd
        self.model_save_path = model_save_path
        self.criterion = nn.CrossEntropyLoss().cuda()

        self._acc_avg = AvgrageMeter('acc')
        self._ce_avg = AvgrageMeter('ce')
        self._lat_avg = AvgrageMeter('lat')
        self._loss_avg = AvgrageMeter('loss')
        self._valid_acc = [0.0]

        #self.opt = torch.optim.SGD(
        #                mod_params,
        #                w_lr,
        #                momentum=w_mom,
        #                weight_decay=w_wd)
        self.opt = torch.optim.Adam(mod_params, w_lr, weight_decay=w_wd)
        self.sche = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            self.opt, T_0=1, T_mult=2)

        #self.sche = CosineDecayLR(self.opt, **lr_scheduler)

        self.base_lat = None
        self.input_shape = None
Example #14
def infer(model):

    test_data = dset.CIFAR10(
        root=TestConfig['data_path'],
        train=False,
        download=True,
        transform=data_transforms_cifar10(0, False),
    )

    if DEBUG:
        sampler = torch.utils.data.sampler.SubsetRandomSampler(list(
            range(256)))
        test_queue = torch.utils.data.DataLoader(
            test_data,
            sampler=sampler,
            batch_size=TestConfig['batch_size'],
            shuffle=False,
            pin_memory=True,
            num_workers=16,
        )

    else:
        test_queue = torch.utils.data.DataLoader(
            test_data,
            batch_size=TestConfig['batch_size'],
            shuffle=False,
            pin_memory=True,
            num_workers=16,
        )

    model.eval().cuda()
    acc_avg = AvgrageMeter('acc')
    for step, (X, y) in enumerate(test_queue):
        X = Variable(X, requires_grad=False).cuda()
        y = Variable(y, requires_grad=False).cuda(non_blocking=True)
        logits, _ = model(X, TestConfig['drop_path_prob'])
        pred = torch.argmax(logits, dim=1)
        acc = torch.sum(pred == y).float() / TestConfig['batch_size']
        acc_avg.update(acc)

        if step % TestConfig['log_freq'] == 0:
            print(f"test batch {step}: {acc_avg}")
    print(f"Final test: {acc_avg}")
Example #15
def eval_one_epoch():
    model.eval()
    acc = 0.0
    map_score_list = []

    loss_absolute = AvgrageMeter()
    loss_contra = AvgrageMeter()

    for i, batch in enumerate(tqdm(val_dataloader)):
        # get the inputs
        with torch.no_grad():
            data, binary_mask, label = batch
            data, binary_mask, label = data.cuda(), binary_mask.cuda(
            ), label.cuda()

            optimizer.zero_grad()
            map_score = 0.0

            map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                data)

            absolute_loss = criterion_absolute_loss(map_x, binary_mask)
            contrastive_loss = criterion_contrastive_loss(map_x, binary_mask)

            loss = absolute_loss + contrastive_loss

            n = data.size(0)
            loss_absolute.update(absolute_loss.data, n)
            loss_contra.update(contrastive_loss.data, n)

            map_score = torch.mean(map_x)

        map_score = 1.0 if map_score > 1 else map_score.item()
        map_score_list.append(map_score)

        # need another way to evaluate
        pred = 1 if map_score > 0.5 else 0
        acc += (pred == label.item())

    loss_avg = loss_absolute.avg + loss_contra.avg
    aou = metrics.roc_auc_score(labels, map_score_list)

    print(
        'epoch:%d, Eval:  Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f, Total Loss: %.4f, AOU: %.4f\n'
        % (epoch + 1, loss_absolute.avg, loss_contra.avg, loss_avg, aou))

    return acc / len(val_dataset), loss_avg, aou, map_score_list
Example #16
    def eval_fn(self, loader, device, train=False):
        """
        Evaluation method
        :param loader: data loader for either training or testing set
        :param device: torch device
        :param train: boolean to indicate if training or test set is used
        :return: accuracy on the data
        """
        score = AvgrageMeter()
        self.eval()

        t = tqdm(loader)
        with torch.no_grad():  # no gradient needed
            for images, labels in t:
                images = images.to(device)
                labels = labels.to(device)

                outputs = self(images)
                acc, _ = accuracy(outputs, labels, topk=(1, 5))
                score.update(acc.item(), images.size(0))

                t.set_description('(=> Test) Score: {:.4f}'.format(score.avg))

        return score.avg
Example #17
def test(model, data_test_loader):
    objs = AvgrageMeter()
    top1 = AvgrageMeter()
    criterion = torch.nn.CrossEntropyLoss().cuda()

    model.eval()
    with torch.no_grad():
        for i, (images_test, labels_test) in enumerate(data_test_loader):
            images_test, labels_test = images_test.cuda(), labels_test.cuda()
            output_test = model(images_test)
            loss_test = criterion(output_test, labels_test)
            prec_test, = accuracy(output_test, labels_test)

            n_test = images_test.size(0)
            objs.update(loss_test.item(), n_test)
            top1.update(prec_test.item(), n_test)
            if i % 50 == 0:
                print(f'Finished {i+1}/{len(data_test_loader)}')

    print(f'Avg Loss = {objs.avg}, Test Acc = {top1.avg}')
Example #18
def train_one_epoch():
    model.train()
    loss_absolute = AvgrageMeter()
    loss_contra = AvgrageMeter()

    trange = tqdm(train_dataloader)

    for i, batch in enumerate(trange):
        # get the inputs
        data, binary_mask, label = batch
        data, binary_mask, label = data.cuda(), binary_mask.cuda(), label.cuda(
        )

        optimizer.zero_grad()

        # forward + backward + optimize
        map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(data)

        absolute_loss = criterion_absolute_loss(map_x, binary_mask)
        contrastive_loss = criterion_contrastive_loss(map_x, binary_mask)

        loss = absolute_loss + contrastive_loss
        loss.backward()
        optimizer.step()

        n = data.size(0)
        loss_absolute.update(absolute_loss.data, n)
        loss_contra.update(contrastive_loss.data, n)

        postfix_dict = {
            "loss_absolute": absolute_loss.item(),
            "loss_contra": contrastive_loss.item(),
            "loss": loss.item()
        }
        trange.set_postfix(**postfix_dict)

    print(
        'epoch:%d, Train:  Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f\n'
        % (epoch + 1, loss_absolute.avg, loss_contra.avg))
Example #19
  def __init__(self, network,
               w_lr=0.01,
               w_mom=0.9,
               w_wd=1e-4,
               t_lr=0.001,
               t_wd=3e-3,
               t_beta=(0.5, 0.999),
               init_temperature=5.0,
               temperature_decay=0.965,
               logger=logging,
               lr_scheduler={'T_max' : 200},
               gpus=[0],
               save_theta_prefix='',
               resource_weight=0.001):
    assert isinstance(network, SNAS)
    network.apply(weights_init)
    network = network.train().cuda()
    self._criterion = nn.CrossEntropyLoss().cuda()

    alpha_params = network.arch_parameters()
    mod_params = network.model_parameters()
    self.alpha = alpha_params
    if isinstance(gpus, str):
      gpus = [int(i) for i in gpus.strip().split(',')]
    network = DataParallel(network, gpus)
    self._mod = network
    self.gpus = gpus

    self.w = mod_params
    self._tem_decay = temperature_decay
    self.temp = init_temperature
    self.logger = logger
    self.save_theta_prefix = save_theta_prefix
    self._resource_weight = resource_weight

    self._loss_avg = AvgrageMeter('loss')
    self._acc_avg = AvgrageMeter('acc')
    self._res_cons_avg = AvgrageMeter('resource-constraint')

    self.w_opt = torch.optim.SGD(
                    mod_params,
                    w_lr,
                    momentum=w_mom,
                    weight_decay=w_wd)
    self.w_sche = CosineDecayLR(self.w_opt, **lr_scheduler)
    self.t_opt = torch.optim.Adam(
                    alpha_params,
                    lr=t_lr, betas=t_beta,
                    weight_decay=t_wd)
Example #20
    def train_fn(self, optimizer, criterion, loader, device, train=True):
        score = AvgrageMeter()
        objs = AvgrageMeter()
        self.train()

        for step, (images, labels) in enumerate(loader):
            images = images.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            logits = self(images)
            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()

            acc, _ = accuracy(logits, labels, topk=(1, 5))
            n = images.size(0)
            objs.update(loss.item(), n)
            score.update(acc.item(), n)

            if step % self.report_freq == 0:
                logging.info('Training | step: %d | loss: %e | accuracy: %f' % (step, objs.avg, score.avg))

        return score.avg, objs.avg
Example #21
    def __init__(self,
                 network,
                 w_lr=0.01,
                 w_mom=0.9,
                 w_wd=1e-4,
                 t_lr=0.001,
                 t_wd=3e-3,
                 init_temperature=5.0,
                 temperature_decay=0.965,
                 logger=logging):
        assert isinstance(network, FBNet)
        network.train()
        self._mod = network
        theta_params = network.theta
        mod_params = []
        for v in network.parameters():
            if v not in theta_params:
                mod_params.append(v)
        self.theta = theta_params
        self.w = mod_params
        self._tem_decay = temperature_decay
        self.temp = init_temperature
        self.logger = logger

        self._acc_avg = AvgrageMeter('acc')
        self._ce_avg = AvgrageMeter('ce')
        self._lat_avg = AvgrageMeter('lat')

        self.w_opt = torch.optim.SGD(mod_params,
                                     w_lr,
                                     momentum=w_mom,
                                     weight_decay=w_wd)

        self.t_opt = torch.optim.Adam(theta_params,
                                      lr=t_lr,
                                      betas=(0.5, 0.999),
                                      weight_decay=t_wd)
Example #22
def train_test():
    # GPU & log file --> if using DataParallel, comment out this line
    os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % (args.gpu)

    isExists = os.path.exists(args.log)
    if not isExists:
        os.makedirs(args.log)
    log_file = open(args.log + '/' + args.log + '_log.txt', 'w')

    echo_batches = args.echo_batches

    print("Oulu-NPU, P1:\n ")

    log_file.write('Oulu-NPU, P1:\n ')
    log_file.flush()

    # load the network, load the pre-trained model in UCF101?
    finetune = args.finetune
    if finetune:
        print('finetune!\n')

    else:
        print('train from scratch!\n')
        log_file.write('train from scratch!\n')
        log_file.flush()

        #model = CDCNpp( basic_conv=Conv2d_cd, theta=0.7)
        model = CDCNpp(basic_conv=Conv2d_cd, theta=args.theta)
        #model = CDCN( basic_conv=Conv2d_cd, theta=args.theta)
        #model = CDCNpp1( basic_conv=Conv2d_cd, theta=args.theta)

        model = model.cuda()

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=args.step_size,
                                              gamma=args.gamma)

    print(model)

    criterion_absolute_loss = nn.MSELoss().cuda()
    criterion_contrastive_loss = Contrast_depth_loss().cuda()

    ACER_save = 1.0

    MSELoss_list = []

    Contrast_depth_loss_list = []
    accuracy_list = []
    accuracy1_list = []
    total_loss_list = []
    for epoch in range(args.epochs):  # loop over the dataset multiple times
        # meanLoss = []

        scheduler.step()
        if (epoch + 1) % args.step_size == 0:
            lr *= args.gamma

        loss_absolute = AvgrageMeter()
        loss_contra = AvgrageMeter()
        loss_total = AvgrageMeter()

        model.train()

        # load random 16-frame clip data every epoch

        train_data = Spoofing_train("/home/chang/dataset/oulu/train_face1/",
                                    transform=transforms.Compose([
                                        RandomErasing(),
                                        RandomHorizontalFlip(),
                                        ToTensor(),
                                        Cutout(),
                                        Normaliztion()
                                    ]))
        #/home/chang/dataset/oulu/train_face1/
        #train_data = Spoofing_train("../../../../trainset", transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
        dataloader_train = DataLoader(train_data,
                                      batch_size=args.batchsize,
                                      shuffle=True,
                                      num_workers=4)

        for i, sample_batched in enumerate(dataloader_train):
            # get the inputs
            inputs, binary_mask, spoof_label = sample_batched['image_x'].cuda(
            ), sample_batched['binary_mask'].cuda(
            ), sample_batched['spoofing_label'].cuda()
            # inputs, binary_mask, spoof_label = sample_batched['image_x'], sample_batched['binary_mask'], sample_batched['spoofing_label']

            optimizer.zero_grad()

            # forward + backward + optimize
            map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                inputs)

            #pdb.set_trace()
            #pdb.set_trace()
            absolute_loss = criterion_absolute_loss(map_x, binary_mask)

            contrastive_loss = criterion_contrastive_loss(map_x, binary_mask)

            loss = absolute_loss + contrastive_loss

            #loss.update(loss.item(), n)
            #total_loss_list.append(loss.item())

            loss.backward()

            optimizer.step()

            n = inputs.size(0)
            loss_absolute.update(absolute_loss.data, n)
            loss_contra.update(contrastive_loss.data, n)
            loss_total.update(loss.data, n)
            #total_loss.append(loss.item())
            torch.cuda.empty_cache()
            # if i > 1:
            #     break
            # print(np.mean(meanLoss))

            if i % echo_batches == echo_batches - 1:  # print every echo_batches mini-batches

                #         # visualization
                #         #FeatureMap2Heatmap(x_input, x_Block1, x_Block2, x_Block3, map_x)

                #         # log written
                print(
                    'epoch:%d, mini-batch:%3d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f'
                    %
                    (epoch + 1, i + 1, lr, loss_absolute.avg, loss_contra.avg))

        #     #break

        # # whole epoch average
        print(
            'epoch:%d, Train:  Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f, loss= %.4f \n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg, loss_total.avg))
        MSELoss_list.append(loss_absolute.avg)
        Contrast_depth_loss_list.append(loss_contra.avg)
        total_loss_list.append(loss_total.avg)

        log_file.write(
            'epoch:%d, Train: Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.write('loss= %.4f \n' % loss_total.avg)
        log_file.flush()

        threshold = 0.5

        # epoch_test = 1
        # if epoch>25 and epoch % 5 == 0:
        if True:
            model.eval()
            meanAcT = []
            meanAcF = []
            with torch.no_grad():

                #rootDir = "D:/dataset/oulu/oulu/trainset/"
                val_data = Spoofing_test("/home/chang/dataset/oulu/dev_face1/",
                                         transform=transforms.Compose(
                                             [Normaliztion(),
                                              ToTensor()]))
                #/home/chang/dataset/oulu/dev_face1/
                #val_data = Spoofing_train("../../../../devset", transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
                # val_data = Spoofing_valtest(image_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                test_ba = 1
                dataloader_val = DataLoader(val_data,
                                            batch_size=test_ba,
                                            shuffle=False,
                                            num_workers=4)

                # map_score_list = []

                num = 0
                for i, sample_batched in enumerate(dataloader_val):
                    #             # get the inputs
                    inputs, binary_mask, spoof_label = sample_batched[
                        'image_x'].cuda(), sample_batched['binary_mask'].cuda(
                        ), sample_batched['spoofing_label'].cuda()
                    # inputs = sample_batched['image_x'].cuda()
                    # binary_mask = sample_batched['binary_mask'].cuda()

                    optimizer.zero_grad()
                    map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs)
                    # map_x shape: batch,N,N
                    #pre_label = 0

                    for ba in range(test_ba):
                        pre_label = 0
                        map_score = torch.sum(map_x[ba]) / (32 * 32)
                        if map_score >= threshold:
                            pre_label = 1
                        if pre_label == spoof_label:
                            num += 1
                        if spoof_label != 1:
                            meanAcF.append(1 - map_score.item())
                        else:
                            meanAcT.append(map_score.item())
                        # print(spoof_label,map_score)

                    torch.cuda.empty_cache()

            with torch.no_grad():

                #rootDir = "D:/dataset/oulu/oulu/trainset/"
                val_data = Spoofing_test1("/home/chang/dataset/B_face1/",
                                          transform=transforms.Compose(
                                              [ToTensor(),
                                               Normaliztion()]))
                #val_data = Spoofing_train("../../../../devset", transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
                # val_data = Spoofing_valtest(image_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                test_ba = 1
                dataloader_val1 = DataLoader(val_data,
                                             batch_size=test_ba,
                                             shuffle=False,
                                             num_workers=4)

                # map_score_list = []

                num1 = 0
                for i, sample_batched in enumerate(dataloader_val1):
                    #             # get the inputs
                    inputs, binary_mask, spoof_label = sample_batched[
                        'image_x'].cuda(), sample_batched['binary_mask'].cuda(
                        ), sample_batched['spoofing_label'].cuda()
                    # inputs = sample_batched['image_x'].cuda()
                    # binary_mask = sample_batched['binary_mask'].cuda()

                    optimizer.zero_grad()
                    map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs)
                    # map_x shape: batch,N,N
                    #pre_label = 0

                    for ba in range(test_ba):
                        pre_label = 0
                        map_score = torch.sum(map_x[ba]) / (32 * 32)
                        if map_score >= threshold:
                            pre_label = 1
                        if pre_label == spoof_label:
                            num1 += 1

                        # print(spoof_label,map_score)

                    torch.cuda.empty_cache()

        #     # save the model until the next improvement
            print("TP", np.mean(meanAcT))
            print("TN", np.mean(meanAcF))
            meanAcT = np.array(meanAcT)
            meanAcF = np.array(meanAcF)
            TP = len(meanAcT[meanAcT > threshold])
            TN = len(meanAcF[meanAcF > 1 - threshold])
            acc = (TP + TN) / (len(meanAcF) + len(meanAcT))
            accuracy = num / len(dataloader_val)
            print("ACC", acc, ":T ", TP, ":", len(meanAcT), " F ", TN, ":",
                  len(meanAcF))
            print("ACCURACY:", accuracy)
            accuracy_list.append(accuracy)

            accuracy1 = num1 / len(dataloader_val1)
            accuracy1_list.append(accuracy1)
            print("ACCURACY1:", accuracy1)

            log_file.write('val: TP= %.4f,:%.4f TN= %.4f,:%.4f ACC=%.4f \n' %
                           (TP, len(meanAcT), TN, len(meanAcF), acc))
            log_file.write('ACCURACY = %.4f \n' % accuracy)
            log_file.write('ACCURACY1 = %.4f \n' % accuracy1)

            #print(args.log+'/'+args.log)
            torch.save(model.state_dict(),
                       args.log + '/' + args.log + '_%d.pkl' % (epoch + 1))
            # break

    print('Finished Training')
    log_file.close()

    plt.plot(MSELoss_list, label="MSELoss")
    plt.plot(Contrast_depth_loss_list, label="depth_loss")
    plt.plot(total_loss_list, label="total_loss")
    plt.title("loss")
    plt.legend()
    plt.show()

    plt.plot(accuracy_list, label="accuracy")
    plt.plot(accuracy1_list, label="accuracy1")
    #plt.plot(total_loss_list, label = "total_loss")
    plt.title("accuracy")
    plt.legend()
    plt.show()
Example #23
def validate(model, device, args, *, all_iters=None):
    objs = AvgrageMeter()
    top1 = AvgrageMeter()
    top5 = AvgrageMeter()

    loss_function = args.loss_function
    val_dataprovider = args.val_dataprovider

    model.eval()
    max_val_iters = 250
    t1 = time.time()
    with torch.no_grad():
        for _ in range(1, max_val_iters + 1):
            data, target = val_dataprovider.next()
            target = target.type(torch.LongTensor)
            data, target = data.to(device), target.to(device)

            output = model(data)
            loss = loss_function(output, target)

            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            n = data.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

    logInfo = 'TEST Iter {}: loss = {:.6f},\t'.format(all_iters, objs.avg) + \
              'Top-1 err = {:.6f},\t'.format(1 - top1.avg / 100) + \
              'Top-5 err = {:.6f},\t'.format(1 - top5.avg / 100) + \
              'val_time = {:.6f}'.format(time.time() - t1)
    logging.info(logInfo)
Example #24
def validate(model, device, args):
    objs = AvgrageMeter()
    top1 = AvgrageMeter()
    top5 = AvgrageMeter()

    loss_function = args.loss_function
    val_dataloader = args.val_dataloader
    L = len(val_dataloader)

    model.eval()
    with torch.no_grad():
        data_iterator = enumerate(val_dataloader)
        for _ in tqdm(range(250)):
            _, data = next(data_iterator)
            target = data[1].type(torch.LongTensor)
            data, target = data[0].to(device), target.to(device)
            output = model(data)
            loss = loss_function(output, target)
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            n = data.size(0)
            objs.update(loss.item())
            top1.update(prec1.item())
            top5.update(prec5.item())

    if args.local_rank == 0:
        logInfo = 'TEST: loss = {:.6f},\t'.format(objs.avg) + \
                  'Top-1 err = {:.6f},\t'.format(100 - top1.avg) + \
                  'Top-5 err = {:.6f},\t'.format(100 - top5.avg)
        logging.info(logInfo)
Example #25
class Trainer(object):
    """Training network parameters and theta separately.
  """
    def __init__(self,
                 network,
                 w_lr=0.01,
                 w_mom=0.9,
                 w_wd=1e-4,
                 t_lr=0.001,
                 t_wd=3e-3,
                 t_beta=(0.5, 0.999),
                 init_temperature=5.0,
                 temperature_decay=0.965,
                 logger=logging,
                 lr_scheduler={'T_max': 200},
                 gpus=[0],
                 save_theta_prefix=''):
        assert isinstance(network, FBNet)
        network.apply(weights_init)
        network = network.train().cuda()
        if isinstance(gpus, str):
            gpus = [int(i) for i in gpus.strip().split(',')]
        network = DataParallel(network, gpus)
        self.gpus = gpus
        self._mod = network
        theta_params = network.theta
        mod_params = network.parameters()
        self.theta = theta_params
        self.w = mod_params
        self._tem_decay = temperature_decay
        self.temp = init_temperature
        self.logger = logger
        self.save_theta_prefix = save_theta_prefix

        self._acc_avg = AvgrageMeter('acc')
        self._ce_avg = AvgrageMeter('ce')
        self._lat_avg = AvgrageMeter('lat')
        self._loss_avg = AvgrageMeter('loss')

        self.w_opt = torch.optim.SGD(mod_params,
                                     w_lr,
                                     momentum=w_mom,
                                     weight_decay=w_wd)

        self.w_sche = CosineDecayLR(self.w_opt, **lr_scheduler)

        self.t_opt = torch.optim.Adam(theta_params,
                                      lr=t_lr,
                                      betas=t_beta,
                                      weight_decay=t_wd)

    def train_w(self, input, target, decay_temperature=False):
        """Update model parameters.
    """
        self.w_opt.zero_grad()
        loss, ce, lat, acc, energy = self._mod(input, target, self.temp)
        loss.backward()
        self.w_opt.step()
        if decay_temperature:
            tmp = self.temp
            self.temp *= self._tem_decay
            self.logger.info("Change temperature from %.5f to %.5f" %
                             (tmp, self.temp))
        return loss.item(), ce.item(), lat.item(), acc.item(), energy.item()

    def train_t(self, input, target, decay_temperature=False):
        """Update theta.
    """
        self.t_opt.zero_grad()
        loss, ce, lat, acc, energy = self._mod(input, target, self.temp)
        loss.backward()
        self.t_opt.step()
        if decay_temperature:
            tmp = self.temp
            self.temp *= self._tem_decay
            self.logger.info("Change temperature from %.5f to %.5f" %
                             (tmp, self.temp))
        return loss.item(), ce.item(), lat.item(), acc.item(), energy.item()

    def decay_temperature(self, decay_ratio=None):
        tmp = self.temp
        if decay_ratio is None:
            self.temp *= self._tem_decay
        else:
            self.temp *= decay_ratio
        self.logger.info("Change temperature from %.5f to %.5f" %
                         (tmp, self.temp))

    def _step(self, input, target, epoch, step, log_frequence, func):
        """Perform one step of training.
    """
        input = input.cuda()
        target = target.cuda()
        loss, ce, lat, acc, energy = func(input, target)

        # Get status
        batch_size = self._mod.batch_size

        self._acc_avg.update(acc)
        self._ce_avg.update(ce)
        self._lat_avg.update(lat)
        self._loss_avg.update(loss)

        if step > 1 and (step % log_frequence == 0):
            self.toc = time.time()
            speed = 1.0 * (batch_size * log_frequence) / (self.toc - self.tic)

            self.logger.info(
                "Epoch[%d] Batch[%d] Speed: %.6f samples/sec %s %s %s %s" %
                (epoch, step, speed, self._loss_avg, self._acc_avg,
                 self._ce_avg, self._lat_avg))
            # map() is lazy in Python 3, so reset the meters with an explicit loop
            for avg in (self._loss_avg, self._acc_avg, self._ce_avg, self._lat_avg):
                avg.reset()
            self.tic = time.time()

    def search(self,
               train_w_ds,
               train_t_ds,
               total_epoch=90,
               start_w_epoch=10,
               log_frequence=100):
        """Search model.
    """
        assert start_w_epoch >= 1, "Start to train w"
        self.tic = time.time()
        for epoch in range(start_w_epoch):
            self.logger.info("Start to train w for epoch %d" % epoch)
            for step, (input, target) in enumerate(train_w_ds):
                self._step(input, target, epoch, step, log_frequence,
                           lambda x, y: self.train_w(x, y, False))
                self.w_sche.step()
                # print(self.w_sche.last_epoch, self.w_opt.param_groups[0]['lr'])

        self.tic = time.time()
        for epoch in range(total_epoch):
            self.logger.info("Start to train theta for epoch %d" %
                             (epoch + start_w_epoch))
            for step, (input, target) in enumerate(train_t_ds):
                self._step(input, target, epoch + start_w_epoch, step,
                           log_frequence,
                           lambda x, y: self.train_t(x, y, False))
                self.save_theta(
                    './theta-result/%s_theta_epoch_%d.txt' %
                    (self.save_theta_prefix, epoch + start_w_epoch))
            self.decay_temperature()
            self.logger.info("Start to train w for epoch %d" %
                             (epoch + start_w_epoch))
            for step, (input, target) in enumerate(train_w_ds):
                self._step(input, target, epoch + start_w_epoch, step,
                           log_frequence,
                           lambda x, y: self.train_w(x, y, False))
                self.w_sche.step()

    def save_theta(self, save_path='theta.txt'):
        """Save theta.
    """
        res = []
        with open(save_path, 'w') as f:
            for t in self.theta:
                t_list = list(t.detach().cpu().numpy())
                res.append(t_list)
                s = ' '.join([str(tmp) for tmp in t_list])
                f.write(s + '\n')
        return res
Example #26
    
    
    criterion_absolute_loss = nn.MSELoss().cuda()
    criterion_contrastive_loss = Contrast_depth_loss().cuda() 
    


    ACER_save = 1.0
    
    for epoch in range(args.epochs):  # loop over the dataset multiple times
        scheduler.step()
        if (epoch + 1) % args.step_size == 0:
            lr *= args.gamma

        
        loss_absolute = AvgrageMeter()
        loss_contra =  AvgrageMeter()
        #top5 = utils.AvgrageMeter()
        
        
        ###########################################
        '''                train             '''
        ###########################################
        model.train()
        
        # load random 16-frame clip data every epoch
        train_data = Spoofing_train(train_list, image_dir, transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
        dataloader_train = DataLoader(train_data, batch_size=args.batchsize, shuffle=True, num_workers=4)

        for i, sample_batched in enumerate(dataloader_train):
            # get the inputs
Example #27
def train(
    args,
    test_mode='intra',
):
    if not os.path.exists(args.log):
        os.makedirs(args.log)
    if not os.path.exists(args.model_save_dir):
        os.makedirs(args.model_save_dir)

    log_file = open(args.log + '/' + test_mode + '_log.txt', 'w')

    print('Start Training !')
    log_file.write('Start Training !')
    log_file.flush()

    model = SingleBackBoneNet()
    model = model.cuda() if args.gpu else model.cpu()
    for name, param in model.named_parameters():
        print(name)

    lr = args.lr
    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=args.step_size,
                                          gamma=args.gamma)

    focal_lost = FocalLoss(alpha=torch.Tensor([0.5, 5, 50, 20]))
    focal_lost = focal_lost.cuda() if args.gpu else focal_lost.cpu()
    macro_f1_lost = MacroF1Loss().cuda() if args.gpu else MacroF1Loss().cpu()
    ce_weights_lost = CrossEntropyWithWeights(weigths=[0.5, 5, 50, 20])
    ce_weights_lost = ce_weights_lost.cuda(
    ) if args.gpu else ce_weights_lost.cpu()

    for epoch in range(args.epochs):
        loss_log = AvgrageMeter()
        f1_macro_log = AvgrageMeter()
        precision_macro_log = AvgrageMeter()
        recall_macro_log = AvgrageMeter()
        accuracy_log = AvgrageMeter()
        print("######################  TRAIN  #####################")
        model.train()

        train_data = Singledata_train_test(train_or_test='train',
                                           test_mode=test_mode)
        dataloader_train = DataLoader(train_data,
                                      batch_size=args.batchsize,
                                      shuffle=True)
        for i, sample_batched in enumerate(dataloader_train):
            inputs, (labels, one_hot_labels) = sample_batched
            inputs = torch.stack(inputs).permute(1, 0, 2)
            one_hot_labels = torch.stack(one_hot_labels).transpose(1, 0)

            if args.gpu:
                inputs, labels, one_hot_labels = inputs.cuda(), labels.cuda(
                ), one_hot_labels.cuda()
            else:
                inputs, labels, one_hot_labels = inputs.cpu(), labels.cpu(
                ), one_hot_labels.cpu()

            optimizer.zero_grad()

            predict_logits, attention_score = model(inputs.float())
            loss=0.9*macro_f1_lost(y_pred=predict_logits, y_true=one_hot_labels.float())\
                 +0.1*ce_weights_lost(y_pred=predict_logits, y_true=labels.long())

            loss.backward()
            optimizer.step()

            n = inputs.size(0)
            predict_labels = torch.argmax(predict_logits,
                                          dim=1).cpu().data.numpy()
            labels = labels.cpu().data.numpy()
            loss_log.update(loss.cpu().data, n)

            print(labels)
            print(predict_labels)
            f1_macro_log.update(
                f1_score(y_true=labels, y_pred=predict_labels,
                         average='macro'), n)
            precision_macro_log.update(
                precision_score(y_true=labels,
                                y_pred=predict_labels,
                                average='macro'), n)
            recall_macro_log.update(
                recall_score(y_true=labels,
                             y_pred=predict_labels,
                             average='macro'), n)
            accuracy_log.update(
                accuracy_score(y_true=labels, y_pred=predict_labels), n)

            if i % args.echo_batches == args.echo_batches - 1:
                print(
                    'TRAIN epoch:%d, mini-batch:%3d, lr=%f, Loss= %.4f, f1= %.4f, precision= %.4f, recall= %.4f, acc= %.4f'
                    % (epoch + 1, i + 1, lr, loss_log.avg, f1_macro_log.avg,
                       precision_macro_log.avg, recall_macro_log.avg,
                       accuracy_log.avg))

        print(
            'epoch:%d, TRAIN : Loss= %.4f, f1= %.4f, precision= %.4f, recall= %.4f\n, acc= %.4f'
            %
            (epoch + 1, loss_log.avg, f1_macro_log.avg,
             precision_macro_log.avg, recall_macro_log.avg, accuracy_log.avg))
        log_file.write(
            'epoch:%d, TRAIN : Loss= %.4f, f1= %.4f, precision= %.4f, recall= %.4f, acc= %.4f\n'
            %
            (epoch + 1, loss_log.avg, f1_macro_log.avg,
             precision_macro_log.avg, recall_macro_log.avg, accuracy_log.avg))
        log_file.flush()
        scheduler.step()
        torch.save(
            model.state_dict(), args.model_save_dir + '/' + 'save_' +
            str(test_mode) + '_' + str(epoch) + '.pth')

        print("######################  VAL   #####################")
        model.eval()
        f1_macro_log_val = AvgrageMeter()
        precision_macro_log_val = AvgrageMeter()
        recall_macro_log_val = AvgrageMeter()
        accuracy_log_val = AvgrageMeter()
        with torch.no_grad():
            val_data = Singledata_train_test(train_or_test='test',
                                             test_mode=test_mode)
            dataloader_val = DataLoader(val_data,
                                        batch_size=args.batchsize,
                                        shuffle=True)

            for i, sample_batched in enumerate(dataloader_val):
                inputs, (labels, one_hot_labels) = sample_batched
                inputs = torch.stack(inputs).permute(1, 0, 2)
                one_hot_labels = torch.stack(one_hot_labels).transpose(1, 0)

                if args.gpu:
                    inputs, labels, one_hot_labels = inputs.cuda(
                    ), labels.cuda(), one_hot_labels.cuda()
                else:
                    inputs, labels, one_hot_labels = inputs.cpu(), labels.cpu(
                    ), one_hot_labels.cpu()

                optimizer.zero_grad()

                n = inputs.size(0)
                predict_logits, attention_score = model(inputs.float())

                predict_labels = torch.argmax(predict_logits,
                                              dim=1).cpu().data.numpy()
                labels = labels.cpu().data.numpy()

                f1_macro_log_val.update(
                    f1_score(y_true=labels,
                             y_pred=predict_labels,
                             average='macro'), n)
                precision_macro_log_val.update(
                    precision_score(y_true=labels,
                                    y_pred=predict_labels,
                                    average='macro'), n)
                recall_macro_log_val.update(
                    recall_score(y_true=labels,
                                 y_pred=predict_labels,
                                 average='macro'), n)
                accuracy_log_val.update(
                    accuracy_score(y_true=labels, y_pred=predict_labels), n)
                if i % args.echo_batches == args.echo_batches - 1:
                    print(
                        'VAL epoch:%d, mini-batch:%3d, lr=%f, f1= %.4f, precision= %.4f, recall= %.4f, acc= %.4f'
                        % (epoch + 1, i + 1, lr, f1_macro_log_val.avg,
                           precision_macro_log_val.avg,
                           recall_macro_log_val.avg, accuracy_log_val.avg))

            print(
                'epoch:%d, VAL : f1= %.4f, precision= %.4f, recall= %.4f, acc= %.4f\n'
                %
                (epoch + 1, f1_macro_log_val.avg, precision_macro_log_val.avg,
                 recall_macro_log_val.avg, accuracy_log_val.avg))
            log_file.write(
                'epoch:%d, VAL : f1= %.4f, precision= %.4f, recall= %.4f, acc= %.4f\n'
                %
                (epoch + 1, f1_macro_log_val.avg, precision_macro_log_val.avg,
                 recall_macro_log_val.avg, accuracy_log_val.avg))
            log_file.flush()
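
Note: every example in this listing tracks its statistics with an AvgrageMeter that is never defined here. Judging from how it is called (meter.update(value, n) and meter.avg), a minimal sketch of such a helper could look like the class below; the internal field names are assumptions.

class AvgrageMeter(object):
    # Running-average meter sketch, matching the .update(val, n) / .avg usage above.

    def __init__(self):
        self.reset()

    def reset(self):
        self.avg = 0.0  # running weighted average, read as meter.avg
        self.sum = 0.0  # weighted sum of the values seen so far
        self.cnt = 0    # total weight (typically the number of samples)

    def update(self, val, n=1):
        # val is a per-batch statistic (loss, f1, ...), n its weight (batch size)
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt
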
Exemple #28
0
def train_test():
    # GPU & log file --> if using DataParallel, comment out this command
    #os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % (args.gpu)

    if args.exp_name == 'None':
        args.exp_name = time.strftime("%m-%d %H:%M:%S", time.localtime())

    args.log = 'exp/{}/{}'.format(args.exp_name, args.log)

    isExists = os.path.exists(args.log)
    if not isExists:
        os.makedirs(args.log)
    log_file = open(args.log + '_log_P1.txt', 'w')

    writer = SummaryWriter(args.log + '/runs/')

    echo_batches = args.echo_batches

    print(args)
    log_file.write(str(args) + '\n')
    log_file.flush()

    print("Oulu-NPU, P1:\n ")

    log_file.write('Oulu-NPU, P1:\n ')
    log_file.flush()

    # load the network; load the model pre-trained on UCF101?
    is_load_model = args.is_load_model
    if is_load_model == True:
        print('loading model...\n')
        print(args.model_path)
        log_file.write('loading model...\n')
        log_file.write(args.model_path)
        log_file.flush()

        model = CDCNpp(basic_conv=Conv2d_cd, theta=0.7)

        model = nn.DataParallel(model)
        model = model.cuda()
        model.load_state_dict(torch.load(args.model_path))

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        milestones = [
            int(step.strip()) for step in args.lr_drop_step.split(' ')
        ]
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=milestones,
                                                   gamma=args.gamma)

    else:
        print('train from scratch!\n')
        log_file.write('train from scratch!\n')
        log_file.flush()

        #model = CDCN(basic_conv=Conv2d_cd, theta=0.7)
        model = CDCNpp(basic_conv=Conv2d_cd, theta=0.7)
        model = nn.DataParallel(model)
        model = model.cuda()

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        milestones = [
            int(step.strip()) for step in args.lr_drop_step.split(' ')
        ]
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=milestones,
                                                   gamma=args.gamma)

    # print(model)
    # log_file.write(str(model))
    # log_file.flush()

    criterion_absolute_loss = nn.MSELoss().cuda()
    criterion_contrastive_loss = Contrast_depth_loss().cuda()

    #bandpass_filter_numpy = build_bandpass_filter_numpy(30, 30)  # fs, order  # 61, 64

    ACER_save = 1.0

    iteration = 0
    for epoch in range(args.epochs):  # loop over the dataset multiple times
        scheduler.step()

        if epoch < args.start_epochs:
            continue

        lr = scheduler.get_lr()[0]

        loss_absolute = AvgrageMeter()
        loss_contra = AvgrageMeter()

        ###########################################
        '''                train             '''
        ###########################################
        model.train()

        # load random 16-frame clip data every epoch
        # train_data = Spoofing_train(train_list, train_image_dir, map_dir, transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
        train_data = Fas_train(train_list,
                               transform=transforms.Compose([
                                   RandomErasing(),
                                   RandomHorizontalFlip(),
                                   ToTensor(),
                                   Cutout(),
                                   Normaliztion()
                               ]))
        dataloader_train = DataLoader(train_data,
                                      batch_size=args.batchsize,
                                      shuffle=True,
                                      num_workers=4)
        print('train_set read done!')

        for i, sample_batched in enumerate(dataloader_train):
            iteration += 1

            # get the inputs
            inputs, map_label, spoof_label = sample_batched['image_x'].cuda(
            ), sample_batched['map_x'].cuda(
            ), sample_batched['spoofing_label'].cuda()

            optimizer.zero_grad()

            # forward + backward + optimize
            map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                inputs)

            absolute_loss = criterion_absolute_loss(map_x, map_label)
            contrastive_loss = criterion_contrastive_loss(map_x, map_label)

            loss = absolute_loss + contrastive_loss
            #loss =  absolute_loss

            loss.backward()

            optimizer.step()

            n = inputs.size(0)
            loss_absolute.update(absolute_loss.data, n)
            loss_contra.update(contrastive_loss.data, n)

            if i % echo_batches == echo_batches - 1:  # print every echo_batches mini-batches

                # visualization
                FeatureMap2Heatmap(x_input, x_Block1, x_Block2, x_Block3,
                                   map_x)

                # log written
                print(
                    'epoch:%d, mini-batch:%3d/%4d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f'
                    % (epoch + 1, i + 1, len(dataloader_train), lr,
                       loss_absolute.avg, loss_contra.avg))
                log_file.write(
                    'epoch:%d, mini-batch:%3d/%4d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n'
                    % (epoch + 1, i + 1, len(dataloader_train), lr,
                       loss_absolute.avg, loss_contra.avg))
                log_file.flush()

            writer.add_scalar('loss', loss_absolute.avg + loss_contra.avg,
                              iteration)
            writer.add_scalar('lr', lr, iteration)

        # whole epoch average
        print(
            'epoch:%d, Train:  Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f\n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.write(
            'epoch:%d, Train: Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.flush()

        # #### validation/test
        # if epoch <300:
        #      epoch_test = 300
        # else:
        #     epoch_test = 5
        # #epoch_test = 1
        if epoch % args.epoch_test == args.epoch_test - 1:  # test every args.epoch_test epochs

            model.eval()

            with torch.no_grad():
                ###########################################
                '''                val             '''
                ###########################################
                # val for threshold
                # val_data = Spoofing_valtest(val_list, val_image_dir, val_map_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                val_data = Fas_valtest(val_list,
                                       transform=transforms.Compose([
                                           Normaliztion_valtest(),
                                           ToTensor_valtest()
                                       ]),
                                       mode='val')
                dataloader_val = DataLoader(val_data,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=8)

                map_score_list = []
                print('start to validate...')
                for i, sample_batched in enumerate(dataloader_val):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    val_maps = sample_batched['val_map_x'].cuda(
                    )  # binary map from PRNet

                    optimizer.zero_grad()

                    #pdb.set_trace()
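                    # clip-level score: average over frames of sum(predicted depth map) / sum(PRNet reference map)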
                    map_score = 0.0
                    for frame_t in range(inputs.shape[1]):
                        map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                            inputs[:, frame_t, :, :, :])

                        score_norm = torch.sum(map_x) / torch.sum(
                            val_maps[:, frame_t, :, :])
                        map_score += score_norm
                    map_score = map_score / inputs.shape[1]

                    map_score_list.append('{} {}\n'.format(
                        map_score, spoof_label[0][0]))
                    #pdb.set_trace()

                    if i % (len(dataloader_val) // 5) == 0:
                        # visualization
                        FeatureMap2Heatmap(x_input, x_Block1, x_Block2,
                                           x_Block3, map_x)
                        # log written
                        print('val ==> epoch:%d, mini-batch:%3d/%4d...' %
                              (epoch + 1, i + 1, len(dataloader_val)))
                        log_file.write(
                            'val ==> epoch:%d, mini-batch:%3d/%4d...\n' %
                            (epoch + 1, i + 1, len(dataloader_val)))
                        log_file.flush()

                map_score_val_filename = args.log + '_map_score_val.txt'
                with open(map_score_val_filename, 'w') as file:
                    file.writelines(map_score_list)

                ###########################################
                '''                test                '''
                ##########################################
                # test for ACC
                test_data = Fas_valtest(test_list,
                                        transform=transforms.Compose([
                                            Normaliztion_valtest(),
                                            ToTensor_valtest()
                                        ]),
                                        mode='test')
                dataloader_test = DataLoader(test_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=4)

                map_score_list = []
                print('start to test...')
                for i, sample_batched in enumerate(dataloader_test):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    test_maps = sample_batched['val_map_x'].cuda(
                    )  # binary map from PRNet

                    optimizer.zero_grad()

                    map_score = 0.0
                    for frame_t in range(inputs.shape[1]):
                        map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                            inputs[:, frame_t, :, :, :])

                        score_norm = torch.sum(map_x) / torch.sum(
                            test_maps[:, frame_t, :, :])
                        map_score += score_norm
                    map_score = map_score / inputs.shape[1]

                    map_score_list.append('{} {}\n'.format(
                        map_score, spoof_label[0][0]))

                    if i % (len(dataloader_test) // 5) == 0:
                        # visualization
                        FeatureMap2Heatmap(x_input, x_Block1, x_Block2,
                                           x_Block3, map_x)
                        # log written
                        print('test ==> epoch:%d, mini-batch:%3d/%4d...' %
                              (epoch + 1, i + 1, len(dataloader_test)))
                        log_file.write(
                            'test ==> epoch:%d, mini-batch:%3d/%4d...\n' %
                            (epoch + 1, i + 1, len(dataloader_test)))
                        log_file.flush()

                map_score_test_filename = args.log + '_map_score_test.txt'
                with open(map_score_test_filename, 'w') as file:
                    file.writelines(map_score_list)

                #############################################################
                #       performance measurement both val and test
                #############################################################
                val_threshold, test_threshold, val_ACC, val_ACER, test_ACC, test_APCER, test_BPCER, test_ACER, test_ACER_test_threshold = performances(
                    map_score_val_filename, map_score_test_filename)

                print(
                    'epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))
                log_file.write(
                    '\n epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f \n'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))

                print(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                log_file.write(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f \n'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                log_file.flush()

                writer.add_scalar('val_ACER', val_ACER, iteration)
                writer.add_scalar('test_ACER', test_ACER, iteration)

            # save the model until the next improvement
            print("saving model to {}".format(args.log + '_%d.pkl' %
                                              (epoch + 1)))
            log_file.write("saving model to {}".format(args.log + '_%d.pkl' %
                                                       (epoch + 1)))
            log_file.flush()
            torch.save(model.state_dict(), args.log + '_%d.pkl' % (epoch + 1))

    print('Finished Training')
    writer.close()
    log_file.close()
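
Note: the anti-spoofing examples above delegate their final metrics to a performances(map_score_val_filename, map_score_test_filename) helper that is not part of this listing, and whose real return signature is longer than what is shown here. The following is only a rough, hypothetical sketch of the usual recipe: choose a threshold on the validation scores, then report ACC, APCER, BPCER and ACER on the test scores. It assumes each line of the score files is a plain 'score label' pair, with label 1 taken to mean a live face.

import numpy as np

def simple_performances(val_file, test_file):
    # Hypothetical, simplified stand-in for the unshown performances() helper.
    def load(path):
        scores, labels = [], []
        with open(path) as f:
            for line in f:
                score, label = line.split()
                scores.append(float(score))
                labels.append(int(float(label)))
        return np.array(scores), np.array(labels)

    val_scores, val_labels = load(val_file)
    test_scores, test_labels = load(test_file)

    # choose the threshold that maximises accuracy on the validation set
    candidates = np.unique(val_scores)
    accs = [((val_scores >= t) == (val_labels == 1)).mean() for t in candidates]
    threshold = candidates[int(np.argmax(accs))]

    pred_live = test_scores >= threshold
    is_live = test_labels == 1
    test_acc = (pred_live == is_live).mean()
    # APCER: attack samples accepted as live; BPCER: live samples rejected as attacks
    apcer = (pred_live & ~is_live).sum() / max((~is_live).sum(), 1)
    bpcer = (~pred_live & is_live).sum() / max(is_live.sum(), 1)
    acer = (apcer + bpcer) / 2
    return threshold, test_acc, apcer, bpcer, acer
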
Exemple #29
0
def train_test():
    # GPU & log file --> if using DataParallel, comment out this command
    #os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % (args.gpu)

    isExists = os.path.exists(args.log)
    if not isExists:
        os.makedirs(args.log)
    log_file = open(args.log + '/' + args.log + f'_log_{args.protocol}.txt',
                    'w')

    echo_batches = args.echo_batches

    print(f"SIW, {args.protocol}:\n ")

    log_file.write(f"SIW, {args.protocol}:\n ")
    log_file.flush()

    # load the network; load the model pre-trained on UCF101?
    finetune = args.finetune
    if finetune == True:
        print('finetune!\n')
        log_file.write('finetune!\n')
        log_file.flush()

        model = CDCNpp()
        #model = model.cuda()
        model = model.to(device[0])
        model = nn.DataParallel(model,
                                device_ids=device,
                                output_device=device[0])
        model.load_state_dict(torch.load('xxx.pkl'))

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=args.step_size,
                                              gamma=args.gamma)

    else:
        print('train from scratch!\n')
        log_file.write('train from scratch!\n')
        log_file.flush()

        #model = CDCN(basic_conv=Conv2d_cd, theta=0.7)
        model = CDCNpp(basic_conv=Conv2d_cd, theta=0.7)

        model = model.cuda()
        #model = model.to(device[0])
        #model = nn.DataParallel(model, device_ids=device, output_device=device[0])

        lr = args.lr
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=args.step_size,
                                              gamma=args.gamma)

    print(model)

    criterion_absolute_loss = nn.MSELoss().cuda()
    criterion_contrastive_loss = Contrast_depth_loss().cuda()

    #bandpass_filter_numpy = build_bandpass_filter_numpy(30, 30)  # fs, order  # 61, 64

    ACER_save = 1.0

    for epoch in range(args.epochs):  # loop over the dataset multiple times
        scheduler.step()
        if (epoch + 1) % args.step_size == 0:
            lr *= args.gamma

        loss_absolute = AvgrageMeter()
        loss_contra = AvgrageMeter()
        #top5 = utils.AvgrageMeter()

        ###########################################
        '''                train             '''
        ###########################################
        model.train()

        # load random 16-frame clip data every epoch
        #train_data = Spoofing_train(train_list, train_image_dir, map_dir, transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Cutout(), Normaliztion()]))
        train_data = SiwDataset(
            "train",
            dir_path="/storage/alperen/sodecsapp/datasets/SiW/lists",
            protocol=args.protocol,
            transform=transforms.Compose([
                RandomErasing(),
                RandomHorizontalFlip(),
                ToTensor(),
                Cutout(),
                Normaliztion()
            ]))
        #train_data = SodecDataset(dataset_type="train",dir_path="dataset_with_margin",protocol=args.protocol, transform=transforms.Compose([RandomErasing(), RandomHorizontalFlip(),  ToTensor(), Normaliztion()]))
        dataloader_train = DataLoader(train_data,
                                      batch_size=args.batchsize,
                                      shuffle=True,
                                      num_workers=4)

        for i, sample_batched in enumerate(dataloader_train):
            # get the inputs
            inputs, map_label, spoof_label = sample_batched['image_x'].cuda(
            ), sample_batched['map_x'].cuda(
            ), sample_batched['spoofing_label'].cuda()

            optimizer.zero_grad()

            #pdb.set_trace()

            # forward + backward + optimize
            map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                inputs)

            absolute_loss = criterion_absolute_loss(map_x, map_label)
            contrastive_loss = criterion_contrastive_loss(map_x, map_label)

            loss = absolute_loss + contrastive_loss
            #loss =  absolute_loss

            loss.backward()

            optimizer.step()

            n = inputs.size(0)
            loss_absolute.update(absolute_loss.data, n)
            loss_contra.update(contrastive_loss.data, n)

            if i % echo_batches == echo_batches - 1:  # print every echo_batches mini-batches

                # visualization
                FeatureMap2Heatmap(x_input, x_Block1, x_Block2, x_Block3,
                                   map_x)

                # log written
                print(
                    'epoch:%d, mini-batch:%3d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f'
                    %
                    (epoch + 1, i + 1, lr, loss_absolute.avg, loss_contra.avg))
                #log_file.write('epoch:%d, mini-batch:%3d, lr=%f, Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n' % (epoch + 1, i + 1, lr, loss_absolute.avg, loss_contra.avg))
                #log_file.flush()

            #break

        # whole epoch average
        print(
            'epoch:%d, Train:  Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f\n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.write(
            'epoch:%d, Train: Absolute_Depth_loss= %.4f, Contrastive_Depth_loss= %.4f \n'
            % (epoch + 1, loss_absolute.avg, loss_contra.avg))
        log_file.flush()

        #### validation/test
        """
        if epoch <300:
             epoch_test = 300   
        else:
            epoch_test = 20   
        """
        epoch_test = 1
        if epoch % epoch_test == epoch_test - 1:  # test every epoch_test epochs
            model.eval()

            with torch.no_grad():
                ###########################################
                '''                val             '''
                ###########################################
                # val for threshold
                #val_data = Spoofing_valtest(val_list, val_image_dir, val_map_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                val_data = SiwDataset(
                    "dev",
                    dir_path="/storage/alperen/sodecsapp/datasets/SiW/lists",
                    protocol=args.protocol,
                    transform=transforms.Compose(
                        [Normaliztion_valtest(),
                         ToTensor_valtest()]))
                #val_data = SodecDataset(dataset_type="test",dir_path="dataset_with_margin",protocol=args.protocol,transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                dataloader_val = DataLoader(val_data,
                                            batch_size=args.batchsize,
                                            shuffle=False,
                                            num_workers=4)

                map_score_list = []

                for i, sample_batched in enumerate(dataloader_val):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    val_maps = sample_batched['map_x'].cuda(
                    )  # binary map from PRNet

                    optimizer.zero_grad()

                    #pdb.set_trace()
                    map_score = 0.0
                    map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs)
                    score_norm = torch.sum(map_x, (1, 2))

                    for j, score in enumerate(score_norm):
                        map_score_list.append('{} {}\n'.format(
                            score.item(), spoof_label[j].item()))

                    #pdb.set_trace()
                map_score_val_filename = args.log + '/' + args.protocol + '_map_score_val.txt'
                with open(map_score_val_filename, 'w') as file:
                    file.writelines(map_score_list)

                ###########################################
                '''                test             '''
                ##########################################
                # test for ACC
                #test_data = Spoofing_valtest(test_list, test_image_dir, test_map_dir, transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                test_data = SiwDataset(
                    "eval",
                    dir_path="/storage/alperen/sodecsapp/datasets/SiW/lists",
                    protocol=args.protocol,
                    transform=transforms.Compose(
                        [Normaliztion_valtest(),
                         ToTensor_valtest()]))
                #test_data = SodecDataset(dataset_type="test",dir_path="dataset_with_margin",protocol=args.protocol,transform=transforms.Compose([Normaliztion_valtest(), ToTensor_valtest()]))
                dataloader_test = DataLoader(test_data,
                                             batch_size=args.batchsize,
                                             shuffle=False,
                                             num_workers=4)

                map_score_list = []

                for i, sample_batched in enumerate(dataloader_test):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    test_maps = sample_batched['map_x'].cuda(
                    )  # binary map from PRNet

                    optimizer.zero_grad()

                    #pdb.set_trace()
                    map_score = 0.0
                    map_x, embedding, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs)
                    score_norm = torch.sum(map_x, (1, 2))

                    for j, score in enumerate(score_norm):
                        map_score_list.append('{} {}\n'.format(
                            score.item(), spoof_label[j].item()))

                map_score_test_filename = args.log + '/' + args.protocol + '_map_score_test.txt'
                with open(map_score_test_filename, 'w') as file:
                    file.writelines(map_score_list)

                #############################################################
                #       performance measurement both val and test
                #############################################################
                val_threshold, test_threshold, val_ACC, val_ACER, test_ACC, test_APCER, test_BPCER, test_ACER, test_ACER_test_threshold = performances(
                    map_score_val_filename, map_score_test_filename)

                print(
                    'epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))
                log_file.write(
                    '\n epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f \n'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))

                print(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                #print('epoch:%d, Test:  test_threshold= %.4f, test_ACER_test_threshold= %.4f\n' % (epoch + 1, test_threshold, test_ACER_test_threshold))
                log_file.write(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f \n'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                #log_file.write('epoch:%d, Test:  test_threshold= %.4f, test_ACER_test_threshold= %.4f \n\n' % (epoch + 1, test_threshold, test_ACER_test_threshold))
                log_file.flush()

        if epoch > 0:
            #save the model until the next improvement
            torch.save(model.state_dict(),
                       args.log + '/' + args.log + '_%d.pkl' % (epoch + 1))

    print('Finished Training')
    log_file.close()
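
Note: in the example above, the learning rate printed in the logs is mirrored by hand (lr *= args.gamma every step_size epochs) alongside the StepLR scheduler. The value actually in use can instead be read back from the optimizer, as in this minimal sketch; the model and hyper-parameters here are placeholders, not taken from the example.

from torch import nn, optim

# Minimal sketch: read the learning rate the optimizer is actually using
# instead of tracking it by hand next to a StepLR schedule.
model = nn.Linear(4, 2)  # placeholder model for illustration
optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=0.00005)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

for epoch in range(3):
    # ... one epoch of training would run here ...
    current_lr = optimizer.param_groups[0]['lr']  # lr applied during this epoch
    print('epoch:%d, lr=%f' % (epoch + 1, current_lr))
    scheduler.step()  # advance the schedule after the epoch's optimizer steps
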
Exemple #30
0
def train_test():
    isExists = os.path.exists(args.log)
    if not isExists:
        os.makedirs(args.log)
    log_file = open(args.log + '/' + args.log + '_log_P1.txt', 'a')

    log_file.write('Oulu-NPU, P1:\n ')
    log_file.flush()

    print('train from scratch!\n')
    log_file.write('train from scratch!\n')
    log_file.write('lr:%.6f, lamda_kl:%.6f , batchsize:%d\n' %
                   (args.lr, args.kl_lambda, args.batchsize))
    log_file.flush()

    model = CDCN_u(basic_conv=Conv2d_cd, theta=0.7)
    # model = ResNet18_u()

    model = model.cuda()
    model = torch.nn.DataParallel(model)

    lr = args.lr
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.00005)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=args.step_size,
                                          gamma=args.gamma)

    print(model)

    criterion_absolute_loss = nn.MSELoss().cuda()
    criterion_contrastive_loss = Contrast_depth_loss().cuda()

    for epoch in range(args.epochs):
        if (epoch + 1) % args.step_size == 0:
            lr *= args.gamma

        loss_absolute_real = AvgrageMeter()
        loss_absolute_fake = AvgrageMeter()
        loss_contra_real = AvgrageMeter()
        loss_contra_fake = AvgrageMeter()
        loss_kl_real = AvgrageMeter()
        loss_kl_fake = AvgrageMeter()

        ###########################################
        '''                train             '''
        ###########################################
        model.train()

        # load random 16-frame clip data every epoch
        train_data = Spoofing_train_g(train_list,
                                      train_image_dir,
                                      train_map_dir,
                                      transform=transforms.Compose([
                                          RandomErasing(),
                                          RandomHorizontalFlip(),
                                          ToTensor(),
                                          Cutout(),
                                          Normaliztion()
                                      ]))
        train_real_idx, train_fake_idx = train_data.get_idx()
        batch_sampler = SeparateBatchSampler(train_real_idx,
                                             train_fake_idx,
                                             batch_size=args.batchsize,
                                             ratio=args.ratio)
        dataloader_train = DataLoader(train_data,
                                      num_workers=8,
                                      batch_sampler=batch_sampler)

        for i, sample_batched in enumerate(dataloader_train):
            # get the inputs
            inputs, map_label, spoof_label = sample_batched['image_x'].cuda(), sample_batched['map_x'].cuda(), \
                                             sample_batched['spoofing_label'].cuda()

            optimizer.zero_grad()

            # forward + backward + optimize
            mu, logvar, map_x, x_concat, x_Block1, x_Block2, x_Block3, x_input = model(
                inputs)

            mu_real = mu[:int(args.batchsize * args.ratio), :, :]
            logvar_real = logvar[:int(args.batchsize * args.ratio), :, :]
            map_x_real = map_x[:int(args.batchsize * args.ratio), :, :]
            map_label_real = map_label[:int(args.batchsize * args.ratio), :, :]

            absolute_loss_real = criterion_absolute_loss(
                map_x_real, map_label_real)
            contrastive_loss_real = criterion_contrastive_loss(
                map_x_real, map_label_real)
            kl_loss_real = -(1 + logvar_real -
                             (mu_real - map_label_real).pow(2) -
                             logvar_real.exp()) / 2
            kl_loss_real = kl_loss_real.sum(dim=1).sum(dim=1).mean()
            kl_loss_real = args.kl_lambda * kl_loss_real

            mu_fake = mu[int(args.batchsize * args.ratio):, :, :]
            logvar_fake = logvar[int(args.batchsize * args.ratio):, :, :]
            map_x_fake = map_x[int(args.batchsize * args.ratio):, :, :]
            map_label_fake = map_label[int(args.batchsize * args.ratio):, :, :]

            absolute_loss_fake = 0.1 * criterion_absolute_loss(
                map_x_fake, map_label_fake)
            contrastive_loss_fake = 0.1 * criterion_contrastive_loss(
                map_x_fake, map_label_fake)
            kl_loss_fake = -(1 + logvar_fake -
                             (mu_fake - map_label_fake).pow(2) -
                             logvar_fake.exp()) / 2
            kl_loss_fake = kl_loss_fake.sum(dim=1).sum(dim=1).mean()
            kl_loss_fake = 0.1 * args.kl_lambda * kl_loss_fake

            absolute_loss = absolute_loss_real + absolute_loss_fake
            contrastive_loss = contrastive_loss_real + contrastive_loss_fake
            kl_loss = kl_loss_real + kl_loss_fake

            loss = absolute_loss + contrastive_loss + kl_loss

            loss.backward()

            optimizer.step()

            n = inputs.size(0)
            loss_absolute_real.update(absolute_loss_real.data, n)
            loss_absolute_fake.update(absolute_loss_fake.data, n)
            loss_contra_real.update(contrastive_loss_real.data, n)
            loss_contra_fake.update(contrastive_loss_fake.data, n)
            loss_kl_real.update(kl_loss_real.data, n)
            loss_kl_fake.update(kl_loss_fake.data, n)

        scheduler.step()
        # whole epoch average
        print(
            'epoch:%d, Train:  Absolute_loss: real=%.4f,fake=%.4f, '
            'Contrastive_loss: real=%.4f,fake=%.4f, kl_loss: real=%.4f,fake=%.4f'
            % (epoch + 1, loss_absolute_real.avg, loss_absolute_fake.avg,
               loss_contra_real.avg, loss_contra_fake.avg, loss_kl_real.avg,
               loss_kl_fake.avg))

        # validation/test
        if epoch < 200:
            epoch_test = 200
        else:
            epoch_test = 50
        # epoch_test = 1
        if epoch % epoch_test == epoch_test - 1:
            model.eval()

            with torch.no_grad():
                ###########################################
                '''                val             '''
                ###########################################
                # val for threshold
                val_data = Spoofing_valtest(val_list,
                                            val_image_dir,
                                            val_map_dir,
                                            transform=transforms.Compose([
                                                Normaliztion_valtest(),
                                                ToTensor_valtest()
                                            ]))
                dataloader_val = DataLoader(val_data,
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=4)

                map_score_list = []

                for i, sample_batched in enumerate(dataloader_val):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    val_maps = sample_batched['val_map_x'].cuda(
                    )  # binary map from PRNet

                    optimizer.zero_grad()

                    mu, logvar, map_x, x_concat, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs.squeeze(0))
                    score_norm = mu.sum(dim=1).sum(
                        dim=1) / val_maps.squeeze(0).sum(dim=1).sum(dim=1)
                    map_score = score_norm.mean()
                    map_score_list.append('{} {}\n'.format(
                        map_score, spoof_label[0][0]))

                map_score_val_filename = args.log + '/' + args.log + '_map_score_val.txt'
                with open(map_score_val_filename, 'w') as file:
                    file.writelines(map_score_list)

                ###########################################
                '''                test             '''
                ##########################################
                # test for ACC
                test_data = Spoofing_valtest(test_list,
                                             test_image_dir,
                                             test_map_dir,
                                             transform=transforms.Compose([
                                                 Normaliztion_valtest(),
                                                 ToTensor_valtest()
                                             ]))
                dataloader_test = DataLoader(test_data,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=4)

                map_score_list = []

                for i, sample_batched in enumerate(dataloader_test):
                    # get the inputs
                    inputs, spoof_label = sample_batched['image_x'].cuda(
                    ), sample_batched['spoofing_label'].cuda()
                    test_maps = sample_batched['val_map_x'].cuda()

                    optimizer.zero_grad()
                    mu, logvar, map_x, x_concat, x_Block1, x_Block2, x_Block3, x_input = model(
                        inputs.squeeze(0))
                    score_norm = mu.sum(dim=1).sum(
                        dim=1) / test_maps.squeeze(0).sum(dim=1).sum(dim=1)
                    map_score = score_norm.mean()
                    map_score_list.append('{} {}\n'.format(
                        map_score, spoof_label[0][0]))

                map_score_test_filename = args.log + '/' + args.log + '_map_score_test.txt'
                with open(map_score_test_filename, 'w') as file:
                    file.writelines(map_score_list)

                #############################################################
                #       performance measurement both val and test
                #############################################################
                val_threshold, test_threshold, val_ACC, val_ACER, test_ACC, test_APCER, test_BPCER, test_ACER, test_ACER_test_threshold = performances(
                    map_score_val_filename, map_score_test_filename)

                print(
                    'epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))
                log_file.write(
                    '\n epoch:%d, Val:  val_threshold= %.4f, val_ACC= %.4f, val_ACER= %.4f \n'
                    % (epoch + 1, val_threshold, val_ACC, val_ACER))

                print(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                log_file.write(
                    'epoch:%d, Test:  ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f \n'
                    % (epoch + 1, test_ACC, test_APCER, test_BPCER, test_ACER))
                log_file.flush()

        if epoch % epoch_test == epoch_test - 1:
            # save the model until the next improvement
            torch.save(model.state_dict(),
                       args.log + '/' + args.log + '_%d.pkl' % (epoch + 1))

    print('Finished Training')
    log_file.close()
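
Note: the KL-style regulariser used in the loop above, applied separately to the real and fake halves of each batch, can be factored into a small standalone function. The function below simply restates the expression from the code, with the depth map acting as the target mean; the toy tensors at the end are for illustration only.

import torch

def depth_kl_loss(mu, logvar, map_label):
    # Same expression as the training loop above: per-pixel
    # -(1 + logvar - (mu - map_label)^2 - exp(logvar)) / 2,
    # summed over the two spatial dimensions and averaged over the batch.
    kl = -(1 + logvar - (mu - map_label).pow(2) - logvar.exp()) / 2
    return kl.sum(dim=1).sum(dim=1).mean()

# toy tensors for illustration only: 8 predicted 32x32 depth distributions
mu = torch.zeros(8, 32, 32)
logvar = torch.zeros(8, 32, 32)
map_label = torch.zeros(8, 32, 32)
print(depth_kl_loss(mu, logvar, map_label))  # tensor(0.) when mu == target and var == 1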