Example #1
def start_evaluation(args):
    """Launches the evaluation process"""

    if args.dataset == 'vgg':
        dataset = VGGFace2(args.val, args.v_list, args.v_land, landmarks_training=True)
    elif args.dataset == 'celeb':
        dataset = CelebA(args.val, args.v_land, test=True)
    else:
        dataset = NDG(args.val, args.v_land)

    if dataset.have_landmarks:
        log.info('Use alignment for the train data')
        dataset.transform = t.Compose([Rescale((48, 48)), ToTensor(switch_rb=True)])
    else:
        exit()

    val_loader = DataLoader(dataset, batch_size=args.val_batch_size, num_workers=4, shuffle=False, pin_memory=True)

    model = LandmarksNet()

    assert args.snapshot is not None

    log.info('Testing snapshot ' + args.snapshot + ' ...')
    model = load_model_state(model, args.snapshot, args.device, eval_state=True)
    model.eval()
    cudnn.benchmark = True
    model = torch.nn.DataParallel(model, device_ids=[args.device])

    log.info('Face landmarks model:')
    log.info(model)

    avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)
    log.info('Avg RMSE error: {}'.format(avg_err))
    log.info('Per landmark RMSE error: {}'.format(per_point_avg_err))
    log.info('Failure rate: {}'.format(failures_rate))
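
A hedged sketch of how this function might be driven from the command line. The flag names below simply mirror the attributes the function reads (args.dataset, args.val, args.v_list, args.v_land, args.val_batch_size, args.snapshot, args.device); the original project's CLI may define them differently.

# Hypothetical driver script; argument names are taken from the attribute accesses above.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Evaluate a landmark regression snapshot')
    parser.add_argument('--dataset', choices=['vgg', 'celeb', 'ndg'], default='vgg')
    parser.add_argument('--val', help='root directory of the validation images')
    parser.add_argument('--v_list', help='image list file (used by VGGFace2)')
    parser.add_argument('--v_land', help='landmark annotation file')
    parser.add_argument('--val_batch_size', type=int, default=64)
    parser.add_argument('--snapshot', required=True, help='path to the checkpoint to test')
    parser.add_argument('--device', type=int, default=0, help='CUDA device id')
    return parser.parse_args()

if __name__ == '__main__':
    start_evaluation(parse_args())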
Example #2
def start_evaluation(args):
    """Launches the evaluation process"""

    if args.dataset == 'vgg':
        dataset = VGGFace2(args.val,
                           args.v_list,
                           args.v_land,
                           landmarks_training=True)
    elif args.dataset == 'celeb':
        dataset = CelebA(args.val, args.v_land, test=True)
    else:
        dataset = NDG(args.val, args.v_land)

    if dataset.have_landmarks:
        log.info('Use alignment for the train data')
        dataset.transform = t.Compose(
            [Rescale((48, 48)), ToTensor(switch_rb=True)])
    else:
        exit()

    val_loader = DataLoader(dataset,
                            batch_size=args.val_batch_size,
                            num_workers=4,
                            shuffle=False,
                            pin_memory=True)

    model = models_landmarks['landnet']()

    assert args.snapshot is not None
    if args.compr_config:
        config = Config.from_json(args.compr_config)
        compression_algo = create_compression_algorithm(model, config)
        model = compression_algo.model

    log.info('Testing snapshot ' + args.snapshot + ' ...')
    model = load_model_state(model,
                             args.snapshot,
                             args.device,
                             eval_state=True)
    model.eval()
    cudnn.benchmark = True
    model = torch.nn.DataParallel(
        model,
        device_ids=[args.device],
    )

    log.info('Face landmarks model:')
    log.info(model)

    avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)
    log.info('Avg RMSE error: {}'.format(avg_err))
    log.info('Per landmark RMSE error: {}'.format(per_point_avg_err))
    log.info('Failure rate: {}'.format(failures_rate))
    if args.compr_config and "sparsity_level" in compression_algo.statistics():
        log.info("Sparsity level: {0:.2f}".format(
            compression_algo.statistics()['sparsity_rate_for_sparsified_modules']))
Example #3
def train(args):
    """Launches training of landmark regression model"""
    input_size = models_landmarks['landnet']().get_input_res()
    if args.dataset == 'vgg':
        drops_schedule = [1, 6, 9, 13]
        dataset = VGGFace2(args.train, args.t_list, args.t_land, landmarks_training=True)
    elif args.dataset == 'celeba':
        drops_schedule = [10, 20]
        dataset = CelebA(args.train, args.t_land)
    else:
        drops_schedule = [90, 140, 200]
        dataset = NDG(args.train, args.t_land)

    if dataset.have_landmarks:
        log.info('Use alignment for the train data')
        dataset.transform = transforms.Compose([landmarks_augmentation.Rescale((56, 56)),
                                                landmarks_augmentation.Blur(k=3, p=.2),
                                                landmarks_augmentation.HorizontalFlip(p=.5),
                                                landmarks_augmentation.RandomRotate(50),
                                                landmarks_augmentation.RandomScale(.8, .9, p=.4),
                                                landmarks_augmentation.RandomCrop(48),
                                                landmarks_augmentation.ToTensor(switch_rb=True)])
    else:
        log.info('Error: training dataset has no landmarks data')
        exit()

    train_loader = DataLoader(dataset, batch_size=args.train_batch_size, num_workers=4, shuffle=True)
    writer = SummaryWriter('./logs_landm/{:%Y_%m_%d_%H_%M}_'.format(datetime.datetime.now()) + args.snap_prefix)
    model = models_landmarks['landnet']()

    set_dropout_fn = model.set_dropout_ratio

    compression_algo = None
    if args.snap_to_resume is not None:
        if args.compr_config:  # optional compression setup, mirroring start_evaluation above
            config = Config.from_json(args.compr_config)
            compression_algo = create_compression_algorithm(model, config)
            model = compression_algo.model

        log.info('Resuming snapshot ' + args.snap_to_resume + ' ...')
        model = load_model_state(model, args.snap_to_resume, args.device, eval_state=False)
        model = torch.nn.DataParallel(model, device_ids=[args.device])
Example #4
import os
import warnings

import torch
from torch.utils.data import DataLoader
# 'Compose' is assumed to be torchvision's; the project may ship its own equivalent.
from torchvision.transforms import Compose

from IMMmodel import IMM
from datasets import CelebA
from utils.tps import TPS_Twice
from utils.transformers import Rescale, ToTensor

warnings.filterwarnings("ignore", category=UserWarning)
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

if __name__ == '__main__':
    model = IMM(dim=10, heatmap_std=0.1, h_channel=32)
    model.load_state_dict(torch.load('Celeba_originaldata_experiment2/checkpoint_model_CelebA.pt'))
    model.eval()

    data_set = CelebA(transform=Compose(
        [Rescale([128, 128]), ToTensor()]))
    trainset, testset = data_set(10000, 1000)
    data_loader = DataLoader(dataset=testset, batch_size=10, drop_last=True,
                             shuffle=True)

    # train_set = AFLW(is_train=True, transform=Compose(
    #     [Rescale([128,128], is_labeled=True), ToTensor(is_labeled=True)]))
    # data_loader = DataLoader(dataset=train_set, batch_size=10, drop_last=True,
    #                                shuffle=True)

    sample = next(iter(data_loader))
    tps_transform = TPS_Twice(5, 0.05, 0.05)
    image = sample['image']
    x1, mask1, x2, mask2 = tps_transform(image)
    recovered_x2, cord = model(x1, x2)
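
The snippet stops after the forward pass. Continuing from the variables above, a minimal inspection step might look like the sketch below; it assumes recovered_x2 is a reconstruction of the warped view x2 and cord holds the predicted landmark coordinates, which matches how IMM-style models are usually described, but the exact shapes are not confirmed by the snippet.

import torch.nn.functional as F

# Sketch only: report how closely the reconstruction matches the target view
# and print the predicted coordinates for the first sample in the batch.
with torch.no_grad():
    recon_error = F.mse_loss(recovered_x2, x2)
print('Reconstruction MSE on this batch:', recon_error.item())
print('Predicted coordinates, first sample:', cord[0])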
Example #5
def train(args):
    """Launches training of landmark regression model"""
    if args.dataset == 'vgg':
        drops_schedule = [1, 6, 9, 13]
        dataset = VGGFace2(args.train,
                           args.t_list,
                           args.t_land,
                           landmarks_training=True)
    elif args.dataset == 'celeba':
        drops_schedule = [10, 20]
        dataset = CelebA(args.train, args.t_land)
    else:
        drops_schedule = [90, 140, 200]
        dataset = NDG(args.train, args.t_land)

    if dataset.have_landmarks:
        log.info('Use alignment for the train data')
        dataset.transform = transforms.Compose([
            landmarks_augmentation.Rescale((56, 56)),
            landmarks_augmentation.Blur(k=3, p=.2),
            landmarks_augmentation.HorizontalFlip(p=.5),
            landmarks_augmentation.RandomRotate(50),
            landmarks_augmentation.RandomScale(.8, .9, p=.4),
            landmarks_augmentation.RandomCrop(48),
            landmarks_augmentation.ToTensor(switch_rb=True)
        ])
    else:
        log.info('Error: training dataset has no landmarks data')
        exit()

    train_loader = DataLoader(dataset,
                              batch_size=args.train_batch_size,
                              num_workers=4,
                              shuffle=True)
    writer = SummaryWriter(
        './logs_landm/{:%Y_%m_%d_%H_%M}_'.format(datetime.datetime.now()) +
        args.snap_prefix)
    model = LandmarksNet()

    set_dropout_fn = model.set_dropout_ratio

    if args.snap_to_resume is not None:
        log.info('Resuming snapshot ' + args.snap_to_resume + ' ...')
        model = load_model_state(model,
                                 args.snap_to_resume,
                                 args.device,
                                 eval_state=False)
        model = torch.nn.DataParallel(model, device_ids=[args.device])
    else:
        model = torch.nn.DataParallel(model, device_ids=[args.device])
        model.cuda()
        model.train()
        cudnn.enabled = True
        cudnn.benchmark = True

    log.info('Face landmarks model:')
    log.info(model)

    criterion = AlignmentLoss('wing')
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, drops_schedule)

    log.info('Epoch length: %d' % len(train_loader))
    for epoch_num in range(args.epoch_total_num):
        log.info('Epoch: %d' % epoch_num)

        if epoch_num > 5:
            set_dropout_fn(0.)

        for i, data in enumerate(train_loader, 0):
            iteration = epoch_num * len(train_loader) + i

            if iteration % args.val_step == 0 and iteration > 0:
                snapshot_name = osp.join(
                    args.snap_folder,
                    args.snap_prefix + '_{0}.pt'.format(iteration))
                log.info('Saving Snapshot: ' + snapshot_name)
                save_model_cpu(model, optimizer, snapshot_name, epoch_num)

                model.eval()
                log.info('Evaluating Snapshot: ' + snapshot_name)
                avg_err, per_point_avg_err, failures_rate = evaluate(
                    train_loader, model)
                weights = per_point_avg_err / np.sum(per_point_avg_err)
                criterion.set_weights(weights)
                log.info(str(weights))
                log.info('Avg train error: {}'.format(avg_err))
                log.info('Train failure rate: {}'.format(failures_rate))
                writer.add_scalar('Quality/Avg_error', avg_err, iteration)
                writer.add_scalar('Quality/Failure_rate', failures_rate,
                                  iteration)
                writer.add_scalar('Epoch', epoch_num, iteration)
                model.train()

            data, gt_landmarks = data['img'].cuda(), data['landmarks'].cuda()
            predicted_landmarks = model(data)

            optimizer.zero_grad()
            loss = criterion(predicted_landmarks, gt_landmarks)
            loss.backward()
            optimizer.step()

            if i % 10 == 0:
                log.info('Iteration %d, Loss: %.4f' % (iteration, loss))
                log.info('Learning rate: %f' % scheduler.get_lr()[0])
                writer.add_scalar('Loss/train_loss', loss.item(), iteration)
                writer.add_scalar('Learning_rate',
                                  scheduler.get_lr()[0], iteration)
        scheduler.step()
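
The criterion above is constructed as AlignmentLoss('wing'); its implementation is not part of this listing. Assuming 'wing' refers to the Wing loss of Feng et al. (CVPR 2018), a self-contained sketch of that loss is given below. The function name and default parameters are illustrative, not the project's AlignmentLoss API.

import math
import torch

def wing_loss(pred, target, w=10.0, eps=2.0):
    """Wing loss: logarithmic near zero, L1-like for large errors."""
    diff = (pred - target).abs()
    # Constant that makes the two branches meet at |x| = w.
    c = w - w * math.log(1.0 + w / eps)
    loss = torch.where(diff < w,
                       w * torch.log(1.0 + diff / eps),
                       diff - c)
    return loss.mean()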
Example #6
File: run.py Project: smarsu/facenet
import sys
import os.path as osp

import numpy as np
from datasets import CelebA
from facenet import FaceNet
from euclidean import euclidean_distance

#np.random.seed(196)
dataset_root = '/datasets/CelebA'

if __name__ == '__main__':
    parse = sys.argv[1]  # 'train'
    print(parse)
    face_size = (96, 96)

    if parse == 'train':
        batch_size = 8
        celebA = CelebA(dataset_root, face_size=face_size)
        facenet = FaceNet(batch_size=batch_size, face_size=face_size, alpha=0.5)
        # Restore from:
        #   1. 0.2943756_7_8.npz test loss: 0
        #   2. 0.2817538_2_0.8.npz test loss: 0
        #   3. 0.28175184_22_0.08.npz test loss: 0
        #   4. 0.2740853_5_0.008.npz  test loss: 0

        #   1. 0.52165955_1_0.8.npz
        facenet.sess.restore(osp.join(facenet.model_root, '0.33327728476725504_94_1.npz'))
        # Train stage:
        #   1. epoch: 100, lr: 1
        #   2. epoch: 100, lr: 0.1
        #   3. epoch: 100, lr: 0.1
        #   4. epoch: 100, lr: 0.1
        #   5. epoch: 100, lr: 0.01