Example #1
def run(args):

    if args.seed is not None:
        from csl_common.utils.common import init_random
        init_random(args.seed)
    # log.info(json.dumps(vars(args), indent=4))

    datasets = {}
    dsname = args.dataset_val[0]
    root, cache_root = cfg.get_dataset_paths(dsname)
    dataset_cls = cfg.get_dataset_class(dsname)
    datasets[VAL] = dataset_cls(root=root,
                                cache_root=cache_root,
                                train=False,
                                test_split=args.test_split,
                                max_samples=args.val_count,
                                start=args.st,
                                use_cache=args.use_cache,
                                align_face_orientation=args.align,
                                crop_source=args.crop_source,
                                return_landmark_heatmaps=True,
                                landmark_sigma=args.sigma,
                                image_size=args.input_size)
    print(datasets[VAL])

    fntr = FabrecEval(datasets, args, args.sessionname, workers=args.workers, wait=args.wait)

    import torch
    torch.backends.cudnn.benchmark = True
    fntr.eval_epoch()
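
These run(args) entry points take a parsed argparse namespace. A minimal sketch of a direct invocation; every value below is a hypothetical placeholder, since the real defaults live in the project's shared argument parser:

import argparse

# All values here are assumptions for illustration only.
args = argparse.Namespace(
    seed=0,
    dataset_val=['w300'],
    test_split='challenging',
    val_count=None,          # None = use every sample
    st=None,
    use_cache=True,
    align=False,
    crop_source='lm_ground_truth',
    sigma=7,
    input_size=256,
    sessionname='eval_session',
    workers=4,
    wait=0,
)
run(args)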
Example #2
def run(args):

    if args.seed is not None:
        from csl_common.utils.common import init_random
        init_random(args.seed)

    # log.info(json.dumps(vars(args), indent=4))

    phase_cfg = {
        TRAIN: {'dsnames': args.dataset_train,
                'count': args.train_count},
        VAL: {'dsnames': args.dataset_val,
              'count': args.val_count}
    }
    datasets = {}
    for phase in args.phases:
        dsnames = phase_cfg[phase]['dsnames']
        if dsnames is None:
            continue
        num_samples = phase_cfg[phase]['count']
        if isinstance(dsnames, str):
            dsnames = [dsnames]  # a bare string would otherwise be iterated character by character
        is_single_dataset = len(dsnames) == 1
        train = phase == TRAIN
        datasets_for_phase = []
        for name in dsnames:
            root, cache_root = cfg.get_dataset_paths(name)
            transform = ds_utils.build_transform(deterministic=not train, daug=args.daug)
            dataset_cls = cfg.get_dataset_class(name)
            ds = dataset_cls(root=root,
                             cache_root=cache_root,
                             train=train,
                             max_samples=num_samples,
                             use_cache=args.use_cache,
                             start=args.st if train else None,
                             test_split=args.test_split,
                             align_face_orientation=args.align,
                             crop_source=args.crop_source,
                             transform=transform,
                             image_size=args.input_size)
            datasets_for_phase.append(ds)
        if is_single_dataset:
            datasets[phase] = datasets_for_phase[0]
        else:
            datasets[phase] = multi.ConcatFaceDataset(datasets_for_phase, max_samples=args.train_count_multi)

        print(datasets[phase])

    fntr = AAEUnsupervisedTraining(datasets, args, session_name=args.sessionname,
                                   snapshot_interval=args.save_freq, workers=args.workers,
                                   wait=args.wait)

    torch.backends.cudnn.benchmark = True
    if args.eval:
        fntr.eval_epoch()
    else:
        fntr.train(num_epochs=args.epochs)
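
multi.ConcatFaceDataset is project-specific. Assuming it behaves roughly like a concatenation with an optional sample cap, torch.utils.data.ConcatDataset captures the core idea:

from torch.utils.data import ConcatDataset, Subset

def concat_with_limit(datasets, max_samples=None):
    # Hypothetical stand-in: cap each dataset before concatenating.
    if max_samples is not None:
        datasets = [Subset(ds, range(min(max_samples, len(ds)))) for ds in datasets]
    return ConcatDataset(datasets)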
Example #3
def run(args):

    from utils import random

    if args.seed is not None:
        random.init_random(args.seed)
    # log.info(json.dumps(vars(args), indent=4))

    datasets = {}
    for phase, dsnames, num_samples in zip(
        (TRAIN, VAL),
        (args.dataset_train, args.dataset_val),
        (args.train_count, args.val_count),
    ):
        train = phase == TRAIN
        name = dsnames[0]
        transform = nn.build_transform(deterministic=not train, daug=args.daug)
        root, cache_root = cfg.get_dataset_paths(name)
        dataset_cls = cfg.get_dataset_class(name)
        datasets[phase] = dataset_cls(
            root=root,
            cache_root=cache_root,
            train=train,
            max_samples=num_samples,
            use_cache=args.use_cache,
            start=args.st,
            align_face_orientation=args.align,
            crop_source=args.crop_source,
            return_landmark_heatmaps=lmcfg.PREDICT_HEATMAP,
            with_occlusions=args.occ and train,
            landmark_sigma=args.sigma,
            transform=transform,
            image_size=args.input_size,
        )
        print(datasets[phase])

    fntr = AAELandmarkTraining(
        datasets,
        args,
        session_name=args.sessionname,
        snapshot_interval=args.save_freq,
        workers=args.workers,
        wait=args.wait,
    )

    torch.backends.cudnn.benchmark = True
    if args.eval:
        fntr.eval_epoch()
    else:
        fntr.train(num_epochs=args.epochs)
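
return_landmark_heatmaps and landmark_sigma suggest the datasets render one Gaussian heatmap per landmark as a regression target. A minimal NumPy sketch of that idea (an assumption, not the project's actual implementation):

import numpy as np

def gaussian_heatmaps(landmarks, size, sigma):
    # landmarks: (N, 2) array of (x, y) pixel coordinates
    # returns an (N, size, size) stack with one Gaussian blob per landmark
    xs = np.arange(size)
    grid_x, grid_y = np.meshgrid(xs, xs)
    heatmaps = np.zeros((len(landmarks), size, size), dtype=np.float32)
    for i, (x, y) in enumerate(landmarks):
        heatmaps[i] = np.exp(-((grid_x - x) ** 2 + (grid_y - y) ** 2) / (2 * sigma ** 2))
    return heatmaps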
Example #4
def create_dataset(dataset_name, train, transform=None, indices=None, repeat_factor=None):
    root, cache_root = cfg.get_dataset_paths(dataset_name)
    dataset_cls = cfg.get_dataset_class(dataset_name)

    ds = dataset_cls(root=root, train=train, transform=transform)

    if indices is not None:
        indices = [i for i in indices if i < len(ds)]
        ds = Subset(ds, indices)

    if repeat_factor is not None:
        ds = RepeatDataset(ds, repeat_factor)

    return ds
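
RepeatDataset is not a torch built-in; assuming it simply presents the wrapped dataset several times per epoch, a minimal stand-in could look like this:

from torch.utils.data import Dataset

class RepeatDataset(Dataset):
    # Sketch under that assumption: length grows by `factor`, indices wrap around.
    def __init__(self, dataset, factor):
        self.dataset = dataset
        self.factor = factor

    def __len__(self):
        return len(self.dataset) * self.factor

    def __getitem__(self, idx):
        return self.dataset[idx % len(self.dataset)]

A typical call, with hypothetical arguments: create_dataset('w300', train=True, indices=range(1000), repeat_factor=4).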
Example #5
                               landmarks_to_return=sample.landmarks.astype(
                                   np.float32))


config.register_dataset(WFLW)

if __name__ == '__main__':
    from csl_common.utils.nn import Batch
    from csl_common.utils.common import init_random
    from csl_common.utils.ds_utils import build_transform
    from csl_common.vis import vis
    import config

    init_random(3)

    path = config.get_dataset_paths('wflw')[0]
    ds = WFLW(root=path,
              train=False,
              deterministic=True,
              use_cache=False,
              daug=0,
              image_size=256,
              transform=build_transform(deterministic=False, daug=0))
    ds.filter_labels({'pose': 1, 'occlusion': 0, 'make-up': 1})
    dl = td.DataLoader(ds, batch_size=10, shuffle=False, num_workers=0)
    print(ds)

    for data in dl:
        batch = Batch(data, gpu=False)
        images = vis.to_disp_images(batch.images, denorm=True)
        # lms = lmutils.convert_landmarks(to_numpy(batch.landmarks), lmutils.LM98_TO_LM68)
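
The vis helpers are project-specific; to eyeball a batch without them, plain matplotlib works. A sketch assuming `images` is a list of HxWxC float arrays in [0, 1] with more than one element:

import matplotlib.pyplot as plt

def show_batch(images):
    # one column per image, axes hidden
    fig, axes = plt.subplots(1, len(images), figsize=(2 * len(images), 2))
    for ax, img in zip(axes, images):
        ax.imshow(img)
        ax.axis('off')
    plt.show()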
Example #6
    parser.add_argument('--model',
                        choices=['unet', 'vlight'],
                        type=str)
    parser.add_argument('--modelname', default='drive_vlight', type=str)
    parser.add_argument('--patch-size', default=512, type=int)
    parser.add_argument('--show',
                        type=bool_str,
                        default=False,
                        help='show results')
    parser.add_argument('--gpu', type=bool_str, default=True)
    args = parser.parse_args()

    model = args.model
    dsname = args.dataset
    modelname = args.modelname

    root, cache_root = cfg.get_dataset_paths(dsname)
    dataset_cls = cfg.get_dataset_class(dsname)
    dataset = dataset_cls(root=root, train=False)
    print(dataset)

    model_dir = './'

    if model == 'unet':
        net = unet.load_net(os.path.join(model_dir, modelname)).cuda()
    elif model == 'vlight':
        net = vlight.load_net(os.path.join(model_dir, modelname))
        if args.gpu:
            net = net.cuda()
    else:
        raise ValueError(f"unknown model '{model}'")
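
bool_str is not shown in this snippet; a common definition for such an argparse type converter (an assumption, not the project's code):

import argparse

def bool_str(value):
    # accept 'true'/'false' style strings from the command line
    if value.lower() in ('true', '1', 'yes'):
        return True
    if value.lower() in ('false', '0', 'no'):
        return False
    raise argparse.ArgumentTypeError(f'expected a boolean, got {value!r}')

This avoids the classic pitfall of type=bool, which treats any non-empty string (including 'False') as True.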
Example #7
if __name__ == '__main__':
    import argparse
    import torch
    from csl_common.vis import vis
    import config

    parser = argparse.ArgumentParser()
    parser.add_argument('--extract', action='store_true')  # type=bool would treat any non-empty string as True
    parser.add_argument('--st', default=None, type=int)
    parser.add_argument('--nd', default=None, type=int)
    args = parser.parse_args()

    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)

    dirs = config.get_dataset_paths('affectnet')
    train = True
    ds = AffectNet(root=dirs[0],
                   image_size=256,
                   cache_root=dirs[1],
                   train=train,
                   use_cache=False,
                   transform=ds_utils.build_transform(deterministic=not train,
                                                      daug=0),
                   crop_source='lm_ground_truth')
    dl = td.DataLoader(ds, batch_size=10, shuffle=False, num_workers=0)
    # print(ds)

    for data in dl:
        batch = Batch(data, gpu=False)
Example #8
                               bb,
                               landmarks_for_crop,
                               landmarks_to_return=landmarks)


config.register_dataset(W300)

if __name__ == '__main__':
    from csl_common.vis import vis
    import torch
    import config

    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)

    dirs = config.get_dataset_paths('w300')
    ds = W300(root=dirs[0],
              cache_root=dirs[1],
              train=False,
              deterministic=True,
              use_cache=False,
              image_size=256,
              test_split='challenging',
              daug=0,
              align_face_orientation=True,
              crop_source='lm_ground_truth')
    dl = td.DataLoader(ds, batch_size=10, shuffle=False, num_workers=0)

    for data in dl:
        batch = Batch(data, gpu=False)
        inputs = batch.images.clone()
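
Results on the W300 'challenging' split are conventionally reported as inter-ocular-normalized mean error. A self-contained NumPy sketch of that metric (an assumption about the downstream evaluation, not code from this project):

import numpy as np

def nme(pred, gt, left_eye=36, right_eye=45):
    # pred, gt: (68, 2) landmark arrays in the iBUG 68-point scheme;
    # error is normalized by the outer-eye-corner distance
    iod = np.linalg.norm(gt[left_eye] - gt[right_eye])
    return float(np.mean(np.linalg.norm(pred - gt, axis=1)) / iod)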