Example #1
def run(args):
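    # Evaluation entry point: build a single validation dataset with landmark
    # heatmaps enabled and run one FabrecEval evaluation epoch.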

    if args.seed is not None:
        from csl_common.utils.common import init_random
        init_random(args.seed)
    # log.info(json.dumps(vars(args), indent=4))

    datasets = {}
    dsname = args.dataset_val[0]
    root, cache_root = cfg.get_dataset_paths(dsname)
    dataset_cls = cfg.get_dataset_class(dsname)
    datasets[VAL] = dataset_cls(root=root,
                                cache_root=cache_root,
                                train=False,
                                test_split=args.test_split,
                                max_samples=args.val_count,
                                start=args.st,
                                use_cache=args.use_cache,
                                align_face_orientation=args.align,
                                crop_source=args.crop_source,
                                return_landmark_heatmaps=True,
                                landmark_sigma=args.sigma,
                                image_size=args.input_size)
    print(datasets[VAL])

    fntr = FabrecEval(datasets, args, args.sessionname, workers=args.workers, wait=args.wait)

    import torch
    torch.backends.cudnn.benchmark = True
    fntr.eval_epoch()
Example #2
def run(args):
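    # Training entry point: build train/val datasets (optionally several
    # concatenated training sets) and run unsupervised AAE training, or a
    # single evaluation epoch when args.eval is set.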

    if args.seed is not None:
        from csl_common.utils.common import init_random
        init_random(args.seed)

    # log.info(json.dumps(vars(args), indent=4))

    phase_cfg = {
        TRAIN: {'dsnames': args.dataset_train,
                'count': args.train_count},
        VAL: {'dsnames': args.dataset_val,
              'count': args.val_count}
    }
    datasets = {}
    for phase in args.phases:
        dsnames = phase_cfg[phase]['dsnames']
        if dsnames is None:
            continue
        num_samples = phase_cfg[phase]['count']
        if isinstance(dsnames, str):
            # a bare string would otherwise be iterated character by character below
            dsnames = [dsnames]
        is_single_dataset = len(dsnames) == 1
        train = phase == TRAIN
        datasets_for_phase = []
        for name in dsnames:
            root, cache_root = cfg.get_dataset_paths(name)
            transform = ds_utils.build_transform(deterministic=not train, daug=args.daug)
            dataset_cls = cfg.get_dataset_class(name)
            ds = dataset_cls(root=root,
                             cache_root=cache_root,
                             train=train,
                             max_samples=num_samples,
                             use_cache=args.use_cache,
                             start=args.st if train else None,
                             test_split=args.test_split,
                             align_face_orientation=args.align,
                             crop_source=args.crop_source,
                             transform=transform,
                             image_size=args.input_size)
            datasets_for_phase.append(ds)
        if is_single_dataset:
            datasets[phase] = datasets_for_phase[0]
        else:
            datasets[phase] = multi.ConcatFaceDataset(datasets_for_phase, max_samples=args.train_count_multi)

        print(datasets[phase])

    fntr = AAEUnsupervisedTraining(datasets, args, session_name=args.sessionname,
                                   snapshot_interval=args.save_freq, workers=args.workers,
                                   wait=args.wait)

    torch.backends.cudnn.benchmark = True
    if args.eval:
        fntr.eval_epoch()
    else:
        fntr.train(num_epochs=args.epochs)
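For reference, multi.ConcatFaceDataset is project-specific; the standard
torch.utils.data.ConcatDataset behaves the same way for plain indexing. A
minimal sketch with toy tensors (not the face datasets used above):

import torch
from torch.utils.data import ConcatDataset, TensorDataset

ds_a = TensorDataset(torch.zeros(10, 3))   # 10 samples
ds_b = TensorDataset(torch.ones(4, 3))     # 4 samples
combined = ConcatDataset([ds_a, ds_b])     # indices 0..13 span both datasets
print(len(combined))                       # 14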
Example #3
def run():
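    # Landmark training entry point: build one train and one val dataset
    # (heatmaps and optional occlusions enabled) and run AAE landmark
    # training or evaluation. Expects a module-level args namespace.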

    from csl_common.utils.common import init_random

    if args.seed is not None:
        init_random(args.seed)
    # log.info(json.dumps(vars(args), indent=4))

    datasets = {}
    for phase, dsnames, num_samples in zip(
        (TRAIN, VAL), (args.dataset_train, args.dataset_val),
        (args.train_count, args.val_count)):
        train = phase == TRAIN
        name = dsnames[0]
        transform = ds_utils.build_transform(deterministic=not train,
                                             daug=args.daug)
        root, cache_root = cfg.get_dataset_paths(name)
        dataset_cls = cfg.get_dataset_class(name)
        datasets[phase] = dataset_cls(
            root=root,
            cache_root=cache_root,
            train=train,
            max_samples=num_samples,
            use_cache=args.use_cache,
            start=args.st,
            test_split=args.test_split,
            align_face_orientation=args.align,
            crop_source=args.crop_source,
            return_landmark_heatmaps=lmcfg.PREDICT_HEATMAP,
            with_occlusions=args.occ and train,
            landmark_sigma=args.sigma,
            transform=transform,
            image_size=args.input_size)
        print(datasets[phase])

    fntr = AAELandmarkTraining(datasets,
                               args,
                               session_name=args.sessionname,
                               snapshot_interval=args.save_freq,
                               workers=args.workers,
                               wait=args.wait)

    torch.backends.cudnn.benchmark = True
    if args.eval:
        fntr.eval_epoch()
    else:
        fntr.train(num_epochs=args.epochs)
Example #4
                               bb=bb,
                               id=face_id,
                               landmarks_to_return=sample.landmarks.astype(
                                   np.float32))


config.register_dataset(WFLW)

if __name__ == '__main__':
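    # Smoke test: build the WFLW validation set, filter by annotated
    # attributes, and iterate a small DataLoader batch by batch.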
    from csl_common.utils.nn import Batch
    from csl_common.utils.common import init_random
    from csl_common.utils.ds_utils import build_transform
    from csl_common.vis import vis
    import torch.utils.data as td
    import config

    init_random(3)

    path = config.get_dataset_paths('wflw')[0]
    ds = WFLW(root=path,
              train=False,
              deterministic=True,
              use_cache=False,
              daug=0,
              image_size=256,
              transform=build_transform(deterministic=False, daug=0))
    ds.filter_labels({'pose': 1, 'occlusion': 0, 'make-up': 1})
    dl = td.DataLoader(ds, batch_size=10, shuffle=False, num_workers=0)
    print(ds)

    for data in dl:
        batch = Batch(data, gpu=False)
Example #5
def run():
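    # Vessel segmentation entry point: pick crop sizes from the full
    # resolution of the chosen retina dataset, build albumentations
    # pipelines, then train or evaluate VesselTraining. Expects a
    # module-level args namespace.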

    if args.seed is not None:
        from csl_common.utils.common import init_random
        init_random(args.seed)

    # log.info(json.dumps(vars(args), indent=4))

    full_sizes = {
        'drive': 512,
        'stare': 512,
        'chase': 1024,
        'hrf': 2560,
    }
    full_size = full_sizes[args.dataset_train[0]]

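    # Training augmentation: geometric transforms (rotate, random-sized crop
    # to the network input size) followed by photometric jitter and flips;
    # the final ToTensor normalizes with a fixed channel mean (std left at 1).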
    transform_train = alb.Compose([
        alb.Rotate(60, border_mode=cv2.BORDER_CONSTANT),
        alb.RandomSizedCrop(min_max_height=(int(full_size * 0.25),
                                            int(full_size * 0.5)),
                            height=args.input_size,
                            width=args.input_size,
                            p=1.0),

        # alb.RandomSizedCrop(
        #     min_max_height=(int(full_size*0.5), int(full_size*0.5)),
        #     height=1600, width=1600,
        # ),

        # alb.Resize(width=565*2, height=584*2),
        # alb.RandomCrop(args.input_size, args.input_size),

        # alb.CenterCrop(args.input_size, args.input_size),
        # alb.Resize(args.input_size, args.input_size),
        alb.RGBShift(p=0.5),
        alb.RandomBrightnessContrast(brightness_limit=0.5,
                                     contrast_limit=0.5,
                                     p=0.5),
        alb.RandomGamma(),
        alb.HorizontalFlip(p=0.5),
        alb.VerticalFlip(p=0.5),
        alb_torch.ToTensor(
            normalize=dict(mean=[0.518, 0.418, 0.361], std=[1, 1, 1]))
    ])

    transform_val = alb.Compose([
        alb.RandomSizedCrop(min_max_height=(int(full_size * 0.25),
                                            int(full_size * 0.5)),
                            height=args.input_size,
                            width=args.input_size),
        alb.Resize(args.input_size, args.input_size, always_apply=True),
        alb_torch.ToTensor(
            normalize=dict(mean=[0.518, 0.418, 0.361], std=[1, 1, 1]))
    ])

    torch.backends.cudnn.benchmark = True

    datasets = {}
    datasets[VAL] = retinadataset.create_dataset_multi(
        args.dataset_val,
        transform_val,
        num_samples=args.val_count,
        repeat_factor=5,
        train=False)

    if args.eval:
        fntr = VesselTraining(datasets, args)
        fntr.evaluate()
    else:
        datasets[TRAIN] = retinadataset.create_dataset_multi(
            args.dataset_train,
            transform_train,
            num_samples=args.train_count,
            train=True,
            repeat_factor=args.n_dataset_repeats)
        fntr = VesselTraining(datasets, args)
        fntr.train(num_epochs=args.epochs)
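For reference, an albumentations Compose like transform_train above is called
with keyword arguments and returns a dict. A minimal standalone sketch (the
input path is hypothetical; the pipeline is a trimmed-down stand-in):

import cv2
import albumentations as alb

transform = alb.Compose([
    alb.Rotate(60, border_mode=cv2.BORDER_CONSTANT),
    alb.HorizontalFlip(p=0.5),
])
img = cv2.imread('fundus.png')                # hypothetical input image
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # albumentations expects RGB
augmented = transform(image=img)['image']     # result comes back in a dict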
Example #6
            transform_train,
            num_samples=args.train_count,
            train=True,
            repeat_factor=args.n_dataset_repeats)
        fntr = VesselTraining(datasets, args)
        fntr.train(num_epochs=args.epochs)


if __name__ == '__main__':
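    # CLI entry point: seed RNGs, install a quiet SIGINT handler, then
    # assemble defaults for the configargparse-based argument parser.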

    import sys
    import configargparse
    from training import bool_str
    from csl_common.utils import common

    common.init_random(0)

    np.set_printoptions(linewidth=np.inf)

    # Disable traceback on Ctrl+c
    import signal
    signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))

    parser = configargparse.get_argument_parser()
    defaults = dict(input_size=512,
                    print_freq=50,
                    eval_freq=1,
                    batchsize=10,
                    save_freq=1,
                    batchsize_eval=25,
                    print_freq_eval=2,