Example 1
from sklearn.model_selection import train_test_split
import numpy as np

def get_dataset_split(config):
    # `dp`, `try_parse` and `try_parse_or_default` come from the surrounding project.
    ft = config['split'].lower().strip()
    testf = try_parse(config['test'])
    trainf = try_parse(config['train'])
    validf = try_parse(config['valid'])

    if ft == 'train_test_split':
        seed = try_parse_or_default(config, 'seed', 42)

        def split(images):
            # Carve off the test set first, then split the remainder
            # into train and validation.
            tv, test = train_test_split(images, test_size=testf, random_state=seed)
            train, valid = train_test_split(tv, train_size=trainf, test_size=validf,
                                            random_state=seed)
            return {dp.TRAIN: train, dp.VALID: valid, dp.TEST: test}
    elif ft == 'filter':
        def split(images):
            # Assign each path to a split by substring match.
            train = np.array([p for p in images if trainf in p])
            valid = np.array([p for p in images if validf in p])
            test = np.array([p for p in images if testf in p])
            return {dp.TRAIN: train, dp.VALID: valid, dp.TEST: test}
    else:
        raise ValueError(f"Unknown split type: {ft}")
    return split
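
A minimal usage sketch for the 'train_test_split' mode. The config keys mirror the function above, while the concrete values and the image_paths variable are illustrative assumptions.

# Hypothetical values: 0.2 of the data becomes test, and the remaining
# pool is split 0.75/0.25 into train and validation.
config = {'split': 'train_test_split', 'test': '0.2',
          'train': '0.75', 'valid': '0.25', 'seed': '123'}
split = get_dataset_split(config)
splits = split(image_paths)        # image_paths: sequence of file paths
train_paths = splits[dp.TRAIN]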
Example 2
from copy import deepcopy

def get_optimizer(config):
    # `optimizers` is the module the project resolves names against
    # (e.g. tf.keras.optimizers); `try_parse` comes from the same project.
    config = deepcopy(config)
    name = config['name'].strip()
    del config['name']
    props = {k: try_parse(v) for k, v in config.items()}

    try:
        opt_class = getattr(optimizers, name)
    except AttributeError as e:
        # Only a failed lookup means "unknown"; bad keyword arguments
        # now surface as their own errors instead of being masked.
        raise ValueError(f"Unknown optimizer: {name}") from e
    return opt_class(**props)
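
Assuming `optimizers` is bound to tf.keras.optimizers (the snippet does not show the import), a config such as the following would resolve to Adam; the values are illustrative.

# Hypothetical config: 'learning_rate' arrives as a string and is
# turned into a float by try_parse before being passed as a kwarg.
config = {'name': 'Adam', 'learning_rate': '1e-3'}
opt = get_optimizer(config)  # ~ optimizers.Adam(learning_rate=0.001)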
Example 3
def get_input_mapping(config):
    config = deepcopy(config)
    mt = config['mapping'].lower().strip()
    del config['mapping']
    props = {k: try_parse(v) for k, v in config.items()}

    # Resolve the mapping function by name from the dp module.
    try:
        func = getattr(dp, mt)
    except AttributeError as e:
        raise ValueError(f"Unknown dataset mapping: {mt}") from e
    return lambda dataset: func(dataset, **props)
Example 4
import inspect

def get_metrics(config):
    def load_metric(name):
        # Resolve by name from the `metrics` module; classes are
        # instantiated, plain metric functions returned as-is.
        try:
            metric_cls = getattr(metrics, name)
        except AttributeError as e:
            raise ValueError(f"Unknown metric: {name}") from e
        return metric_cls() if inspect.isclass(metric_cls) else metric_cls

    # `names` may parse to a single string or a list of names.
    names = try_parse(config['names'].strip())
    if isinstance(names, str):
        return [load_metric(names)]
    return [load_metric(n) for n in names]
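
A hedged usage sketch, assuming `metrics` is tf.keras.metrics and that try_parse turns the names string into a Python list (both assumptions, not shown in the snippet):

# Hypothetical config; the string is assumed to parse to ['Accuracy', 'AUC'].
config = {'names': "['Accuracy', 'AUC']"}
ms = get_metrics(config)  # ~ [metrics.Accuracy(), metrics.AUC()]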
Example 5
def get_dataset_mapping(config):
    config = deepcopy(config)
    mt = config['mapping'].lower().strip()
    del config['mapping']
    props = {k: try_parse(v) for k, v in config.items()}

    # Every mapping, including image_histogram_mapping and
    # image_histogram_mapping_segmentation, resolves by name from dp,
    # so the former special cases collapse into a single lookup.
    try:
        func = getattr(dp, mt)
    except AttributeError as e:
        raise ValueError(f"Unknown dataset mapping: {mt}") from e
    return lambda *x: func(*x, **props)
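
A sketch of how the returned factory is used; image_histogram_mapping comes from the snippet, but the 'nbins' keyword and the sample arguments are hypothetical, chosen only to show that extra config keys are forwarded as kwargs:

# 'nbins' is a hypothetical kwarg, not a documented parameter of dp.
config = {'mapping': 'image_histogram_mapping', 'nbins': '64'}
map_fn = get_dataset_mapping(config)
out = map_fn(image, label)  # forwards *x plus parsed kwargs to dp.image_histogram_mapping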
Example 6
def get_loss(config):
    config = deepcopy(config)
    loss_name = config['name'].strip()
    del config['name']

    # 'weight' is consumed here and returned alongside the loss;
    # it is not forwarded to the loss constructor.
    loss_weight = try_parse_or_default(config, "weight", default=1.0)
    config.pop('weight', None)

    props = {k: try_parse(v) for k, v in config.items()}

    try:
        loss_cls = getattr(losses, loss_name)
    except AttributeError as e:
        raise ValueError(f"Unknown loss: {loss_name}") from e
    return loss_cls(**props), loss_weight
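
Assuming `losses` is bound to tf.keras.losses (an assumption, as with the optimizer factory), a config might look like this; the weight value is illustrative:

# Hypothetical config; 'weight' is returned separately, not passed to the loss.
config = {'name': 'MeanSquaredError', 'weight': '0.5'}
loss, w = get_loss(config)  # ~ (losses.MeanSquaredError(), 0.5)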
Example 7
def get_parameters(config):
    return {k.lower().strip(): try_parse(v) for k, v in config.items()}
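
Every example leans on try_parse and try_parse_or_default, whose definitions are not shown. A minimal sketch of plausible implementations, assuming literal-eval semantics; this is a guess at the contract, not the project's actual code:

import ast

def try_parse(value):
    # Hypothetical: evaluate Python literals ('0.5', 'True', "['a', 'b']"),
    # falling back to the raw string when it is not a literal.
    try:
        return ast.literal_eval(value)
    except (ValueError, SyntaxError):
        return value

def try_parse_or_default(config, key, default=None):
    # Hypothetical: parse config[key] when present, else return the default.
    return try_parse(config[key]) if key in config else default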
Example 8
import os
import numpy as np

def get_dataset(config, type_, paths_filter=lambda x: x, out_mapping=lambda *x: x):
    dt = config['dataset_type'].lower().strip()
    dataset_path = config['dataset_path'].strip()

    batch_size = try_parse_or_default(config, 'batch_size', 1)
    size = try_parse(config['size'])

    if dt in ('cube2', 'newcube2'):
        regression = try_parse_or_default(config, 'regression', False)
        return_gt = try_parse_or_default(config, 'return_gt', True)
        return_gt_mask = try_parse_or_default(config, 'return_gt_mask', False)
        round_mask = try_parse_or_default(config, 'round', True)
        gamma = try_parse_or_default(config, 'gamma', False)
        resample = try_parse_or_default(config, 'resample', 1)
        camera = try_parse_or_default(config, 'camera', None)
        scene_type = try_parse_or_default(config, 'scene_type', None)

        # Both variants load paths through Cube2Dataset; only the
        # dataset constructor differs.
        dataset_cls = Cube2Dataset if dt == 'cube2' else NewCube2Dataset
        im_paths = paths_filter(Cube2Dataset.load_paths(dataset_path, config['list_name']))
        ds = dataset_cls.dataset(im_paths, type_, batch_size, cache=False,
                                 regression=regression, gt=return_gt, gt_mask=return_gt_mask,
                                 round=round_mask, gamma=gamma, resample=resample,
                                 camera=camera, scene_type=scene_type,
                                 h=size[0], w=size[1], map_fn=out_mapping)
        if isinstance(ds, tuple):
            ds, im_paths = ds
    elif dt == 'cuben':
        regression = try_parse_or_default(config, 'regression', False)
        return_gt = try_parse_or_default(config, 'return_gt', True)
        round_mask = try_parse_or_default(config, 'round', True)
        gamma = try_parse_or_default(config, 'gamma', False)
        n = try_parse_or_default(config, 'n', 3)

        im_paths = paths_filter(Cube2Dataset.load_paths(dataset_path, config['list_name']))
        ds = CubeNDataset.dataset(im_paths, type_, batch_size, cache=False,
                                  regression=regression, gt=return_gt, round=round_mask, gamma=gamma,
                                  h=size[0], w=size[1], n=n, map_fn=out_mapping)
    elif dt == 'cube':
        im_paths = paths_filter(CubeDataset.load_image_names(config['list_name'], dataset_path))
        gts = np.loadtxt(os.path.join(dataset_path, config['gts_name']))
        # Derive the 0-based ground-truth index from the numeric file name.
        indices = np.array([int(p[p.rfind('/') + 1:-4]) - 1 for p in im_paths])

        ds = CubeDataset.regression_dataset(im_paths, indices, type=type_,
                                            bs=batch_size, cache=False, uv=False,
                                            gts=gts, map_fn=out_mapping, sz=size)
    elif dt == 'tau':
        im_paths = np.loadtxt(os.path.join(dataset_path, config['list_name']), dtype=str)
        im_paths = paths_filter(im_paths)
        # Listed paths start with a separator; strip it before joining.
        im_paths = [os.path.join(dataset_path, p[1:]) for p in im_paths]

        ds = TauDataset.regression_dataset(im_paths, type=type_, bs=batch_size,
                                           cache=False, uv=False,
                                           sz=size, map_fn=out_mapping)
    else:
        raise ValueError(f"Unknown dataset type: {dt}")
    return ds, len(im_paths)
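
Putting the factories together, a hedged end-to-end sketch: the paths, the type_ value, and the size string are all hypothetical, and size is assumed to parse to a (height, width) pair.

# Hypothetical wiring: filter paths into the train split, then build the dataset.
split = get_dataset_split({'split': 'filter', 'train': 'train/',
                           'valid': 'val/', 'test': 'test/'})
ds_config = {'dataset_type': 'cube2', 'dataset_path': '/data/cube2',
             'list_name': 'list.txt', 'batch_size': '8', 'size': '(256, 512)'}
ds, n_images = get_dataset(ds_config, type_='train',
                           paths_filter=lambda paths: split(paths)[dp.TRAIN])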