Example #1
def test_lr_of_dataset(dataset_name):
    print('Getting datasets')
    if dataset_name == 'voc':
        cfg = voc_cfg.get_default_config()
        # unordered
        cfg['ordering'] = None
        instanceseg.utils.scripts.set_random_seeds()
        train_dataset_unordered, _ = dataset_generator_registry.get_dataset(
            'voc', cfg)

        # ordered
        cfg['ordering'] = 'LR'
        instanceseg.utils.scripts.set_random_seeds()
        train_dataset_ordered, _ = dataset_generator_registry.get_dataset(
            'voc', cfg)
    elif dataset_name == 'synthetic':
        cfg = synthetic_cfg.get_default_config()
        # unordered
        cfg['ordering'] = None
        instanceseg.utils.scripts.set_random_seeds()
        train_dataset_unordered, _ = dataset_generator_registry.get_dataset(
            'synthetic', cfg)

        # ordered
        cfg['ordering'] = 'LR'
        instanceseg.utils.scripts.set_random_seeds()
        train_dataset_ordered, _ = dataset_generator_registry.get_dataset(
            'synthetic', cfg)
    else:
        raise ValueError('Unrecognized dataset_name: {}'.format(dataset_name))

    print('Testing left-right (LR) ordering...')
    test_lr_from_datasets(train_dataset_unordered, train_dataset_ordered)
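test_lr_from_datasets is not shown on this page. Below is a minimal sketch of the kind of check it might perform, assuming each dataset item unpacks as img, (sem_lbl, inst_lbl) (as in Example #5) and that 'LR' ordering means instance ids increase from left to right; centroid_x and the per-image loop are illustrative, not the repository's implementation.

import numpy as np

def centroid_x(inst_lbl, inst_id):
    # Mean column index of the pixels assigned to one instance id (hypothetical helper)
    _, xs = np.nonzero(np.asarray(inst_lbl) == inst_id)
    return xs.mean()

def check_lr_ordering_sketch(ordered_dataset, n_images=5):
    # Assumes instance id 0 is background and positive ids should appear left-to-right
    for idx in range(n_images):
        _, (sem_lbl, inst_lbl) = ordered_dataset[idx]
        inst_ids = [i for i in np.unique(np.asarray(inst_lbl)) if i > 0]
        centroids = [centroid_x(inst_lbl, i) for i in inst_ids]
        assert centroids == sorted(centroids), 'image {} is not left-right ordered'.format(idx)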
Example #2
def get_dataloaders(cfg, dataset_type, cuda, sampler_cfg=None, splits=('train', 'val', 'train_for_val')):
    # 'train_for_val' reuses the 'train' dataset rather than being generated on its own
    non_derivative_splits = tuple(s for s in splits if s != 'train_for_val')
    build_train_for_val = 'train_for_val' in splits

    # 1. dataset
    datasets = {
        split: dataset_generator_registry.get_dataset(dataset_type, cfg, split, transform=True)
        for split in non_derivative_splits
    }
    if build_train_for_val:
        datasets['train_for_val'] = datasets['train']

    # 2. samplers
    if sampler_cfg is not None and 'val' in sampler_cfg and isinstance(sampler_cfg['val'], str) and \
            sampler_cfg['val'] == 'copy_train':
        assert 'train' in splits
        datasets['val'] = datasets['train']
    samplers = sampler_factory.get_samplers(dataset_type, sampler_cfg, datasets, splits=splits)

    # 3. dataloaders (built from the datasets and samplers above)
    loader_kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    batch_sizes = {split: cfg['val_batch_size'] if split == 'train_for_val' else cfg['{}_batch_size'.format(split)]
                   for split in splits}
    dataloaders = {
        split: torch.utils.data.DataLoader(datasets[split], batch_size=batch_sizes[split],
                                           sampler=samplers[split], **loader_kwargs) for split in splits
    }

    if DEBUG_ASSERTS:
        # Placeholder: previously pulled the first batch from the train loader here as a sanity check
        pass
    return dataloaders
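A usage sketch for get_dataloaders, assuming a VOC config that defines train_batch_size and val_batch_size (as the batch-size lookup above requires) and that passing sampler_cfg=None yields default samplers; the batch unpacking follows the (img, (sem_lbl, inst_lbl)) structure seen in Example #5.

import torch
from scripts.configurations import voc_cfg

cfg = voc_cfg.get_default_config()
cuda = torch.cuda.is_available()
dataloaders = get_dataloaders(cfg, dataset_type='voc', cuda=cuda, sampler_cfg=None,
                              splits=('train', 'val'))
imgs, (sem_lbls, inst_lbls) = next(iter(dataloaders['train']))
print(imgs.shape, sem_lbls.shape, inst_lbls.shape)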
Example #3
def main():
    # Setup
    cfg = voc_cfg.get_default_config()
    print('Getting datasets')
    train_dataset, val_dataset = dataset_generator_registry.get_dataset(
        'voc', cfg)
    gpu = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    loader_kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    print('Running single-image test')
    test_single_image_sampler(train_dataset, loader_kwargs, image_index=10)
    print('Running vanilla test')
    test_vanilla_sampler(train_dataset, loader_kwargs)
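test_single_image_sampler and test_vanilla_sampler are defined elsewhere in the repository. A minimal sketch of what a single-image test could look like, built on torch.utils.data.SubsetRandomSampler; the repetition count and assertions are illustrative only.

import torch

def single_image_sampler_test_sketch(dataset, loader_kwargs, image_index=10, n_repeats=4):
    # Draw the same index repeatedly and confirm each batch holds exactly one image
    sampler = torch.utils.data.SubsetRandomSampler([image_index] * n_repeats)
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, sampler=sampler, **loader_kwargs)
    n_batches = 0
    for img, (sem_lbl, inst_lbl) in loader:
        assert img.shape[0] == 1
        n_batches += 1
    assert n_batches == n_repeats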
Example #4
def main():
    # Setup
    cfg = cityscapes_cfg.get_default_train_config()
    print('Getting datasets')
    train_dataset, val_dataset = dataset_generator_registry.get_dataset('cityscapes', cfg)
    gpu = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu) if isinstance(gpu, int) else ','.join(str(g) for g in gpu)
    cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    loader_kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    print('Running occlusion on all cityscapes')
    test_all_cityscapes_occlusion_sampler(train_dataset, loader_kwargs)
    print('Running occlusion-based sampler test')
    test_occlusion_sampler(train_dataset, loader_kwargs)
    print('Running instance-based sampler test')
    test_instance_sampler(train_dataset, loader_kwargs)
    print('Running single-image test')
    test_single_image_sampler(train_dataset, loader_kwargs, image_index=10)
    print('Running vanilla test')
    test_vanilla_sampler(train_dataset, loader_kwargs)
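The occlusion- and instance-based sampler tests above rely on the repository's sampler_factory, which is not shown on this page. One simple way an occlusion-restricted sampler could be built is as a SubsetRandomSampler over indices that pass a per-image predicate; has_occlusion below is a hypothetical callable, not the repository's API.

import torch

def occlusion_sampler_sketch(dataset, has_occlusion):
    # Keep only the images whose labels satisfy the occlusion predicate
    keep_indices = [idx for idx in range(len(dataset)) if has_occlusion(dataset[idx])]
    return torch.utils.data.SubsetRandomSampler(keep_indices)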
Example #5
import instanceseg.factory.data
import instanceseg.factory.models
from scripts.configurations import voc_cfg
from instanceseg.datasets.voc import ALL_VOC_CLASS_NAMES
from instanceseg.datasets import dataset_generator_registry
import os
import torch

if __name__ == '__main__':
    gpu = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    cfg = voc_cfg.get_default_config()
    print('Getting datasets')
    train_dataset, val_dataset = dataset_generator_registry.get_dataset(
        'voc', cfg)
    img, (sem_lbl, inst_lbl) = train_dataset[0]

    problem_config = instanceseg.factory.models.get_problem_config(
        ALL_VOC_CLASS_NAMES, cfg['n_instances_per_class'])
    model, start_epoch, start_iteration = instanceseg.factory.models.get_model(
        cfg,
        problem_config,
        checkpoint_file=None,
        semantic_init=None,
        cuda=cuda)
    dataloaders = instanceseg.factory.data.get_dataloaders(cfg,
                                                           dataset_type='voc',
                                                           cuda=cuda)
    import ipdb
    ipdb.set_trace()
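The script ends in an ipdb prompt for manual inspection. Checks one might run at that prompt look roughly like the following; the model's output format is an assumption, since get_model's return value is not documented on this page.

# Shapes of a single raw sample
print(img.shape, sem_lbl.shape, inst_lbl.shape)
# Shapes of one collated training batch
batch_imgs, (batch_sem_lbls, batch_inst_lbls) = next(iter(dataloaders['train']))
print(batch_imgs.shape, batch_sem_lbls.shape, batch_inst_lbls.shape)
# One forward pass through the freshly constructed model (output type/shape assumed)
scores = model(batch_imgs.cuda() if cuda else batch_imgs)
print(scores.shape if hasattr(scores, 'shape') else type(scores))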
Example #6
        self.dataset_generator = dataset_generator


REGISTRY = {
    'cityscapes':
    RegisteredDataset(
        name='cityscapes',
        default_config=scripts.configurations.cityscapes_cfg.get_default_config(),
        config_option_dict=scripts.configurations.cityscapes_cfg.configurations,
        original_semantic_class_names=instanceseg.datasets.cityscapes.RawCityscapesBase.get_semantic_class_names(),
        dataset_path=instanceseg.datasets.cityscapes.get_default_cityscapes_root(),
        dataset_generator=lambda cfg: dataset_generator_registry.get_dataset(
            'cityscapes', cfg, transform=True),
    ),
    'voc':
    RegisteredDataset(
        name='voc',
        default_config=scripts.configurations.voc_cfg.get_default_config(),
        config_option_dict=scripts.configurations.voc_cfg.configurations,
        original_semantic_class_names=instanceseg.datasets.voc.ALL_VOC_CLASS_NAMES,
        dataset_path=instanceseg.datasets.voc.get_default_voc_root(),
        dataset_generator=lambda cfg: dataset_generator_registry.get_dataset(
            'voc', cfg, transform=True)),
    'synthetic':
    RegisteredDataset(
        name='synthetic',
        default_config=scripts.configurations.synthetic_cfg.get_default_config(
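The REGISTRY entries above map a dataset name to its default config, semantic class names, root path, and a dataset_generator callable. A sketch of how an entry might be consumed, assuming RegisteredDataset stores these constructor arguments as attributes (as the self.dataset_generator assignment at the top of this example suggests):

registered = REGISTRY['voc']
print(registered.name, registered.dataset_path)
print(len(registered.original_semantic_class_names), 'semantic classes')
# The generator lambda returns a (train, val) pair, as in Examples #1 and #3
train_dataset, val_dataset = registered.dataset_generator(registered.default_config)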
Example #7
def get_datasets_with_transformations(dataset_type, cfg, transform=True):
    train_dataset, val_dataset = dataset_generator_registry.get_dataset(
        dataset_type, cfg, transform)
    return train_dataset, val_dataset
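A usage sketch for the wrapper above, assuming the two-value form of get_dataset seen in Examples #1 and #3 (a (train, val) pair):

from scripts.configurations import voc_cfg

cfg = voc_cfg.get_default_config()
train_dataset, val_dataset = get_datasets_with_transformations('voc', cfg, transform=True)
print(len(train_dataset), len(val_dataset))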
Example #8
def get_dataset_with_transformations(dataset_type, cfg, split, transform=True):
    dataset = dataset_generator_registry.get_dataset(dataset_type, cfg, split, transform)
    return dataset
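Example #8 targets the per-split signature of get_dataset used in Example #2, rather than the (train, val) pair of Example #7. A sketch of building several splits with it, assuming 'train' and 'val' are valid split names for the chosen dataset type:

from scripts.configurations import cityscapes_cfg

cfg = cityscapes_cfg.get_default_train_config()
datasets = {split: get_dataset_with_transformations('cityscapes', cfg, split, transform=True)
            for split in ('train', 'val')}
print({split: len(d) for split, d in datasets.items()})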