def test_forward_hook():
    model = build_example_model()
    cfg = voc_cfg.get_default_config()
    print('Getting datasets')
    gpu = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)
    dataloaders = instanceseg.factory.data.get_dataloaders(cfg, 'voc', cuda, sampler_cfg=None)
    layer_names = ['conv1x1_instance_to_semantic'] if model.map_to_semantic else []
    layer_names += ['upscore8', 'score_pool4']
    activations = None
    for i, (x, y) in enumerate(dataloaders['train']):
        activations = model.get_activations(torch.autograd.Variable(x.cuda()), layer_names)
        if i >= 2:
            break
    assert set(activations.keys()) == set(layer_names)
    try:
        [activations[k].size() for k in activations.keys()]
    except AttributeError:
        raise Exception('activations should all be tensors')
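# Minimal sketch of the forward-hook mechanism that model.get_activations is assumed to wrap:
# a hook registered on a named submodule records that module's output during a forward pass.
# The layer name 'upscore8' is taken from the test above; the capture logic itself is generic
# PyTorch and assumes the named module returns a single tensor.
def _example_capture_activation(model, x, layer_name='upscore8'):
    captured = {}

    def _hook(module, inputs, output):
        captured[layer_name] = output.detach()

    handle = dict(model.named_modules())[layer_name].register_forward_hook(_hook)
    try:
        model(x)
    finally:
        handle.remove()  # always remove the hook so later forward passes are unaffected
    return captured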
def get_default_datasets_for_instance_counts(dataset_type):
    """
    Returns the train/val datasets before either the precomputed_file_transformation or the
    runtime_transformation has run on them, along with the transformer tag identifying those
    transformations.
    """
    if dataset_type == 'voc':
        default_cfg = voc_cfg.get_default_config()
        default_cfg['n_instances_per_class'] = None  # don't want to cap instances when running stats
        precomputed_file_transformation, runtime_transformation = get_transformations(
            default_cfg, voc.ALL_VOC_CLASS_NAMES)
        train_dataset, val_dataset = get_voc_datasets(
            dataset_path=default_cfg['dataset_path'],
            precomputed_file_transformation=precomputed_file_transformation,
            runtime_transformation=runtime_transformation)
    elif dataset_type == 'cityscapes':
        default_cfg = cityscapes_cfg.get_default_config()
        default_cfg['n_instances_per_class'] = None  # don't want to cap instances when running stats
        precomputed_file_transformation, runtime_transformation = get_transformations(default_cfg)
        train_dataset, val_dataset = get_cityscapes_datasets(
            default_cfg['dataset_path'],
            precomputed_file_transformation=precomputed_file_transformation,
            runtime_transformation=runtime_transformation)
    elif dataset_type == 'synthetic':
        # synthetic data is different every time -- cannot save instance counts in between runs
        return None, None, None
    else:
        raise ValueError('dataset_type {} not recognized'.format(dataset_type))
    transformer_tag = get_transformer_identifier_tag(precomputed_file_transformation,
                                                     runtime_transformation)
    return train_dataset, val_dataset, transformer_tag
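# Hedged usage sketch: the transformer tag returned above is assumed to key a cache of
# precomputed instance counts, so stats are recomputed only when the transformations change.
# The cache directory, filename pattern, and np.save/np.load round trip are illustrative
# assumptions, not the repository's actual stats pipeline.
def _example_cached_instance_counts(dataset_type, compute_counts_fn, cache_dir='/tmp/instance_counts'):
    import os
    import os.path as osp
    import numpy as np
    train_dataset, val_dataset, transformer_tag = get_default_datasets_for_instance_counts(dataset_type)
    if train_dataset is None:  # e.g. 'synthetic': counts cannot be cached between runs
        return None
    os.makedirs(cache_dir, exist_ok=True)
    cache_path = osp.join(cache_dir, '{}_{}_train.npy'.format(dataset_type, transformer_tag))
    if osp.exists(cache_path):
        return np.load(cache_path)
    counts = compute_counts_fn(train_dataset)
    np.save(cache_path, np.asarray(counts))
    return counts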
def test_lr_of_dataset(dataset_name):
    print('Getting datasets')
    if dataset_name == 'voc':
        cfg = voc_cfg.get_default_config()
        # unordered
        cfg['ordering'] = None
        instanceseg.utils.scripts.set_random_seeds()
        train_dataset_unordered, _ = dataset_generator_registry.get_dataset('voc', cfg)
        # ordered
        cfg['ordering'] = 'LR'
        instanceseg.utils.scripts.set_random_seeds()
        train_dataset_ordered, _ = dataset_generator_registry.get_dataset('voc', cfg)
    elif dataset_name == 'synthetic':
        cfg = synthetic_cfg.get_default_config()
        # unordered
        cfg['ordering'] = None
        instanceseg.utils.scripts.set_random_seeds()
        train_dataset_unordered, _ = dataset_generator_registry.get_dataset('synthetic', cfg)
        # ordered
        cfg['ordering'] = 'LR'
        instanceseg.utils.scripts.set_random_seeds()
        train_dataset_ordered, _ = dataset_generator_registry.get_dataset('synthetic', cfg)
    else:
        raise ValueError('dataset_name {} not recognized'.format(dataset_name))
    print('Testing left-right ordering...')
    test_lr_from_datasets(train_dataset_unordered, train_dataset_ordered)
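# Hedged runner sketch: exercises both branches test_lr_of_dataset accepts ('voc' and
# 'synthetic'); any other name raises ValueError.
def _example_run_lr_ordering_tests():
    for dataset_name in ('voc', 'synthetic'):
        test_lr_of_dataset(dataset_name)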
def build_example_model(**model_cfg_override_kwargs):
    # script_utils.check_clean_work_tree()
    gpu = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()
    cfg = voc_cfg.get_default_config()
    for k, v in model_cfg_override_kwargs.items():
        cfg[k] = v
    problem_config = instanceseg.factory.models.get_problem_config(
        ALL_VOC_CLASS_NAMES, 2, map_to_semantic=cfg['map_to_semantic'])
    model, start_epoch, start_iteration = instanceseg.factory.models.get_model(
        cfg, problem_config, checkpoint_file=None, semantic_init=None, cuda=cuda)
    return model
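# Hedged usage sketch: build_example_model forwards keyword arguments as cfg overrides, so a
# variant of the default VOC config can be built inline. 'map_to_semantic' is a cfg key used
# elsewhere in these tests; any other key from voc_cfg.get_default_config() could be overridden
# the same way.
def _example_build_semantic_mapping_model():
    model = build_example_model(map_to_semantic=True)
    assert model.map_to_semantic
    return model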
def main():
    # Setup
    cfg = voc_cfg.get_default_config()
    print('Getting datasets')
    train_dataset, val_dataset = dataset_generator_registry.get_dataset('voc', cfg)
    gpu = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)
    loader_kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    print('Running single-image test')
    test_single_image_sampler(train_dataset, loader_kwargs, image_index=10)
    print('Running vanilla test')
    test_vanilla_sampler(train_dataset, loader_kwargs)
import instanceseg.factory.data
from scripts.configurations import voc_cfg

if __name__ == '__main__':
    cfg = voc_cfg.get_default_config()
    cfg['ordering'] = 'LR'
    print('Getting datasets')
    train_dataset, val_dataset = instanceseg.factory.data.get_datasets_with_transformations('voc', cfg)
    num_images = len(train_dataset)
    print('Loaded {}/{}'.format(0, num_images))
    for idx, (img, (sem_lbl, inst_lbl)) in enumerate(train_dataset):
        if (idx + 1) % 100 == 0:  # report progress every 100 images
            print('Loaded {}/{}'.format(idx + 1, num_images))
    print('Loaded {}/{}'.format(num_images, num_images))
def test(frozen=True):
    gpu = 0
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()
    cfg = voc_cfg.get_default_config()
    cfg_override_args = {
        'n_instances_per_class': 1,
        'max_iteration': 1,
        'interval_validate': 1,
        'lr': 0.01,
    }
    for k, v in cfg_override_args.items():
        cfg.pop(k)  # ensures the key actually existed before overriding it
        cfg[k] = v
    problem_config = instanceseg.factory.models.get_problem_config(
        ALL_VOC_CLASS_NAMES, cfg['n_instances_per_class'])
    model, start_epoch, start_iteration = instanceseg.factory.models.get_model(
        cfg, problem_config, checkpoint_file=None, semantic_init=None, cuda=cuda)
    initial_model = copy.deepcopy(model)
    # script_utils.check_clean_work_tree()
    if frozen:
        model_utils.freeze_all_children(model)
        for module_name, module in model.named_children():
            assert all([p.requires_grad is False for p in module.parameters()]), \
                '{} not frozen'.format(module_name)
    sampler_cfg = sampler_cfgs['default']
    sampler_cfg['train'].n_images = 1
    sampler_cfg['train_for_val'].n_images = 1
    sampler_cfg['val'].n_images = 1
    dataloaders = instanceseg.factory.data.get_dataloaders(cfg, 'voc', cuda, sampler_cfg)
    optim = instanceseg.factory.optimizer.get_optimizer(cfg, model, None)
    out_dir = '/tmp/{}'.format(osp.basename(__file__))
    trainer = trainers.get_trainer(cfg, cuda, model, optim, dataloaders, problem_config,
                                   out_dir=out_dir)
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()

    state1 = initial_model.state_dict()
    state2 = model.state_dict()
    if frozen:
        assert set(state1.keys()) == set(state2.keys()), 'Debug Error'
        for param_name in state1.keys():
            assert torch.equal(state1[param_name], state2[param_name])
        print('Confirmed that the network does not learn when frozen')
    else:
        assert not torch.equal(state1['conv1_1.weight'], state2['conv1_1.weight'])
        print('Confirmed that the network does learn (when weights aren\'t frozen)')
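# Hedged entry-point sketch (assumes this test module is run directly as a script): checking both
# modes in one invocation confirms that frozen parameters stay fixed and that unfrozen parameters
# move after a single training iteration.
if __name__ == '__main__':
    test(frozen=True)
    test(frozen=False)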