Example #1
def create_experiment(config):
    # Fetch dataset
    dataset = get_dataset(config)
    # Create experiment and set indexes for train, val and test
    exp = Experiment(config=config)
    exp.define_runs_splits(dataset)
    return exp, dataset
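
A minimal usage sketch for create_experiment, assuming the Experiment object exposes the name that Experiment(load_exp_name=...) expects in the run() examples below (the config keys and the exp.name attribute here are assumptions):

# Illustrative only: config keys and exp.name are assumed, not taken from the source.
config = {'dataset_name': 'segChallengeProstate', 'batch_size': 128}
exp, dataset = create_experiment(config)
# Later, a run() entry point (see the examples below) restores it by name:
# run(experiment_name=exp.name, idx_k=0)
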
Example #2
def run(experiment_name, idx_k=0):
    # Restore configuration and get experiment run
    exp = Experiment(load_exp_name=experiment_name)
    config = exp.config
    exp_run = exp.get_experiment_run(idx_k=idx_k)

    # Fetch dataset
    dataset = get_dataset(config)

    # Create task and class mappings
    nr_tasks = 5
    task_splitter = ClassTaskSplitter(dataset=dataset,
                                      save_path=exp_run.paths['obj'],
                                      nr_tasks=nr_tasks)

    # Get PyTorch datasets
    splits = ['train', 'val', 'test']
    batch_size = config.get('batch_size', 128)
    pytorch_datasets = [{split: None
                         for split in splits} for task_ix in range(nr_tasks)]
    for task_ix in range(nr_tasks):
        for split in splits:
            index_list = task_splitter.get_task_ixs(
                exp_ixs=exp_run.dataset_ixs[split], task_ix=task_ix)
            pytorch_datasets[task_ix][split] = TorchDS(dataset_obj=dataset,
                                                       index_list=index_list)
    print('Got datasets')

    # Apply oracles
    #task_oracle = get_class('src.continual_learning.oracles.task_oracle.TaskOracle')(
    #    pytorch_datasets=pytorch_datasets, save_path=exp_run.paths['root'])

    oracle = get_class(
        'src.continual_learning.oracles.autoencoder_oracle.AutoencoderOracle'
    )(pytorch_datasets=pytorch_datasets,
      save_path=exp_run.paths['root'],
      autoencoder_path=
      'src.models.autoencoding.pretrained_autoencoder.PretrainedAutoencoder',
      feature_model_name='AlexNet')
    print('Initialized oracles')

    # Plot confusion matrix
    for split in splits:
        cm = oracle.get_domain_confusion(split=split)
        cm.plot(exp_run.paths['results'],
                oracle.name + '_' + split + '_domain_confusion',
                label_predicted='Selected Model (predicted)',
                label_actual='Data Task (actual)',
                figure_size=(7, 5))
Example #3
def run(experiment_name, idx_k=0):
    # Restore configuration and get experiment run
    exp = Experiment(load_exp_name=experiment_name)
    config = exp.config
    exp_run = exp.get_experiment_run(idx_k=idx_k)

    # Set GPU
    set_gpu(config.get('gpu', 0))
    
    # Fetch dataset
    dataset = get_dataset(config)

    # Create task and class mappings
    nr_tasks = 5
    task_splitter = ClassTaskSplitter(dataset=dataset, save_path=exp_run.paths['obj'], nr_tasks=nr_tasks)

    # Get PyTorch datasets and dataloaders
    splits = ['train', 'val', 'test']
    batch_size = config.get('batch_size', 128)
    pytorch_datasets = [{split: None for split in splits} for task_ix in range(nr_tasks)]
    dataloaders = [{split: None for split in splits} for task_ix in range(nr_tasks)]
    for task_ix in range(nr_tasks):
        for split in splits:
            index_list = task_splitter.get_task_ixs(exp_ixs=exp_run.dataset_ixs[split], task_ix=task_ix)
            pytorch_datasets[task_ix][split] = TorchDS(dataset_obj=dataset, index_list=index_list, transform='pretrained')
            shuffle = (split == 'train')
            dataloaders[task_ix][split] = torch.utils.data.DataLoader(pytorch_datasets[task_ix][split], batch_size=batch_size, shuffle=shuffle)

    # Get model and agent
    model = get_class(config['model_class_path'])(config, in_channels=dataset.nr_channels, img_size=dataset.img_shape)
    model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-4)
    criterion = nn.MSELoss()
    agent = get_class(config['agent_class_path'])(config, model=model, optimizer=optimizer, criterion=criterion)

    # Train on each task in sequence and save the agent state after every task
    for task_ix in range(nr_tasks):
        agent.train_model(dataloaders=dataloaders[task_ix], nr_epochs=20)
        agent.save_state(path=exp_run.paths['agent_states'], name='task_'+str(task_ix))
Example #4
def normalization_values(pytorch_dataset, batch_size=64):
    # Compute per-channel mean and std over all pixels of the dataset.
    # The batch_size only affects iteration speed, not the result.
    dataloader = torch.utils.data.DataLoader(pytorch_dataset, batch_size=batch_size)
    count, mean, std = 0, 0., 0.
    for data, _ in dataloader:
        b, c, h, w = data.shape
        nb_pixels = b * h * w
        sum_ = torch.sum(data, dim=[0, 2, 3])
        sum_of_square = torch.sum(data**2, dim=[0, 2, 3])
        # Running means of X and X^2 over all pixels seen so far
        mean = (count * mean + sum_) / (count + nb_pixels)
        std = (count * std + sum_of_square) / (count + nb_pixels)
        count += nb_pixels

    # std holds E[X^2] at this point; convert it to the standard deviation
    return mean, torch.sqrt(std - mean**2)


#%%
# segChallengeProstate
dataset = get_dataset({"dataset_name": "segChallengeProstate"})
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.ToPILImage(),
    transforms.Resize(size=(320, 320)),
    transforms.ToTensor()
])
pytorch_dataset = TorchSegmentationDataset(dataset_obj=dataset,
                                           index_list=list(
                                               range(len(dataset.instances))),
                                           transform=transform)
print(normalization_values(pytorch_dataset))
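
The per-channel mean and std computed above would typically feed a normalization step; a minimal sketch reusing the same resize pipeline (the transform_normalized name is illustrative, not from the source):

# Illustrative only: plug the computed statistics into a Normalize transform.
mean, std = normalization_values(pytorch_dataset)
transform_normalized = transforms.Compose([
    transforms.ToTensor(),
    transforms.ToPILImage(),
    transforms.Resize(size=(320, 320)),
    transforms.ToTensor(),
    transforms.Normalize(mean=mean.tolist(), std=std.tolist())
])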

# %%
dataset = get_dataset({
    "dataset_name": "medcom",