Exemplo n.º 1
0
def get_isbi_loader(config):
    """
    Build a DataLoader for the ISBI2012 dataset from a configuration.

    Supported data file formats are HDF5 (.h5) and TIF (.tif or .tiff).

    Parameters
    ----------
    config : str or dict
        (Path to) Data configuration.

    Returns
    -------
    torch.utils.data.dataloader.DataLoader
        Data loader built as configured.
    """
    cfg = yaml2dict(config)
    isbi_dataset = ISBI2012Dataset.from_config(cfg)
    return DataLoader(isbi_dataset, **cfg.get('loader_config'))
def load_Unet3D(config_dict):
    """
    Load a Unet3D model from neurofire as specified by the training config.

    :param config_dict: dictionary with all important paths
    :return: Unet3D model (moved to GPU when CUDA is available)
    """
    import torch
    import neurofire.models as models
    from inferno.utils.io_utils import yaml2dict

    train_config = yaml2dict(config_dict["train_config_folder"])
    model_cls = getattr(models, train_config.get('model_name'))
    net = model_cls(**train_config.get('model_kwargs'))

    # Prefer GPU execution when available.
    if torch.cuda.is_available():
        net.cuda()

    return net
Exemplo n.º 3
0
def get_cremi_loader(config):
    """
    Build a DataLoader for the CREMI datasets from a configuration.

    Parameters
    ----------
    config : str or dict
        (Path to) Data configuration.

    Returns
    -------
    torch.utils.data.dataloader.DataLoader
        Data loader built as configured.
    """
    cfg = yaml2dict(config)
    # Pop the loader kwargs so only dataset options reach from_config.
    loader_kwargs = cfg.pop('loader_config')
    return DataLoader(CremiDatasets.from_config(cfg), **loader_kwargs)
Exemplo n.º 4
0
def make_validation_config(template_config,
                           validation_config_file,
                           affinity_config=None):
    """
    Derive a validation config from a template and write it to disk.

    Injects the given affinity config (forcing label retention, no
    binarization) into the segmentation volume config, sets its dtype to
    int64, and points blocks 2 and 4 at the nucleus instance labels.

    :param template_config: folder containing 'validation_config.yml'
    :param validation_config_file: output path for the derived YAML file
    :param affinity_config: dict of affinity options; defaults to an
        empty dict (the original code crashed with AttributeError when
        the default None was used, since .update() was called on it)
    """
    template = yaml2dict(os.path.join(template_config, 'validation_config.yml'))

    # BUG FIX: calling .update() on the default None raised AttributeError.
    if affinity_config is None:
        affinity_config = {}
    affinity_config.update({
        'segmentation_to_binary': False,
        'retain_segmentation': True
    })

    seg_config = template['volume_config']['segmentation']
    seg_config['affinity_config'] = affinity_config
    seg_config['dtype'] = 'int64'

    key = 'volumes/labels/nucleus_instance_labels'
    for block_id in (2, 4):
        seg_config['path_in_file'][str(block_id)] = key

    with open(validation_config_file, 'w') as f:
        yaml.dump(template, f)
def get_criterion_and_optimizer(net, config_dict):
    """
    Build the loss criterion and the SGD optimizer for `net`.

    :param net: NeuralNet whose parameters will be optimized
    :param config_dict: dictionary with all important paths
    :return: (criterion, optimizer) tuple
    """
    from inferno.utils.io_utils import yaml2dict
    import torch.optim as optim
    from SorensenDiceLoss import SorensenDiceLoss

    train_config = yaml2dict(config_dict["train_config_folder"])
    sgd_kwargs = train_config.get('training_optimizer_kwargs')

    sgd = optim.SGD(net.parameters(),
                    lr=sgd_kwargs.get('lr'),
                    weight_decay=sgd_kwargs.get('weight_decay'))

    return SorensenDiceLoss(), sgd
Exemplo n.º 6
0
    def __init__(self,
                 path,
                 defect_augmentation_config,
                 name=None,
                 path_in_file=None,
                 data_slice=None,
                 dtype='float32',
                 ignore_slice_list=None,
                 mean=None,
                 std=None,
                 sigma=None,
                 zero_mean_unit_variance=True,
                 p_augment_ws=0.,
                 **slicing_config):
        """
        Volume loader with on-the-fly defect augmentation.

        Forwards all slicing/normalization options to the parent loader and
        builds a DefectAugmentation from the given (path to a) config, after
        injecting the list of slices to ignore.
        """
        super().__init__(path=path,
                         path_in_file=path_in_file,
                         data_slice=data_slice,
                         name=name,
                         dtype=dtype,
                         mean=mean,
                         std=std,
                         sigma=sigma,
                         p_augment_ws=p_augment_ws,
                         zero_mean_unit_variance=zero_mean_unit_variance,
                         **slicing_config)

        # Resolve the (path to a) augmentation config and inject the ignore
        # list before constructing the augmenter.
        augmentation_config = yaml2dict(defect_augmentation_config)
        augmentation_config['ignore_slice_list'] = ignore_slice_list
        self.defect_augmentation = DefectAugmentation.from_config(
            augmentation_config)
        self.cast = Cast(self.dtype)

        # Only track defected slices when the augmenter records any keys.
        self.keep_track_defected_slices = \
            len(self.defect_augmentation.keep_track_of) > 0
Exemplo n.º 7
0
def make_validation_config(validation_config_file, offsets):
    """Write a validation config with the given affinity offsets filled in."""
    cfg = yaml2dict('./template_config/validation_config_hed.yml')
    cfg['volume_config']['segmentation']['affinity_offsets'] = offsets
    with open(validation_config_file, 'w') as out:
        yaml.dump(cfg, out)
Exemplo n.º 8
0
def make_validation_config(config_folder, validation_config_file):
    """Copy the folder's validation-config template to `validation_config_file`."""
    cfg = yaml2dict(f'./{config_folder}/validation_config.yml')
    with open(validation_config_file, 'w') as out:
        yaml.dump(cfg, out)
Exemplo n.º 9
0
def make_data_config(config_folder, data_config_file, n_batches):
    """Write a data config with batch size and worker count scaled to `n_batches`."""
    cfg = yaml2dict(f'./{config_folder}/data_config.yml')
    loader_cfg = cfg['loader_config']
    loader_cfg['batch_size'] = n_batches
    loader_cfg['num_workers'] = 8 * n_batches  # 8 workers per batch element
    with open(data_config_file, 'w') as out:
        yaml.dump(cfg, out)
Exemplo n.º 10
0
def get_autophagosom_loader(config):
    """Build a DataLoader for the autophagosome datasets from a (path to a) config."""
    cfg = yaml2dict(config)
    # Separate loader kwargs from dataset options before building the datasets.
    loader_kwargs = cfg.pop('loader_config')
    return DataLoader(AutophagosomDatasets.from_config(cfg), **loader_kwargs)
Exemplo n.º 11
0
# Directory where training logs are written (created if missing).
LOG_DIRECTORY = ensure_dir('./logs_2')


BATCHSIZE = 8
N_DIRECTIONS = 8  # number of star-distance ray directions


# unsq = transforms.Lambda(lambda x: torch.unsqueeze(x, 0))
transpose = transforms.Lambda(lambda x: torch.transpose(x, 0, 1))
squeeze = transforms.Lambda(lambda x: torch.squeeze(x, 1))
fromnumpy = transforms.Lambda(lambda x: torch.from_numpy(x))
trans = transforms.Compose([fromnumpy])  # raw images: numpy -> tensor
trans2 = transforms.Compose([fromnumpy, squeeze])  # targets: numpy -> tensor, drop axis 1

# Training data: raw volume plus precomputed star-distance targets, zipped
# so the loader yields (image, target) pairs.
imageset_train = HDF5VolumeLoader(path='./train-volume.h5', path_in_h5_dataset='data',
                                  transforms=trans, **yaml2dict('config_train.yml')['slicing_config'])
labelset_train = HDF5VolumeLoader(path='./stardistance.h5', path_in_h5_dataset='data',
                                  transforms=trans2, **yaml2dict('config_train.yml')['slicing_config_truth'])
trainset = Zip(imageset_train, labelset_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCHSIZE,
                                          shuffle=True, num_workers=2)
# Validation data, wired the same way.
imageset_val = HDF5VolumeLoader(path='./val-volume.h5', path_in_h5_dataset='data',
                                transforms=trans, **yaml2dict('config_val.yml')['slicing_config'])
labelset_val = HDF5VolumeLoader(path='./stardistance_val.h5', path_in_h5_dataset='data',
                                transforms=trans2, **yaml2dict('config_val.yml')['slicing_config_truth'])
# NOTE(review): `trainset` is rebound to the VALIDATION pair here. Because
# `trainloader` above already captured the training Zip, `valloader` does
# serve validation data — but the name is misleading; consider `valset`.
trainset = Zip(imageset_val, labelset_val)
valloader = torch.utils.data.DataLoader(trainset, batch_size=BATCHSIZE,
                                        shuffle=True, num_workers=2)


net = torch.nn.Sequential(
Exemplo n.º 12
0
import torchvision.transforms as transforms

import matplotlib.pyplot as plt

# Tensor conversion helpers for the HDF5 volume loaders.
transpose = transforms.Lambda(lambda x: torch.transpose(x, 0, 1))
squeeze = transforms.Lambda(lambda x: torch.squeeze(x, 1))
fromnumpy = transforms.Lambda(lambda x: torch.from_numpy(x))
trans = transforms.Compose([fromnumpy])  # raw images: numpy -> tensor
trans2 = transforms.Compose([fromnumpy, squeeze])  # targets: numpy -> tensor, drop axis 1

# Validation volumes: raw images and precomputed star-distance targets.
imageset_val = HDF5VolumeLoader(
    path='./val-volume.h5',
    path_in_h5_dataset='data',
    transforms=trans,
    **yaml2dict('config_val.yml')['slicing_config'])
labelset_val = HDF5VolumeLoader(
    path='./stardistance_val.h5',
    path_in_h5_dataset='data',
    transforms=trans2,
    **yaml2dict('config_val.yml')['slicing_config_truth'])

# Restore the latest (not best) checkpoint onto the CPU.
trainer = Trainer()

trainer.load(from_directory='checkpoints', map_location='cpu', best=False)

# Run one validation sample (index 5) through model + loss; the two
# unsqueeze(0) calls add batch and channel dimensions.
result, loss = trainer.apply_model_and_loss(
    imageset_val[5].unsqueeze(0).unsqueeze(0),  #.to('cuda'),
    labelset_val[5].unsqueeze(0).unsqueeze(0))  #.to('cuda'))

print(loss)
def make_train_config(train_config_file, gpus):
    """Write a train config with the device list filled in."""
    cfg = yaml2dict('./template_config/train_config.yml')
    cfg['devices'] = gpus
    with open(train_config_file, 'w') as out:
        yaml.dump(cfg, out)
Exemplo n.º 14
0
 def from_config(cls, config):
     """
     Alternate constructor: build an instance from a (path to a) YAML config.

     Reads 'volume_config' and 'slicing_config' from the parsed configuration.
     """
     cfg = yaml2dict(config)
     return cls(volume_config=cfg.get('volume_config'),
                slicing_config=cfg.get('slicing_config'))
Exemplo n.º 15
0
def make_validation_config(validation_config_file, affinity_config):
    """Write a validation config with the given affinity config injected."""
    cfg = yaml2dict('./template_config/validation_config.yml')
    cfg['volume_config']['segmentation']['affinity_config'] = affinity_config
    with open(validation_config_file, 'w') as out:
        yaml.dump(cfg, out)
Exemplo n.º 16
0
def make_validation_config(validation_config_file, name):
    """Write a semantic-training validation config based on template `name`."""
    cfg = yaml2dict('./template_config/validation_%s.yaml' % name)
    master_cfg = cfg['master_config']
    master_cfg['affinity_config'] = None
    master_cfg['train_semantic'] = True
    with open(validation_config_file, 'w') as out:
        yaml.dump(cfg, out)
def make_train_config(train_config_file, offsets, gpus):
    """Write a train config sized to the affinity offsets and device list."""
    cfg = yaml2dict('./template_config/train_config.yml')
    cfg['model_kwargs']['out_channels'] = len(offsets)  # one channel per offset
    cfg['devices'] = gpus
    with open(train_config_file, 'w') as out:
        yaml.dump(cfg, out)
Exemplo n.º 18
0
def load_volume(inference_config):
    """Build the raw HDF5 volume described by a (path to an) inference config."""
    cfg = yaml2dict(inference_config)
    return RawVolumeHDF5(**cfg['volume_config']['raw'],
                         **cfg['slicing_config'])
Exemplo n.º 19
0
class LabelToTarget(Transform):
    """
    Batch transform turning an instance-label target into star-distance maps.
    """
    # The original no-op __init__ (which only called super().__init__()) was
    # removed; Transform.__init__ is inherited directly.

    def batch_function(self, tensors):
        # `tensors` is a (prediction, target) pair; target[0] is a label image.
        prediction, target = tensors
        # star_dist puts the ray-direction axis last; move it to the front so
        # the target is channel-first like the prediction.
        target = np.moveaxis(star_dist(target[0].numpy(), N_DIRECTIONS, opencl=OPENCL_AVAILABLE), -1, 0)
        return prediction, target

# Cast label arrays to signed 32-bit integer tensors.
tosignedint = transforms.Lambda(lambda x: torch.tensor(np.int32(x), dtype=torch.int32))

# Convert a label image to channel-first star-distance maps.
sdist = transforms.Lambda(lambda x: np.moveaxis(star_dist(x[0], N_DIRECTIONS, opencl=OPENCL_AVAILABLE), -1, 0))
labeltotarget = transforms.Compose([sdist])

# Training data: raw volume plus instance-label segmentation, zipped so the
# loader yields (image, label) pairs.
train_images = HDF5VolumeLoader(path='./train-volume.h5', path_in_h5_dataset='data',
                                **yaml2dict('config_train.yml')['slicing_config'])
train_labels = HDF5VolumeLoader(path='labeled_segmentation.h5', path_in_h5_dataset='data',
                                transforms=tosignedint, **yaml2dict('config_train.yml')['slicing_config'])
trainset = Zip(train_images, train_labels)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCHSIZE,
                                          shuffle=True, num_workers=2)


# Validation data, wired the same way.
val_images = HDF5VolumeLoader(path='./val-volume.h5', path_in_h5_dataset='data',
                              **yaml2dict('config_val.yml')['slicing_config'])
val_labels = HDF5VolumeLoader(path='labeled_segmentation_validation.h5', path_in_h5_dataset='data',
                              transforms=tosignedint, **yaml2dict('config_val.yml')['slicing_config'])
valset = Zip(val_images, val_labels)
# BUG FIX: the validation loader previously iterated `trainset`, so `valset`
# was never used and "validation" ran on the training data.
valloader = torch.utils.data.DataLoader(valset, batch_size=BATCHSIZE,
                                        shuffle=True, num_workers=2)