Code Example #1
    input_h5data = [(os.path.join(data_root, f'raw_{i}.h5'), 'raw')
                    for i in range(3)]
    target_h5data = [(os.path.join(data_root, f'barrier_int16_{i}.h5'), 'lab')
                     for i in range(3)]

    aniso_factor = 2  # Anisotropy in z dimension. E.g. 2 means half resolution in z dimension.
    common_data_kwargs = {  # Common options for training and valid sets.
        'aniso_factor': aniso_factor,
        'patch_shape': (44, 88, 88),
        # 'offset': (8, 20, 20),
        'out_channels': 2,
        # 'in_memory': True  # Uncomment to avoid disk I/O (if you have enough host memory for the data)
    }
    # Normalization statistics (mean and std of the input data)
    norm_mean = (155.291411, )
    norm_std = (42.599973, )
    valid_transform = transforms.Normalize(mean=norm_mean,
                                           std=norm_std,
                                           inplace=True)

    print('Loading dataset...')
    valid_dataset = PatchCreator(
        input_sources=input_h5data,
        target_sources=target_h5data,
        train=False,
        epoch_size=40,  # How many samples to use for each validation run
        warp_prob=0,
        warp_kwargs={'sample_aniso': aniso_factor != 1},
        transform=valid_transform,
        **common_data_kwargs)
    valid_loader = DataLoader(valid_dataset, num_workers=4, pin_memory=True)

    # Validation metrics
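    # Hedged sketch of what typically follows here in elektronn3 training
    # scripts; the metric names below are an assumption based on the
    # elektronn3.training.metrics module and are not part of the excerpt.
    from elektronn3.training import metrics
    valid_metrics = {
        'val_accuracy': metrics.bin_accuracy,
        'val_precision': metrics.bin_precision,
        'val_recall': metrics.bin_recall,
        'val_DSC': metrics.bin_dice_coefficient,
        'val_IoU': metrics.bin_iou,
    }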
Code Example #2
        state = torch.load(pretrained)
        if isinstance(state, dict):
            model.load_state_dict(state['model_state_dict'])
            optimizer_state_dict = state.get('optimizer_state_dict')
            lr_sched_state_dict = state.get('lr_sched_state_dict')
            if optimizer_state_dict is None:
                logger.warning('optimizer_state_dict not found.')
            if lr_sched_state_dict is None:
                logger.warning('lr_sched_state_dict not found.')
        elif isinstance(state, nn.Module):
            logger.warning(_warning_str)
            model = state
        else:
            raise ValueError(f"Can't load {pretrained}.")

# Transformations to be applied to samples before feeding them to the network
common_transforms = [
    transforms.SqueezeTarget(dim=0),  # Workaround for neuro_data_cdhw
    transforms.Normalize(mean=dataset_mean, std=dataset_std)
]
train_transform = transforms.Compose(common_transforms + [
    # transforms.RandomRotate2d(prob=0.9),
    # transforms.RandomGrayAugment(channels=[0], prob=0.3),
    # transforms.RandomGammaCorrection(gamma_std=0.25, gamma_min=0.25, prob=0.3),
    # transforms.AdditiveGaussianNoise(sigma=0.1, channels=[0], prob=0.3),
])
valid_transform = transforms.Compose(common_transforms + [])

# Specify data set
aniso_factor = 2  # Anisotropy in z dimension. E.g. 2 means half resolution in z dimension.
common_data_kwargs = {  # Common options for training and valid sets.
    'aniso_factor': aniso_factor,
    'patch_shape': (44, 88, 88),
    # 'offset': (8, 20, 20),
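For completeness, a minimal sketch of how the recovered state dicts from the excerpt above would typically be re-applied; the optimizer and lr_scheduler names are assumptions for objects constructed elsewhere in the full script:

# Re-apply restored training state (optimizer/lr_scheduler assumed to exist).
if optimizer_state_dict is not None:
    optimizer.load_state_dict(optimizer_state_dict)
if lr_sched_state_dict is not None:
    lr_scheduler.load_state_dict(lr_sched_state_dict)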
Code Example #3
        state = torch.load(pretrained)
        if isinstance(state, dict):
            model.load_state_dict(state['model_state_dict'])
            optimizer_state_dict = state.get('optimizer_state_dict')
            lr_sched_state_dict = state.get('lr_sched_state_dict')
            if optimizer_state_dict is None:
                logger.warning('optimizer_state_dict not found.')
            if lr_sched_state_dict is None:
                logger.warning('lr_sched_state_dict not found.')
        elif isinstance(state, nn.Module):
            logger.warning(_warning_str)
            model = state
        else:
            raise ValueError(f"Can't load {pretrained}.")

# Transformations to be applied to samples before feeding them to the network
common_transforms = [
    transforms.SqueezeTarget(dim=0),  # Workaround for neuro_data_cdhw
    transforms.Normalize(mean=dataset_mean, std=dataset_std)
]
train_transform = transforms.Compose(common_transforms + [
    # transforms.RandomGrayAugment(channels=[0], prob=0.3),
    # transforms.RandomGammaCorrection(gamma_std=0.25, gamma_min=0.25, prob=0.3),
    # transforms.AdditiveGaussianNoise(sigma=0.1, channels=[0], prob=0.3),
])
valid_transform = transforms.Compose(common_transforms + [])

# Specify data set
aniso_factor = 2  # Anisotropy in z dimension. E.g. 2 means half resolution in z dimension.
common_data_kwargs = {  # Common options for training and valid sets.
    'aniso_factor': aniso_factor,
    'patch_shape': (48, 96, 96),
    # 'offset': (8, 20, 20),
    'num_classes': 2,
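As in Example #1, these shared options are typically unpacked into the dataset constructors. A minimal sketch, assuming input_h5data and target_h5data are defined as in Example #1:

# Construct the training set, splatting the shared options.
train_dataset = PatchCreator(
    input_sources=input_h5data,
    target_sources=target_h5data,
    train=True,
    transform=train_transform,
    **common_data_kwargs)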
Code Example #4
max_steps = args.max_steps
lr = 0.0004  # Initial learning rate
lr_stepsize = 1000  # Step size for learning rate decay (in training steps)
lr_dec = 0.995  # Multiplicative learning rate decay factor
batch_size = 1

if args.resume is not None:  # Load pretrained network params
    model.load_state_dict(torch.load(os.path.expanduser(args.resume)))

# Normalization statistics (mean and std of the input data)
dataset_mean = (143.97594, )
dataset_std = (44.264744, )

# Transformations to be applied to samples before feeding them to the network
common_transforms = [
    transforms.Normalize(mean=dataset_mean, std=dataset_std, inplace=True)
]
train_transform = transforms.Compose(common_transforms + [
    transforms.RandomCrop((128, 128)),  # Use smaller patches for training
    transforms.RandomFlip(),
    transforms.AdditiveGaussianNoise(prob=0.5, sigma=0.1)
])
valid_transform = transforms.Compose(common_transforms +
                                     [transforms.RandomCrop((144, 144))])
# Specify data set
train_dataset = SimpleNeuroData2d(train=True,
                                  transform=train_transform,
                                  out_channels=out_channels)
valid_dataset = SimpleNeuroData2d(train=False,
                                  transform=valid_transform,
                                  out_channels=out_channels)
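To feed these datasets to a training loop, they would typically be wrapped in DataLoaders, mirroring the loader setup in Example #1 (the shuffle choice here is an assumption):

from torch.utils.data import DataLoader

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                          num_workers=4, pin_memory=True)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size,
                          num_workers=4, pin_memory=True)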
Code Example #5
File: train_noise2void.py Project: flmuk/elektronn3
        state = torch.load(pretrained)
        if isinstance(state, dict):
            model.load_state_dict(state['model_state_dict'])
            optimizer_state_dict = state.get('optimizer_state_dict')
            lr_sched_state_dict = state.get('lr_sched_state_dict')
            if optimizer_state_dict is None:
                logger.warning('optimizer_state_dict not found.')
            if lr_sched_state_dict is None:
                logger.warning('lr_sched_state_dict not found.')
        elif isinstance(state, nn.Module):
            logger.warning(_warning_str)
            model = state
        else:
            raise ValueError(f"Can't load {pretrained}.")

# With mean 0 and std 1, the Normalize transform below is effectively a no-op,
# so training operates on the raw intensity values.
dataset_mean = (0.0, )
dataset_std = (1.0, )

# Transformations to be applied to samples before feeding them to the network
common_transforms = [
    transforms.SqueezeTarget(dim=0),  # Workaround for neuro_data_cdhw
    transforms.Normalize(mean=dataset_mean, std=dataset_std)
]
train_transform = transforms.Compose(common_transforms + [
    # transforms.RandomRotate2d(prob=0.9),
    # transforms.RandomGrayAugment(channels=[0], prob=0.3),
    # transforms.RandomGammaCorrection(gamma_std=0.25, gamma_min=0.25, prob=0.3),
    # transforms.AdditiveGaussianNoise(sigma=0.1, channels=[0], prob=0.3),
])
valid_transform = transforms.Compose(common_transforms + [])

# Specify data set
aniso_factor = 2  # Anisotropy in z dimension. E.g. 2 means half resolution in z dimension.
common_data_kwargs = {  # Common options for training and valid sets.
    'aniso_factor': aniso_factor,
    'patch_shape': (44, 88, 88),
    # 'offset': (8, 20, 20),
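As a quick sanity check of the transform pipeline above, the composed transforms can be applied to a dummy sample. A minimal sketch, assuming the elektronn3 convention that transforms take and return an (input, target) pair of numpy arrays, with shapes chosen to match the patch_shape above:

import numpy as np

inp = np.zeros((1, 44, 88, 88), dtype=np.float32)  # (C, D, H, W)
target = np.zeros((1, 44, 88, 88), dtype=np.int64)
inp, target = train_transform(inp, target)
print(inp.shape, target.shape)  # SqueezeTarget drops target dim 0 -> (44, 88, 88)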