def main():
    import random
    import numpy as np
    import torch
    import torch.nn as nn

    # RNG state and cuDNN flags before seeding
    print(np.random.rand(10))
    print(random.random())
    print(nn.Conv2d(2, 2, 3, 1, 0).weight.data)
    print(torch.backends.cudnn.deterministic)
    print(torch.backends.cudnn.benchmark)

    print('------')

    # inside the context: all RNGs are seeded and cuDNN is forced deterministic
    with fixed_seed(0, True):
        print(np.random.rand(10))
        print(random.random())
        print(nn.Conv2d(2, 2, 3, 1, 0).weight.data)
        print(torch.backends.cudnn.deterministic)
        print(torch.backends.cudnn.benchmark)

    # after the context exits: the cuDNN flags are restored
    print(np.random.rand(10))
    print(random.random())
    print(nn.Conv2d(2, 2, 3, 1, 0).weight.data)
    print(torch.backends.cudnn.deterministic)
    print(torch.backends.cudnn.benchmark)
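# NOTE: `fixed_seed` itself is defined elsewhere in the repository. Below is a
# minimal sketch of a compatible context manager, assuming its signature is
# `fixed_seed(seed, strict)` as used above and that `strict=True` forces
# deterministic cuDNN. The name `fixed_seed_sketch` and the exact behavior are
# illustrative assumptions, not the repository's actual implementation.
import contextlib

@contextlib.contextmanager
def fixed_seed_sketch(seed, strict=False):
    """Seed the Python/NumPy/PyTorch RNGs; optionally force deterministic cuDNN."""
    import random
    import numpy as np
    import torch

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)  # also seeds all CUDA devices
    if strict:
        # trade speed for bitwise reproducibility
        saved = (torch.backends.cudnn.deterministic,
                 torch.backends.cudnn.benchmark)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    try:
        yield
    finally:
        if strict:
            # restore the cuDNN flags on exit, matching the demo above
            (torch.backends.cudnn.deterministic,
             torch.backends.cudnn.benchmark) = saved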
import argparse
import os

def main():
    parser = argparse.ArgumentParser(
        description='Example: Uncertainty estimates in segmentation',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data_root', '-d', type=str, default='./preprocessed',
                        help='Directory to dataset')
    parser.add_argument('--batchsize', '-b', type=int, default=2,
                        help='Number of images in each mini-batch')
    parser.add_argument('--iteration', '-i', type=int, default=50000,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--frequency', '-f', type=int, default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu', '-g', type=str, default='cuda:0',
                        help='GPU Device')
    parser.add_argument('--out', '-o', default='logs',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--valid_augment', action='store_true',
                        help='Enable data augmentation during validation')
    parser.add_argument('--valid_split_ratio', type=float, default=0.1,
                        help='Ratio of validation data to training data')
    parser.add_argument('--valid_split_type', type=str, default='slice',
                        choices=['slice', 'patient'],
                        help='How to choose the validation data from the training data')
    parser.add_argument('--lr', type=float, default=1e-4,
                        help='Learning rate')
    parser.add_argument('--decay', type=float, default=-1,
                        help='Weight of L2 regularization')
    parser.add_argument('--mc_iteration', type=int, default=15,
                        help='Number of MCMC iterations')
    parser.add_argument('--pinfall', type=int, default=-1,
                        help='Countdown for early stopping of training')
    parser.add_argument('--freeze_upconv', action='store_true',
                        help='Disable updating the up-convolutional weights. If the weights are '
                             'initialized with bilinear kernels, the up-conv acts as a bilinear upsampler.')
    parser.add_argument('--test_on_test', action='store_true',
                        help='Switch to the testing phase on the test dataset')
    parser.add_argument('--test_on_valid', action='store_true',
                        help='Switch to the testing phase on the validation dataset')
    parser.add_argument('--seed', type=int, default=0,
                        help='Fix the random seed')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('')

    # setup output directory
    os.makedirs(args.out, exist_ok=True)  # NOTE: ad-hoc

    # setup a normalizer
    normalizer = Normalizer()
    normalizer.add(Clip2D('minmax'))
    normalizer.add(Subtract2D(0.5))
    normalizer.add(Divide2D(1. / 255.))

    # setup an augmentor
    augmentor = DataAugmentor(n_dim=2)
    augmentor.add(Flip2D(axis=2))
    augmentor.add(Affine2D(rotation=15.,
                           translate=(10., 10.),
                           shear=0.25,
                           zoom=(0.8, 1.2),
                           keep_aspect_ratio=True,
                           fill_mode=('reflect', 'reflect'),
                           cval=(0., 0.),
                           interp_order=(1, 0)))

    with fixed_seed(args.seed, strict=False):

        # setup a predictor
        conv_param = {  # NOTE: you can change the layer type if you want..
            'name': 'conv',
            'kernel_size': 3,
            'stride': 1,
            'padding': 2,
            'padding_mode': 'reflect',
            'dilation': 2,
            'initialW': {'name': 'he_normal'},
            'initial_bias': {'name': 'zero'},
        }

        upconv_param = {  # NOTE: you can change the layer type if you want..
            'name': 'deconv',
            'kernel_size': 3,
            'stride': 2,
            'padding': 0,
            'initialW': {'name': 'bilinear'},
            'initial_bias': {'name': 'zero'},
        }

        norm_param = {'name': 'batch'}

        predictor = BayesianUNet(ndim=2,
                                 in_channels=3,
                                 out_channels=2,
                                 nlayer=4,
                                 nfilter=32,
                                 conv_param=conv_param,
                                 upconv_param=upconv_param,
                                 norm_param=norm_param)

        if args.freeze_upconv:
            predictor.freeze_layers(name='upconv',
                                    recursive=True,
                                    verbose=True)

        # setup dataset
        train, valid, test = get_dataset(args.data_root,
                                         args.valid_split_type,
                                         args.valid_split_ratio,
                                         args.valid_augment,
                                         normalizer, augmentor)

        # run
        if args.test_on_test:
            test_phase(predictor, test, args)
        elif args.test_on_valid:
            test_phase(predictor, valid, args)
        else:
            save_args(args, args.out)
            predictor.save_args(os.path.join(args.out, 'model.json'))
            normalizer.summary(os.path.join(args.out, 'norm.json'))
            augmentor.summary(os.path.join(args.out, 'augment.json'))
            train_phase(predictor, train, valid, args)
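# NOTE: `test_phase` is defined elsewhere in the repository. Given the
# `--mc_iteration` flag and the BayesianUNet predictor, it presumably performs
# Monte Carlo dropout inference; a minimal sketch of that technique is given
# below. `mc_dropout_predict` is an illustrative name, not the repository's API.
import torch

def mc_dropout_predict(model, x, mc_iteration=15):
    """Run `mc_iteration` stochastic forward passes; return mean and variance."""
    model.train()  # keep dropout sampling active at test time
    # (in a model with batch norm, one would switch only the dropout layers to
    #  train mode so the normalization statistics stay frozen)
    with torch.no_grad():
        samples = torch.stack([model(x) for _ in range(mc_iteration)])
    return samples.mean(dim=0), samples.var(dim=0)  # prediction, uncertainty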
import argparse
import os

def main():
    parser = argparse.ArgumentParser(
        description='Example: Uncertainty estimates in regression')
    parser.add_argument('--batchsize', '-b', type=int, default=50,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=300,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--frequency', '-f', type=int, default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu', '-g', type=str, default='cuda:0',
                        help='GPU Device')
    parser.add_argument('--out', '-o', default='logs',
                        help='Directory to output the log files')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit', '-u', type=int, default=20,
                        help='Number of units')
    parser.add_argument('--noplot', dest='plot', action='store_false',
                        help='Disable PlotReport extension')
    parser.add_argument('--test_on_test', action='store_true',
                        help='Switch to the testing phase on the test dataset')
    parser.add_argument('--test_on_valid', action='store_true',
                        help='Switch to the testing phase on the validation dataset')
    parser.add_argument('--mc_iteration', type=int, default=50,
                        help='Number of MCMC iterations')
    parser.add_argument('--decay', type=float, default=-1,
                        help='Weight of L2 regularization')
    parser.add_argument('--seed', type=int, default=0,
                        help='Fix the random seed')
    args = parser.parse_args()

    os.makedirs(args.out, exist_ok=True)

    with fixed_seed(args.seed, strict=False):

        # setup a predictor
        predictor = BayesianMLP(n_in=1,
                                n_units=args.unit,
                                n_out=1,
                                drop_ratio=0.1)

        # setup dataset: train/valid share the x-range; test extrapolates beyond it
        train = Dataset(x_lim=(-5, 5), n_samples=1000)
        valid = Dataset(x_lim=(-5, 5), n_samples=1000)
        test = Dataset(x_lim=(-10, 10), n_samples=500)

        # run
        if args.test_on_test:
            test_phase(predictor, test, args)
        elif args.test_on_valid:
            test_phase(predictor, valid, args)
        else:
            train_phase(predictor, train, valid, args)
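# NOTE: a minimal sketch of what `BayesianMLP(n_in, n_units, n_out, drop_ratio)`
# could look like: a small MLP whose dropout layers realize MC-dropout sampling.
# This is an assumption for illustration; the repository's class may differ.
import torch.nn as nn

class BayesianMLPSketch(nn.Module):
    def __init__(self, n_in=1, n_units=20, n_out=1, drop_ratio=0.1):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(n_in, n_units), nn.ReLU(), nn.Dropout(drop_ratio),
            nn.Linear(n_units, n_units), nn.ReLU(), nn.Dropout(drop_ratio),
            nn.Linear(n_units, n_out),
        )

    def forward(self, x):
        return self.layers(x)

# Combined with a sampler like `mc_dropout_predict` above, repeated stochastic
# forward passes yield a predictive mean and variance, which is what makes the
# extrapolation region of the test set (|x| > 5) interesting in this example.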
import argparse
import os

def main():
    parser = argparse.ArgumentParser(
        description='Example: Uncertainty estimates with adversarial training in image synthesis',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data_root', '-d', type=str, default='./preprocessed',
                        help='Directory to dataset')
    parser.add_argument('--batchsize', '-b', type=int, default=5,
                        help='Number of images in each mini-batch')
    parser.add_argument('--iteration', '-i', type=int, default=200000,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--frequency', '-f', type=int, default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu', '-g', type=str, default='cuda:0',
                        help='GPU Device')
    parser.add_argument('--out', '-o', default='logs',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--valid_augment', action='store_true',
                        help='Enable data augmentation during validation')
    parser.add_argument('--valid_split_ratio', type=float, default=0.1,
                        help='Ratio of validation data to training data')
    parser.add_argument('--lr', type=float, default=4e-4,
                        help='Learning rate')
    parser.add_argument('--alpha', type=float, default=50.,
                        help='Weight of conditional loss')
    parser.add_argument('--beta', type=float, default=0.5,
                        help='Exponential decay rate of the first-order moment in Adam')
    parser.add_argument('--decay', type=float, default=-1,
                        help='Weight of L2 regularization')
    parser.add_argument('--mc_iteration', type=int, default=15,
                        help='Number of MCMC iterations')
    parser.add_argument('--pinfall', type=int, default=-1,
                        help='Countdown for early stopping of training')
    parser.add_argument('--freeze_upconv', action='store_true',
                        help='Disable updating the up-convolutional weights. If the weights are '
                             'initialized with bilinear kernels, the up-conv acts as a bilinear upsampler.')
    parser.add_argument('--test_on_test', action='store_true',
                        help='Switch to the testing phase on the test dataset')
    parser.add_argument('--test_on_valid', action='store_true',
                        help='Switch to the testing phase on the validation dataset')
    parser.add_argument('--seed', type=int, default=0,
                        help='Fix the random seed')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('')

    # setup output directory
    os.makedirs(args.out, exist_ok=True)  # NOTE: ad-hoc

    normalizer = get_normalizer()
    augmentor = get_augmentor()

    # setup a generator
    with fixed_seed(args.seed, strict=False):
        generator = build_generator()

        if args.freeze_upconv:
            generator.freeze_layers(name='upconv',
                                    recursive=True,
                                    verbose=True)

        # setup dataset
        train, valid, test = get_dataset(args.data_root,
                                         args.valid_split_ratio,
                                         args.valid_augment,
                                         normalizer, augmentor)

        # run
        if args.test_on_test:
            raise RuntimeError('This example is under construction. Please tune the hyperparameters first..')
            test_phase(generator, test, args)  # NOTE: unreachable until the guard above is removed
        elif args.test_on_valid:
            test_phase(generator, valid, args)
        else:
            save_args(args, args.out)
            generator.save_args(os.path.join(args.out, 'model.json'))
            normalizer.summary(os.path.join(args.out, 'norm.json'))
            augmentor.summary(os.path.join(args.out, 'augment.json'))
            train_phase(generator, train, valid, args)
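# NOTE: each example script presumably ends with the usual entry-point guard.
# Typical invocations (flag names taken from the parsers above) might be:
#
#   python train_synthesis.py --data_root ./preprocessed --gpu cuda:0 --out logs
#   python train_synthesis.py --test_on_valid --out logs   # MC-dropout test on the validation split
#
# The script name `train_synthesis.py` is illustrative, not the repository's.
if __name__ == '__main__':
    main()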