import torch
import seaborn as sns
import matplotlib.pyplot as plt
from trixi.util import Config
from experiment import SmileyExperiment  # assuming a local experiment module


def test_2Dexperiment():
    c = Config()
    c.batch_size = 200
    c.n_epochs = 40
    c.learning_rate = 0.001
    c.use_cuda = torch.cuda.is_available()
    c.rnd_seed = 1
    c.log_interval = 200

    # model-specific
    c.n_coupling = 8
    c.prior = 'gauss'

    exp = SmileyExperiment(c,
                           name='gauss',
                           n_epochs=c.n_epochs,
                           seed=42,
                           base_dir='experiment_dir',
                           loggers={'visdom': ['visdom', {"exp_name": "myenv"}]})
    exp.run()

    # sampling: draw from the trained model and visualize the 2D density
    samples = exp.model.sample(1000).cpu().numpy()
    sns.jointplot(x=samples[:, 0], y=samples[:, 1])
    plt.show()
from trixi.util import Config


def get_config():
    c = Config()  # CLI flags via trixi; overwrite with e.g. --learning_rate=0.001

    c.txt_file = 'assets/001ssb.txt'  # Path to a .txt file to train on
    c.seq_length = 30                 # Length of an input sequence
    c.gen_length = 250                # Length of the generated sequence
    c.lstm_num_hidden = 128           # Number of hidden units in the LSTM
    c.lstm_num_layers = 2             # Number of LSTM layers in the model

    # Training params
    c.batch_size = 64                 # Number of examples to process in a batch
    c.learning_rate = 2e-3            # Learning rate

    # The following three params are optional, but they may help training.
    c.learning_rate_decay = 0.96      # Multiplicative learning rate decay factor
    c.learning_rate_step = 5000       # Steps between learning rate decays
    c.dropout_keep_prob = 1.0         # Dropout keep probability

    c.train_steps = int(1e6)          # Number of training steps
    c.max_norm = 5.0                  # Max gradient norm for clipping

    # Misc params
    c.summary_path = './summaries/'   # Output path for summaries
    c.print_every = 5                 # How often to print training progress
    c.sample_every = 100              # How often to sample from the model
    c.device = 'cuda:0'               # Training device, 'cpu' or 'cuda:0'
    c.temperature = 0.5               # Balances sampling between fully greedy (near 0)
                                      # and fully random (higher), e.g. 0.5, 1.0, 2.0

    return c
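# A minimal sketch of how the `temperature` field above is typically used
# at sampling time: logits are divided by the temperature before the
# softmax. The logits and vocabulary size here are hypothetical stand-ins,
# not taken from the config.
import torch

logits = torch.randn(87)                             # one step of decoder output over the vocabulary
probs = torch.softmax(logits / 0.5, dim=0)           # temperature = 0.5
next_char = torch.multinomial(probs, num_samples=1)  # low T -> near-greedy, high T -> near-uniform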
from trixi.util import Config


def get_config():
    c = Config()  # CLI flags via trixi; overwrite with e.g. --learning_rate=0.001

    c.model_type = 'RNN'     # Model type, should be 'RNN' or 'LSTM'
    c.input_length = 10      # Length of an input sequence
    c.input_dim = 1          # Dimensionality of the input sequence
    c.num_classes = 10       # Dimensionality of the output sequence
    c.num_hidden = 128       # Number of hidden units in the model
    c.batch_size = 128       # Number of examples to process in a batch
    c.learning_rate = 0.001  # Learning rate
    c.train_steps = 10000    # Number of training steps
    c.max_norm = 10.0        # Max gradient norm for clipping
    c.device = 'cuda:0'      # Training device, 'cpu' or 'cuda:0'

    return c
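# A sketch of how `max_norm` is typically consumed in a training step via
# gradient clipping. The Linear model, optimizer, and loss below are
# stand-ins, not part of the config above.
import torch

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

loss = model(torch.randn(4, 10)).pow(2).mean()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # rescale gradients so their norm is at most max_norm
optimizer.step()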
import torch
import matplotlib.pyplot as plt
from torchvision.utils import make_grid
from trixi.util import Config
from experiment import MNISTExperiment  # assuming a local experiment module


def test_MNIST_experiment():
    c = Config()
    c.batch_size = 64
    c.n_epochs = 50
    c.learning_rate = 0.001
    c.weight_decay = 5e-5
    c.use_cuda = torch.cuda.is_available()
    c.rnd_seed = 1
    c.log_interval = 100
    c.subset_size = 10

    # model-specific
    c.n_coupling = 8
    c.n_filters = 64

    exp = MNISTExperiment(c,
                          name='mnist_test',
                          n_epochs=c.n_epochs,
                          seed=42,
                          base_dir='experiment_dir',
                          loggers={'visdom': ['visdom', {"exp_name": "myenv"}]})
    exp.run()

    # sample from the trained model on CPU and show a grid of images
    exp.model.eval()
    exp.model.to('cpu')
    with torch.no_grad():
        samples = exp.model.sample(16, device='cpu')
    img_grid = make_grid(samples).permute((1, 2, 0))
    plt.imshow(img_grid)
    plt.show()

    return exp.model
import torch
from trixi.util import Config
from experiment import MNIST_classification  # assuming a local experiment module


def test_Resnet():
    c = Config()
    c.batch_size = 64
    c.batch_size_test = 1000
    c.n_epochs = 10
    c.learning_rate = 0.01
    c.momentum = 0.9
    c.use_cuda = torch.cuda.is_available()
    c.rnd_seed = 1
    c.log_interval = 200

    exp = MNIST_classification(config=c,
                               name='experiment',
                               n_epochs=c.n_epochs,
                               seed=42,
                               base_dir='./experiment_dir',
                               loggers={"visdom": "visdom"})
    exp.run()
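# A sketch of how the learning_rate and momentum fields above would
# typically be consumed when the experiment builds its optimizer; the
# Linear model is a stand-in for the actual ResNet.
import torch

model = torch.nn.Linear(784, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)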
import torch
from trixi.util import Config


def get_config():
    c = Config()

    c.batch_size = 6
    c.patch_size = 512
    c.n_epochs = 20
    c.learning_rate = 0.0002
    c.do_ce_weighting = True  # weight the cross-entropy loss
    c.do_batchnorm = True
    c.use_cuda = torch.cuda.is_available()
    c.rnd_seed = 1
    c.log_interval = 200

    # paths (machine-specific)
    c.base_dir = '/media/kleina/Data2/output/meddec'
    c.data_dir = '/media/kleina/Data2/Data/meddec/Task07_Pancreas_expert_preprocessed'
    c.split_dir = '/media/kleina/Data2/Data/meddec/Task07_Pancreas_preprocessed'
    c.data_file = 'C:/dev/data/Endoviz2018/GIANA/polyp_detection_segmentation/image_gt_data_file_list_all_640x640.csv'

    c.additional_slices = 5
    c.name = ''

    print(c)
    return c
import numpy as np
import torch
import matplotlib.pyplot as plt

from trixi.util import Config
from experiment import MNISTexperiment
from util import plot_dependency_map

c = Config()
c.batch_size = 128
c.n_epochs = 10
c.learning_rate = 0.001
c.use_cuda = torch.cuda.is_available()
c.rnd_seed = 1
c.log_interval = 100

exp = MNISTexperiment(config=c,
                      name='test',
                      n_epochs=c.n_epochs,
                      seed=c.rnd_seed,
                      base_dir='./experiment_dir',
                      loggers={"visdom": ["visdom", {"exp_name": "myenv"}]})

# # run backpropagation for each dimension to compute what other
# # dimensions it depends on.
# exp.setup()
# d = 28
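# A sketch of the dependency-map idea in the commented-out lines above:
# backpropagate from a single output dimension and mark which input pixels
# receive a nonzero gradient. The Conv2d here is a stand-in for exp.model.
d = 28
toy_model = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1)

x = torch.zeros(1, 1, d, d, requires_grad=True)
out = toy_model(x).view(-1)
out[d * 14 + 14].backward()            # output dimension at pixel (14, 14)
dependency = (x.grad.view(d, d) != 0)  # True where that output depends on the input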