from collections import OrderedDict

from sacred import Experiment, Ingredient
from sacred.commands import COLOR_DOC, ENDC, _format_named_configs


def test_format_named_configs():
    ingred = Ingredient('ingred')
    ex = Experiment(name='experiment', ingredients=[ingred])

    @ingred.named_config
    def named_config1():
        pass

    @ex.named_config
    def named_config2():
        """named config with doc"""
        pass

    dict_config = dict(v=42)
    ingred.add_named_config('dict_config', dict_config)

    named_configs_text = _format_named_configs(
        OrderedDict(ex.gather_named_configs()))

    assert named_configs_text.startswith(
        'Named Configurations (' + COLOR_DOC + 'doc' + ENDC + '):')
    assert 'named_config2' in named_configs_text
    assert '# named config with doc' in named_configs_text
    assert 'ingred.named_config1' in named_configs_text
    assert 'ingred.dict_config' in named_configs_text
pixel_prior = {
    'p': 0.0,       # probability of success for pixel prior Bernoulli
    'mu': 0.0,      # mean of pixel prior Gaussian
    'sigma': 0.25,  # std of pixel prior Gaussian
}

# em
k = 3            # number of components
nr_steps = 10    # number of (RN)N-EM steps
e_sigma = 0.25   # sigma used in the e-step when pixel distributions are Gaussian (acts as a temperature)
pred_init = 0.0  # initial prediction used to compute the input

# named config to be used when processing sequential data
nem.add_named_config('sequential', {
    'gradient_gamma': False,
    'loss_step_weights': 'all',
})


class NEMCell(RNNCell):
    """A RNNCell-like implementation of (RN)N-EM."""

    @nem.capture
    def __init__(self, cell, input_shape, distribution, pred_init, e_sigma):
        self.cell = cell
        if not isinstance(input_shape, tf.TensorShape):
            input_shape = tf.TensorShape(input_shape)
        self.input_shape = input_shape
        self.gamma_shape = tf.TensorShape(input_shape.as_list()[:-1] + [1])
        self.distribution = distribution
        self.pred_init = pred_init
        self.e_sigma = e_sigma
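# A hedged sketch of how the 'sequential' preset above would typically be switched on.
# Everything here is illustrative: `nem` stands in for the Sacred Ingredient the snippet
# above attaches its named config to, and the wrapper experiment/script name are made up.
from sacred import Experiment, Ingredient

nem = Ingredient('nem')  # placeholder for the ingredient defined in the real code
nem.add_named_config('sequential', {
    'gradient_gamma': False,
    'loss_step_weights': 'all',
})

ex = Experiment('nem_run', ingredients=[nem])


@ex.automain
def main(nem):
    # `nem` resolves to the ingredient's sub-config for this run
    print(nem)

# Ingredient named configs are addressed by their dotted path, e.g. from the shell:
#   python nem_run.py with nem.sequential
# which applies {'gradient_gamma': False, 'loss_step_weights': 'all'} to the nem sub-config.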
from sacred import Ingredient

ds = Ingredient('dataset')


@ds.config
def cfg():
    name = 'shapes'
    path = './data'
    binary = True
    train_size = None     # subset of training set (None, int)
    valid_size = 1000     # subset of valid set (None, int)
    test_size = None      # subset of test set (None, int)
    queue_capacity = 100  # nr of batches in the queue


ds.add_named_config('shapes', {'name': 'shapes', 'binary': True})
ds.add_named_config('flying_shapes', {'name': 'flying_shapes', 'binary': True})
ds.add_named_config('flying_shapes_4', {
    'name': 'flying_shapes_4',
    'binary': True
})
ds.add_named_config('flying_shapes_5', {
    'name': 'flying_shapes_5',
    'binary': True
})
ds.add_named_config('flying_mnist_medium_20_2', {
    'name': 'flying_mnist_medium20_2digits',
    'binary': False
})
ds.add_named_config('flying_mnist_medium_500_2', {
    'name': 'flying_mnist_medium500_2digits',
    'binary': False
})
reconstruction_loss = {'name': 'recons_nll', 'params': {'loss': 'bce'}}
bvae_loss = {'name': 'beta-vae',
             'params': {'reconstruction_loss': 'bce', 'beta': 4.0}}
cap_const = {'name': 'constrained-beta-vae',
             'params': {'reconstruction_loss': 'bce', 'gamma': 100, 'capacity': 7}}
bxent_loss = {'name': 'bxent', 'params': {}}
xent_loss = {'name': 'xent', 'params': {}}
accuracy = {'name': 'acc',
            'params': {'output_transform': thresholded_output_transform}}
mse_loss = {'name': 'mse', 'params': {}}
kl_div = {'name': 'kl-div', 'params': {}}

training = Ingredient('training')

training.add_named_config('vae', loss=vae_loss,
                          metrics=[reconstruction_loss, kl_div])
training.add_named_config('bvae', loss=bvae_loss,
                          metrics=[reconstruction_loss, kl_div])
training.add_named_config('capconst', loss=cap_const,
                          metrics=[reconstruction_loss, kl_div])
training.add_named_config('2afc', loss=bxent_loss,
                          metrics=[bxent_loss, accuracy])
training.add_named_config('mafc', loss=xent_loss,
                          metrics=[xent_loss, accuracy])
training.add_named_config('recons_nll', loss=reconstruction_loss,
                          metrics=[reconstruction_loss])

init_optimizer = training.capture(init_optimizer)
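# A hedged sketch of selecting one of the presets above programmatically. It assumes the
# `training` ingredient defined above (and that names it references but does not define
# here, such as `vae_loss` and `init_optimizer`, exist elsewhere in the real code base);
# the wrapper experiment below is made up for illustration.
from sacred import Experiment

demo = Experiment('training_demo', ingredients=[training])


@demo.main
def show(training):
    # `training` resolves to the ingredient config selected for this run
    print(training['loss']['name'],
          [m['name'] for m in training['metrics']])


# Experiment.run accepts named configs by their dotted path:
demo.run(named_configs=['training.bvae'])  # expected: beta-vae ['recons_nll', 'kl-div']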
net.add_named_config('enc_dec_84_atari', {
    'input': [
        {'name': 'reshape', 'shape': (84, 84, 1)},
        {'name': 'conv', 'size': 16, 'act': 'elu', 'stride': [4, 4], 'kernel': (8, 8), 'ln': True},
        {'name': 'conv', 'size': 32, 'act': 'elu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
        {'name': 'reshape', 'shape': -1},
        {'name': 'fc', 'size': 250, 'act': 'elu', 'ln': True},
    ],
    'output': [
        {'name': 'fc', 'size': 250, 'act': 'relu', 'ln': True},
        {'name': 'fc', 'size': 10 * 10 * 32, 'act': 'relu', 'ln': True},
        {'name': 'reshape', 'shape': (10, 10, 32)},
        {'name': 'r_conv', 'size': 16, 'act': 'relu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True, 'offset': 1},
        {'name': 'r_conv', 'size': 1, 'act': 'sigmoid', 'stride': [4, 4], 'kernel': (8, 8)},
        {'name': 'reshape', 'shape': -1},
    ],
})
net.add_named_config('flying_mnist', {
    'input': [
        {'name': 'input_norm'},
        {'name': 'reshape', 'shape': (24, 24, 1)},
        {'name': 'conv', 'size': 32, 'act': 'elu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
        {'name': 'conv', 'size': 64, 'act': 'elu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
        {'name': 'conv', 'size': 128, 'act': 'elu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
        {'name': 'reshape', 'shape': -1},
        {'name': 'fc', 'size': 512, 'act': 'elu', 'ln': True},
    ],
    'recurrent': [
        {'name': 'rnn', 'size': 250, 'act': 'sigmoid', 'ln': True},
    ],
    'output': [
        {'name': 'fc', 'size': 512, 'act': 'relu', 'ln': True},
        {'name': 'fc', 'size': 3 * 3 * 128, 'act': 'relu', 'ln': True},
        {'name': 'reshape', 'shape': (3, 3, 128)},
        {'name': 'r_conv', 'size': 64, 'act': 'relu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
        {'name': 'r_conv', 'size': 32, 'act': 'relu', 'stride': [2, 2], 'kernel': (4, 4), 'ln': True},
        {'name': 'r_conv', 'size': 1, 'act': '*', 'stride': [2, 2], 'kernel': (4, 4), 'ln': False},
        {'name': 'reshape', 'shape': -1},
    ],
})
import os

import h5py
from sacred import Ingredient

ds = Ingredient('dataset')


@ds.config
def cfg():
    name = 'balls4mass64'
    path = './data'
    train_size = None     # subset of training set (None, int)
    valid_size = 1000     # subset of valid set (None, int)
    test_size = None      # subset of test set (None, int)
    queue_capacity = 100  # nr of batches in the queue


ds.add_named_config('balls4mass64', {'name': 'balls4mass64'})
ds.add_named_config('balls678mass64', {'name': 'balls678mass64'})
ds.add_named_config('balls3curtain64', {'name': 'balls3curtain64'})
ds.add_named_config('atari', {'name': 'atari'})


class InputPipeLine(object):

    @ds.capture
    def _open_dataset(self, out_list, path, name, train_size, valid_size,
                      test_size):
        # open dataset file
        self._hdf5_file = h5py.File(os.path.join(path, name + '.h5'), 'r')
        self._data_in_file = {
            data_name: self._hdf5_file[self.usage][data_name]
            for data_name in out_list
        }
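# A minimal sketch of how these dict-based named configs merge with the cfg() defaults
# above: selecting one only overrides the keys it defines ('name' here), while 'path',
# the size options and 'queue_capacity' keep their defaults. Only the `ds` ingredient
# comes from the snippet above; the wrapper experiment and its name are hypothetical.
from sacred import Experiment

pipeline_demo = Experiment('pipeline_demo', ingredients=[ds])


@pipeline_demo.main
def report(dataset):
    # `dataset` is the resolved sub-config of the `ds` ingredient (path 'dataset')
    print(dataset['name'], dataset['path'], dataset['queue_capacity'])


pipeline_demo.run(named_configs=['dataset.balls3curtain64'])
# expected: balls3curtain64 ./data 100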
from sacred import Ingredient

# from dataset.tdisc import load_tdata
from dataset.sprites import load_sprites
from dataset.shapes3d import load_shapes3d
from dataset.mpi import load_mpi3d
from dataset.transforms import Triplets
import configs.datasplits as splits

dataset = Ingredient('dataset')

load_sprites = dataset.capture(load_sprites)
load_shapes3d = dataset.capture(load_shapes3d)
load_mpi3d = dataset.capture(load_mpi3d)
load_composition = dataset.capture(Triplets)

dataset.add_config(setting='unsupervised')
dataset.add_named_config('unsupervised', setting='unsupervised')
dataset.add_named_config('supervised', setting='supervised')


@dataset.capture
def get_dataset(dataset):
    if dataset == 'dsprites':
        dataset_loader = load_sprites
    elif dataset == 'shapes3d':
        dataset_loader = load_shapes3d
    elif dataset == 'mpi3d':
        dataset_loader = load_mpi3d
    elif dataset == 'composition':
        dataset_loader = load_composition
    else:
        raise ValueError('Unrecognized dataset {}'.format(dataset))