def build_graph(self):
    graph = tf.get_default_graph()
    with graph.as_default():
        self.samples = tf.Variable(self.get_samples(self.num_samples))
        # Positions and weights start from uniform random initializations.
        self.X = tf.Variable(
            tfd.Uniform(low=-5.0, high=5.0).sample(
                [self.num_positions, self.input_dim]))
        self.w = tf.Variable(
            tfd.Uniform(low=-1.0, high=1.0).sample(
                [self.num_dist, self.num_positions]))
        z = tf.Variable(tf.zeros_like(self.w))
        beta = tf.constant(self.beta)
        alpha = tf.constant(self.alpha)
        grad = self.estimate_weights_partials()
        # Momentum-style update: accumulate the gradient estimate in z,
        # then step the weights along the accumulated direction.
        self.z_update = z.assign(beta * z + grad)
        self.w_update = self.w.assign(self.w + alpha * z)
        self.error = tf.Print(tf.norm(grad, axis=1), [tf.norm(grad, axis=1)],
                              message='Gradient errors : ')
        self.samples_update = self.samples.assign(
            self.get_samples(self.num_samples))
        self.X_update = self.X.assign(self.estimate_positions_update())
    return graph
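# A minimal self-contained sketch of the same update pattern (TF 1.x assumed;
# the toy objective and the beta/alpha values are illustrative assumptions,
# not taken from the class above): accumulate the gradient in z, then step w.
import tensorflow as tf

w = tf.Variable([2.0, -3.0])
z = tf.Variable(tf.zeros_like(w))
objective = -tf.reduce_sum(tf.square(w))       # toy objective to ascend
grad = tf.gradients(objective, w)[0]
z_update = z.assign(0.9 * z + grad)            # beta = 0.9 (assumed)
with tf.control_dependencies([z_update]):
    w_update = w.assign(w + 0.01 * z)          # alpha = 0.01 (assumed)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        sess.run(w_update)                     # w -> 0 as the ascent converges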
def add_noise(data, noise, dataset):
    noise_type = noise['noise_type']
    if noise_type in ['None', 'none', None]:
        return data
    if noise_type == 'data':
        noise_type = 'bitflip' if dataset['binary'] else 'masked_uniform'
    with tf.name_scope('input_noise'):
        # Build a fully defined shape, falling back to the dynamic shape for
        # dimensions that are unknown at graph-construction time.
        shape = tf.stack([
            s.value if s.value is not None else tf.shape(data)[i]
            for i, s in enumerate(data.get_shape())
        ])
        if noise_type == 'bitflip':
            noise_dist = dist.Bernoulli(probs=noise['prob'], dtype=data.dtype)
            n = noise_dist.sample(shape)
            corrupted = data + n - 2 * data * n  # equivalent to (data XOR n)
        elif noise_type == 'masked_uniform':
            noise_dist = dist.Uniform(low=0., high=1.)
            noise_uniform = noise_dist.sample(shape)
            # sample mask
            mask_dist = dist.Bernoulli(probs=noise['prob'], dtype=data.dtype)
            mask = mask_dist.sample(shape)
            # produce output
            corrupted = mask * noise_uniform + (1 - mask) * data
        else:
            raise KeyError('Unknown noise_type "{}"'.format(noise_type))
        corrupted.set_shape(data.get_shape())
        return corrupted
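# Usage sketch (the placeholder shape and the `dist` alias for
# tf.distributions are assumptions consistent with the snippet above):
# corrupt a binary batch with 10% bit flips.
import tensorflow as tf
dist = tf.distributions

data = tf.placeholder(tf.float32, [None, 784])
noisy = add_noise(data,
                  noise={'noise_type': 'data', 'prob': 0.1},
                  dataset={'binary': True})    # 'data' resolves to 'bitflip'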
def __init__(self, config, attention, latent_space, scope='UniformSampler'):
    """ Initialize the sampler """
    super(UniformSampler, self).__init__(
        config, attention, latent_space, scope=scope)

    shape = (config.batch_size, self.sample_size)
    self.prior = distributions.Uniform(tf.zeros(shape), tf.ones(shape),
                                       name='prior')
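# Standalone sketch of the prior above (batch_size=32 and sample_size=20 are
# assumed values): a factorized U(0, 1) prior, one latent vector per batch
# element.
import tensorflow as tf
distributions = tf.distributions

shape = (32, 20)
prior = distributions.Uniform(tf.zeros(shape), tf.ones(shape), name='prior')
z = prior.sample()           # one draw per component -> shape (32, 20)
log_p = prior.log_prob(z)    # log-density is 0 everywhere on [0, 1)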
def approximate_posterior(self, tensor, scope='posterior'):
    """ Calculate the approximate posterior given the tensor """
    # Predict the mean and log-variance that parameterize the posterior
    sample_size = self.prior.batch_shape.as_list()[-1]
    with tf.variable_scope(scope, 'posterior', [tensor]):
        mean = layers.linear(tensor, sample_size, scope='mean')

        # Predict the log of the variance for numerical stability
        log_variance = layers.linear(tensor, sample_size,
                                     scope='log_variance')

        # Create a Uniform distribution centered on the mean whose variance
        # matches the predicted one: Var(U(m - d, m + d)) = d**2 / 3, so
        # d = sqrt(3 * variance).
        variance = tf.exp(log_variance)
        delta = tf.sqrt(3.0 * variance)
        posterior = distributions.Uniform(mean - delta, mean + delta,
                                          name='posterior')

        self.collect_named_outputs(posterior.low)
        self.collect_named_outputs(posterior.high)
        self.posteriors.append(posterior)

    return posterior
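# Numerical sanity check for the moment matching above (the variance value is
# an arbitrary assumption): a Uniform on [m - d, m + d] has variance
# (2d)^2 / 12 = d^2 / 3, so d = sqrt(3 * variance) reproduces the predicted
# variance.
import numpy as np

variance = 0.25
delta = np.sqrt(3.0 * variance)
samples = np.random.uniform(-delta, delta, size=1000000)
print(np.var(samples))       # ~= 0.25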
    train_op = optimizer.minimize(loss, global_step=global_step,
                                  var_list=var_list)
    return train_op


# -----------------------------------------------------------------------------
# Computational Graph
# -----------------------------------------------------------------------------

# MoG & Generator Samples
mog_x = MixtureOfGaussians(FLAGS.batch_size)
if FLAGS.uniform_prior:
    z = tfcds.Uniform(-tf.ones(FLAGS.z_dim),
                      tf.ones(FLAGS.z_dim)).sample(FLAGS.batch_size)
else:
    z = tfcds.Normal(tf.zeros(FLAGS.z_dim),
                     tf.ones(FLAGS.z_dim)).sample(FLAGS.batch_size)
gen_x = Generator(z).x

# Discriminator Scores (reuse=True shares variables across calls)
D1 = Discriminator(mog_x).p
D1_logits = Discriminator(mog_x, reuse=True).p_logits
D2 = Discriminator(gen_x, reuse=True).p
D2_logits = Discriminator(gen_x, reuse=True).p_logits

tf.summary.histogram("D1", D1, family="disc")
tf.summary.histogram("D2", D2, family="disc")
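# A hedged sketch of how these logits are typically consumed; this standard
# sigmoid cross-entropy GAN loss is an assumption, not taken from the
# original source.
d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=D1_logits, labels=tf.ones_like(D1_logits)) +
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=D2_logits, labels=tf.zeros_like(D2_logits)))
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=D2_logits, labels=tf.ones_like(D2_logits)))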
def _init_ref_dis(self):
    # Reference distribution: uniform over the action box [-1, 1]^action_dim.
    self.ref_dis = ds.Uniform(low=np.ones(self.env.action_dim) * -1,
                              high=np.ones(self.env.action_dim) * 1)
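# Standalone sketch of the reference distribution above (action_dim=4 is an
# assumed value): a factorized Uniform over the action box [-1, 1]^4.
import numpy as np
import tensorflow as tf
ds = tf.distributions

action_dim = 4
ref_dis = ds.Uniform(low=np.ones(action_dim) * -1.,
                     high=np.ones(action_dim) * 1.)
actions = ref_dis.sample(10)     # 10 random reference actions, shape (10, 4)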