Example #1
 def __call__(self, features, training=False):
     raw_init_std = np.log(np.exp(self._init_std) - 1)
     x = features
     for index in range(self._layers):
         x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x)
     if self._dist == 'tanh_normal':  # Original from Dreamer
         # https://www.desmos.com/calculator/rcmcf5jwe7
         x = self.get(f'hout', tfkl.Dense, 2 * self._size)(x)
         mean, std = tf.split(x, 2, -1)
         mean = self._mean_scale * tf.tanh(mean / self._mean_scale)
         std = tf.nn.softplus(std + raw_init_std) + self._min_std
         dist = tfd.Normal(mean, std)
         dist = tfd.TransformedDistribution(dist, tools.TanhBijector())
         dist = tfd.Independent(dist, 1)
         dist = tools.SampleDist(dist)
     elif self._dist == 'normalized_tanhtransformed_normal':
         # Normalized variation of the original actor: (mu,std) normalized, then create tanh normal from them
         # The normalization params (moving avg, std) are updated only during training
         x = self.get(f'hout', tfkl.Dense, 2 * self._size)(x)
         x = tf.reshape(x, [-1, 2 * self._size])
         x = self.get(f'hnorm', tfkl.BatchNormalization)(x, training=training)  # `training` true only in imagination
         x = tf.reshape(x, [*features.shape[:-1], -1])
         mean, std = tf.split(x, 2, -1)
         std = tf.nn.softplus(std) + self._min_std  # to have positive values
         dist = tfd.Normal(mean, std)
         dist = tfd.TransformedDistribution(dist, tools.TanhBijector())
         dist = tfd.Independent(dist, 1)
         dist = tools.SampleDist(dist)
     else:
         raise NotImplementedError(self._dist)
     return dist
Example #2
    def _build(self, inputs):
        mean, covariance, scale = self.create_mean_n_cov_layers(inputs)

        # TODO: is this the kind of regularization we want? I think it makes sense.
        self.set_contractive_regularizer(
            mean, covariance,
            self._contractive_regularizer_inputs,
            self._contractive_regularizer_tuple,
            self._contractive_collection_network_str)
        
        gaussian = tfd.Normal(loc=mean, scale=scale)
        
        sigmoid_bijector = tfb.Sigmoid()
        logitnormal = tfd.TransformedDistribution(distribution=gaussian, bijector=sigmoid_bijector)
        
        # add a reconstruction_node method (some sort of mean or median is
        # needed to get reconstructions without sampling)
        def reconstruction_node(self):
            # there is no closed-form mean for the LogitNormalDiagonal
            # distribution, so use the sigmoid of the underlying Gaussian mean
            return sigmoid_bijector.forward(gaussian.mean())
        
        logitnormal.reconstruction_node = types.MethodType(reconstruction_node, logitnormal)

        clip_value = self._clip_value
         
        # make sure to clip the input for log_prob
        def log_prob(self, x, name='log_prob', **kwargs):
            # kind of dirty, I know; calling _call_log_prob avoids recursion (Luigi)
            return self._call_log_prob(tf_clip(x, low=-1.0 + clip_value, high=1.0 - clip_value), name=name, **kwargs)
        
        logitnormal.log_prob = types.MethodType(log_prob, logitnormal)

        
        return logitnormal
Example #3
def affine_flow_actor_critic(a, k):
    d = r = act_dim = a.shape.as_list()[-1]
    DTYPE = tf.float32
    bijectors = []
    initializer = tf.initializers.truncated_normal(0, 0.1)
    for i in range(k):
        with tf.variable_scope('bijector_%d' % i):
            V = tf.get_variable('V', [d, r],
                                dtype=DTYPE,
                                initializer=initializer)
            shift = tf.get_variable('shift', [d],
                                    dtype=DTYPE,
                                    initializer=initializer)
            L = tf.get_variable('L', [d * (d + 1) // 2],
                                dtype=DTYPE,
                                initializer=initializer)
            bijectors.append(
                tfpb.Affine(
                    scale_tril=tfpd.fill_triangular(L),
                    scale_perturb_factor=V,
                    shift=shift,
                ))
            alpha = tf.abs(tf.get_variable('alpha', [], dtype=DTYPE)) + .01
            bijectors.append(PReLU(alpha=alpha))
    # drop the trailing PReLU so the chain ends with an affine layer,
    # leaving the output unbounded
    mlp_bijector = tfpb.Chain(list(reversed(bijectors[:-1])),
                              name='mlp_bijector')
    dist = tfpd.TransformedDistribution(
        distribution=tfpd.MultivariateNormalDiag(loc=tf.zeros(act_dim),
                                                 scale_diag=0.1 * tf.ones(act_dim)),
        bijector=mlp_bijector)
    pi = dist.sample(1)
    logp_pi = tf.squeeze(dist.log_prob(pi))
    logp = dist.log_prob(a)
    return pi, logp, logp_pi
Example #4
File: flow.py Project: gumpfly/PSVO
	def transform(self, base_dist, name=None):
		dist = tfd.TransformedDistribution(
			distribution=base_dist,
			bijector=self.bijector,
			name=name)

		return dist
Example #5
 def __call__(self, features):
     raw_init_std = np.log(np.exp(self._init_std) - 1)
     x = features
     for index in range(self._layers):
         x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x)
     if self._dist == 'tanh_normal':
         # https://www.desmos.com/calculator/rcmcf5jwe7
         x = self.get(f'hout', tfkl.Dense, 2 * self._size)(x)
         mean, std = tf.split(x, 2, -1)
         mean = self._mean_scale * tf.tanh(mean / self._mean_scale)
         std = tf.nn.softplus(std + raw_init_std) + self._min_std
         dist = tfd.Normal(mean, std)
         dist = tfd.TransformedDistribution(dist, tools.TanhBijector())
         dist = tfd.Independent(dist, 1)
         dist = tools.SampleDist(dist)
     elif self._dist == 'onehot':
         x = self.get(f'hout', tfkl.Dense, self._size)(x)
         dist = tools.OneHotDist(x)
     elif self._dist == 'gumbel':
         x = self.get(f'hout', tfkl.Dense, self._size)(x)
         dist = tfd.RelaxedOneHotCategorical(temperature=1e-1, logits=x)
         dist = tools.SampleDist(dist)
     else:
          raise NotImplementedError(self._dist)
     return dist
Example #6
def SoftplusNormal(loc, scale, name="SoftplusNormal"):
    return tfd.TransformedDistribution(
        distribution=tfd.Normal(
            loc=loc,
            scale=scale),
        bijector=tfp.bijectors.Softplus(),
        name=name)
Example #7
def feed_forward(
        state, data_shape, num_layers=2, activation=tf.nn.relu,
        mean_activation=None, stop_gradient=False, trainable=True, units=100,
        std=1.0, low=-1.0, high=1.0, dist='normal'):
    """Create a model returning unnormalized MSE distribution."""
    hidden = state
    if stop_gradient:
        hidden = tf.stop_gradient(hidden)
    for _ in range(num_layers):
        hidden = tf.compat.v1.layers.dense(hidden, units, activation)
    mean = tf.compat.v1.layers.dense(
        hidden, int(np.prod(data_shape)), mean_activation, trainable=trainable)
    mean = tf.reshape(mean, tools.shape(state)[:-1] + data_shape)
    if std == 'learned':
        std = tf.compat.v1.layers.dense(
            hidden, int(np.prod(data_shape)), None, trainable=trainable)
        std = tf.nn.softplus(std + 0.55) + 0.01
        std = tf.reshape(std, tools.shape(state)[:-1] + data_shape)
    if dist == 'normal':
        dist = tfd.Normal(mean, std)
    elif dist == 'truncated_normal':
        # https://www.desmos.com/calculator/3o96eyqxib
        dist = tfd.TruncatedNormal(mean, std, low, high)
    elif dist == 'tanh_normal':
        # https://www.desmos.com/calculator/sxpp7ectjv
        dist = tfd.Normal(mean, std)
        dist = tfd.TransformedDistribution(dist, tfp.bijectors.Tanh())
    elif dist == 'deterministic':
        dist = tfd.Deterministic(mean)
    else:
        raise NotImplementedError(dist)
    dist = tfd.Independent(dist, len(data_shape))
    return dist
Example #8
 def __call__(self, inputs):
   out = self.get('out', tfkl.Dense, np.prod(self._shape))(inputs)
   out = tf.reshape(out, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
   out = tf.cast(out, tf.float32)
   if self._dist in ('normal', 'tanh_normal', 'trunc_normal'):
     std = self.get('std', tfkl.Dense, np.prod(self._shape))(inputs)
     std = tf.reshape(std, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
     std = tf.cast(std, tf.float32)
   if self._dist == 'mse':
     dist = tfd.Normal(out, 1.0)
     return tfd.Independent(dist, len(self._shape))
   if self._dist == 'normal':
     dist = tfd.Normal(out, std)
     return tfd.Independent(dist, len(self._shape))
   if self._dist == 'binary':
     dist = tfd.Bernoulli(out)
     return tfd.Independent(dist, len(self._shape))
   if self._dist == 'tanh_normal':
     mean = 5 * tf.tanh(out / 5)
     std = tf.nn.softplus(std + self._init_std) + self._min_std
     dist = tfd.Normal(mean, std)
     dist = tfd.TransformedDistribution(dist, common.TanhBijector())
     dist = tfd.Independent(dist, len(self._shape))
     return common.SampleDist(dist)
   if self._dist == 'trunc_normal':
     std = 2 * tf.nn.sigmoid((std + self._init_std) / 2) + self._min_std
     dist = common.TruncNormalDist(tf.tanh(out), std, -1, 1)
     return tfd.Independent(dist, 1)
   if self._dist == 'onehot':
     return common.OneHotDist(out)
   raise NotImplementedError(self._dist)
Example #9
 def _init_distribution(conditions):
     loc, scale = conditions["loc"], conditions["scale"]
     return tfd.TransformedDistribution(
         distribution=tfd.Normal(loc=loc, scale=scale),
         bijector=bij.Sigmoid(),
         name="LogitNormal",
     )
Example #10
File: flow.py Project: gumpfly/PSVO
    def transform(self, base_dist, name=None):
        dist = tfd.TransformedDistribution(distribution=base_dist,
                                           bijector=self.bijector,
                                           validate_args=True,
                                           name=name or self.name)

        return dist
Example #11
    def __init__(self, *args, **kwargs):
        self._parents = []
        # Override default bijector if provided
        self._bijector = kwargs.pop("bijector", self._bijector)

        self._untransformed_distribution = self._base_dist(*args, **kwargs)
        self._sample_shape = ()
        self._dim_names = ()
        ctx = contexts.get_context()
        self.name = kwargs.get("name", None)
        if isinstance(ctx, contexts.InferenceContext) and self.name is None:
            # Unfortunately autograph does not allow changing the AST,
            # thus we instead retrieve the name from when it was set in the
            # ForwardContext, where AST parsing is possible.
            order_id = len(ctx.vars)  # where am I in the order of RV creation?
            self.name = ctx._names[order_id]

        if not isinstance(ctx, contexts.FreeForwardContext) and self.name is None:
            # We only require names for book keeping during inference
            raise ValueError("No name was set. Supply one via the name kwarg.")

        self._creation_context_id = id(ctx)
        self._backend_tensor = None

        self._distribution = tfd.TransformedDistribution(
            distribution=self._untransformed_distribution, bijector=bijectors.Invert(self._bijector)
        )
        ctx.add_variable(self)
Example #12
 def __call__(self, observation):
     x = self._policy(observation)
     multivariate_normal_diag = tfd.MultivariateNormalDiag(
         loc=self._mu(x), scale_diag=self._stddev(x))
     # Squash actions to [-1, 1]
     squashed = tfd.TransformedDistribution(multivariate_normal_diag,
                                            utils.StableTanhBijector())
     return utils.SampleDist(squashed)
Example #13
 def _base_dist(self, *args, **kwargs):
     """
     Logit normal base distribution.
     
     A LogitNormal is the standard logistic (i.e. sigmoid) of a Normal.
     """
     return tfd.TransformedDistribution(
         distribution=tfd.Normal(*args, **kwargs),
         bijector=tfp.bijectors.Sigmoid(),
         name="LogitNormal",
     )
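For reference, a self-contained sketch of the same LogitNormal construction with concrete parameters (tfd alias assumed):

import tensorflow_probability as tfp
tfd = tfp.distributions

# sigmoid of a standard Normal: support is the open interval (0, 1)
logit_normal = tfd.TransformedDistribution(
    distribution=tfd.Normal(loc=0.0, scale=1.0),
    bijector=tfp.bijectors.Sigmoid(),
    name="LogitNormal")
s = logit_normal.sample(1000)  # every draw lies strictly in (0, 1)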
Example #14
 def _base_dist(self, *args, **kwargs):
     """
     Half student-T base distribution.
     
     A HalfStudentT is the absolute value of a StudentT.
     """
     return tfd.TransformedDistribution(
         distribution=tfd.StudentT(*args, **kwargs),
         bijector=tfp.bijectors.AbsoluteValue(),
         name="HalfStudentT",
     )
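A sketch of the HalfStudentT pattern with concrete parameters (aliases assumed). AbsoluteValue is a non-injective bijector; as far as the TFP docs describe it, TransformedDistribution accounts for both preimages (+x and -x) when computing log_prob.

import tensorflow_probability as tfp
tfd = tfp.distributions

half_t = tfd.TransformedDistribution(
    distribution=tfd.StudentT(df=3.0, loc=0.0, scale=1.0),
    bijector=tfp.bijectors.AbsoluteValue(),
    name="HalfStudentT")
print(half_t.log_prob(1.5))  # density mass from both +1.5 and -1.5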
Example #15
    def __init__(self, *args, **kwargs):
        """Initialize PositiveContinuousRV.

        Developer Note
        --------------
            The inverse of the exponential bijector is the log bijector.
        """
        super().__init__(*args, **kwargs)
        self._transformed_distribution = tfd.TransformedDistribution(
            distribution=self._distribution,
            bijector=bijectors.Invert(bijectors.Exp()))
Example #16
    def __init__(self, *args, **kwargs):
        """Initialize UnitContinuousRV.

        Developer Note
        --------------
            The inverse of the sigmoid bijector is the logodds bijector.
        """
        super().__init__(*args, **kwargs)
        self._transformed_distribution = tfd.TransformedDistribution(
            distribution=self._distribution,
            bijector=bijectors.Invert(bijectors.Sigmoid()))
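Examples #15 and #16 share one pattern: wrapping the base distribution with an inverted constraining bijector moves the variable onto the unconstrained real line for inference. A minimal sketch of the exponential case, mirroring the test in Example #18 (aliases assumed):

import tensorflow_probability as tfp
tfd, tfb = tfp.distributions, tfp.bijectors

# Invert(Exp()) acts as a log transform: it maps a positive-valued
# distribution onto all of R, the usual trick for unconstrained inference
unconstrained = tfd.TransformedDistribution(
    distribution=tfd.HalfNormal(scale=1.0),
    bijector=tfb.Invert(tfb.Exp()))
print(unconstrained.log_prob(-1.0))  # finite: exp(-1.0) is a valid HalfNormal value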
Example #17
    def __init__(self, event_shape, flow_params, dim, channels):
        self._flow_params = flow_params
        self._dim = dim
        self._channels = channels
        base_dist = tfd.MultivariateNormalDiag(loc=tf.zeros(event_shape))

        self._independent_channels_distributions = []
        for ch in range(self._channels):
            flow_maf = build_flow(flow_params, flow_size=dim)
            transformed_distribution = tfd.TransformedDistribution(distribution=base_dist, bijector=flow_maf)
            self._independent_channels_distributions.append(transformed_distribution)
Example #18
def test_transformed_executor_logp_tensorflow(transformed_model):
    norm_log = tfd.TransformedDistribution(tfd.HalfNormal(1), bij.Invert(bij.Exp()))

    _, state = pm.evaluate_model_transformed(transformed_model(), values=dict(__log_n=-math.pi))
    np.testing.assert_allclose(
        state.collect_log_prob(), norm_log.log_prob(-math.pi), equal_nan=False
    )

    _, state = pm.evaluate_model_transformed(transformed_model(), values=dict(n=math.exp(-math.pi)))
    np.testing.assert_allclose(
        state.collect_log_prob(), norm_log.log_prob(-math.pi), equal_nan=False
    )
Example #19
    def _base_dist(self, *args, **kwargs):
        """
        Weibull base distribution.

        The inverse of the Weibull bijector applied to a U[0, 1] random
        variable gives a Weibull-distributed random variable.
        """
        return tfd.TransformedDistribution(
            distribution=tfd.Uniform(low=0.0, high=1.0),
            bijector=tfp.bijectors.Invert(
                tfp.bijectors.Weibull(*args, **kwargs)),
            name="Weibull",
        )
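A runnable sketch of this inverse-CDF construction with concrete parameters. Recent TFP releases name the bijector WeibullCDF (as in Example #24), which is assumed here:

import tensorflow_probability as tfp
tfd, tfb = tfp.distributions, tfp.bijectors

# inverse-CDF sampling: pushing U[0, 1] through the inverted Weibull CDF
# produces Weibull-distributed draws
weibull = tfd.TransformedDistribution(
    distribution=tfd.Uniform(low=0.0, high=1.0),
    bijector=tfb.Invert(tfb.WeibullCDF(scale=1.0, concentration=2.0)))
print(weibull.sample(5))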
Example #20
    def _base_dist(self, lower: TensorLike, upper: TensorLike, *args,
                   **kwargs):
        """
        Discrete uniform base distribution.

        A DiscreteUniform is an equiprobable Categorical over (upper - lower),
        shifted up by low.
        """
        probs = np.ones(int(upper - lower)) / (upper - lower)
        return tfd.TransformedDistribution(
            distribution=tfd.Categorical(probs=probs, dtype=tf.float32),
            bijector=tfp.bijectors.AffineScalar(shift=float(lower)),
            name="DiscreteUniform",
        )
Example #21
    def _base_dist(self, *args, **kwargs):
        """
        Discrete uniform base distribution.

        A DiscreteUniform is an equiprobable Categorical over (high-low),
        shifted up by low.
        """
        low = kwargs.pop("low")
        high = kwargs.pop("high")
        probs = np.ones(high - low, dtype=np.float32) / (high - low)
        return tfd.TransformedDistribution(
            distribution=tfd.Categorical(probs=probs, dtype=tf.float32),
            bijector=tfp.bijectors.AffineScalar(shift=float(low)),
            name="DiscreteUniform",
        )
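A self-contained sketch of the DiscreteUniform construction. Note that AffineScalar has been deprecated in recent TFP releases; tfb.Shift is the drop-in replacement assumed below:

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd, tfb = tfp.distributions, tfp.bijectors

low, high = 2, 6
probs = np.ones(high - low, dtype=np.float32) / (high - low)
# equiprobable Categorical over {0, ..., high-low-1}, shifted up by low
discrete_uniform = tfd.TransformedDistribution(
    distribution=tfd.Categorical(probs=probs, dtype=tf.float32),
    bijector=tfb.Shift(float(low)))
print(discrete_uniform.sample(10))  # integer-valued draws in {2, ..., 5}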
Example #22
    def _base_dist(self, nu: IntTensorLike, sigma: TensorLike, *args,
                   **kwargs):
        """
        Half student-T base distribution.

        A HalfStudentT is the absolute value of a StudentT.
        """
        return tfd.TransformedDistribution(
            distribution=tfd.StudentT(df=nu,
                                      scale=sigma,
                                      loc=0,
                                      *args,
                                      **kwargs),
            bijector=tfp.bijectors.AbsoluteValue(),
            name="HalfStudentT",
        )
Example #23
 def __init__(self,
              mean: tf.Tensor,
              std: tf.Tensor,
              feature_dims: int = 1,
              num_samples: int = 100) -> None:
     dist = tfd.Normal(mean, std)
     dist = tfd.TransformedDistribution(dist, TanhBijector())
     dist = tfd.Independent(dist, feature_dims)
     self._dist = dist
     self._num_samples = num_samples
     super().__init__(
         dtype=self._dist.dtype,
         reparameterization_type=self._dist.reparameterization_type,
         validate_args=False,
         allow_nan_stats=self._dist.allow_nan_stats,
         name='TanhNormalDistribution')
Example #24
    def _init_distribution(conditions):

        concentration, scale = conditions["concentration"], conditions["scale"]

        scale_tensor, concentration_tensor = (
            tf.convert_to_tensor(scale),
            tf.convert_to_tensor(concentration),
        )
        broadcast_shape = dist_util.prefer_static_broadcast_shape(
            scale_tensor.shape, concentration_tensor.shape
        )

        return tfd.TransformedDistribution(
            distribution=tfd.Uniform(low=tf.zeros(broadcast_shape), high=tf.ones(broadcast_shape)),
            bijector=bij.Invert(bij.WeibullCDF(scale=scale, concentration=concentration)),
            name="Weibull",
        )
Example #25
def get_iaf_elbo(target, num_mc_samples, param_shapes):
    shape_sizes = [
        _tensorshape_size(pshape) for pshape in param_shapes.values()
    ]
    overall_shape = [sum(shape_sizes)]

    def unmarshal(variational_sample):
        results = []
        n_dimensions_used = 0
        for (n_to_add, result_shape) in zip(shape_sizes,
                                            param_shapes.values()):
            result = variational_sample[Ellipsis,
                                        n_dimensions_used:n_dimensions_used +
                                        n_to_add]
            results.append(tf.reshape(result, result_shape))
            n_dimensions_used += n_to_add
        return tuple(results)

    variational_dist = tfd.TransformedDistribution(
        distribution=tfd.Normal(loc=0., scale=1.),
        bijector=tfb.Invert(
            tfb.MaskedAutoregressiveFlow(
                shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
                    hidden_layers=[256, 256]))),
        event_shape=overall_shape,
        name='q_iaf')

    variational_samples = variational_dist.sample(num_mc_samples)
    target_q_sum = tf.reduce_sum(
        variational_dist.log_prob(variational_samples))
    target_sum = 0.
    for s in range(num_mc_samples):
        params = unmarshal(variational_samples[s, Ellipsis])
        target_sum = target_sum + target(*params)

    energy = target_sum / float(num_mc_samples)
    entropy = -target_q_sum / float(num_mc_samples)
    elbo = energy + entropy

    tf.summary.scalar('energy', energy)
    tf.summary.scalar('entropy', entropy)
    tf.summary.scalar('elbo', elbo)

    return elbo
Example #26
    def _create_dist(self):
        scale_diag = tf.nn.softplus(self._scale_variable) if self._softplus_scale \
            else self._scale_variable
        scale = distribution_util.make_diag_scale(loc=self._loc_variable,
                                                  scale_diag=scale_diag,
                                                  validate_args=False,
                                                  assert_positive=False)
        batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
            self._loc_variable, scale)

        return tfp.TransformedDistribution(
            distribution=tfp.Cauchy(loc=tf.zeros([], dtype=scale.dtype),
                                    scale=tf.ones([], dtype=scale.dtype)),
            bijector=bijectors.AffineLinearOperator(shift=self._loc_variable,
                                                    scale=scale),
            batch_shape=batch_shape,
            event_shape=event_shape,
            name="MultivariateCauchyDiag" +
            ("SoftplusScale" if self._softplus_scale else ""))
Example #27
File: nn.py Project: xlnwel/d2rl
    def call(self, x):
        x = self._layers(x)

        if self._is_action_discrete:
            dist = Categorical(x)
            terms = {}
        else:
            raw_init_std = np.log(np.exp(self._init_std) - 1)
            mean, std = tf.split(x, 2, -1)
            # https://www.desmos.com/calculator/gs6ypbirgq
            # we bound the mean to [-5, +5] to avoid numerical instabilities
            # as atanh becomes difficult in highly saturated regions
            mean = self._mean_scale * tf.tanh(mean / self._mean_scale)
            std = tf.nn.softplus(std + raw_init_std) + self._min_std
            dist = tfd.Normal(mean, std)
            dist = tfd.TransformedDistribution(dist, TanhBijector())
            dist = tfd.Independent(dist, 1)
            dist = SampleDist(dist)
            terms = dict(raw_act_std=std)

        return dist, terms
Example #28
 def __call__(self, features):
     raw_init_std = np.log(np.exp(self._init_std) - 1)
     x = features
     for index in range(self._layers_num):
         x = self.get(f"h{index}", tf.keras.layers.Dense, self._units,
                      self._act)(x)
     if self._dist == "tanh_normal":
         # https://www.desmos.com/calculator/rcmcf5jwe7
         x = self.get(f"hout", tf.keras.layers.Dense, 2 * self._size)(x)
         mean, std = tf.split(x, 2, -1)
         mean = self._mean_scale * tf.tanh(mean / self._mean_scale)
         std = tf.nn.softplus(std + raw_init_std) + self._min_std
         dist = tfd.Normal(mean, std)
         dist = tfd.TransformedDistribution(dist, tools.TanhBijector())
         dist = tfd.Independent(dist, 1)
         dist = tools.SampleDist(dist)
     elif self._dist == "onehot":
         x = self.get(f"hout", tf.keras.layers.Dense, self._size)(x)
         dist = tools.OneHotDist(x)
     else:
          raise NotImplementedError(self._dist)
     return dist
Example #29
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     self._transformed_distribution = tfd.TransformedDistribution(
         distribution=self._distribution, bijector=bijectors.Identity())
Example #30
 def _base_dist(self, mu: TensorLike, sigma: TensorLike, *args, **kwargs):
     return tfd.TransformedDistribution(
         distribution=tfd.Normal(loc=mu, scale=sigma, *args, **kwargs),
         bijector=tfp.bijectors.Sigmoid(),
         name="LogitNormal",
     )