Example #1
    def _build(self, inputs):

        mean, covariance, scale = self.create_mean_n_cov_layers(inputs)

        mean_plus_minus_one = self.force_between_plus_minus_one(
            mean, self.plus_minus_one_method)

        # TODO if you want contractive regularizers implement them first. Then, uncomment the following lines (Riccardo)
        # self.set_contractive_regularizer(mean_zero_one, covariance,
        #                                 self._contractive_regularizer_inputs,
        #                                 self._contractive_regularizer_tuple,
        #                                 self._contractive_collection_network_str)
        #

        output_distribution = tfd.Logistic(loc=mean_plus_minus_one,
                                           scale=scale)

        # add a reconstruction_node method (returns some sort of mean or median, so reconstructions can be obtained without sampling)
        def reconstruction_node(self):
            return self.mean()

        output_distribution.reconstruction_node = types.MethodType(
            reconstruction_node, output_distribution)

        return output_distribution
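
What makes this example interesting is the last step: a bound method is attached to the distribution instance with types.MethodType, so callers can ask for a deterministic reconstruction instead of a sample. Below is a minimal standalone sketch of that pattern, assuming TensorFlow 2.x with TensorFlow Probability installed; the loc/scale tensors are made-up placeholders standing in for the network outputs.

import types

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# placeholder tensors standing in for the network's mean/scale outputs
mean_plus_minus_one = tf.constant([[0.2, -0.7]])
scale = tf.constant([[0.1, 0.3]])

output_distribution = tfd.Logistic(loc=mean_plus_minus_one, scale=scale)


def reconstruction_node(self):
    # deterministic "reconstruction": the mean of the distribution, no sampling
    return self.mean()


output_distribution.reconstruction_node = types.MethodType(
    reconstruction_node, output_distribution)

print(output_distribution.reconstruction_node())  # equals mean_plus_minus_one
print(output_distribution.sample())               # a stochastic draw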
Example #2
def discrete_logistics_likelihood(loc, scale, obs, side_censored=True):
    import tensorflow as tf
    from tensorflow_probability import distributions as tfd
    # integrate the density over a 1/255-wide bin centered on each observation
    x_lo = obs - 1 / 255 / 2
    x_hi = obs + 1 / 255 / 2
    if side_censored:
        # edge bins absorb the censored tails: push their outer bound to
        # +/-1000 (effectively +/-inf) so cdf(x_hi) - cdf(x_lo) covers the tail
        x_lo = tf.where(x_lo >= 0, x_lo,
                        tf.to_float(-1000) * tf.ones_like(x_lo))
        x_hi = tf.where(x_hi <= 1, x_hi,
                        tf.to_float(+1000) * tf.ones_like(x_hi))
    dist = tfd.Logistic(tf.to_double(loc), tf.to_double(scale))
    # tf.where doesn't backpropagate correctly when there's NaN in the unused branch. So instead of
    # ret = tf.where(
    #     tf.math.abs(obs - loc) <= 2*scale,
    #     tf.math.log(dist.cdf(tf.to_double(x_hi)) - dist.cdf(tf.to_double(x_lo))),
    #     dist.log_prob(tf.to_double(obs)) + tf.to_double(tf.math.log(1/255)))
    # we need
    p_exact = dist.cdf(tf.to_double(x_hi)) - dist.cdf(tf.to_double(x_lo))
    p_exact = tf.where(
        tf.math.abs(obs - loc) <= 2 * scale, p_exact, tf.ones_like(p_exact))
    ret = tf.where(
        tf.math.abs(obs - loc) <= 2 * scale, tf.math.log(p_exact),
        dist.log_prob(tf.to_double(obs)) + tf.to_double(tf.math.log(1 / 255)))
    return tf.to_float(ret)
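
A minimal usage sketch of the function above, assuming TensorFlow 1.x (tf.to_float and tf.to_double were removed in 2.x); the shapes and values are purely illustrative.

import numpy as np
import tensorflow as tf

obs = tf.constant(np.random.randint(0, 256, size=(2, 8, 8, 3)) / 255.0,
                  dtype=tf.float32)            # pixel intensities in [0, 1]
loc = tf.fill(tf.shape(obs), 0.5)              # predicted pixel means
scale = tf.fill(tf.shape(obs), 0.1)            # predicted logistic scales

log_likelihood = discrete_logistics_likelihood(loc, scale, obs)

with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(log_likelihood)))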
Example #3
 def _init_distribution(conditions, **kwargs):
     # unpack the distribution parameters from the conditions dict
     loc, scale = conditions["loc"], conditions["scale"]
     return tfd.Logistic(loc=loc, scale=scale, **kwargs)
Example #4
 def _base_dist(self, mu: TensorLike, s: TensorLike, *args, **kwargs):
     # map the (mu, s) parameterization onto TFP's (loc, scale)
     return tfd.Logistic(loc=mu, scale=s, *args, **kwargs)
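
Examples #3 and #4 are thin wrappers around the same constructor call. As a reference, here is a minimal sketch of what the resulting distribution object exposes, assuming TensorFlow 2.x with TensorFlow Probability installed; the parameter values are illustrative.

import tensorflow_probability as tfp

tfd = tfp.distributions

# a batch of two independent logistic distributions
dist = tfd.Logistic(loc=[0.0, 1.0], scale=[1.0, 0.5])

samples = dist.sample(3)         # shape (3, 2)
print(dist.log_prob(samples))    # elementwise log-density
print(dist.cdf([0.0, 1.0]))      # [0.5, 0.5]: loc is also the median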