Example #1
 def __init__(self,
              a,
              b,
              validate_args=False,
              allow_nan_stats=True,
              name="BetaWithSoftplusAB"):
   with ops.name_scope(name, values=[a, b]) as ns:
     super(BetaWithSoftplusAB, self).__init__(
         a=nn.softplus(a),
         b=nn.softplus(b),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=ns)
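Most of the constructors in this listing follow the same pattern: an unconstrained tensor is run through softplus so the wrapped distribution receives a strictly positive parameter. A minimal NumPy sketch of that idea (illustrative only, not taken from any of the listed projects):

import numpy as np

# softplus(x) = log(1 + exp(x)); np.logaddexp(0., x) computes it stably.
unconstrained = np.array([-4.0, 0.0, 3.0])
positive = np.logaddexp(0.0, unconstrained)
print(positive)  # every entry is > 0, so it can serve as a positive shape parameter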
Example #2
 def __init__(self,
              alpha,
              beta,
              validate_args=False,
              allow_nan_stats=True,
              name="GammaWithSoftplusAlphaBeta"):
   with ops.name_scope(name, values=[alpha, beta]) as ns:
     super(GammaWithSoftplusAlphaBeta, self).__init__(
         alpha=nn.softplus(alpha),
         beta=nn.softplus(beta),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=ns)
Example #3
 def __init__(
     self, alpha, beta, validate_args=False, allow_nan_stats=True, name="InverseGammaWithSoftplusAlphaBeta"
 ):
     parameters = locals()
     parameters.pop("self")
     with ops.name_scope(name, values=[alpha, beta]) as ns:
         super(InverseGammaWithSoftplusAlphaBeta, self).__init__(
             alpha=nn.softplus(alpha),
             beta=nn.softplus(beta),
             validate_args=validate_args,
             allow_nan_stats=allow_nan_stats,
             name=ns,
         )
     self._parameters = parameters
Example #4
 def __init__(self,
              alpha,
              beta,
              validate_args=False,
              allow_nan_stats=True,
              name="InverseGammaWithSoftplusAlphaBeta"):
     parameters = locals()
     parameters.pop("self")
     with ops.name_scope(name, values=[alpha, beta]) as ns:
         super(InverseGammaWithSoftplusAlphaBeta,
               self).__init__(alpha=nn.softplus(alpha),
                              beta=nn.softplus(beta),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=ns)
     self._parameters = parameters
Example #5
 def __init__(self,
              concentration,
              rate,
              validate_args=False,
              allow_nan_stats=True,
              name="GammaWithSoftplusConcentrationRate"):
     parameters = distribution_util.parent_frame_arguments()
     with ops.name_scope(name, values=[concentration, rate]) as name:
         super(GammaWithSoftplusConcentrationRate, self).__init__(
             concentration=nn.softplus(concentration,
                                       name="softplus_concentration"),
             rate=nn.softplus(rate, name="softplus_rate"),
             validate_args=validate_args,
             allow_nan_stats=allow_nan_stats,
             name=name)
     self._parameters = parameters
Example #6
 def __init__(self,
              concentration,
              scale,
              validate_args=False,
              allow_nan_stats=True,
              name="LomaxWithSoftplusConcentrationScale"):
     parameters = locals()
     with ops.name_scope(name, values=[concentration, scale]):
         super(LomaxWithSoftplusConcentrationScale, self).__init__(
             concentration=nn.softplus(concentration,
                                       name="softplus_concentration"),
             scale=nn.softplus(scale, name="softplus_scale"),
             validate_args=validate_args,
             allow_nan_stats=allow_nan_stats,
             name=name)
     self._parameters = parameters
Example #7
 def __init__(self,
              concentration,
              rate,
              validate_args=False,
              allow_nan_stats=True,
              name="InverseGammaWithSoftplusConcentrationRate"):
     parameters = locals()
     with ops.name_scope(name, values=[concentration, rate]):
         super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
             concentration=nn.softplus(concentration,
                                       name="softplus_concentration"),
             rate=nn.softplus(rate, name="softplus_rate"),
             validate_args=validate_args,
             allow_nan_stats=allow_nan_stats,
             name=name)
     self._parameters = parameters
Example #8
 def __init__(self,
              concentration,
              rate,
              validate_args=False,
              allow_nan_stats=True,
              name="GammaWithSoftplusConcentrationRate"):
   parameters = distribution_util.parent_frame_arguments()
   with ops.name_scope(name, values=[concentration, rate]) as name:
     super(GammaWithSoftplusConcentrationRate, self).__init__(
         concentration=nn.softplus(concentration,
                                   name="softplus_concentration"),
         rate=nn.softplus(rate, name="softplus_rate"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #9
 def __init__(self,
              a,
              b,
              validate_args=False,
              allow_nan_stats=True,
              name="BetaWithSoftplusAB"):
   parameters = locals()
   parameters.pop("self")
   with ops.name_scope(name, values=[a, b]) as ns:
     super(BetaWithSoftplusAB, self).__init__(
         a=nn.softplus(a, name="softplus_a"),
         b=nn.softplus(b, name="softplus_b"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=ns)
   self._parameters = parameters
Example #10
 def __init__(self,
              concentration,
              rate,
              validate_args=False,
              allow_nan_stats=True,
              name="InverseGammaWithSoftplusConcentrationRate"):
   parameters = dict(locals())
   with ops.name_scope(name, values=[concentration, rate]) as name:
     super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
         concentration=nn.softplus(concentration,
                                   name="softplus_concentration"),
         rate=nn.softplus(rate, name="softplus_rate"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #12
 def _entropy(self):
   probs = self._probs
   if self.validate_args:
     probs = control_flow_ops.with_dependencies(
         [check_ops.assert_less(
             probs,
             constant_op.constant(1., probs.dtype),
             message="Entropy is undefined when logits = inf or probs = 1.")],
         probs)
   # Claim: entropy(p) = softplus(s)/p - s
   # where s=logits and p=probs.
   #
   # Proof:
   #
   # entropy(p)
   # := -[(1-p)log(1-p) + plog(p)]/p
   # = -[log(1-p) + plog(p/(1-p))]/p
   # = -[-softplus(s) + ps]/p
   # = softplus(s)/p - s
   #
   # since,
   # log[1-sigmoid(s)]
    # = log[1/(1+exp(s))]
   # = -log[1+exp(s)]
   # = -softplus(s)
   #
   # using the fact that,
   # 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
   return nn.softplus(self.logits) / probs - self.logits
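The claim proved in the comment above can be verified numerically; a small NumPy sketch (illustrative, not part of the original distribution class):

import numpy as np

p = np.array([0.1, 0.5, 0.9])                        # probs
s = np.log(p) - np.log1p(-p)                         # logits
entropy_direct = -((1 - p) * np.log1p(-p) + p * np.log(p)) / p
entropy_softplus = np.logaddexp(0.0, s) / p - s      # softplus(s)/p - s
print(np.allclose(entropy_direct, entropy_softplus))  # True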
Example #14
 def __init__(self, lam, validate_args=False, allow_nan_stats=True, name="ExponentialWithSoftplusLam"):
     parameters = locals()
     parameters.pop("self")
     with ops.name_scope(name, values=[lam]) as ns:
         super(ExponentialWithSoftplusLam, self).__init__(
             lam=nn.softplus(lam), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=ns
         )
     self._parameters = parameters
Example #15
def _kl_bernoulli_bernoulli(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.

  Args:
    a: instance of a Bernoulli distribution object.
    b: instance of a Bernoulli distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_bernoulli_bernoulli".

  Returns:
    Batchwise KL(a || b)
  """
  with ops.name_scope(name, "kl_bernoulli_bernoulli", [a.logits, b.logits]):
    return (math_ops.sigmoid(a.logits) * (-nn.softplus(-a.logits) +
                                          nn.softplus(-b.logits)) +
            math_ops.sigmoid(-a.logits) * (-nn.softplus(a.logits) +
                                           nn.softplus(b.logits)))
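The softplus expression above can be checked against the textbook Bernoulli KL divergence pa*log(pa/pb) + (1-pa)*log((1-pa)/(1-pb)); a small NumPy sketch (illustrative, not from the original module):

import numpy as np

def softplus(x):
    return np.logaddexp(0.0, x)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

a_logits, b_logits = 0.3, -1.2
pa, pb = sigmoid(a_logits), sigmoid(b_logits)
kl_textbook = pa * np.log(pa / pb) + (1 - pa) * np.log((1 - pa) / (1 - pb))
kl_softplus = (sigmoid(a_logits) * (-softplus(-a_logits) + softplus(-b_logits)) +
               sigmoid(-a_logits) * (-softplus(a_logits) + softplus(b_logits)))
print(np.isclose(kl_textbook, kl_softplus))  # True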
Example #16
 def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name="LaplaceWithSoftplusScale"):
     parameters = locals()
     parameters.pop("self")
     with ops.name_scope(name, values=[loc, scale]) as ns:
         super(LaplaceWithSoftplusScale, self).__init__(
             loc=loc, scale=nn.softplus(scale), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=ns
         )
     self._parameters = parameters
Example #17
def _kl_bernoulli_bernoulli(a, b, name=None):
    """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.

  Args:
    a: instance of a Bernoulli distribution object.
    b: instance of a Bernoulli distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_bernoulli_bernoulli".

  Returns:
    Batchwise KL(a || b)
  """
    with ops.name_scope(name, "kl_bernoulli_bernoulli", [a.logits, b.logits]):
        return (math_ops.sigmoid(a.logits) *
                (-nn.softplus(-a.logits) + nn.softplus(-b.logits)) +
                math_ops.sigmoid(-a.logits) *
                (-nn.softplus(a.logits) + nn.softplus(b.logits)))
Example #18
 def __init__(self,
              concentration1,
              concentration0,
              validate_args=False,
              allow_nan_stats=True,
              name="BetaWithSoftplusConcentration"):
   parameters = dict(locals())
   with ops.name_scope(name, values=[concentration1,
                                     concentration0]) as name:
     super(BetaWithSoftplusConcentration, self).__init__(
         concentration1=nn.softplus(concentration1,
                                    name="softplus_concentration1"),
         concentration0=nn.softplus(concentration0,
                                    name="softplus_concentration0"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #19
 def __init__(self,
              concentration1,
              concentration0,
              validate_args=False,
              allow_nan_stats=True,
              name="BetaWithSoftplusConcentration"):
     parameters = distribution_util.parent_frame_arguments()
     with ops.name_scope(name, values=[concentration1,
                                       concentration0]) as name:
         super(BetaWithSoftplusConcentration, self).__init__(
             concentration1=nn.softplus(concentration1,
                                        name="softplus_concentration1"),
             concentration0=nn.softplus(concentration0,
                                        name="softplus_concentration0"),
             validate_args=validate_args,
             allow_nan_stats=allow_nan_stats,
             name=name)
     self._parameters = parameters
Example #20
def softplus(x):
  """Softplus activation function.

  Arguments:
      x: Input tensor.

  Returns:
      The softplus activation: `log(exp(x) + 1)`.
  """
  return nn.softplus(x)
Example #21
 def __init__(self, df, mu, sigma, validate_args=False, allow_nan_stats=True, name="StudentTWithAbsDfSoftplusSigma"):
     with ops.name_scope(name, values=[df, mu, sigma]) as ns:
         super(StudentTWithAbsDfSoftplusSigma, self).__init__(
             df=math_ops.floor(math_ops.abs(df)),
             mu=mu,
             sigma=nn.softplus(sigma),
             validate_args=validate_args,
             allow_nan_stats=allow_nan_stats,
             name=ns,
         )
Example #22
def softplus(x):
    """Softplus activation function.

  Arguments:
      x: Input tensor.

  Returns:
      The softplus activation: `log(exp(x) + 1)`.
  """
    return nn.softplus(x)
Example #23
 def __init__(
     self, mu, diag_stdev, validate_args=False, allow_nan_stats=True, name="MultivariateNormalDiagWithSoftplusStdDev"
 ):
     with ops.name_scope(name, values=[mu, diag_stdev]) as ns:
         super(MultivariateNormalDiagWithSoftplusStDev, self).__init__(
             mu=mu,
             diag_stdev=nn.softplus(diag_stdev),
             validate_args=validate_args,
             allow_nan_stats=allow_nan_stats,
             name=ns,
         )
Example #24
 def __init__(self,
              mu,
              sigma,
              validate_args=False,
              allow_nan_stats=True,
              name="NormalWithSoftplusSigma"):
   with ops.name_scope(name, values=[mu, sigma]) as ns:
     super(NormalWithSoftplusSigma, self).__init__(
         mu=mu,
         sigma=nn.softplus(sigma),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=ns)
Example #25
 def __init__(self,
              rate,
              validate_args=False,
              allow_nan_stats=True,
              name="ExponentialWithSoftplusRate"):
   parameters = locals()
   with ops.name_scope(name, values=[rate]) as name:
     super(ExponentialWithSoftplusRate, self).__init__(
         rate=nn.softplus(rate, name="softplus_rate"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #26
 def __init__(self,
              mu,
              diag_stdev,
              validate_args=False,
              allow_nan_stats=True,
              name="MultivariateNormalDiagWithSoftplusStdDev"):
     with ops.name_scope(name, values=[mu, diag_stdev]) as ns:
         super(MultivariateNormalDiagWithSoftplusStDev,
               self).__init__(mu=mu,
                              diag_stdev=nn.softplus(diag_stdev),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=ns)
Example #27
 def __init__(self,
              rate,
              validate_args=False,
              allow_nan_stats=True,
              name="ExponentialWithSoftplusRate"):
   parameters = locals()
   with ops.name_scope(name, values=[rate]):
     super(ExponentialWithSoftplusRate, self).__init__(
         rate=nn.softplus(rate, name="softplus_rate"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #28
 def __init__(self,
              loc,
              scale,
              validate_args=False,
              allow_nan_stats=True,
              name="LaplaceWithSoftplusScale"):
     with ops.name_scope(name, values=[loc, scale]) as ns:
         super(LaplaceWithSoftplusScale,
               self).__init__(loc=loc,
                              scale=nn.softplus(scale),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=ns)
Example #29
 def __init__(self,
              mu,
              sigma,
              validate_args=False,
              allow_nan_stats=True,
              name="NormalWithSoftplusSigma"):
     with ops.name_scope(name, values=[mu, sigma]) as ns:
         super(NormalWithSoftplusSigma,
               self).__init__(mu=mu,
                              sigma=nn.softplus(sigma),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=ns)
Example #30
 def __init__(self,
              lam,
              validate_args=False,
              allow_nan_stats=True,
              name="ExponentialWithSoftplusLam"):
     parameters = locals()
     parameters.pop("self")
     with ops.name_scope(name, values=[lam]) as ns:
         super(ExponentialWithSoftplusLam,
               self).__init__(lam=nn.softplus(lam),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=ns)
     self._parameters = parameters
Example #31
 def __init__(self,
              df,
              mu,
              sigma,
              validate_args=False,
              allow_nan_stats=True,
              name="StudentTWithAbsDfSoftplusSigma"):
     with ops.name_scope(name, values=[df, mu, sigma]) as ns:
         super(StudentTWithAbsDfSoftplusSigma,
               self).__init__(df=math_ops.floor(math_ops.abs(df)),
                              mu=mu,
                              sigma=nn.softplus(sigma),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=ns)
Example #32
 def __init__(self,
              loc,
              scale,
              validate_args=False,
              allow_nan_stats=True,
              name="LaplaceWithSoftplusScale"):
   parameters = distribution_util.parent_frame_arguments()
   with ops.name_scope(name, values=[loc, scale]) as name:
     super(LaplaceWithSoftplusScale, self).__init__(
         loc=loc,
         scale=nn.softplus(scale, name="softplus_scale"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #33
 def __init__(self,
              loc,
              scale_diag,
              validate_args=False,
              allow_nan_stats=True,
              name="MultivariateNormalDiagWithSoftplusScale"):
   parameters = dict(locals())
   with ops.name_scope(name, values=[scale_diag]) as name:
     super(MultivariateNormalDiagWithSoftplusScale, self).__init__(
         loc=loc,
         scale_diag=nn.softplus(scale_diag),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #34
 def __init__(self,
              loc,
              scale,
              validate_args=False,
              allow_nan_stats=True,
              name="NormalWithSoftplusScale"):
   parameters = locals()
   with ops.name_scope(name, values=[scale]):
     super(NormalWithSoftplusScale, self).__init__(
         loc=loc,
         scale=nn.softplus(scale, name="softplus_scale"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #35
 def __init__(self,
              loc,
              scale_diag,
              validate_args=False,
              allow_nan_stats=True,
              name="MultivariateNormalDiagWithSoftplusScale"):
     parameters = locals()
     with ops.name_scope(name, values=[scale_diag]) as ns:
         super(MultivariateNormalDiagWithSoftplusScale,
               self).__init__(loc=loc,
                              scale_diag=nn.softplus(scale_diag),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=ns)
     self._parameters = parameters
Example #36
 def __init__(self,
              loc,
              scale,
              validate_args=False,
              allow_nan_stats=True,
              name="NormalWithSoftplusScale"):
   parameters = dict(locals())
   with ops.name_scope(name, values=[scale]) as name:
     super(NormalWithSoftplusScale, self).__init__(
         loc=loc,
         scale=nn.softplus(scale, name="softplus_scale"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #37
 def __init__(self,
              mu,
              sigma,
              validate_args=False,
              allow_nan_stats=True,
              name="NormalWithSoftplusSigma"):
   parameters = locals()
   parameters.pop("self")
   with ops.name_scope(name, values=[sigma]) as ns:
     super(NormalWithSoftplusSigma, self).__init__(
         mu=mu,
         sigma=nn.softplus(sigma),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=ns)
   self._parameters = parameters
Example #38
def template(x_shape=[2, 3, 4, 5], description: str = ""):
    from tensorflow.python.ops import nn
    x = tf.placeholder(np.float32, x_shape, "x")
    y = nn.softplus(x)

    vx = np.random.rand(*x_shape).astype(np.float32) - 0.5
    with tf.Session() as sess:
        vy, = sess.run([y], {x: vx})

        graph = TensorFlowConverter(sess, batch_size=2).convert([x], [y])

    generate_kernel_test_case(
        description=f"[TensorFlow] Softplus {description}",
        graph=graph,
        inputs={graph.inputs[0]: vx},
        expected={graph.outputs[0]: vy})
Example #39
 def __init__(self,
              loc,
              scale,
              validate_args=False,
              allow_nan_stats=True,
              name="LaplaceWithSoftplusScale"):
     parameters = distribution_util.parent_frame_arguments()
     with ops.name_scope(name, values=[loc, scale]) as name:
         super(LaplaceWithSoftplusScale,
               self).__init__(loc=loc,
                              scale=nn.softplus(scale,
                                                name="softplus_scale"),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=name)
     self._parameters = parameters
Example #40
 def __init__(self,
              mu,
              diag_stdev,
              validate_args=False,
              allow_nan_stats=True,
              name="MultivariateNormalDiagWithSoftplusStdDev"):
   parameters = locals()
   parameters.pop("self")
   with ops.name_scope(name, values=[diag_stdev]) as ns:
     super(MultivariateNormalDiagWithSoftplusStDev, self).__init__(
         mu=mu,
         diag_stdev=nn.softplus(diag_stdev),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=ns)
   self._parameters = parameters
Example #41
 def __init__(self,
              mu,
              diag_stddev,
              validate_args=False,
              allow_nan_stats=True,
              name="MultivariateNormalDiagWithSoftplusStdDev"):
     parameters = locals()
     parameters.pop("self")
     with ops.name_scope(name, values=[diag_stddev]) as ns:
         super(MultivariateNormalDiagWithSoftplusStDev,
               self).__init__(mu=mu,
                              diag_stddev=nn.softplus(diag_stddev),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=ns)
     self._parameters = parameters
Example #42
 def __init__(self,
              df,
              loc,
              scale,
              validate_args=False,
              allow_nan_stats=True,
              name="StudentTWithAbsDfSoftplusScale"):
   parameters = locals()
   with ops.name_scope(name, values=[df, scale]) as ns:
     super(StudentTWithAbsDfSoftplusScale, self).__init__(
         df=math_ops.floor(math_ops.abs(df)),
         loc=loc,
         scale=nn.softplus(scale, name="softplus_scale"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=ns)
   self._parameters = parameters
Example #43
 def __init__(self,
              loc,
              scale,
              validate_args=False,
              allow_nan_stats=True,
              name="LaplaceWithSoftplusScale"):
     parameters = locals()
     parameters.pop("self")
     with ops.name_scope(name, values=[loc, scale]) as ns:
         super(LaplaceWithSoftplusScale,
               self).__init__(loc=loc,
                              scale=nn.softplus(scale,
                                                name="softplus_scale"),
                              validate_args=validate_args,
                              allow_nan_stats=allow_nan_stats,
                              name=ns)
     self._parameters = parameters
Example #44
 def __init__(self,
              df,
              loc,
              scale,
              validate_args=False,
              allow_nan_stats=True,
              name="StudentTWithAbsDfSoftplusScale"):
   parameters = dict(locals())
   with ops.name_scope(name, values=[df, scale]) as name:
     super(StudentTWithAbsDfSoftplusScale, self).__init__(
         df=math_ops.floor(math_ops.abs(df)),
         loc=loc,
         scale=nn.softplus(scale, name="softplus_scale"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=name)
   self._parameters = parameters
Example #45
def softplus(x):
    """Softplus activation function, `softplus(x) = log(exp(x) + 1)`.
  
  Example Usage:
  
  >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
  >>> b = tf.keras.activations.softplus(a) 
  >>> b.numpy()
  array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,
           2.0000000e+01], dtype=float32)
  
  Arguments:
      x: Input tensor.

  Returns:
      The softplus activation: `log(exp(x) + 1)`.
  """
    return nn.softplus(x)
Example #46
 def __init__(self,
              df,
              mu,
              sigma,
              validate_args=False,
              allow_nan_stats=True,
              name="StudentTWithAbsDfSoftplusSigma"):
   parameters = locals()
   parameters.pop("self")
   with ops.name_scope(name, values=[df, sigma]) as ns:
     super(StudentTWithAbsDfSoftplusSigma, self).__init__(
         df=math_ops.floor(math_ops.abs(df)),
         mu=mu,
         sigma=nn.softplus(sigma, name="softplus_sigma"),
         validate_args=validate_args,
         allow_nan_stats=allow_nan_stats,
         name=ns)
   self._parameters = parameters
Example #48
def lognormal_q(shape, name=None):
    with tf.variable_scope(name, default_name="lognormal_q"):
        min_scale = 1e-5
        loc = tf.get_variable("loc",
                              shape,
                              initializer=tf.random_normal_initializer(
                                  mean=1.0,
                                  stddev=0.1,
                              ),
                              trainable=True)
        scale = tf.get_variable("scale",
                                shape,
                                initializer=tf.random_normal_initializer(
                                    mean=-3.0, stddev=0.1),
                                trainable=True)
        rv = TransformedDistribution(
            distribution=Normal(loc, tf.maximum(softplus(scale), min_scale)),
            bijector=tf.contrib.distributions.bijectors.Exp())
        return rv
Example #49
 def _logcosh(x):
   return x + nn.softplus(-2. * x) - math_ops.log(2.)
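This helper is a numerically stable log(cosh(x)): since cosh(x) = exp(x) * (1 + exp(-2x)) / 2, we get log(cosh(x)) = x + softplus(-2x) - log(2). A short NumPy check (illustrative, independent of the original file):

import numpy as np

x = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
print(np.allclose(x + np.logaddexp(0.0, -2.0 * x) - np.log(2.0),
                  np.log(np.cosh(x))))  # True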
Example #50
 def _forward(x, alpha):
   """Forward computation of scaled softplus."""
   return alpha * nn.softplus(x / alpha)
Example #51
 def _logcosh(x):
     return x + nn.softplus(-2. * x) - math_ops.log(2.)
Example #52
def scaled_softplus(x, alpha, clip=None, name=None):
  """Returns `y = alpha * ln(1 + exp(x / alpha))` or `min(y, clip)`.

  This can be seen as a softplus applied to the scaled input, with the output
  appropriately scaled. As `alpha` tends to 0, `scaled_softplus(x, alpha)` tends
  to `relu(x)`. The clipping is optional; as `alpha` tends to 0,
  `scaled_softplus(x, alpha, clip=6)` tends to `relu6(x)`.

  Note: the gradient for this operation is defined to depend on the backprop
  inputs as well as the outputs of this operation.

  Args:
    x: A `Tensor` of inputs.
    alpha: A `Tensor`, indicating the amount of smoothness. The caller
        must ensure that `alpha > 0`.
    clip: (optional) A `Tensor`, the upper bound to clip the values.
    name: A name for the scope of the operations (optional).

  Returns:
    A tensor of the size and type determined by broadcasting of the inputs.

  """
  clipping = clip is not None
  with ops.name_scope(name, 'scaled_softplus',
                      [x, alpha] + ([clip] if clipping else [])):
    x = ops.convert_to_tensor(x, name='x')
    dtype = x.dtype
    alpha = ops.convert_to_tensor(alpha, dtype=dtype, name='alpha')
    # Compute the forward value.
    y = alpha * nn.softplus(x / alpha)
    if clipping:
      clip = ops.convert_to_tensor(clip, dtype=dtype, name='clip')
      y = math_ops.minimum(y, clip)

    def _grad(op, g):
      """Backprop for scaled softplus, with optional clipping."""
      y, x, alpha = op.inputs[:3]
      # Prevent the memory-expensive computations from happening before g is
      # available.
      with ops.control_dependencies([g]):
        y = array_ops.identity(y)
      clip_grad = []
      if clipping:
        clip = op.inputs[3]
        unclipped = math_ops.cast(y < clip, g.dtype)
        clip_grad = [_reduce_and_reshape_grad(g * (1. - unclipped), clip)]
        g *= unclipped
      y /= alpha
      emy = math_ops.exp(-y)
      dy_dx = 1. - emy
      # The eps below avoids log(0). Note that t*log(t) -> 0 as t->0.
      eps = 1e-8
      dy_dalpha = y * emy - dy_dx * math_ops.log(dy_dx + eps)
      # Backprop to the actual inputs, but not to the output.
      return [None,
              _reduce_and_reshape_grad(g * dy_dx, x),
              _reduce_and_reshape_grad(g * dy_dalpha, alpha)] + clip_grad

    if clipping:
      @function.Defun(dtype, dtype, dtype, dtype,
                      func_name='ScaledSoftplusHelper_clip_%s' % dtype.name,
                      shape_func=lambda op: [op.inputs[0].shape],
                      python_grad_func=_grad)
      def _forward_helper_clip(y, x, alpha, clip):
        del x, alpha, clip  # Unused.
        return y
      return _forward_helper_clip(y, x, alpha, clip)
    # No clipping.
    @function.Defun(dtype, dtype, dtype,
                    func_name='ScaledSoftplusHelper_%s' % dtype.name,
                    shape_func=lambda op: [op.inputs[0].shape],
                    python_grad_func=_grad)
    def _forward_helper(y, x, alpha):
      del x, alpha  # Unused.
      return y
    return _forward_helper(y, x, alpha)
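As the docstring notes, scaled_softplus approaches relu as alpha shrinks, and the clipped variant approaches relu6. A small NumPy illustration of those limits (a sketch, independent of the TensorFlow implementation above):

import numpy as np

def scaled_softplus_np(x, alpha, clip=None):
    y = alpha * np.logaddexp(0.0, x / alpha)   # alpha * softplus(x / alpha)
    return y if clip is None else np.minimum(y, clip)

x = np.linspace(-3.0, 9.0, 13)
for alpha in (1.0, 0.1, 0.001):
    err = np.max(np.abs(scaled_softplus_np(x, alpha) - np.maximum(x, 0.0)))
    print(alpha, err)                            # error shrinks as alpha -> 0
print(scaled_softplus_np(x, 0.001, clip=6.0))    # approximately relu6(x)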
Example #53
 def _entropy(self):
   return (-self.logits * (math_ops.sigmoid(self.logits) - 1) +
           nn.softplus(-self.logits))
Example #54
def scaled_softplus(x, alpha, clip=None, name=None):
    """Returns `y = alpha * ln(1 + exp(x / alpha))` or `min(y, clip)`.

  This can be seen as a softplus applied to the scaled input, with the output
  appropriately scaled. As `alpha` tends to 0, `scaled_softplus(x, alpha)` tends
  to `relu(x)`. The clipping is optional; as `alpha` tends to 0,
  `scaled_softplus(x, alpha, clip=6)` tends to `relu6(x)`.

  Note: the gradient for this operation is defined to depend on the backprop
  inputs as well as the outputs of this operation.

  Args:
    x: A `Tensor` of inputs.
    alpha: A `Tensor`, indicating the amount of smoothness. The caller
        must ensure that `alpha > 0`.
    clip: (optional) A `Tensor`, the upper bound to clip the values.
    name: A name for the scope of the operations (optional).

  Returns:
    A tensor of the size and type determined by broadcasting of the inputs.

  """
    clipping = clip is not None
    with ops.name_scope(name, 'scaled_softplus',
                        [x, alpha] + ([clip] if clipping else [])):
        x = ops.convert_to_tensor(x, name='x')
        dtype = x.dtype
        alpha = ops.convert_to_tensor(alpha, dtype=dtype, name='alpha')
        # Compute the forward value.
        y = alpha * nn.softplus(x / alpha)
        if clipping:
            clip = ops.convert_to_tensor(clip, dtype=dtype, name='clip')
            y = math_ops.minimum(y, clip)

        def _grad(op, g):
            """Backprop for scaled softplus, with optional clipping."""
            y, x, alpha = op.inputs[:3]
            # Prevent the memory-expensive computations from happening before g is
            # available.
            with ops.control_dependencies([g]):
                y = array_ops.identity(y)
            clip_grad = []
            if clipping:
                clip = op.inputs[3]
                unclipped = math_ops.cast(y < clip, g.dtype)
                clip_grad = [
                    _reduce_and_reshape_grad(g * (1. - unclipped), clip)
                ]
                g *= unclipped
            y /= alpha
            emy = math_ops.exp(-y)
            dy_dx = 1. - emy
            # The eps below avoids log(0). Note that t*log(t) -> 0 as t->0.
            eps = 1e-8
            dy_dalpha = y * emy - dy_dx * math_ops.log(dy_dx + eps)
            # Backprop to the actual inputs, but not to the output.
            return [
                None,
                _reduce_and_reshape_grad(g * dy_dx, x),
                _reduce_and_reshape_grad(g * dy_dalpha, alpha)
            ] + clip_grad

        if clipping:

            @function.Defun(dtype,
                            dtype,
                            dtype,
                            dtype,
                            func_name='ScaledSoftplusHelper_clip_%s' %
                            dtype.name,
                            shape_func=lambda op: [op.inputs[0].shape],
                            python_grad_func=_grad)
            def _forward_helper_clip(y, x, alpha, clip):
                del x, alpha, clip  # Unused.
                return y

            return _forward_helper_clip(y, x, alpha, clip)
        # No clipping.
        @function.Defun(dtype,
                        dtype,
                        dtype,
                        func_name='ScaledSoftplusHelper_%s' % dtype.name,
                        shape_func=lambda op: [op.inputs[0].shape],
                        python_grad_func=_grad)
        def _forward_helper(y, x, alpha):
            del x, alpha  # Unused.
            return y

        return _forward_helper(y, x, alpha)
Example #55
  def build(self, input_shape):
    """Builds the layer.

    Creates the variables for the network modeling the densities, creates the
    auxiliary loss estimating the median and tail quantiles of the densities,
    and then uses that to create the probability mass functions and the update
    op that produces the discrete cumulative density functions used by the range
    coder.

    Args:
      input_shape: Shape of the input tensor, used to get the number of
        channels.

    Raises:
      ValueError: if `input_shape` doesn't specify the length of the channel
        dimension.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    channel_axis = self._channel_axis(input_shape.ndims)
    channels = input_shape[channel_axis].value
    if channels is None:
      raise ValueError("The channel dimension of the inputs must be defined.")
    self.input_spec = base_layer.InputSpec(
        ndim=input_shape.ndims, axes={channel_axis: channels})
    filters = (1,) + self.filters + (1,)
    scale = self.init_scale ** (1 / (len(self.filters) + 1))

    # Create variables.
    self._matrices = []
    self._biases = []
    self._factors = []
    for i in range(len(self.filters) + 1):
      init = np.log(np.expm1(1 / scale / filters[i + 1]))
      matrix = self.add_variable(
          "matrix_{}".format(i), dtype=self.dtype,
          shape=(channels, filters[i + 1], filters[i]),
          initializer=init_ops.Constant(init))
      matrix = nn.softplus(matrix)
      self._matrices.append(matrix)

      bias = self.add_variable(
          "bias_{}".format(i), dtype=self.dtype,
          shape=(channels, filters[i + 1], 1),
          initializer=init_ops.RandomUniform(-.5, .5))
      self._biases.append(bias)

      if i < len(self.filters):
        factor = self.add_variable(
            "factor_{}".format(i), dtype=self.dtype,
            shape=(channels, filters[i + 1], 1),
            initializer=init_ops.Zeros())
        factor = math_ops.tanh(factor)
        self._factors.append(factor)

    # To figure out what range of the densities to sample, we need to compute
    # the quantiles given by `tail_mass / 2` and `1 - tail_mass / 2`. Since we
    # can't take inverses of the cumulative directly, we make it an optimization
    # problem:
    # `quantiles = argmin(|logit(cumulative) - target|)`
    # where `target` is `logit(tail_mass / 2)` or `logit(1 - tail_mass / 2)`.
    # Taking the logit (inverse of sigmoid) of the cumulative makes the
    # representation of the right target more numerically stable.

    # Numerically stable way of computing logits of `tail_mass / 2`
    # and `1 - tail_mass / 2`.
    target = np.log(2 / self.tail_mass - 1)
    # Compute lower and upper tail quantile as well as median.
    target = constant_op.constant([-target, 0, target], dtype=self.dtype)

    def quantiles_initializer(shape, dtype=None, partition_info=None):
      del partition_info  # unused
      assert tuple(shape[1:]) == (1, 3)
      init = constant_op.constant(
          [[[-self.init_scale, 0, self.init_scale]]], dtype=dtype)
      return array_ops.tile(init, (shape[0], 1, 1))

    quantiles = self.add_variable(
        "quantiles", shape=(channels, 1, 3), dtype=self.dtype,
        initializer=quantiles_initializer)
    logits = self._logits_cumulative(quantiles, stop_gradient=True)
    loss = math_ops.reduce_sum(abs(logits - target))
    self.add_loss(loss, inputs=None)

    # Save medians for `call`, `compress`, and `decompress`.
    self._medians = quantiles[:, :, 1:2]
    if not self.optimize_integer_offset:
      self._medians = math_ops.round(self._medians)

    # Largest distance observed between lower tail quantile and median,
    # or between median and upper tail quantile.
    minima = math_ops.reduce_max(self._medians - quantiles[:, :, 0:1])
    maxima = math_ops.reduce_max(quantiles[:, :, 2:3] - self._medians)
    minmax = math_ops.maximum(minima, maxima)
    minmax = math_ops.ceil(minmax)
    minmax = math_ops.maximum(minmax, 1)

    # Sample the density up to `minmax` around the median.
    samples = math_ops.range(-minmax, minmax + 1, dtype=self.dtype)
    samples += self._medians

    half = constant_op.constant(.5, dtype=self.dtype)
    # We strip the sigmoid from the end here, so we can use the special rule
    # below to only compute differences in the left tail of the sigmoid.
    # This increases numerical stability (see explanation in `call`).
    lower = self._logits_cumulative(samples - half, stop_gradient=True)
    upper = self._logits_cumulative(samples + half, stop_gradient=True)
    # Flip signs if we can move more towards the left tail of the sigmoid.
    sign = -math_ops.sign(math_ops.add_n([lower, upper]))
    pmf = abs(math_ops.sigmoid(sign * upper) - math_ops.sigmoid(sign * lower))
    # Add tail masses to first and last bin of pmf, as we clip values for
    # compression, meaning that out-of-range values get mapped to these bins.
    pmf = array_ops.concat([
        math_ops.add_n([pmf[:, 0, :1], math_ops.sigmoid(lower[:, 0, :1])]),
        pmf[:, 0, 1:-1],
        math_ops.add_n([pmf[:, 0, -1:], math_ops.sigmoid(-upper[:, 0, -1:])]),
        ], axis=-1)
    self._pmf = pmf

    cdf = coder_ops.pmf_to_quantized_cdf(
        pmf, precision=self.range_coder_precision)
    def cdf_getter(*args, **kwargs):
      del args, kwargs  # ignored
      return variable_scope.get_variable(
          "quantized_cdf", dtype=dtypes.int32, initializer=cdf,
          trainable=False, validate_shape=False, collections=())
    # Need to provide a fake shape here since add_variable insists on it.
    self._quantized_cdf = self.add_variable(
        "quantized_cdf", shape=(channels, 1), dtype=dtypes.int32,
        getter=cdf_getter, trainable=False)

    update_op = state_ops.assign(
        self._quantized_cdf, cdf, validate_shape=False)
    self.add_update(update_op, inputs=None)

    super(EntropyBottleneck, self).build(input_shape)
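One detail worth noting in the variable creation above: the matrices are initialized with np.log(np.expm1(...)) and then passed through nn.softplus, because log(expm1(t)) is the inverse of softplus. A tiny NumPy check (illustrative, with a made-up target value t):

import numpy as np

t = 0.37                                   # hypothetical 1 / scale / filters[i + 1]
init = np.log(np.expm1(t))                 # inverse softplus of t
print(np.isclose(np.logaddexp(0.0, init), t))  # softplus(init) == t -> True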
Example #56
def softplus(x):
  return nn.softplus(x)