def _stddev(self):
        # Per-component standard deviation: sqrt(diag(scale @ scale^H)),
        # broadcast out to batch_shape + event_shape.
        scale = self.scale
        if distribution_util.is_diagonal_scale(scale):
            # Diagonal scale: |diagonal| is the stddev directly.
            stddev = tf.abs(scale.diag_part())
        elif (isinstance(scale, tf.linalg.LinearOperatorLowRankUpdate)
              and scale.is_self_adjoint):
            # Self-adjoint operator: scale @ scale == scale @ scale^H.
            squared = scale.matmul(scale.to_dense())
            stddev = tf.sqrt(tf.linalg.diag_part(squared))
        else:
            squared = scale.matmul(scale.to_dense(), adjoint_arg=True)
            stddev = tf.sqrt(tf.linalg.diag_part(squared))

        target_shape = tensorshape_util.concatenate(self.batch_shape,
                                                    self.event_shape)
        if tensorshape_util.is_fully_defined(target_shape):
            if target_shape == stddev.shape:
                return stddev
        else:
            # Fall back to the dynamic shape when it is not known statically.
            target_shape = tf.concat(
                [self.batch_shape_tensor(), self.event_shape_tensor()], 0)

        # Add a dummy tensor of zeros to broadcast.  Only necessary when
        # target_shape != stddev.shape, but we could not rule that out.
        return stddev + tf.zeros(target_shape, self.dtype)
 def _variance(self):
   # Component variances: diag(2 * scale @ scale^H).
   if distribution_util.is_diagonal_scale(self.scale):
     return 2. * tf.square(self.scale.diag_part())
   elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and
         self.scale.is_self_adjoint):
     # Self-adjoint: scale @ scale == scale @ scale^H.
     # tf.linalg.diag_part replaces the removed TF1 alias tf.matrix_diag_part.
     return tf.linalg.diag_part(2. * self.scale.matmul(self.scale.to_dense()))
   else:
     return 2. * tf.linalg.diag_part(
         self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
 def _variance(self):
   # Component variances: diag(2 * scale @ scale^H), using cheaper forms
   # when the operator's structure allows it.
   scale = self.scale
   if distribution_util.is_diagonal_scale(scale):
     return 2. * tf.square(scale.diag_part())
   if (isinstance(scale, tf.linalg.LinearOperatorLowRankUpdate) and
       scale.is_self_adjoint):
     # Self-adjoint: scale @ scale == scale @ scale^H.
     return tf.linalg.diag_part(2. * scale.matmul(scale.to_dense()))
   return 2. * tf.linalg.diag_part(
       scale.matmul(scale.to_dense(), adjoint_arg=True))
  def _covariance(self):
    # Dense covariance scale @ scale^H, broadcast to the full sample shape
    # plus the (square) event shape, then adjusted by the shared helper.
    if distribution_util.is_diagonal_scale(self.scale):
      dense_cov = tf.linalg.diag(tf.square(self.scale.diag_part()))
    else:
      dense_cov = self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)

    full_shape = tf.concat(
        [self._sample_shape(), self._event_shape_tensor()], -1)
    return self._std_var_helper(
        tf.broadcast_to(dense_cov, full_shape), "covariance", 2, lambda x: x)
 def _variance(self):
   # diag(scale @ scale^H), then broadcast like loc via the shared helper.
   scale = self.scale
   if distribution_util.is_diagonal_scale(scale):
     raw = tf.square(scale.diag_part())
   elif (isinstance(scale, tf.linalg.LinearOperatorLowRankUpdate) and
         scale.is_self_adjoint):
     # Self-adjoint: scale @ scale == scale @ scale^H.
     raw = tf.linalg.diag_part(scale.matmul(scale.to_dense()))
   else:
     raw = tf.linalg.diag_part(
         scale.matmul(scale.to_dense(), adjoint_arg=True))
   return self._broadcast_variance_like_with_loc(raw)
 def _covariance(self):
   # Let
   #   W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
   # Then this distribution is
   #   X = loc + LW,
   # and then since Cov(wi, wj) = 1 if i=j, and 0 otherwise,
   #   Cov(X) = L Cov(W W^T) L^T = L L^T.
   if distribution_util.is_diagonal_scale(self.scale):
     # tf.linalg.diag replaces the removed TF1 alias tf.matrix_diag.
     return tf.linalg.diag(tf.square(self.scale.diag_part()))
   else:
     return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
 def _stddev(self):
   # Componentwise stddev: sqrt(2) * sqrt(diag(scale @ scale^H)).
   if distribution_util.is_diagonal_scale(self.scale):
     return np.sqrt(2) * tf.abs(self.scale.diag_part())
   elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and
         self.scale.is_self_adjoint):
     # Self-adjoint: scale @ scale == scale @ scale^H.
     # tf.linalg.diag_part replaces the removed TF1 alias tf.matrix_diag_part.
     return np.sqrt(2) * tf.sqrt(
         tf.linalg.diag_part(self.scale.matmul(self.scale.to_dense())))
   else:
     return np.sqrt(2) * tf.sqrt(
         tf.linalg.diag_part(
             self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
 def _covariance(self):
     # Let
     #   W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
     # Then this distribution is
     #   X = loc + LW,
     # and then since Cov(wi, wj) = 1 if i=j, and 0 otherwise,
     #   Cov(X) = L Cov(W W^T) L^T = L L^T.
     if distribution_util.is_diagonal_scale(self.scale):
         # tf.linalg.diag replaces the removed TF1 alias tf.matrix_diag.
         return tf.linalg.diag(tf.square(self.scale.diag_part()))
     else:
         return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
  def _variance(self):
    # diag(scale @ scale^H), broadcast to the full sample shape and then
    # adjusted by the shared helper.
    scale = self.scale
    if distribution_util.is_diagonal_scale(scale):
      raw_var = tf.square(scale.diag_part())
    elif (isinstance(scale, tf.linalg.LinearOperatorLowRankUpdate) and
          scale.is_self_adjoint):
      # Self-adjoint: scale @ scale == scale @ scale^H.
      raw_var = tf.linalg.diag_part(scale.matmul(scale.to_dense()))
    else:
      raw_var = tf.linalg.diag_part(
          scale.matmul(scale.to_dense(), adjoint_arg=True))

    return self._std_var_helper(
        tf.broadcast_to(raw_var, self._sample_shape()), "variance", 1,
        lambda x: x)
 def _covariance(self):
   # Let
   #   W = (w1,...,wk), with wj ~ iid Laplace(0, 1).
   # Then this distribution is
   #   X = loc + LW,
   # and since E[X] = loc,
   #   Cov(X) = E[LW W^T L^T] = L E[W W^T] L^T.
   # Since E[wi wj] = 0 if i != j, and 2 if i == j, we have
   #   Cov(X) = 2 LL^T
   if distribution_util.is_diagonal_scale(self.scale):
     # tf.linalg.diag replaces the removed TF1 alias tf.matrix_diag
     # (consistent with the tf.linalg twin of this method).
     return 2. * tf.linalg.diag(tf.square(self.scale.diag_part()))
   else:
     return 2. * self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
 def _covariance(self):
   # With W = (w1,...,wk), wj ~ iid Laplace(0, 1), this distribution is
   #   X = loc + LW.
   # Since E[X] = loc,
   #   Cov(X) = E[LW W^T L^T] = L E[W W^T] L^T,
   # and E[wi wj] is 2 when i == j and 0 otherwise, so
   #   Cov(X) = 2 L L^T.
   scale = self.scale
   if distribution_util.is_diagonal_scale(scale):
     return 2. * tf.linalg.diag(tf.square(scale.diag_part()))
   return 2. * scale.matmul(scale.to_dense(), adjoint_arg=True)
Example #12
0
 def _variance(self):
   # Component variances: diag(scale @ scale^H), exploiting operator
   # structure where it makes the computation cheaper.
   scale = self.scale
   if distribution_util.is_diagonal_scale(scale):
     return tf.square(scale.diag_part())
   if isinstance(scale, tf.linalg.LinearOperatorKronecker):
     # Square each Kronecker factor, then take the diagonal of their
     # Kronecker product.
     squared_factors = [op.matmul(op.adjoint()) for op in scale.operators]
     return tf.linalg.LinearOperatorKronecker(squared_factors).diag_part()
   # Covers both the self-adjoint low-rank-update case and the general
   # case; the original branches evaluated the identical expression.
   return scale.matmul(scale.adjoint()).diag_part()
  def _stddev(self):
    # sqrt(diag(scale @ scale^H)), broadcast to the full sample shape and
    # then adjusted by the shared helper.
    scale = self.scale
    if distribution_util.is_diagonal_scale(scale):
      raw_std = tf.abs(scale.diag_part())
    elif (isinstance(scale, tf.linalg.LinearOperatorLowRankUpdate) and
          scale.is_self_adjoint):
      # Self-adjoint: scale @ scale == scale @ scale^H.
      raw_std = tf.sqrt(
          tf.linalg.diag_part(scale.matmul(scale.to_dense())))
    else:
      raw_std = tf.sqrt(
          tf.linalg.diag_part(
              scale.matmul(scale.to_dense(), adjoint_arg=True)))

    return self._std_var_helper(
        tf.broadcast_to(raw_std, self._sample_shape()), "standard deviation",
        1, tf.sqrt)
Example #14
0
 def _covariance(self):
   # Dense covariance scale @ scale^H; when loc is present, broadcast it
   # against loc's shape with one extra trailing event dimension (the
   # covariance is [..., k, k] while loc is [..., k]).
   if distribution_util.is_diagonal_scale(self.scale):
     cov = tf.linalg.diag(tf.square(self.scale.diag_part()))
   else:
     cov = self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
   if self.loc is None:
     return cov
   loc_shape = ps.shape(self.loc)
   target = ps.concat([loc_shape, loc_shape[-1:]], axis=0)
   return tf.broadcast_to(
       cov, ps.broadcast_shape(ps.shape(cov), target))
Example #15
0
  def _stddev(self):
    # sqrt(diag(scale @ scale^H)), broadcast against loc when present.
    scale = self.scale
    if distribution_util.is_diagonal_scale(scale):
      result = tf.abs(scale.diag_part())
    elif (isinstance(scale, tf.linalg.LinearOperatorLowRankUpdate) and
          scale.is_self_adjoint):
      # Self-adjoint: scale @ scale == scale @ scale^H.
      result = tf.sqrt(
          tf.linalg.diag_part(scale.matmul(scale.to_dense())))
    else:
      result = tf.sqrt(
          tf.linalg.diag_part(
              scale.matmul(scale.to_dense(), adjoint_arg=True)))

    if self.loc is None:
      return result
    return tf.broadcast_to(
        result, ps.broadcast_shape(ps.shape(result), ps.shape(self.loc)))
    def _variance(self):
        # Component variances diag(scale @ scale^H), exploiting operator
        # structure, then broadcast against loc's shape.
        scale = self.scale
        if distribution_util.is_diagonal_scale(scale):
            result = tf.square(scale.diag_part())
        elif isinstance(scale, tf.linalg.LinearOperatorKronecker):
            # Square each Kronecker factor, then take the diagonal of
            # their Kronecker product.
            squared_factors = [
                op.matmul(op.adjoint()) for op in scale.operators
            ]
            result = tf.linalg.LinearOperatorKronecker(
                squared_factors).diag_part()
        else:
            # Covers both the self-adjoint low-rank-update case and the
            # general case; the original branches evaluated the identical
            # expression.
            result = scale.matmul(scale.adjoint()).diag_part()

        return tf.broadcast_to(
            result,
            ps.broadcast_shape(ps.shape(result), ps.shape(self.loc)))
 def _covariance(self):
     # Covariance scale @ scale^H; the diagonal fast path just squares
     # the diagonal.
     if distribution_util.is_diagonal_scale(self.scale):
         # tf.linalg.diag replaces the removed TF1 alias tf.matrix_diag.
         return tf.linalg.diag(tf.square(self.scale.diag_part()))
     else:
         return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
 def _covariance(self):
   # Covariance scale @ scale^H; the diagonal fast path just squares the
   # diagonal.
   if distribution_util.is_diagonal_scale(self.scale):
     # tf.linalg.diag replaces the removed TF1 alias tf.matrix_diag.
     return tf.linalg.diag(tf.square(self.scale.diag_part()))
   else:
     return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)