def dirichlet_log_density(x, alpha, clip_finite=True):
    """Row-wise Dirichlet log-density.

    Assumes that x is either a vector or a right-stochastic matrix
    (i.e., each row is a probability dist), and alpha is broadcastable
    to the same shape as x.

    WARNING: does not enforce sum(x)==1; bad things will happen if this
    is violated.
    """
    # Promote a vector to a single-row matrix so every reduction below
    # can uniformly operate along axis 1.
    if len(x.get_shape()) == 1:
        x = tf.reshape(x, (1, -1))

    # Clip exact zeros away before the log so the density stays finite.
    if clip_finite:
        logx = tf.log(tf.clip_by_value(x, 1e-37, 1.0), name="dirichlet_logx")
    else:
        logx = tf.log(x, name="dirichlet_logx")

    # Materialize alpha at x's full shape so the per-row sums are valid.
    if alpha.get_shape() != x.get_shape():
        alpha = tf.zeros_like(x) + alpha

    # log B(alpha): per-row multivariate beta normalizer.
    log_normalizer = (tf.reduce_sum(gammaln(alpha), axis=1)
                      - gammaln(tf.reduce_sum(alpha, axis=1)))
    return tf.reduce_sum((alpha - 1) * logx, axis=1) - log_normalizer
def inv_gamma_log_density(x, alpha, beta):
    """Creates a TensorFlow variable representing the sum of one or more
    independent inverse Gamma log-densities.

    Args:
      x (scalar or vector): variable(s) being modeled as InvGamma
      alpha (scalar or vector): shape parameter(s)
      beta (scalar or vector): scale parameter(s)

    Each of alpha and beta must either be the same dimension as x, or a
    scalar (which will be broadcast as required). They may be TensorFlow
    variables or Python/numpy values.

    Returns:
      lps: elementwise log probabilities of x
    """
    # Dispatch log/gammaln to TF or numpy/scipy depending on whether
    # each argument is already a tensor.
    if isinstance(beta, tf.Tensor):
        log_beta = tf.log(beta)
    else:
        log_beta = np.log(beta)

    if isinstance(x, tf.Tensor):
        log_x = tf.log(x)
    else:
        log_x = np.log(x)

    if isinstance(alpha, tf.Tensor):
        gammaln_alpha = gammaln(alpha)
    else:
        gammaln_alpha = scipy.special.gammaln(alpha)

    # BUG FIX: the original recomputed tf.log(x) here, ignoring the
    # precomputed log_x and thereby defeating the numpy dispatch above.
    # log p(x) = -beta/x + alpha*log(beta) - (alpha+1)*log(x) - lgamma(alpha)
    lps = -beta / x + alpha * log_beta - (alpha + 1) * log_x - gammaln_alpha
    return lps
def dirichlet_log_density(x, alpha, clip_finite=True):
    """Dirichlet log-density for a single probability vector.

    Assumes that x is a vector, and alpha is a vector of the same length.

    WARNING: does not enforce sum(x)==1; bad things will happen if this
    is violated.

    NOTE(review): this redefines the `dirichlet_log_density` declared
    earlier in the file (the matrix-capable version) — whichever is
    defined last wins at import time; confirm which one callers expect.
    """
    # Clip zeros away before taking the log so the result stays finite.
    logx = (tf.log(tf.clip_by_value(x, 1e-37, 1.0), name="dirichlet_logx")
            if clip_finite
            else tf.log(x, name="dirichlet_logx"))

    # Broadcast a scalar alpha up to x's shape before reducing.
    if alpha.get_shape() != x.get_shape():
        alpha = tf.zeros_like(x) + alpha

    # log B(alpha): multivariate beta normalizing constant.
    normalizer = tf.reduce_sum(gammaln(alpha)) - gammaln(tf.reduce_sum(alpha))
    return tf.reduce_sum((alpha - 1) * logx) - normalizer
def gamma_log_density(x, alpha, beta, parameterization=None):
    """Creates a TensorFlow variable representing the sum of one or more
    independent Gamma log-densities (shape alpha, rate beta).

    Args:
      x (scalar or vector): variable(s) being modeled as Gamma
      alpha (scalar or vector): shape parameter(s)
      beta (scalar or vector): rate parameter(s)
      parameterization: currently unused; accepted for interface
        compatibility with existing callers.

    Each of alpha and beta must either be the same dimension as x, or a
    scalar (which will be broadcast as required). They may be TensorFlow
    variables or Python/numpy values.

    Returns:
      lps: elementwise log probabilities of x
    """
    # NOTE: the original docstring said "inverse Gamma", but the math
    # below is the (ordinary) Gamma density; also removed a dead
    # try/except that computed an unused `dtype`.

    # Dispatch log/gammaln to TF or numpy/scipy depending on whether
    # each argument is already a tensor.
    if isinstance(beta, tf.Tensor):
        log_beta = tf.log(beta)
    else:
        log_beta = np.log(beta)

    if isinstance(x, tf.Tensor):
        log_x = tf.log(x)
    else:
        log_x = np.log(x)

    if isinstance(alpha, tf.Tensor):
        gammaln_alpha = gammaln(alpha)
    else:
        gammaln_alpha = scipy.special.gammaln(alpha)

    # log p(x) = -beta*x + alpha*log(beta) + (alpha-1)*log(x) - lgamma(alpha)
    lps = -beta * x + alpha * log_beta + (alpha - 1) * log_x - gammaln_alpha
    return lps