Example #1
def trainable_gamma(shape, min_concentration=1e-3, min_scale=1e-5, name=None):
  """Learnable Gamma via concentration and scale parameterization."""
  with tf.variable_scope(None, default_name="trainable_gamma"):
    unconstrained_concentration = tf.get_variable(
        "unconstrained_concentration", shape,
        initializer=tf.random_normal_initializer(mean=0.5, stddev=0.1))
    unconstrained_scale = tf.get_variable(
        "unconstrained_scale", shape,
        initializer=tf.random_normal_initializer(stddev=0.1))
    concentration = tf.maximum(tf.nn.softplus(unconstrained_concentration),
                               min_concentration)
    rate = tf.maximum(1. / tf.nn.softplus(unconstrained_scale), 1. / min_scale)
    rv = ed.Gamma(concentration=concentration, rate=rate, name=name)
    return rv
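A minimal sketch of how trainable_gamma might be used as a mean-field variational family for the Gamma weights of a deep exponential family. The import path, layer widths and vocabulary size below are assumptions for illustration, not part of the original example.

import tensorflow as tf
from tensorflow_probability import edward2 as ed

units = [100, 30, 15]   # hypothetical layer widths (bottom to top)
feature_size = 500      # hypothetical vocabulary size

qw2 = trainable_gamma([units[2], units[1]], name="qw2")
qw1 = trainable_gamma([units[1], units[0]], name="qw1")
qw0 = trainable_gamma([units[0], feature_size], name="qw0")
# Each factor is an ed.Gamma random variable whose concentration and rate are
# backed by tf.Variables, so they can be optimized as variational parameters.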
Example #2
def deep_exponential_family(data_size, feature_size, units, shape):
    """A multi-layered topic model over a documents-by-terms matrix."""
    w2 = ed.Gamma(0.1, 0.3, sample_shape=[units[2], units[1]], name="w2")
    w1 = ed.Gamma(0.1, 0.3, sample_shape=[units[1], units[0]], name="w1")
    w0 = ed.Gamma(0.1, 0.3, sample_shape=[units[0], feature_size], name="w0")

    z2 = ed.Gamma(0.1, 0.1, sample_shape=[data_size, units[2]], name="z2")
    z1 = ed.Gamma(shape, shape / tf.matmul(z2, w2), name="z1")
    z0 = ed.Gamma(shape, shape / tf.matmul(z1, w1), name="z0")
    x = ed.Poisson(tf.matmul(z0, w0), name="x")
    return x
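One way this generative program could be consumed, sketched here with hypothetical sizes: ed.make_log_joint_fn turns the model function into a joint log-density over all of its named random variables, which is the usual starting point for a variational or MCMC objective.

import tensorflow as tf
from tensorflow_probability import edward2 as ed

log_joint = ed.make_log_joint_fn(deep_exponential_family)

# Model arguments are passed positionally; values for the named random
# variables ("w2", ..., "x") are passed as keyword arguments.  Here w*_val,
# z*_val and x_obs stand for tensors of the matching shapes.
# log_prob = log_joint(data_size, feature_size, units, shape,
#                      w2=w2_val, w1=w1_val, w0=w0_val,
#                      z2=z2_val, z1=z1_val, z0=z0_val, x=x_obs)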
Example #3
def deep_exponential_family(data_size, feature_size, units, shape):
    # units gives the width of each layer, from units[2] down to units[0],
    # with feature_size as the final output dimension.
    # data_size plays the role of the batch size.
    w2 = ed.Gamma(0.1, 0.3, sample_shape=[units[2], units[1]],
                  name="w2")  # the first two positional args are concentration and rate
    w1 = ed.Gamma(0.1, 0.3, sample_shape=[units[1], units[0]], name="w1")
    w0 = ed.Gamma(0.1, 0.3, sample_shape=[units[0], feature_size], name="w0")
    # z2 is the top-level latent; combined with the weights, it is propagated
    # down through the layers to generate x.
    z2 = ed.Gamma(0.1, 0.1, sample_shape=[data_size, units[2]], name="z2")
    z1 = ed.Gamma(shape, shape / tf.matmul(z2, w2),
                  name="z1")  # z1's shape follows the parameter shapes; only the rate
                              # is modeled here, the concentration stays fixed at `shape`
    z0 = ed.Gamma(shape, shape / tf.matmul(z1, w1), name="z0")
    x = ed.Poisson(tf.matmul(z0, w0), name="x")
    return x, w2, w1, w0, z2, z1, z0
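A minimal sketch (hypothetical sizes) of tracing one forward pass of this model: ed.tape() records every named random variable created while the function runs, which is convenient for inspecting or reusing the sampled latents.

import tensorflow as tf
from tensorflow_probability import edward2 as ed

with ed.tape() as model_tape:
    x, w2, w1, w0, z2, z1, z0 = deep_exponential_family(
        data_size=64, feature_size=500, units=[100, 30, 15], shape=0.1)
# model_tape is an ordered dict mapping names ("w2", ..., "x") to the
# corresponding ed.RandomVariable objects created during the call.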
Example #4
def trainable_gamma(shape, name=None):
    """Learnable Gamma via softplus-constrained shape and scale variables."""
    with tf.variable_scope(None, default_name="trainable_gamma"):
        return ed.Gamma(tf.nn.softplus(tf.get_variable("shape", shape)),
                        1.0 / tf.nn.softplus(tf.get_variable("scale", shape)),
                        name=name)
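A short usage sketch for this compact variant; the shape and name below are hypothetical. The returned object is an ed.Gamma random variable whose concentration and rate are driven by the trainable variables "shape" and "scale".

qw = trainable_gamma([15, 30], name="qw")
sample = qw.value          # a draw from the current Gamma distribution
dist = qw.distribution     # the underlying Gamma distribution object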
Example #5
def latent_gamma(shape, concentration, rate):
    """Gamma latents with fixed concentration and rate, replicated over `shape`."""
    W = ed.Gamma(concentration=concentration, rate=rate, sample_shape=shape)
    return W
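A usage sketch with hypothetical arguments: sample_shape replicates the single Gamma(concentration, rate) prior, so the call below yields a 64x10 matrix of independent Gamma(0.3, 0.3) latents wrapped in one random variable.

W = latent_gamma(shape=[64, 10], concentration=0.3, rate=0.3)
# W.shape == (64, 10); W behaves like a tf.Tensor in downstream ops.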