Example #1
def linear_reg(d):
    w0 = inf.Normal(0, 1, name="w0")
    w = inf.Normal(tf.zeros([d, 1]), 1, name="w")

    with inf.datamodel():
        x = inf.Normal(tf.ones([d]), 2, name="x")
        y = inf.Normal(w0 + x @ w, 1.0, name="y")
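The snippets in this collection come from different projects, so the imports are not shown. A reasonable reading is that they all assume roughly the following module-level setup (the value of scale_epsilon, used in a few examples below, is a placeholder; the original projects define it elsewhere):

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import inferpy as inf

scale_epsilon = 0.01  # placeholder value; defined elsewhere in the original projects

In InferPy, model functions such as the one above are normally wrapped with the @inf.probmodel decorator before being instantiated and fitted; the decorator is omitted in most of these snippets.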
Example #2
def Q_pca(k, d):
    """
    Variational model for the probabilistic PCA model (the pca(k, d) function).
    Arguments:
    - k: hidden space dimension.
    - d: observed space dimension.

    """
    # W's mean parameter
    qw_loc = inf.Parameter(tf.zeros([k, d]), name="qw_loc")
    # W's deviation parameter
    qw_scale = tf.math.softplus(inf.Parameter(tf.ones([k, d]),
                                              name="qw_scale"))
    # W ~ N(qw_loc, qw_scale)
    qw = inf.Normal(qw_loc, qw_scale, name="w")

    # delta's mean parameter
    qd_loc = inf.Parameter(tf.ones([d]), name="qd_loc")
    # delta's deviation parameter
    qd_scale = tf.math.softplus(inf.Parameter(tf.ones([d]), name="qd_scale"))
    # delta ~ N(qd_loc, qd_scale)
    qd = inf.Normal(qd_loc, qd_scale, name="delta")

    with inf.datamodel():
        # Z's mean parameter
        qz_loc = inf.Parameter(np.zeros([k]), name="qz_loc")
        # Z's deviation parameter
        qz_scale = tf.math.softplus(
            inf.Parameter(tf.ones([k]), name="qz_scale"))
        # Z ~ N(qz_loc, qz_scale)
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #3
File: 2.py Project: adeeps1/InferPy
def simple(mu=0):
    # global variables
    theta = inf.Normal(mu, 0.1, name="theta")

    # local variables
    with inf.datamodel():
        x = inf.Normal(theta, 1, name="x")
Example #4
def qmodel(k, d0, dx, encoder):
    with inf.datamodel():
        x = inf.Normal(tf.ones(dx), 1, name="x")
        output = encoder(x, d0, k)
        qz_loc = output[:, :k]
        qz_scale = tf.nn.softplus(output[:, k:]) + scale_epsilon
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #5
def Q_vae(k, l, d):
    """
    Variational auto-encoder variational model.
    Arguments:
    - k: hidden space dimension.
    - l: neural network hidden layer dimension.
    - d: observed space dimension.

    The hidden data is supposed to be generated as Z ~ N(f(x)[:k], f(x)[k:]),
    where f is a two-layer neural network (d-l-2k) defined below with Keras.
    """

    # Neural network definition
    nn = tf.keras.Sequential([
        tf.keras.layers.Dense(l, activation=tf.nn.relu),
        tf.keras.layers.Dense(2 * k),
    ])

    with inf.datamodel():
        # Observed variable X ~ N(0,1)
        x = inf.Normal(tf.zeros(d), 1, name="x")

        # Network output
        output = nn(x)
        # The first k-terms correspond to the distribution's mean.
        qz_loc = output[:, :k]
        # The last k terms correspond to the distribution's deviation. A softplus is applied
        # to avoid negative or zero values, and a small offset avoids zeros due to approximation issues.
        qz_scale = tf.nn.softplus(output[:, k:]) + 0.001

        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #6
def log_reg(d):
    w0 = inf.Normal(0., 1, name="w0")
    w = inf.Normal(0., 1, batch_shape=[d, 1], name="w")

    with inf.datamodel():
        x = inf.Normal(0., 2., batch_shape=d, name="x")
        y = inf.Bernoulli(logits=w0 + x @ w, name="y")
Example #7
def pca(k, d):
    beta = inf.Normal(loc=tf.zeros([k, d]),
                      scale=1, name="beta")               # shape = [k,d]

    with inf.datamodel():
        z = inf.Normal(tf.ones(k), 1, name="z")       # shape = [N,k]
        x = inf.Normal(z @ beta, 1, name="x")         # shape = [N,d]
Example #8
def linear_reg(d):
    w0 = inf.Normal(0, 1, name="w0")
    w = inf.Normal(0, 1, batch_shape=[d,1], name="w")

    with inf.datamodel():
        x = inf.Normal(5, 2, batch_shape=d, name="x")
        y = inf.Normal(w0 + x @ w, 1.0, name="y")
Example #9
File: 1.py Project: xcgfth/InferPy
def qmodel(k):
    with inf.datamodel():
        qz_loc = inf.Parameter(tf.ones([k]) * 0.5, name="qz_loc")
        qz_scale = tf.math.softplus(
            inf.Parameter(tf.ones([k]), name="qz_scale"))

        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #10
def nlpca(k, l, d):
    """
    Probabilistic non-linear PCA model.
    Arguments:
    - k: hidden space dimension.
    - l: neural network hidden layer dimension.
    - d: observed space dimension.

    The observed data is supposed to be generated as X ~ N(f(z), 1),
    where f is a two-layer neural network (k-l-d) built by the decoder function.
    """

    # Define the NN object
    nn = decoder(k, l, d)
    with inf.datamodel():
        # Variable that handles the hidden space representation of the data
        # Z ~ N(0,1)
        z = inf.Normal(loc=tf.zeros([k]), scale=1, name="z")

        # Retrieve the NN output
        output = nn(z)

        # Observed variables
        # X ~ N(nn(z), 1)
        x = inf.Normal(loc=output, scale=1.0, name="x")
Example #11
def nlpca(k, d0, dx, decoder):
    with inf.datamodel():
        z = inf.Normal(tf.ones(k) * 0.1, 1., name="z")  # shape = [N,k]
        output = decoder(z, d0, dx)
        x_loc = output[:, :dx]
        x_scale = tf.nn.softmax(output[:, dx:]) + scale_epsilon
        x = inf.Normal(x_loc, x_scale, name="x")  # shape = [N,d]
Example #12
def vae(k, d0, d):
    with inf.datamodel():
        z = inf.Normal(tf.ones(k), 1, name="z")
        decoder = inf.layers.Sequential([
            tfp.layers.DenseFlipout(d0, activation=tf.nn.relu),
            tf.keras.layers.Dense(d)
        ])
        x = inf.Normal(decoder(z), 1, name="x")
Example #13
File: vae.py Project: xcgfth/InferPy
def vae(k, d0, dx, decoder):

    with inf.datamodel():
        z = inf.Normal(tf.ones([k]) * 0.5, 1., name="z")  # shape = [N,k]
        output = decoder(z, d0, dx)
        x_loc = output[:, :dx]
        x_scale = tf.nn.softmax(output[:, dx:])
        x = inf.Normal(x_loc, x_scale, name="x")  # shape = [N,d]
Example #14
def qmodel(k, d0, dx, encoder):
    with inf.datamodel():
        x = inf.Normal(tf.ones([dx]), 1, name="x")

        output = encoder(x, d0, k)
        qz_loc = output[:, :k]
        qz_scale = tf.nn.softmax(output[:, k:])
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #15
def log_reg(d):
    w0 = inf.Normal(0., 1., name="w0")
    w = inf.Normal(np.zeros([d, 1]), np.ones([d, 1]), name="w")

    with inf.datamodel():
        x = inf.Normal(
            np.zeros(d), 2.,
            name="x")  # the scale is broadcasted to shape [d] because of loc
        y = inf.Bernoulli(logits=w0 + x @ w, name="y")
Example #16
def pca(k, d):
    w = inf.Normal(loc=tf.zeros([k, d]), scale=1, name="w")  # shape = [k,d]

    w0 = inf.Normal(loc=tf.zeros([d]), scale=1, name="w0")  # shape = [d]

    with inf.datamodel():

        z = inf.Normal(tf.zeros([k]), 1, name="z")  # shape = [N,k]
        x = inf.Normal(z @ w + w0, 1, name="x")  # shape = [N,d]
Example #17
def test_random_variable_in_datamodel():
    # test that a random variable created inside a datamodel with an explicit sample_shape
    # raises a warning and that the datamodel size is used instead
    with pytest.warns(UserWarning):
        with inf.datamodel(10):
            x = inf.Normal(0, 1, sample_shape=(2, ))

        assert x.sample_shape == 10
    # assert also that is_datamodel is true
    assert x.is_datamodel
Example #18
File: 1.py Project: saintland/InferPy
def qmodel(k, d):
    qw_loc = inf.Parameter(tf.ones([k, d]), name="qw_loc")
    qw_scale = tf.math.softplus(inf.Parameter(tf.ones([k, d]),
                                              name="qw_scale"))
    qw = inf.Normal(qw_loc, qw_scale, name="w")

    with inf.datamodel():
        qz_loc = inf.Parameter(tf.ones([k]), name="qz_loc")
        qz_scale = tf.math.softplus(
            inf.Parameter(tf.ones([k]), name="qz_scale"))
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #19
def Q_nlpca(k, l, d):
    """
    Variational model for the probabilistic non-linear PCA model (the nlpca(k, l, d) function).
    Arguments:
    - k: hidden space dimension.
    - l: neural network hidden layer dimension.
    - d: observed space dimension.

    """

    # First layer

    # beta0's mean parameter
    qbeta0_loc = inf.Parameter(tf.zeros([k, l]), name="qbeta0_loc")
    # beta0's deviation parameter
    qbeta0_scale = tf.math.softplus(
        inf.Parameter(tf.ones([k, l]), name="qbeta0_scale"))
    # beta0 ~ N(qbeta0_loc, qbeta0_scale)
    qbeta0 = inf.Normal(qbeta0_loc, qbeta0_scale, name="beta0")

    # alpha0's mean parameter
    qalpha0_loc = inf.Parameter(tf.zeros([l]), name="qalpha0_loc")
    # alpha0's deviation parameter
    qalpha0_scale = tf.math.softplus(
        inf.Parameter(tf.ones([l]), name="qalpha0_scale"))
    # alpha0 ~ N(qalpha0_loc , qalpha0_scale)
    qalpha0 = inf.Normal(qalpha0_loc, qalpha0_scale, name="alpha0")

    # Second layer

    # beta1's mean parameter
    qbeta1_loc = inf.Parameter(tf.zeros([l, d]), name="qbeta1_loc")
    # beta1's deviation parameter
    qbeta1_scale = tf.math.softplus(
        inf.Parameter(tf.ones([l, d]), name="qbeta1_scale"))
    # beta1 ~ N(qbeta1_loc, qbeta1_scale)
    qbeta1 = inf.Normal(qbeta1_loc, qbeta1_scale, name="beta1")

    # alpha1's mean parameter
    qalpha1_loc = inf.Parameter(tf.zeros([d]), name="qalpha1_loc")
    # alpha1's deviation parameter
    qalpha1_scale = tf.math.softplus(
        inf.Parameter(tf.ones([d]), name="qalpha1_scale"))
    # alpha1 ~ N(qalpha1_loc , qalpha1_scale)
    qalpha1 = inf.Normal(qalpha1_loc, qalpha1_scale, name="alpha1")

    with inf.datamodel():
        # z's mean parameter
        qz_loc = inf.Parameter(tf.zeros([k]), name="qz_loc")
        # z's deviation parameter
        qz_scale = tf.math.softplus(
            inf.Parameter(tf.ones([k]), name="qz_scale"))
        # z ~ N(qz_loc, qz_scale)
        qz = inf.Normal(loc=qz_loc, scale=qz_scale, name="z")
Example #20
def qmodel(k, d0, d):
    with inf.datamodel():
        x = inf.Normal(tf.ones(d), 1, name="x")
        encoder = tf.keras.Sequential([
            tf.keras.layers.Dense(d0, activation=tf.nn.relu),
            tf.keras.layers.Dense(2 * k)
        ])
        output = encoder(x)
        qz_loc = output[:, :k]
        qz_scale = tf.nn.softplus(output[:, k:]) + 0.01
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #21
def qmodel(k, d):
    qbeta_loc = inf.Parameter(tf.zeros([k, d]), name="qbeta_loc")
    qbeta_scale = tf.math.softplus(inf.Parameter(tf.ones([k, d]),
                                                 name="qbeta_scale"))

    qbeta = inf.Normal(qbeta_loc, qbeta_scale, name="beta")

    with inf.datamodel():
        qz_loc = inf.Parameter(np.ones(k), name="qz_loc")
        qz_scale = tf.math.softplus(inf.Parameter(tf.ones(k),
                                                  name="qz_scale"))

        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #22
def logregression(d, N, w_init=(1, 1), x_init=(0, 1)):

    w = inf.Normal(loc=np.ones(d, dtype="float32") * w_init[0],
                   scale=1. * w_init[1],
                   name="w")
    w0 = inf.Normal(loc=1. * w_init[0], scale=1. * w_init[1], name="w0")

    with inf.datamodel():
        x = inf.Normal(loc=np.ones(d, dtype="float32") * x_init[0],
                       scale=1. * x_init[1],
                       name="x")
        y = inf.Bernoulli(logits=tf.tensordot(x, w, axes=[[1], [0]]) + w0,
                          name="y")
Example #23
def cnn_flipout_classifier(S):
    with inf.datamodel():
        x = inf.Normal(tf.ones(S), 1, name="x")

        nn = inf.layers.Sequential([
            tfp.layers.Convolution2DFlipout(4,
                                            kernel_size=(10, 10),
                                            padding="same",
                                            activation="relu"),
            tf.keras.layers.GlobalMaxPool2D(),
            tf.keras.layers.Dense(1, activation='sigmoid')
        ])

        y = inf.Normal(nn(tf.expand_dims(x, 1)), 0.001, name="y")
Example #24
def mixture(k, d):
    """
    Gaussian mixture model. Validation arguments (validate_args, allow_nan_stats) are enabled so that invalid parameter values are caught during learning.
    Arguments:
    - k: number of components.
    - d: observed space dimensionality.

    """

    # Pi models the categorical parameter ruling each component probability.
    pi = inf.Dirichlet(np.ones(k) / k,
                       allow_nan_stats=False,
                       validate_args=True,
                       name="pi")

    # Lambda models each component's precision using inverse-gamma distributions (one per dimension, as a multidimensional stand-in for an inverse Wishart).
    Lambda = inf.InverseGamma(
        concentration=tf.ones([d, k]),
        scale=1,
        allow_nan_stats=False,
        validate_args=True,
        name="Lambda",
    )

    # Mu models each component's mean using a Gaussian distribution.
    mu = inf.Normal(
        loc=tf.zeros([d, k]),
        scale=1,
        allow_nan_stats=False,
        validate_args=True,
        name="mu",
    )

    # As categorical distributions cannot be used directly, a MixtureGaussian is used to model both the observed data and the categorical variable.
    with inf.datamodel():
        x = inf.MixtureGaussian(
            locs=mu,
            scales=Lambda,
            probs=pi,
            allow_nan_stats=False,
            validate_args=True,
            name="x",
        )
Example #25
def vae(k, l, d):
    """
    Variational auto-encoder model.
    Arguments:
    - k: hidden space dimension.
    - l: neural network hidden layer dimension.
    - d: observed space dimension.

    The observed data is supposed to be generated as X ~ N(f(z), 1),
    where f is a two-layer neural network (k-l-d) defined below with Keras layers.
    """

    # Network definition with Keras
    nn = inf.layers.Sequential([
        tf.keras.layers.Dense(l, activation=tf.nn.relu),
        tf.keras.layers.Dense(d),
    ])

    with inf.datamodel():
        # Hidden variable representation Z ~ N(0,1)
        z = inf.Normal(tf.zeros(k), 1, name="z")
        # Observed variable X ~ N(nn(z), 1)
        x = inf.Normal(nn(z), 1, name="x")
Example #26
def pca(k, d):
    """
    Probabilistic PCA model.
    Arguments:
    - k: hidden space dimension.
    - d: observed space dimension.

    The observed data is supposed to be generated as X ~ N(w^T z + delta, 1).
    """
    # Variable which encloses the linear transformation between spaces.
    # W ~ N(0,1)
    w = inf.Normal(loc=tf.zeros([k, d]), scale=1, name="w")

    # Variable that enables the observed data to be non-centered.
    # delta ~ N(0,1)
    delta = inf.Normal(loc=tf.zeros([d]), scale=1, name="delta")

    with inf.datamodel():
        # Variable that handles the hidden space representation of the data
        # Z ~ N(0,1)
        z = inf.Normal(tf.zeros([k]), 1, name="z")
        # Observed variables
        # X ~ N(w^T z + delta, 1)
        x = inf.Normal(z @ w + delta, 1, name="x")
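A minimal sketch of how a generative model like pca above is typically paired with a variational model and fitted in InferPy (assumptions: both pca and the Q_pca function from Example #2 are decorated with @inf.probmodel, and x_train is a hypothetical observed data matrix of shape [N, d]):

# Hedged usage sketch, not part of the original project code
m = pca(k=2, d=10)       # generative model (assumed decorated with @inf.probmodel)
q = Q_pca(k=2, d=10)     # variational model from Example #2 (same assumption)

VI = inf.inference.VI(q, epochs=1000)   # variational inference driven by the q model
m.fit({"x": x_train}, VI)               # x_train is a hypothetical [N, d] data matrix

# after fitting, the posterior over the hidden representation can be queried
z_post = m.posterior("z", data={"x": x_train}).sample()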
Example #27
def mdn():
    with inf.datamodel():
        x = inf.Normal(loc=tf.ones([D]), scale=1.0, name="x")
        locs, scales, logits = neural_network(x)
        y = inf.MixtureGaussian(locs, scales, logits=logits, name="y")
Example #28
def model():
    p = inf.Parameter(0., name='p')
    with inf.datamodel():
        x = inf.Normal(p, 1., name='x')
        inf.Normal(x, 1., name='y')
Example #29
def vae(k, d0, dx, decoder):
    with inf.datamodel():
        z = inf.Normal(tf.ones(k), 1, name="z")
        x = inf.Normal(decoder(z, d0, dx), 1, name="x")
Example #30
File: 2.py Project: adeeps1/InferPy
x = inf.Normal(loc=[[0., 0.], [0., 0.], [0., 0.]], scale=1)  # x.shape = [3,2]

x = inf.Normal(loc=np.zeros([3, 2]), scale=1)                # x.shape = [3,2]

x = inf.Normal(loc=0, scale=tf.ones([3, 2]))                 # x.shape = [3,2]


# sample shape

x = inf.Normal(tf.ones([3, 2]), 0, sample_shape=100)    # x.sample() shape = [100,3,2]

with inf.datamodel(100):
    x = inf.Normal(tf.ones([3, 2]), 0)                  # x.sample() shape = [100,3,2]

# event shape

x = inf.MultivariateNormalDiag(loc=[1., -1], scale_diag=[1, 2.])   # x.event_shape = [2]
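A short recap of how the three shape notions above relate (this summary is an addition, consistent with standard TFP/InferPy shape semantics):

# batch shape : shape of the distribution parameters, e.g. x.shape = [3, 2] above
# sample shape: number of i.i.d. draws, set via sample_shape or inf.datamodel(100)
# event shape : size of a single draw of a multivariate distribution, e.g. [2]
#               for the MultivariateNormalDiag example above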