Example #1
def log_reg(d):
    w0 = inf.Normal(0., 1, name="w0")
    w = inf.Normal(0., 1, batch_shape=[d, 1], name="w")

    with inf.datamodel():
        x = inf.Normal(0., 2., batch_shape=d, name="x")
        y = inf.Bernoulli(logits=w0 + x @ w, name="y")
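For reference, a minimal, self-contained NumPy sketch of how the logits expression broadcasts (the sizes d and N below are illustrative only):

import numpy as np

d, N = 3, 5
w0 = 0.0                 # scalar intercept
w = np.zeros((d, 1))     # weights, shape [d, 1]
x = np.zeros((N, d))     # a batch of N datamodel samples, shape [N, d]
logits = w0 + x @ w      # x @ w has shape [N, 1]; the scalar w0 broadcasts over it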
Example #2
def nlpca(k, l, d):
    """
    Probabilistic non-linear PCA model.
    Arguments:
    - k: hidden space dimension.
    - l: neural network hidden layer dimension.
    - d: observed space dimension.

    The observed data is supposed to be generated as X ~ N(f(z), 1),
    where f is a two-layer neural network (k-l-d) defined in the decoder function.
    """

    # Define the NN object
    nn = decoder(k, l, d)
    with inf.datamodel():
        # Variable that handles the hidden space representation of the data
        # Z ~ N(0,1)
        z = inf.Normal(loc=tf.zeros([k]), scale=1, name="z")

        # Retrieve the NN output
        output = nn(z)

        # Observed variables
        # X ~ N(nn(z), 1)
        x = inf.Normal(loc=output, scale=1.0, name="x")
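As a dimension check only, the generative map f follows the k-l-d shape flow below; this is a self-contained NumPy sketch with illustrative sizes, while the real weights are the random variables created in the decoder function (see Example #10):

import numpy as np

k, l, d, N = 2, 100, 784, 10
z = np.random.randn(N, k)
beta0, alpha0 = np.random.randn(k, l), np.random.randn(l)
beta1, alpha1 = np.random.randn(l, d), np.random.randn(d)
f_z = np.maximum(z @ beta0 + alpha0, 0) @ beta1 + alpha1   # ReLU hidden layer; result has shape [N, d]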
Example #3
def pca(k, d):
    beta = inf.Normal(loc=tf.zeros([k, d]),
                      scale=1, name="beta")               # shape = [k,d]

    with inf.datamodel():
        z = inf.Normal(tf.ones(k), 1, name="z")       # shape = [N,k]
        x = inf.Normal(z @ beta, 1, name="x")         # shape = [N,d]
Example #4
    def linear_reg(d):
        w0 = inf.Normal(0, 1, name="w0")
        w = inf.Normal(tf.zeros([d, 1]), 1, name="w")

        with inf.datamodel():
            x = inf.Normal(tf.ones([d]), 2, name="x")
            y = inf.Normal(w0 + x @ w, 1.0, name="y")
Example #5
def Q_vae(k, l, d):
    """
    Variational (encoder) model for the variational auto-encoder.
    Arguments:
    - k: hidden space dimension.
    - l: neural network hidden layer dimension.
    - d: observed space dimension.

    The hidden variable is supposed to be generated as Z ~ N(f(x)[:k], f(x)[k:]),
    where f is a two-layer neural network (d-l-2k) defined below with Keras.
    """

    # Neural network definition
    nn = tf.keras.Sequential([
        tf.keras.layers.Dense(l, activation=tf.nn.relu),
        tf.keras.layers.Dense(2 * k),
    ])

    with inf.datamodel():
        # Observed variable X ~ N(0,1)
        x = inf.Normal(tf.zeros(d), 1, name="x")

        # Network output
        output = nn(x)
        # The first k-terms correspond to the distribution's mean.
        qz_loc = output[:, :k]
        # The last k terms correspond to the distribution's standard deviation. A softplus is applied
        # to avoid negative or zero values; a small offset prevents the result from reaching 0 due to approximation issues.
        qz_scale = tf.nn.softplus(output[:, k:]) + 0.001

        qz = inf.Normal(qz_loc, qz_scale, name="z")
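The softplus-plus-offset transform above guarantees a strictly positive scale; a self-contained TensorFlow illustration:

import tensorflow as tf

raw = tf.constant([-10.0, 0.0, 10.0])
scale = tf.nn.softplus(raw) + 0.001   # softplus maps the reals to (0, inf); the offset keeps the value away from 0
print(scale)                          # approximately [0.001, 0.694, 10.001], always a valid Normal scale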
Example #6
File: 2.py Project: adeeps1/InferPy
def simple(mu=0):
    # global variables
    theta = inf.Normal(mu, 0.1, name="theta")

    # local variables
    with inf.datamodel():
        x = inf.Normal(theta, 1, name="x")
Example #7
def Q_pca(k, d):
    """
    Variational model for Probabilistic PCA model (pca(k,d) function).
    Arguments:
    - k: hidden space dimension.
    - d: observed space dimension.

    """
    # W's mean parameter
    qw_loc = inf.Parameter(tf.zeros([k, d]), name="qw_loc")
    # W's deviation parameter
    qw_scale = tf.math.softplus(inf.Parameter(tf.ones([k, d]),
                                              name="qw_scale"))
    # W ~ N(qw_loc, qw_scale)
    qw = inf.Normal(qw_loc, qw_scale, name="w")

    # delta's mean parameter
    qd_loc = inf.Parameter(tf.ones([d]), name="qd_loc")
    # delta's deviation parameter
    qd_scale = tf.math.softplus(inf.Parameter(tf.ones([d]), name="qd_scale"))
    # delta ~ N(qd_loc, qd_scale)
    qd = inf.Normal(qd_loc, qd_scale, name="delta")

    with inf.datamodel():
        # Z's mean parameter
        qz_loc = inf.Parameter(np.zeros([k]), name="qz_loc")
        # Z's deviation parameter
        qz_scale = tf.math.softplus(
            inf.Parameter(tf.ones([k]), name="qz_scale"))
        # Z ~ N(qz_loc, qz_scale)
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #8
def qmodel(k, d0, dx, encoder):
    with inf.datamodel():
        x = inf.Normal(tf.ones(dx), 1, name="x")
        output = encoder(x, d0, k)
        qz_loc = output[:, :k]
        qz_scale = tf.nn.softplus(output[:, k:]) + scale_epsilon
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #9
def test_graph(init_context):
    # this test checks whether calling update_graph updates the graph or not, depending on is_building
    is_building = init_context['is_building']
    elems = []  # expected elements in the graph will be appended to this list

    # the variable does not exist in the context
    assert len(randvar_registry.get_graph()) == len(elems)
    inf.Normal(0, 1, name='x')
    randvar_registry.update_graph()
    if is_building:
        elems.append('x')

    # the variable exists in the context
    # randvar_registry.register_variable(x) has been automatically called
    assert len(randvar_registry.get_graph()) == len(elems)
    for elem in elems:
        assert elem in randvar_registry.get_graph()

    inf.Normal(0, 1, name='y')
    randvar_registry.update_graph()
    if is_building:
        elems.append('y')

    # the variable exists in the context
    # randvar_registry.register_variable(y) has been automatically called
    assert len(randvar_registry.get_graph()) == len(elems)
    for elem in elems:
        assert elem in randvar_registry.get_graph()
Example #10
def decoder(k, l, d):
    """
    Decoder two-layer neural network for non-linear PCA.
    It is composed of four N(0,1)-distributed variables and a ReLU activation.
    Arguments:
    - k: hidden space dimension.
    - l: neural network hidden layer dimension.
    - d: observed space dimension.

    No activation function is needed at the end, as the output is used as the mean of a distribution.
    """

    # first layer
    beta0 = inf.Normal(tf.zeros([k, l]), 1, name="beta0")
    alpha0 = inf.Normal(tf.zeros([l]), 1, name="alpha0")

    # second layer
    beta1 = inf.Normal(tf.zeros([l, d]), 1, name="beta1")
    alpha1 = inf.Normal(tf.zeros([d]), 1, name="alpha1")

    # Define the network computation. The decoder returns this function so that the four
    # variables are defined only once.
    def operation(z):
        return tf.nn.relu(z @ beta0 + alpha0) @ beta1 + alpha1

    return operation
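A hedged usage note: because decoder returns a closure, the four weight variables are created once, when decoder is called, and are only reused on every application; z_a and z_b below are placeholder tensors, not defined here.

nn = decoder(k=2, l=100, d=784)   # beta0, alpha0, beta1 and alpha1 are created here, once
out_a = nn(z_a)                   # later calls only apply the computation...
out_b = nn(z_b)                   # ...reusing the same random variables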
Example #11
def linear_reg(d):
    w0 = inf.Normal(0, 1, name="w0")
    w = inf.Normal(0, 1, batch_shape=[d,1], name="w")

    with inf.datamodel():
        x = inf.Normal(5, 2, batch_shape=d, name="x")
        y = inf.Normal(w0 + x @ w, 1.0, name="y")
Example #12
def nlpca(k, d0, dx, decoder):
    with inf.datamodel():
        z = inf.Normal(tf.ones(k) * 0.1, 1., name="z")  # shape = [N,k]
        output = decoder(z, d0, dx)
        x_loc = output[:, :dx]
        x_scale = tf.nn.softmax(output[:, dx:]) + scale_epsilon
        x = inf.Normal(x_loc, x_scale, name="x")  # shape = [N,d]
Example #13
def vae(k, d0, dx, decoder):

    with inf.datamodel():
        z = inf.Normal(tf.ones([k]) * 0.5, 1., name="z")  # shape = [N,k]
        output = decoder(z, d0, dx)
        x_loc = output[:, :dx]
        x_scale = tf.nn.softmax(output[:, dx:])
        x = inf.Normal(x_loc, x_scale, name="x")  # shape = [N,d]
Example #14
    def qmodel(d):
        qw0_loc = inf.Parameter(0., name="qw0_loc")
        qw0_scale = tf.math.softplus(inf.Parameter(1., name="qw0_scale"))
        qw0 = inf.Normal(qw0_loc, qw0_scale, name="w0")

        qw_loc = inf.Parameter(tf.zeros([d, 1]), name="qw_loc")
        qw_scale = tf.math.softplus(inf.Parameter(tf.ones([d, 1]), name="qw_scale"))
        qw = inf.Normal(qw_loc, qw_scale, name="w")
Example #15
def vae(k, d0, d):
    with inf.datamodel():
        z = inf.Normal(tf.ones(k), 1, name="z")
        decoder = inf.layers.Sequential([
            tfp.layers.DenseFlipout(d0, activation=tf.nn.relu),
            tf.keras.layers.Dense(d)
        ])
        x = inf.Normal(decoder(z), 1, name="x")
Example #16
def qmodel(k, d0, dx, encoder):
    with inf.datamodel():
        x = inf.Normal(tf.ones([dx]), 1, name="x")

        output = encoder(x, d0, k)
        qz_loc = output[:, :k]
        qz_scale = tf.nn.softmax(output[:, k:])
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #17
def pca(k, d):
    w = inf.Normal(loc=tf.zeros([k, d]), scale=1, name="w")  # shape = [k,d]

    w0 = inf.Normal(loc=tf.zeros([d]), scale=1, name="w0")  # shape = [d]

    with inf.datamodel():

        z = inf.Normal(tf.zeros([k]), 1, name="z")  # shape = [N,k]
        x = inf.Normal(z @ w + w0, 1, name="x")  # shape = [N,d]
Example #18
def log_reg(d):
    w0 = inf.Normal(0., 1., name="w0")
    w = inf.Normal(np.zeros([d, 1]), np.ones([d, 1]), name="w")

    with inf.datamodel():
        x = inf.Normal(
            np.zeros(d), 2.,
            name="x")  # the scale is broadcasted to shape [d] because of loc
        y = inf.Bernoulli(logits=w0 + x @ w, name="y")
Example #19
def Q_nlpca(k, l, d):
    """
    Variational model for Probabilistic non-linear PCA model (nlpca(k,l,d) function).
    Arguments:
    - k: hidden space dimension.
    - l: neural network hidden layer dimension.
    - d: observed space dimension.

    """

    # First layer

    # beta0's mean parameter
    qbeta0_loc = inf.Parameter(tf.zeros([k, l]), name="qbeta0_loc")
    # beta0's deviation parameter
    qbeta0_scale = tf.math.softplus(
        inf.Parameter(tf.ones([k, l]), name="qbeta0_scale"))
    # beta0 ~ N(qbeta0_loc, qbeta0_scale)
    qbeta0 = inf.Normal(qbeta0_loc, qbeta0_scale, name="beta0")

    # alpha0's mean parameter
    qalpha0_loc = inf.Parameter(tf.zeros([l]), name="qalpha0_loc")
    # alpha0's deviation parameter
    qalpha0_scale = tf.math.softplus(
        inf.Parameter(tf.ones([l]), name="qalpha0_scale"))
    # alpha0 ~ N(qalpha0_loc , qalpha0_scale)
    qalpha0 = inf.Normal(qalpha0_loc, qalpha0_scale, name="alpha0")

    # Second layer

    # beta1's mean parameter
    qbeta1_loc = inf.Parameter(tf.zeros([l, d]), name="qbeta1_loc")
    # beta1's deviation parameter
    qbeta1_scale = tf.math.softplus(
        inf.Parameter(tf.ones([l, d]), name="qbeta1_scale"))
    # beta1 ~ N(qbeta1_loc, qbeta1_scale)
    qbeta1 = inf.Normal(qbeta1_loc, qbeta1_scale, name="beta1")

    # alpha1's mean parameter
    qalpha1_loc = inf.Parameter(tf.zeros([d]), name="qalpha1_loc")
    # alpha1's deviation parameter
    qalpha1_scale = tf.math.softplus(
        inf.Parameter(tf.ones([d]), name="qalpha1_scale"))
    # alpha1 ~ N(qalpha1_loc , qalpha1_scale)
    qalpha1 = inf.Normal(qalpha1_loc, qalpha1_scale, name="alpha1")

    with inf.datamodel():
        # z's mean parameter
        qz_loc = inf.Parameter(tf.zeros([k]), name="qz_loc")
        # z's deviation parameter
        qz_scale = tf.math.softplus(
            inf.Parameter(tf.ones([k]), name="qz_scale"))
        # z ~ N(qz_loc, qz_scale)
        qz = inf.Normal(loc=qz_loc, scale=qz_scale, name="z")
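A minimal fitting sketch, assuming the InferPy 1.x workflow (both functions decorated with @inf.probmodel, inf.inference.VI and ProbModel.fit); x_train stands for an [N, d] data array and is not defined here:

m = nlpca(k=2, l=100, d=x_train.shape[1])     # model from Example #2, assumed decorated with @inf.probmodel
q = Q_nlpca(k=2, l=100, d=x_train.shape[1])   # this variational model, assumed decorated likewise
vi = inf.inference.VI(q, epochs=1000)         # variational inference engine
m.fit({"x": x_train}, vi)                     # the key matches the observed variable name "x"
posterior_z = m.posterior("z")                # query the learned posterior over the latent z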
Example #20
def qmodel(k, d0, d):
    with inf.datamodel():
        x = inf.Normal(tf.ones(d), 1, name="x")
        encoder = tf.keras.Sequential([
            tf.keras.layers.Dense(d0, activation=tf.nn.relu),
            tf.keras.layers.Dense(2 * k)
        ])
        output = encoder(x)
        qz_loc = output[:, :k]
        qz_scale = tf.nn.softplus(output[:, k:]) + 0.01
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #21
def qmodel(k, d):
    qw_loc = inf.Parameter(tf.ones([k, d]), name="qw_loc")
    qw_scale = tf.math.softplus(inf.Parameter(tf.ones([k, d]),
                                              name="qw_scale"))
    qw = inf.Normal(qw_loc, qw_scale, name="w")

    with inf.datamodel():
        qz_loc = inf.Parameter(tf.ones([k]), name="qz_loc")
        qz_scale = tf.math.softplus(
            inf.Parameter(tf.ones([k]), name="qz_scale"))
        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #22
def test_name():
    x = inf.Normal(0, 1, name='foo')
    assert x.name == 'foo'

    # using the name, not the tensor name
    x = inf.Normal(0, 1, name='foo')
    assert x.name == 'foo'

    # Automatic name generation. It starts with 'randvar_X', where initially X is 0
    x = inf.Normal(0, 1)
    assert isinstance(x.name, str)
    assert x.name == 'randvar_0'
Example #23
def test_has_datamodel_var_parameter():
    x = inf.Normal(0, 1)
    with data_model.datamodel(size=10):
        y = inf.Normal(x, 1)
        z = inf.Normal(y, 1)

    # uses the default random variable registry
    assert not data_model._has_datamodel_var_parameters(
        x.name)  # outside datamodel
    assert not data_model._has_datamodel_var_parameters(
        y.name)  # first level in datamodel (not var param)
    assert data_model._has_datamodel_var_parameters(
        z.name)  # has the y rand var as parameter
Example #24
def qmodel(k, d):
    qbeta_loc = inf.Parameter(tf.zeros([k, d]), name="qbeta_loc")
    qbeta_scale = tf.math.softplus(inf.Parameter(tf.ones([k, d]),
                                                 name="qbeta_scale"))

    qbeta = inf.Normal(qbeta_loc, qbeta_scale, name="beta")

    with inf.datamodel():
        qz_loc = inf.Parameter(np.ones(k), name="qz_loc")
        qz_scale = tf.math.softplus(inf.Parameter(tf.ones(k),
                                                  name="qz_scale"))

        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #25
def logregression(d, N, w_init=(1, 1), x_init=(0, 1)):

    w = inf.Normal(loc=np.ones(d, dtype="float32") * w_init[0],
                   scale=1. * w_init[1],
                   name="w")
    w0 = inf.Normal(loc=1. * w_init[0], scale=1. * w_init[1], name="w0")

    with inf.datamodel():
        x = inf.Normal(loc=np.ones(d, dtype="float32") * x_init[0],
                       scale=1. * x_init[1],
                       name="x")
        y = inf.Bernoulli(logits=tf.tensordot(x, w, axes=[[1], [0]]) + w0,
                          name="y")
Example #26
def cnn_flipout_classifier(S):
    with inf.datamodel():
        x = inf.Normal(tf.ones(S), 1, name="x")

        nn = inf.layers.Sequential([
            tfp.layers.Convolution2DFlipout(4,
                                            kernel_size=(10, 10),
                                            padding="same",
                                            activation="relu"),
            tf.keras.layers.GlobalMaxPool2D(),
            tf.keras.layers.Dense(1, activation='sigmoid')
        ])

        y = inf.Normal(nn(tf.expand_dims(x, 1)), 0.001, name="y")
Example #27
def decoder(z, d0, dx):  # k -> d0 -> 2*dx
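    # note: k, loc_init and scale_init are assumed to be module-level constants (not defined in this snippet)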

    beta0 = inf.Normal(tf.ones([k, d0]) * loc_init, scale_init, name="beta0")
    alpha0 = inf.Normal(tf.ones([d0]) * loc_init, scale_init, name="alpha0")

    h0 = tf.nn.relu(z @ beta0 + alpha0, name="h0")

    ######

    beta1 = inf.Normal(tf.ones([d0, 2*dx]) * loc_init, scale_init, name="beta1")
    alpha1 = inf.Normal(tf.ones([2*dx]) * loc_init, scale_init, name="alpha1")

    output = h0 @ beta1 + alpha1  # second layer: [N, d0] @ [d0, 2*dx] + [2*dx] -> [N, 2*dx]

    return output
Example #28
File: 1.py Project: xcgfth/InferPy
def qmodel(k):
    with inf.datamodel():
        qz_loc = inf.Parameter(tf.ones([k]) * 0.5, name="qz_loc")
        qz_scale = tf.math.softplus(
            inf.Parameter(tf.ones([k]), name="qz_scale"))

        qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #29
def qmodel(k, d0, x, encoder):

    output = encoder(x, d0, k)
    qz_loc = output[:, :k]
    qz_scale = tf.nn.softmax(output[:, k:])

    qz = inf.Normal(qz_loc, qz_scale, name="z")
Example #30
def test_random_variable_in_datamodel():
    # test that a random variable created inside a datamodel with an explicit sample_shape raises a warning, and that the datamodel size is used instead
    with pytest.warns(UserWarning):
        with inf.datamodel(10):
            x = inf.Normal(0, 1, sample_shape=(2, ))

        assert x.sample_shape == 10
    # assert also that is_datamodel is true
    assert x.is_datamodel