Example #1
    def test_basic_ops_value(self):
        np.random.seed(12082518)
        x = K.variable(np.random.randn(8, 8))
        y = K.variable(np.random.randn(8, 8))
        z = K.variable(np.random.randint(0, 2, size=(8, 8)), dtype=bool)
        w = K.variable(np.random.randint(0, 2, size=(8, 8)), dtype=bool)

        self.assertEqual(round(np.sum(K.eval(K.relu(x, alpha=0.12))) * 10000),
                         276733)
        self.assertEqual(round(np.sum(K.eval(K.elu(x, alpha=0.12))) * 10000),
                         289202)
        self.assertEqual(np.sum(K.eval(K.softmax(x))), 8.0)
        self.assertEqual(round(np.sum(K.eval(K.softplus(x))) * 10000), 554564)
        self.assertEqual(round(np.sum(K.eval(K.softsign(x))) * 100000), 211582)
        self.assertEqual(round(np.sum(K.eval(K.sigmoid(x))) * 10000), 330427)
        self.assertEqual(round(np.sum(K.eval(K.hard_sigmoid(x))) * 10000),
                         330836)
        self.assertEqual(round(np.sum(K.eval(K.tanh(x))) * 100000), 290165)
        self.assertEqual(round(np.sum(K.eval(K.square(x))) * 10000), 744492)
        self.assertEqual(round(np.sum(K.eval(K.sqrt(x))) * 10000), 300212)
        self.assertEqual(round(np.sum(K.eval(K.abs(x))) * 10000), 559979)
        self.assertEqual(np.sum(K.eval(K.sign(x))), 6.0)
        self.assertEqual(round(np.sum(K.eval(K.inv(x))) * 1000), 495838)
        self.assertEqual(round(np.sum(K.eval(K.exp(x))) * 1000), 122062)
        self.assertEqual(round(np.sum(K.eval(K.log(K.abs(x)))) * 10000),
                         -344491)
        self.assertEqual(np.sum(K.eval(K.round(x))), 5.0)
        self.assertEqual(round(np.sum(K.eval(K.pow(x, 8))) * 100), 398153)
        self.assertEqual(
            round(np.sum(K.eval(K.clip(x, -0.12, 0.12))) * 1000000), 620529)
        # TODO: pygpu (libgpuarray) does not support diag yet
        # self.assertEqual(round(np.sum(K.eval(K.diag(x))) * 100000), 325289)
        self.assertEqual(np.sum(K.eval(K.eye(12, 8))), 8.0)

        self.assertEqual(np.sum(K.eval(K.eq(z, w))), 38)
        self.assertEqual(np.sum(K.eval(K.neq(z, w))), 26)
        self.assertEqual(np.sum(K.eval(K.gt(x, y))), 33)
        self.assertEqual(np.sum(K.eval(K.ge(x, y))), 33)
        self.assertEqual(np.sum(K.eval(K.lt(x, y))), 31)
        self.assertEqual(np.sum(K.eval(K.le(x, y))), 31)
        self.assertEqual(round(np.sum(K.eval(K.switch(z, x, y))) * 100000),
                         139884)
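
Each assertion above pins the backend to a fixed numeric result: it evaluates an op on the seeded random inputs, sums the output, scales it, and rounds to an integer. A minimal NumPy-only sketch of the first check, assuming K.relu(x, alpha) is a leaky ReLU and that K.variable/K.eval return the array essentially unchanged (an illustration, not part of the original test):

import numpy as np

np.random.seed(12082518)
x = np.random.randn(8, 8)  # same draw as the x variable in the test

# Presumed meaning of K.relu(x, alpha=0.12): identity for positives,
# slope 0.12 for negatives (leaky ReLU).
leaky = np.where(x > 0, x, 0.12 * x)

# Scale and round so the comparison is exact to four decimal places.
checksum = round(np.sum(leaky) * 10000)
print(checksum)  # should reproduce the hard-coded 276733 if the assumptions hold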
Example #2
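The function below relies on names that the snippet never defines (K, N, init_ops, Normal, Bernoulli, ed). A plausible preamble, assuming the Edward probabilistic-programming library and an odin-ai style backend (the odin import paths are an assumption, not confirmed by the source):

import edward as ed                            # provides the KLqp inference used below
from edward.models import Bernoulli, Normal    # likelihood and latent distributions
from tensorflow.python.ops import init_ops     # constant_initializer for the conv biases
# Assumed odin-ai aliases for the backend (K) and layer (N) namespaces:
from odin import backend as K
from odin import nnet as N
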
def convolutional_vae(X, saved_states, **kwargs):
    """ convolutional_vae

    Returns
    -------
    (samples, z, qz), (f_inference, f_generative)

    States
    ------
    [f_inference (encoder), f_generative (decoder)]

    """
    n = kwargs.get('n', 10)
    batch_size = K.get_shape(X)[0]
    if batch_size is None:
        raise ValueError("You must specify batch_size dimension for the input placeholder.")
    # ====== init ====== #
    if saved_states is None:
        # Encoder
        f_inference = N.Sequence([
            N.Reshape(shape=(-1, 28, 28, 1)),
            N.Conv(num_filters=32, filter_size=3, strides=1, pad='valid',
                   b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.Conv(num_filters=64, filter_size=5, strides=2, pad='same',
                   b_init=init_ops.constant_initializer(0.), activation=K.elu),

            N.Dropout(level=0.1),
            N.Flatten(outdim=2),

            N.Dense(num_units=n * 2, b_init=None),
            N.BatchNorm(axes=0)
        ], debug=True, name='Encoder')
        # Decoder
        f_generative = N.Sequence([
            N.Dimshuffle(pattern=(0, 'x', 'x', 1)),
            N.TransposeConv(num_filters=64, filter_size=3, strides=1, pad='valid',
                            b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.TransposeConv(num_filters=32, filter_size=5, strides=2, pad='same',
                            b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.TransposeConv(num_filters=1, filter_size=13, strides=3, pad='valid',
                            b_init=None),
            N.BatchNorm(activation=K.linear),

            N.Flatten(outdim=3)
        ], debug=True, name="Decoder")
    else:
        f_inference, f_generative = saved_states
    # ====== Perform ====== #
    # Encoder
    y_encoder = f_inference(K.cast(X, 'float32'))
    mu = y_encoder[:, :n]
    sigma = K.softplus(y_encoder[:, n:])
    qz = Normal(mu=mu, sigma=sigma, name='Normal_qz')
    # Decoder
    z = Normal(mu=K.zeros(shape=(batch_size, n)),
               sigma=K.ones(shape=(batch_size, n)), name="Normal_pz")
    logits = f_generative(z)
    X_reconstruct = Bernoulli(logits=logits)
    # inference
    params = f_inference.parameters + f_generative.parameters
    inference = ed.KLqp(latent_vars={z: qz}, data={X_reconstruct: X})
    # ====== get cost for training ====== #
    # Bind p(x, z) and q(z | x) to the same placeholder for x.
    if K.is_training():
        import tensorflow as tf
        inference.initialize()
        # Manual gradient updates; the else branch below (handing the
        # optimizer to Edward) is an unreachable alternative kept for reference.
        if True:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            updates = optimizer.apply_gradients(
                optimizer.compute_gradients(inference.loss, var_list=params))
            init = tf.global_variables_initializer()
            init.run()
            f_train = K.function(X, inference.loss, updates)
        else:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            inference.initialize(optimizer=optimizer, var_list=params)
            init = tf.global_variables_initializer()
            init.run()
            f_train = lambda x: inference.update(feed_dict={X: x})['loss']
    samples = K.sigmoid(logits)
    return (samples, z, qz), (f_inference, f_generative)
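
For reference, ed.KLqp fits qz to the latent z by maximizing the evidence lower bound (ELBO). A minimal NumPy sketch of that objective under the model built above, with a diagonal-Gaussian q(z|x), a standard-normal prior, and a Bernoulli decoder; decode_logits is a hypothetical stand-in for f_generative, and this is an illustration rather than Edward's actual implementation:

import numpy as np

def elbo_sketch(x, mu, sigma, decode_logits, rng=np.random):
    # Reparameterized sample from q(z|x) = N(mu, diag(sigma^2)).
    eps = rng.standard_normal(mu.shape)
    z = mu + sigma * eps
    # Bernoulli log-likelihood log p(x|z) from decoder logits:
    # sum over pixels of x * logit - log(1 + exp(logit)).
    logits = decode_logits(z)
    log_px_z = np.sum(x * logits - np.logaddexp(0.0, logits))
    # Analytic KL(N(mu, sigma^2) || N(0, 1)) for a diagonal Gaussian.
    kl = 0.5 * np.sum(mu ** 2 + sigma ** 2 - 2.0 * np.log(sigma) - 1.0)
    # KLqp maximizes the ELBO, i.e. minimizes -(log_px_z - kl).
    return log_px_z - kl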