Example #1
    def test_rnn_decorator(self):
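        # shapes suggest axis 1 (length 12) is the scanned time axis:
        # `X` is consumed one step at a time, `out` carries the previous step's output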
        @K.rnn_decorator(sequences='X', states='out')
        def rnn(X, out):
            return K.relu(X + out)

        y = rnn(K.ones(shape=(25, 12, 18, 8)), K.zeros(shape=(25, 18, 8)))
        f = K.function([], y)
        self.assertEqual(f()[0].shape, (25, 12, 18, 8))
Example #2
 def test_helper_ops_variables(self):
     X = K.placeholder(shape=(10, 20))
     f = N.Sequence([
         N.Dense(12),
         N.Dense(8),
         N.BatchNorm(),
         N.Dense(25, W_init=K.zeros(shape=(8, 25)))
     ])
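     # shape inference through the stack: (10, 20) -> (10, 12) -> (10, 8) -> (10, 25)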
     y = f(X)
     self.assertEqual(K.get_shape(y), (10, 25))
     self.assertEqual(len(f.variables), 10)
     self.assertEqual(len(f.parameters), 7)
     self.assertEqual(len(f.trainable_variables), 9)
Example #3
        outputs = outputs[0]
    return outputs


# ====== simulate data ====== #
def doit(_, x, y, z):
    z += K.sum(x + y) + K.sum(K.pow(_, 2))
    return z

sequences = [
    K.placeholder(shape=(600, None)),
    K.variable(np.arange(0, 1200).reshape(-1, 2)),
    K.variable(np.arange(1200, 2400).reshape(-1, 2))
]

outputs_info = K.zeros(shape=(1200,))

X = np.random.rand(600, 3000)
# ====== tf.scan ====== #
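# step arguments presumably follow Theano-scan ordering: one row from each entry
# of `sequences` (_, x, y) followed by the carried state z from `outputs_info`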
y = Scan2(doit,
          sequences=sequences,
          outputs_info=outputs_info,
          n_steps=None,
          backwards=True,
          name=None)
print('Scan:')
with utils.UnitTimer():
    f2 = K.function(sequences[0], y)
with utils.UnitTimer(12):
    for i in range(12):
        _ = f2(X)
Example #4
 def test_cudnn_rnn_nnet(self):
     if get_device() == 'cpu':
         return
     print()
     np.random.seed(1208)
     batch_size = 6
     hidden_size = 4
     X_linear = K.placeholder(shape=(None, 3, 8), name='X_linear')
     X_skip = K.placeholder(shape=(None, 3, hidden_size), name='X_skip')
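     # cuDNN input modes: 'linear' projects the 8-dim input to hidden_size inside
     # the RNN, while 'skip' expects inputs that already have hidden_size features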
     for direction_mode in ['bidirectional', 'unidirectional']:
         is_bidirectional = direction_mode == 'bidirectional'
         for nb_layers in [2]:
             real_layers = nb_layers * 2 if is_bidirectional else nb_layers
             for rnn_mode in ['gru', 'lstm', 'rnn_relu', 'rnn_tanh']:
                 for init_state, init_state_name in zip(
                     [
                         None,  # None init
                         K.init.uniform,  # function init
                         K.variable(
                             np.random.rand(real_layers, 1,
                                            hidden_size)),  # variable
                         K.variable(
                             np.random.rand(real_layers, batch_size,
                                            hidden_size)),  # variable
                         K.zeros(shape=(real_layers, 1, hidden_size)),
                         K.ones(shape=(real_layers, batch_size,
                                       hidden_size))
                     ],
                     [
                         'None', 'Function', 'Var1', 'VarB', 'Tensor1',
                         'TensorB'
                     ]):
                     for input_mode in ['linear', 'skip']:
                         if input_mode == 'linear':
                             X = X_linear
                             x = np.random.rand(batch_size, 3, 8)
                         else:
                             X = X_skip
                             x = np.random.rand(batch_size, 3, hidden_size)
                         start = timeit.default_timer()
                         f = N.CudnnRNN(num_units=hidden_size,
                                        rnn_mode=rnn_mode,
                                        input_mode=input_mode,
                                        num_layers=nb_layers,
                                        direction_mode=direction_mode,
                                        params_split=False,
                                        return_states=True)
                         # perform function
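                         # h0 and c0 share the same init here; c0 presumably
                         # only matters for the 'lstm' mode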
                         y = f(X, h0=init_state, c0=init_state)
                         f = K.function(X, y)
                         output = f(x)
                         benchmark = timeit.default_timer() - start
                         self.assertTrue([list(i.shape)
                                          for i in output] == [[
                                              batch_size if j is None else j
                                              for j in K.get_shape(i)
                                          ] for i in y])
                         print(
                             "*PASSED* [Layers]%s [Mode]%-8s [Input]%-6s [Direction]%-12s [State]%s [Benchmark]%.4f"
                             % (nb_layers, rnn_mode, input_mode,
                                direction_mode, init_state_name, benchmark))
Example #5
def convolutional_vae(X, saved_states, **kwargs):
    """ convolutional_vae

    Returns
    -------
    [y_encoder, y_decoder]

    States
    ------
    [f_inference (encoder), f_generative (decoder)]

    """
    n = kwargs.get('n', 10)
    batch_size = K.get_shape(X)[0]
    if batch_size is None:
        raise ValueError("You must specify batch_size dimension for the input placeholder.")
    # ====== init ====== #
    if saved_states is None:
        # Encoder
        f_inference = N.Sequence([
            N.Reshape(shape=(-1, 28, 28, 1)),
            N.Conv(num_filters=32, filter_size=3, strides=1, pad='valid',
                   b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.Conv(num_filters=64, filter_size=5, strides=2, pad='same',
                   b_init=init_ops.constant_initializer(0.), activation=K.elu),

            N.Dropout(level=0.1),
            N.Flatten(outdim=2),

            N.Dense(num_units=n * 2, b_init=None),
            N.BatchNorm(axes=0)
        ], debug=True, name='Encoder')
        # Decoder
        f_generative = N.Sequence([
            N.Dimshuffle(pattern=(0, 'x', 'x', 1)),
            N.TransposeConv(num_filters=64, filter_size=3, strides=1, pad='valid',
                            b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.TransposeConv(num_filters=32, filter_size=5, strides=2, pad='same',
                            b_init=init_ops.constant_initializer(0.), activation=K.elu),
            N.TransposeConv(num_filters=1, filter_size=13, strides=3, pad='valid',
                            b_init=None),
            N.BatchNorm(activation=K.linear),

            N.Flatten(outdim=3)
        ], debug=True, name="Decoder")
    else:
        f_inference, f_generative = saved_states
    # ====== Perform ====== #
    # Encoder
    y_encoder = f_inference(K.cast(X, 'float32'))
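    # split the 2n encoder outputs into the mean and (softplus) std of q(z|x)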
    mu = y_encoder[:, :n]
    sigma = K.softplus(y_encoder[:, n:])
    qz = Normal(mu=mu, sigma=sigma, name='Normal_qz')
    # Decoder
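    # standard-normal prior p(z); the generative net maps z samples to Bernoulli logits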
    z = Normal(mu=K.zeros(shape=(batch_size, n)),
               sigma=K.ones(shape=(batch_size, n)), name="Normal_pz")
    logits = f_generative(z)
    X_reconstruct = Bernoulli(logits=logits)
    # inference
    params = f_inference.parameters + f_generative.parameters
    inference = ed.KLqp(latent_vars={z: qz}, data={X_reconstruct: X})
    # ====== get cost for training ====== #
    # Bind p(x, z) and q(z | x) to the same placeholder for x.
    if K.is_training():
        import tensorflow as tf
        inference.initialize()
        if True:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            updates = optimizer.apply_gradients(
                optimizer.compute_gradients(inference.loss, var_list=params))
            init = tf.global_variables_initializer()
            init.run()
            f_train = K.function(X, inference.loss, updates)
        else:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            inference.initialize(optimizer=optimizer, var_list=params)
            init = tf.global_variables_initializer()
            init.run()
            f_train = lambda x: inference.update(feed_dict={X: x})['loss']
    samples = K.sigmoid(logits)
    return (samples, z, qz), (f_inference, f_generative)
Example #6
def convolutional_vae(X, saved_states, **kwargs):
    """ convolutional_vae

    Returns
    -------
    [y_encoder, y_decoder]

    States
    ------
    [f_inference (encoder), f_generative (decoder)]

    """
    n = kwargs.get('n', 10)
    batch_size = K.get_shape(X)[0]
    if batch_size is None:
        raise ValueError(
            "You must specify batch_size dimension for the input placeholder.")
    # ====== init ====== #
    if saved_states is None:
        # Encoder
        f_inference = N.Sequence([
            N.Reshape(shape=(-1, 28, 28, 1)),
            N.Conv(num_filters=32,
                   filter_size=3,
                   strides=1,
                   pad='valid',
                   b_init=init_ops.constant_initializer(0.),
                   activation=K.elu),
            N.Conv(num_filters=64,
                   filter_size=5,
                   strides=2,
                   pad='same',
                   b_init=init_ops.constant_initializer(0.),
                   activation=K.elu),
            N.Dropout(level=0.1),
            N.Flatten(outdim=2),
            N.Dense(num_units=n * 2, b_init=None),
            N.BatchNorm(axes=0)
        ],
                                 debug=True,
                                 name='Encoder')
        # Decoder
        f_generative = N.Sequence([
            N.Dimshuffle(pattern=(0, 'x', 'x', 1)),
            N.TransposeConv(num_filters=64,
                            filter_size=3,
                            strides=1,
                            pad='valid',
                            b_init=init_ops.constant_initializer(0.),
                            activation=K.elu),
            N.TransposeConv(num_filters=32,
                            filter_size=5,
                            strides=2,
                            pad='same',
                            b_init=init_ops.constant_initializer(0.),
                            activation=K.elu),
            N.TransposeConv(num_filters=1,
                            filter_size=13,
                            strides=3,
                            pad='valid',
                            b_init=None),
            N.BatchNorm(activation=K.linear),
            N.Flatten(outdim=3)
        ],
                                  debug=True,
                                  name="Decoder")
    else:
        f_inference, f_generative = saved_states
    # ====== Perform ====== #
    # Encoder
    y_encoder = f_inference(K.cast(X, 'float32'))
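    # split the 2n encoder outputs into the mean and (softplus) std of q(z|x)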
    mu = y_encoder[:, :n]
    sigma = K.softplus(y_encoder[:, n:])
    qz = Normal(mu=mu, sigma=sigma, name='Normal_qz')
    # Decoder
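    # standard-normal prior p(z); the generative net maps z samples to Bernoulli logits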
    z = Normal(mu=K.zeros(shape=(batch_size, n)),
               sigma=K.ones(shape=(batch_size, n)),
               name="Normal_pz")
    logits = f_generative(z)
    X_reconstruct = Bernoulli(logits=logits)
    # inference
    params = f_inference.parameters + f_generative.parameters
    inference = ed.KLqp(latent_vars={z: qz}, data={X_reconstruct: X})
    # ====== get cost for training ====== #
    # Bind p(x, z) and q(z | x) to the same placeholder for x.
    if K.is_training():
        import tensorflow as tf
        inference.initialize()
        if True:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            updates = optimizer.apply_gradients(
                optimizer.compute_gradients(inference.loss, var_list=params))
            init = tf.global_variables_initializer()
            init.run()
            f_train = K.function(X, inference.loss, updates)
        else:
            optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
            inference.initialize(optimizer=optimizer, var_list=params)
            init = tf.global_variables_initializer()
            init.run()
            f_train = lambda x: inference.update(feed_dict={X: x})['loss']
    samples = K.sigmoid(logits)
    return (samples, z, qz), (f_inference, f_generative)
Example #7
    return outputs


# ====== simulate data ====== #
def doit(_, x, y, z):
    z += K.sum(x + y) + K.sum(K.pow(_, 2))
    return z


sequences = [
    K.placeholder(shape=(600, None)),
    K.variable(np.arange(0, 1200).reshape(-1, 2)),
    K.variable(np.arange(1200, 2400).reshape(-1, 2))
]

outputs_info = K.zeros(shape=(1200, ))

X = np.random.rand(600, 3000)
# ====== tf.scan ====== #
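# step arguments presumably follow Theano-scan ordering: one row from each entry
# of `sequences` (_, x, y) followed by the carried state z from `outputs_info`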
y = Scan2(doit,
          sequences=sequences,
          outputs_info=outputs_info,
          n_steps=None,
          backwards=True,
          name=None)
print('Scan:')
with utils.UnitTimer():
    f2 = K.function(sequences[0], y)
with utils.UnitTimer(12):
    for i in range(12):
        _ = f2(X)