Example no. 1
0
    def inference(self):
        """Build the stacked Conv2DLSTM inference graph.

        The raw input ``self.x`` is scaled by the learned pointwise weights
        ``self.pw``, batch-normalized, and squashed with ``2 * tanh``; the
        result is fed through a stack of dropout-wrapped Conv2DLSTM layers
        (first layer placed on ``self.devices[0]``, remaining layers on
        ``self.devices[-1]``). The last ``self.out_steps`` per-step outputs
        form the prediction.

        Returns:
            ``self.y_hat`` — predictions transposed to batch-major order
            via ``perm=[1, 0, 2, 3, 4]``.
        """

        def _conv_lstm_layer(steps, n_filters):
            """One layer: dropout-wrapped Conv2DLSTM + static RNN + scaled-tanh batch norm."""
            cell = rnn.Conv2DLSTMCell(
                input_shape=self.input_shape,
                output_channels=n_filters,
                kernel_shape=[3, 3],
                forget_bias=1.0,
                initializers=orthogonal_initializer(),
                name="conv_lstm_cell_{}".format(n_filters))
            cell = DropoutWrapper(cell,
                                  input_keep_prob=self.keep_rate,
                                  output_keep_prob=self.keep_rate,
                                  state_keep_prob=self.keep_rate)
            outputs, _ = tf.nn.static_rnn(cell, steps, dtype=tf.float32)
            outputs = self.batch_norm_layer(
                outputs,
                scope="activation_batch_norm_{}".format(n_filters),
                activation_fn=tf.nn.tanh)
            # Match the 2*tanh scaling used on the preprocessed input.
            return 2 * outputs

        with tf.device(self.devices[0]):
            x = self.x * self.pw
            x = self.batch_norm_layer(x, scope="pw_input")
            x = 2 * tf.tanh(x)
            # BUG FIX: unstack the preprocessed tensor `x`, not the raw
            # `self.x` — the original discarded the pointwise/batch-norm/tanh
            # pipeline computed just above.
            x = tf.unstack(x, self.in_steps, 1)
            outputs = _conv_lstm_layer(x, self.filters[0])
            x = tf.unstack(outputs, self.in_steps, 0)

        with tf.device(self.devices[-1]):
            for n_filters in self.filters[1:]:
                outputs = _conv_lstm_layer(x, n_filters)
                x = tf.unstack(outputs, self.in_steps, 0)

        # FIX: removed a stray `tf.Session(...)` that was created here,
        # never used, and never closed (resource leak).

        outputs = outputs[-self.out_steps:]
        self.y_hat = tf.transpose(outputs, perm=[1, 0, 2, 3, 4])
        return self.y_hat
Example no. 2
0
    def forward_pass(self, x):
        """Run `x` through the stacked Conv2DLSTM layers.

        The input is unstacked along the time axis, passed through one
        dropout-wrapped Conv2DLSTM layer per entry of ``self.filters``
        (batch-normalized between layers), and the last ``self.out_steps``
        per-step outputs are returned in batch-major order.
        """
        steps = tf.unstack(x, self.in_steps, 1)
        # Channel sizes per layer: input channels followed by each filter count.
        channels = [self.input_shape[-1]] + self.filters
        for layer_idx, (in_ch, out_ch) in enumerate(
                zip(channels[:-1], channels[1:]), start=1):
            cell = rnn.Conv2DLSTMCell(
                input_shape=self.input_shape[:-1] + [in_ch],
                output_channels=out_ch,
                kernel_shape=[3, 3],
                forget_bias=1.0,
                initializers=orthogonal_initializer(),
                name="conv_lstm_cell_{}".format(layer_idx))
            cell = DropoutWrapper(cell,
                                  input_keep_prob=self.keep_rate,
                                  output_keep_prob=self.keep_rate,
                                  state_keep_prob=self.keep_rate)
            outputs, _ = tf.nn.static_rnn(cell, steps, dtype=tf.float32)
            outputs = self._batch_norm(outputs)
            steps = tf.unstack(outputs, self.in_steps, 0)

        # Keep only the trailing out_steps predictions; move batch axis first.
        outputs = outputs[-self.out_steps:]
        return tf.transpose(outputs, perm=[1, 0, 2, 3, 4])
Example no. 3
0
    def conv2dlstm(self, x, h):
        """Apply a single Conv2DLSTM step to `x` with recurrent state `h`.

        Returns the ReLU-activated output (converted back to the caller's
        layout) together with the updated cell state.
        """
        nhwc = self.to_nhwc(x)  # NCHW conv not available for Conv2DLSTM

        # conv2dlstm operation
        cell = rnn.Conv2DLSTMCell(
            name='conv_2d_lstm',
            input_shape=[32, 32, 75],
            output_channels=96,
            kernel_shape=[3, 3],
        )  # different versions of TensorFlow have different default names
        # tf bug. does not take name we pass (arg is not forward to parent class)
        cell._base_name = 'conv_2d_lstm'

        out, h = cell.apply(nhwc, h)
        out = self.from_nhwc(tf.nn.relu(out))
        return out, h
Example no. 4
0
"""Toy MNIST graph: two conv+pool layers followed by two dense layers."""
import numpy as np
import tensorboard
import tensorflow as tf  # FIX: `tf` was used throughout but never imported
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

# Placeholders: 28x28 grayscale images and one-hot digit labels.
input_image = tf.placeholder(tf.float32, [None, 28, 28, 1])
Y = tf.placeholder(tf.float32, [None, 10])

# FIX: removed `conv1 = rnn.Conv2DLSTMCell()` — Conv2DLSTMCell requires
# input_shape / output_channels / kernel_shape, so the bare call raised a
# TypeError, and its result was immediately overwritten below anyway.
# Also removed the commented-out duplicate of the layer definitions.
conv1 = tf.layers.conv2d(inputs=input_image,
                         filters=32,
                         kernel_size=[5, 5],
                         padding='same',
                         activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[4, 4], strides=3)
conv2 = tf.layers.conv2d(inputs=pool1,
                         filters=64,
                         kernel_size=[3, 3],
                         padding='same',
                         activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[3, 3], strides=2)
dense1 = tf.layers.dense(inputs=tf.layers.flatten(pool2), units=256)
dense2 = tf.layers.dense(inputs=dense1, units=10)