Code example #1
def shallow_net_5x5(x):
    net = L.conv(x, name="conv_sn5x5_1", kh=5, kw=5, n_out=24)
    net = L.pool(net, name="pool_sn5x5_1", kh=2, kw=2, dw=2, dh=2)
    net = L.conv(net, name="conv_sn5x5_2", kw=3, kh=3, n_out=48)
    net = L.pool(net, name="pool_sn5x5_2", kh=2, kw=2, dw=2, dh=2)
    net = L.conv(net, name="conv_sn5x5_3", kw=3, kh=3, n_out=24)
    net = L.conv(net, name="conv_sn5x5_4", kw=3, kh=3, n_out=12)
    return net
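
The `L` module these examples rely on is not included. Below is a minimal sketch of what `L.conv` and `L.pool` plausibly look like in the TF 1.x API; the keyword names `kh`, `kw`, `dh`, `dw`, `n_out` come from the call sites, while the initializers, the ReLU default, and SAME padding are assumptions:

import tensorflow as tf  # TF 1.x API, as used by these examples


def conv(x, name, kh, kw, n_out, dh=1, dw=1):
    # SAME-padded 2-D convolution with bias and ReLU
    n_in = x.get_shape()[-1].value
    with tf.variable_scope(name):
        w = tf.get_variable("weights", [kh, kw, n_in, n_out],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable("bias", [n_out],
                            initializer=tf.constant_initializer(0.0))
        y = tf.nn.conv2d(x, w, strides=[1, dh, dw, 1], padding="SAME")
        return tf.nn.relu(tf.nn.bias_add(y, b))


def pool(x, name, kh, kw, dh, dw):
    # SAME-padded max pooling
    return tf.nn.max_pool(x, ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding="SAME", name=name)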
Code example #2
def shallow_net_7x7(x):
    net = L.conv(x, name="conv_sn7x7_1", kh=7, kw=7, n_out=20)
    net = L.pool(net, name="pool_sn7x7_1", kh=2, kw=2, dw=2, dh=2)
    net = L.conv(net, name="conv_sn7x7_2", kw=5, kh=5, n_out=40)
    net = L.pool(net, name="pool_sn7x7_2", kh=2, kw=2, dw=2, dh=2)
    net = L.conv(net, name="conv_sn7x7_3", kw=5, kh=5, n_out=20)
    net = L.conv(net, name="conv_sn7x7_4", kw=5, kh=5, n_out=10)
    return net
Code example #3
def shallow_net_9x9(x):
    net = L.conv(x, name="conv_sn9x9_1", kh=9, kw=9, n_out=16)
    net = L.pool(net, name="pool_sn9x9_1", kh=2, kw=2, dw=2, dh=2)
    net = L.conv(net, name="conv_sn9x9_2", kw=7, kh=7, n_out=32)
    net = L.pool(net, name="pool_sn9x9_2", kh=2, kw=2, dw=2, dh=2)
    net = L.conv(net, name="conv_sn9x9_3", kw=7, kh=7, n_out=16)
    net = L.conv(net, name="conv_sn9x9_4", kw=7, kh=7, n_out=8)
    return net
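
All three variants share the same shape: a large-kernel stem conv, two 2x2/stride-2 pools, and a pair of trailing convs that halve the channel count twice; the stem kernel grows from 5x5 to 9x9 while the channel counts shrink. A usage sketch (the input shape is an assumption):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 96, 96, 3], name="input")
features = shallow_net_5x5(x)  # each stride-2 pool halves H and W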
Code example #4
def intfVGG_use_layer(input_tensor,
                      n_classes=1000,
                      rgb_mean=None,
                      training=True):
    # assuming 224x224x3 input_tensor
    # define image mean
    if rgb_mean is None:
        rgb_mean = np.array([116.779, 123.68, 103.939], dtype=np.float32)
    mu = tf.constant(rgb_mean, name="rgb_mean")
    keep_prob = 0.5 if training else 1.0  # only drop units while training

    # subtract image mean
    net = tf.subtract(input_tensor, mu, name="input_mean_centered")

    # block 1 -- outputs 112x112x64
    net = L.conv(net, name="conv1_1", kh=3, kw=3, n_out=64)
    net = L.conv(net, name="conv1_2", kh=3, kw=3, n_out=64)
    net = L.pool(net, name="pool1", kh=2, kw=2, dw=2, dh=2)

    # block 2 -- outputs 56x56x128
    net = L.conv(net, name="conv2_1", kh=3, kw=3, n_out=128)
    net = L.conv(net, name="conv2_2", kh=3, kw=3, n_out=128)
    net = L.pool(net, name="pool2", kh=2, kw=2, dh=2, dw=2)

    # # block 3 -- outputs 28x28x256
    net = L.conv(net, name="conv3_1", kh=3, kw=3, n_out=256)
    net = L.conv(net, name="conv3_2", kh=3, kw=3, n_out=256)
    net = L.pool(net, name="pool3", kh=2, kw=2, dh=2, dw=2)

    # block 4 -- outputs 14x14x512
    net = L.conv(net, name="conv4_1", kh=3, kw=3, n_out=512)
    net = L.conv(net, name="conv4_2", kh=3, kw=3, n_out=512)
    net = L.conv(net, name="conv4_3", kh=3, kw=3, n_out=512)
    net = L.pool(net, name="pool4", kh=2, kw=2, dh=2, dw=2)

    # block 5 -- outputs 7x7x512
    net = L.conv(net, name="conv5_1", kh=3, kw=3, n_out=512)
    net = L.conv(net, name="conv5_2", kh=3, kw=3, n_out=512)
    net = L.conv(net, name="conv5_3", kh=3, kw=3, n_out=512)
    net = L.pool(net, name="pool5", kh=2, kw=2, dw=2, dh=2)

    # flatten
    flattened_shape = np.prod([s.value for s in net.get_shape()[1:]])
    net = tf.reshape(net, [-1, flattened_shape], name="flatten")

    # fully connected
    net = L.fully_connected(net, name="fc6", n_out=4096)
    net = tf.nn.dropout(net, keep_prob)
    net = L.fully_connected(net, name="fc7", n_out=4096)
    net = tf.nn.dropout(net, keep_prob)
    net = L.fully_connected(net, name="fc8", n_out=n_classes)
    return net
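
A usage sketch for the function above (shapes follow the 224x224x3 comment; the placeholder name is illustrative):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3], name="images")
logits = intfVGG_use_layer(images, n_classes=1000, training=False)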
Code example #5
def BKStart(x, reuse):
    with tf.variable_scope('BKS', reuse=reuse):

        n = "BKStart_"
        x = conv_layer(x, 1, 32, 5, n + "conv_1", 1, pad='SAME')
        x = pool(x, 3, 2, name=n + "max_pool_1", pad='SAME', pool='max')
        x = conv_layer(x, 32, 32, 4, n + "conv_2", 1, pad='SAME')
        x = pool(x, 3, 2, n + "avg_pool_1", pool='avg')
        x = conv_layer(x, 32, 64, 5, n + "conv_3", 1, pad='SAME')
        x = pool(x, 3, 2, n + "avg_pool_2", pool='avg')
        flattened_shape = np.prod([s.value for s in x.get_shape()[1:]])
        x = tf.reshape(x, [-1, flattened_shape], name=n + 'flatten')
        x = fc_layer(x, 2048, activation='Relu', name=n + 'FC_1')
        #x=dropout_layer(x,keep_prob)
        logits = fc_layer(x, 7, activation='None', name=n + 'FC_2')
    return logits
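
The `conv_layer`, `pool`, and `fc_layer` helpers used by this example and by BKVGG8 (example #7 below) are not defined here. A sketch consistent with those call sites follows; the argument order is inferred from the calls, while the 'VALID' pooling default and the initializers are assumptions:

import tensorflow as tf


def conv_layer(x, in_ch, out_ch, fsize, name, stride, pad='SAME'):
    # fsize x fsize convolution with bias and ReLU
    with tf.variable_scope(name):
        w = tf.get_variable("w", [fsize, fsize, in_ch, out_ch],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable("b", [out_ch],
                            initializer=tf.constant_initializer(0.0))
        y = tf.nn.conv2d(x, w, [1, stride, stride, 1], pad)
        return tf.nn.relu(tf.nn.bias_add(y, b))


def pool(x, fsize, stride, name, pad='VALID', pool='max'):
    # max or average pooling over fsize x fsize windows
    op = tf.nn.max_pool if pool == 'max' else tf.nn.avg_pool
    return op(x, [1, fsize, fsize, 1], [1, stride, stride, 1], pad, name=name)


def fc_layer(x, n_out, activation='Relu', name=None):
    # fully connected layer; linear when activation is 'None'
    n_in = x.get_shape()[-1].value
    with tf.variable_scope(name):
        w = tf.get_variable("w", [n_in, n_out],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable("b", [n_out],
                            initializer=tf.constant_initializer(0.0))
        y = tf.nn.xw_plus_b(x, w, b)
        return tf.nn.relu(y) if activation == 'Relu' else y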
Code example #6
def VGG16(x, n_classes, keep_prob):
    with tf.name_scope('VGG16'):
        # Group 1
        x = layers.conv('conv1_1', x, 64, [3, 3], [1, 1, 1, 1])
        x = layers.conv('conv1_2', x, 64, [3, 3], [1, 1, 1, 1])
        with tf.name_scope('pool1'):
            x = layers.pool('pool1', x, [1, 2, 2, 1], [1, 2, 2, 1])

        # Group 2
        x = layers.conv('conv2_1', x, 128, [3, 3], [1, 1, 1, 1])
        x = layers.conv('conv2_2', x, 128, [3, 3], [1, 1, 1, 1])
        with tf.name_scope('pool2'):
            x = layers.pool('pool2', x, [1, 2, 2, 1], [1, 2, 2, 1])

        # Group 3
        x = layers.conv('conv3_1', x, 256, [3, 3], [1, 1, 1, 1])
        x = layers.conv('conv3_2', x, 256, [3, 3], [1, 1, 1, 1])
        x = layers.conv('conv3_3', x, 256, [3, 3], [1, 1, 1, 1])
        with tf.name_scope('pool3'):
            x = layers.pool('pool3', x, [1, 2, 2, 1], [1, 2, 2, 1])

        # Group 4
        x = layers.conv('conv4_1', x, 512, [3, 3], [1, 1, 1, 1])
        x = layers.conv('conv4_2', x, 512, [3, 3], [1, 1, 1, 1])
        x = layers.conv('conv4_3', x, 512, [3, 3], [1, 1, 1, 1])
        with tf.name_scope('pool4'):
            x = layers.pool('pool4', x, [1, 2, 2, 1], [1, 2, 2, 1])

        # Group 5
        x = layers.conv('conv5_1', x, 512, [3, 3], [1, 1, 1, 1])
        x = layers.conv('conv5_2', x, 512, [3, 3], [1, 1, 1, 1])
        x = layers.conv('conv5_3', x, 512, [3, 3], [1, 1, 1, 1])
        with tf.name_scope('pool5'):
            x = layers.pool('pool5', x, [1, 2, 2, 1], [1, 2, 2, 1])

        x = layers.fc_layer('fc6', x, 4096)
        x = layers.dropout('drop6', x, keep_prob)

        x = layers.fc_layer('fc7', x, 4096)
        x = layers.dropout('drop7', x, keep_prob)

        x = layers.fc_layer('fc8', x, n_classes)

        return x
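
A usage sketch for this builder (shapes and names are illustrative):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
keep_prob = tf.placeholder(tf.float32)  # feed 0.5 for training, 1.0 for eval
logits = VGG16(images, n_classes=1000, keep_prob=keep_prob)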
Code example #7
def BKVGG8(x, keep_prob):

    n = "BKVGG8_"
    x = conv_layer(x, 1, 32, 3, n + "conv_1", 1, pad='SAME')
    x = pool(x, 2, 2, name=n + "max_pool_1", pad='SAME', pool='max')
    x = conv_layer(x, 32, 64, 3, n + "conv_2", 1, pad='SAME')
    x = pool(x, 2, 2, n + "max_pool_2", pool='max')
    x = conv_layer(x, 64, 128, 3, n + "conv_3", 1, pad='SAME')
    x = pool(x, 2, 2, n + "max_pool_3", pool='max')
    x = conv_layer(x, 128, 256, 3, n + "conv_4", 1, pad='SAME')
    x = conv_layer(x, 256, 256, 3, n + "conv_5", 1, pad='SAME')
    flattened_shape = np.prod([s.value for s in x.get_shape()[1:]])
    x = tf.reshape(x, [-1, flattened_shape], name=n + 'flatten')
    x = fc_layer(x, 256, activation='Relu', name=n + 'FC_1')
    x = dropout_layer(x, keep_prob)
    x = fc_layer(x, 256, activation='Relu', name=n + 'FC_2')
    x = dropout_layer(x, keep_prob)
    logits = fc_layer(x, 7, activation='None', name=n + 'FC_3')
    return logits
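
Like BKStart, this network ends in 7 logits. A usage sketch (the single-channel input follows the in_channels=1 of conv_1; the 48x48 resolution is an assumption):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 48, 48, 1])
keep_prob = tf.placeholder(tf.float32)
logits = BKVGG8(x, keep_prob)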
Code example #8
def inference(input_tensor,
              n_classes,
              train=True,
              regularizer=None,
              evaluate=False):
    with tf.name_scope('cnn'):
        input_tensor = layers.conv('conv1',
                                   input_tensor,
                                   out_channels=32,
                                   kernel_size=[2, 2],
                                   strides=[1, 2, 2, 1],
                                   train=train)
        input_tensor = layers.pool('pool1',
                                   input_tensor,
                                   kernel_size=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   by_max=True)

        input_tensor = layers.conv('conv2',
                                   input_tensor,
                                   out_channels=64,
                                   kernel_size=[2, 2],
                                   strides=[1, 1, 1, 1],
                                   train=train)
        input_tensor = layers.batch_norm(input_tensor)
        input_tensor = layers.pool('pool2',
                                   input_tensor,
                                   kernel_size=[1, 2, 2, 1],
                                   strides=[1, 1, 1, 1],
                                   by_max=True)

        input_tensor = layers.full_connect('fc1',
                                           input_tensor,
                                           out_nodes=512,
                                           regularizer=regularizer)
        output_tensor = layers.full_connect_not_relu('fc2',
                                                     input_tensor,
                                                     out_nodes=n_classes,
                                                     regularizer=regularizer)

        return output_tensor
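
`layers.full_connect` receives the 4-D pooled tensor directly, so it presumably flattens internally; `full_connect_not_relu` would be the same minus the final ReLU. A sketch under those assumptions (variable names and initializers are guesses):

import numpy as np
import tensorflow as tf


def full_connect(name, x, out_nodes, regularizer=None):
    # flatten trailing dimensions, then apply W x + b followed by ReLU
    n_in = int(np.prod(x.get_shape().as_list()[1:]))
    x = tf.reshape(x, [-1, n_in])
    with tf.variable_scope(name):
        w = tf.get_variable("weights", [n_in, out_nodes],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection("losses", regularizer(w))
        b = tf.get_variable("biases", [out_nodes],
                            initializer=tf.constant_initializer(0.1))
        return tf.nn.relu(tf.nn.xw_plus_b(x, w, b))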
Code example #9
def generator(inp_z, inp_y, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        inp = tf.concat([inp_z, inp_y], 1)
        sz = 4
        g1 = linear(inp, 512 * sz * sz, name=gname + 'deconv1')
        g1 = batchnorm(g1, is_training=tf.constant(True), name=gname + 'bn1g')
        g1 = lrelu(g1, 0.2)
        g1_reshaped = tf.reshape(g1, [-1, 512, sz, sz])
        print('genreshape: ' + str(g1_reshaped.get_shape().as_list()))

        g2 = nnupsampling(g1_reshaped, [8, 8])
        g2 = conv2d(g2, nout=512, kernel=3, name=gname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=gname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=256, kernel=3, name=gname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=gname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g4 = nnupsampling(g3, [32, 32])
        g4 = conv2d(g4, nout=128, kernel=3, name=gname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=gname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g4b = conv2d(g4, nout=128, kernel=3, name=gname + 'deconv4b')
        g4b = batchnorm(g4b,
                        is_training=tf.constant(True),
                        name=gname + 'bn4bg')
        g4b = lrelu(g4b, 0.2)

        g5 = nnupsampling(g4b, [64, 64])
        g5 = conv2d(g5, nout=64, kernel=3, name=gname + 'deconv5')
        g5 = batchnorm(g5, is_training=tf.constant(True), name=gname + 'bn5g')
        g5 = lrelu(g5, 0.2)

        g5b = conv2d(g5, nout=64, kernel=3, name=gname + 'deconv5b')
        g5b = batchnorm(g5b,
                        is_training=tf.constant(True),
                        name=gname + 'bn5bg')
        g5b = lrelu(g5b, 0.2)

        g6 = nnupsampling(g5b, [128, 128])
        g6 = conv2d(g6, nout=32, kernel=3, name=gname + 'deconv6')
        g6 = batchnorm(g6, is_training=tf.constant(True), name=gname + 'bn6g')
        g6 = lrelu(g6, 0.2)

        g6b = conv2d(g6, nout=3, kernel=3, name=gname + 'deconv6b')
        g6b = tf.nn.tanh(g6b)
        g6b_64 = pool(g6b, fsize=3, strides=2, op='avg')
        return g6b_64, g6b
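
`gname` is a module-level name prefix defined outside this snippet, and `lrelu` is the standard leaky ReLU, which can be sketched as:

import tensorflow as tf


def lrelu(x, leak=0.2, name="lrelu"):
    # leaky ReLU: x for positive inputs, leak * x otherwise (0 < leak < 1)
    return tf.maximum(x, leak * x, name=name)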
Code example #10
def stage_1(x, training):
    """The first stage of the network
        Args:
            x: the input batch
            training: whether we are in the process of training or inference
        Returns:
            The output of the first stage
    """
    x = layers.conv(x, "conv1", 2, 3, 24, 3, training)
    x = layers.pool(x, "pool1", 2, 3)
    x = layers.down_sample_unit(x, "ds1", 24, training)
    x = layers.shuffle_net_unit(x, "sn1_1", 48, training)
    x = layers.shuffle_net_unit(x, "sn1_2", 48, training)
    stage_1_output = layers.shuffle_net_unit(x, "sn1_3", 48, training)

    return stage_1_output
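
`layers.shuffle_net_unit` is not shown. The defining operation of a ShuffleNet unit is the channel shuffle, which can be sketched as follows (a generic NHWC version, not necessarily this project's implementation):

import tensorflow as tf


def channel_shuffle(x, groups):
    # interleave channels across groups so information can flow between
    # the grouped convolutions of consecutive ShuffleNet units
    _, h, w, c = x.get_shape().as_list()
    x = tf.reshape(x, [-1, h, w, groups, c // groups])
    x = tf.transpose(x, [0, 1, 2, 4, 3])
    return tf.reshape(x, [-1, h, w, c])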
Code example #11
File: CIFAR64GANAEtrick.py / Project: willtwr/ArtGAN
def generator(inp_z, inp_y):
    with tf.variable_scope('Generator'):
        inp = tf.concat([inp_z, inp_y], 1)

        g1 = linear(inp, 512 * 4 * 4, name=gname + 'deconv1')
        g1 = batchnorm(g1, is_training=tf.constant(True), name=gname + 'bn1g')
        g1 = lrelu(g1, 0.2)
        g1_reshaped = tf.reshape(g1, [-1, 512, 4, 4])
        print('genreshape: ' + str(g1_reshaped.get_shape().as_list()))

        g2 = nnupsampling(g1_reshaped, [8, 8])
        g2 = conv2d(g2, nout=256, kernel=3, name=gname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=gname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=128, kernel=3, name=gname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=gname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g3b = conv2d(g3, nout=128, kernel=3, name=gname + 'deconv3b')
        g3b = batchnorm(g3b,
                        is_training=tf.constant(True),
                        name=gname + 'bn3bg')
        g3b = lrelu(g3b, 0.2)

        g4 = nnupsampling(g3b, [32, 32])
        g4 = conv2d(g4, nout=64, kernel=3, name=gname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=gname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g4b = conv2d(g4, nout=64, kernel=3, name=gname + 'deconv4b')
        g4b = batchnorm(g4b,
                        is_training=tf.constant(True),
                        name=gname + 'bn4bg')
        g4b = lrelu(g4b, 0.2)

        g5 = nnupsampling(g4b, [64, 64])
        g5 = conv2d(g5, nout=32, kernel=3, name=gname + 'deconv5')
        g5 = batchnorm(g5, is_training=tf.constant(True), name=gname + 'bn5g')
        g5 = lrelu(g5, 0.2)

        g5b = conv2d(g5, nout=3, kernel=3, name=gname + 'deconv5b')
        g5b = tf.nn.tanh(g5b)
        g5b_32 = pool(g5b, fsize=3, strides=2, op='avg', pad='SAME')
        return g5b_32, g5b
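
As in example #9, `nnupsampling` grows the spatial resolution by nearest-neighbour interpolation before each 3x3 conv. A plausible sketch, assuming NHWC layout (the snippets reshape to channels-first at the start, so the real helper may transpose around a different layout):

import tensorflow as tf


def nnupsampling(x, size):
    # nearest-neighbour upsampling to the target [height, width]
    return tf.image.resize_nearest_neighbor(x, size)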
Code example #12
    def _build_network(self, inputs: tf.Tensor, sparse_layers: list,
                       activation_fn: callable) -> tf.Tensor:

        with tf.variable_scope('network'):

            net = inputs

            self.weight_tensors = []

            bias_initializer = tf.constant_initializer(0.1)

            # block 1 -- outputs 112x112x64
            net, w, b = L.sparse_conv(net, sparse_layers[0], name="conv1_1")
            self.weight_tensors.append(w)
            net, w, b = L.sparse_conv(net, sparse_layers[1], name="conv1_2")
            self.weight_tensors.append(w)
            net = L.pool(net, name="pool1", kh=2, kw=2, dw=2, dh=2)

            # block 2 -- outputs 56x56x128
            net, w, b = L.sparse_conv(net, sparse_layers[2], name="conv2_1")
            self.weight_tensors.append(w)
            net, w, b = L.sparse_conv(net, sparse_layers[3], name="conv2_2")
            self.weight_tensors.append(w)
            net = L.pool(net, name="pool2", kh=2, kw=2, dh=2, dw=2)

            # # block 3 -- outputs 28x28x256
            net, w, b = L.sparse_conv(net, sparse_layers[4], name="conv3_1")
            self.weight_tensors.append(w)
            net, w, b = L.sparse_conv(net, sparse_layers[5], name="conv3_2")
            self.weight_tensors.append(w)
            net = L.pool(net, name="pool3", kh=2, kw=2, dh=2, dw=2)

            # block 4 -- outputs 14x14x512
            net, w, b = L.sparse_conv(net, sparse_layers[6], name="conv4_1")
            self.weight_tensors.append(w)
            net, w, b = L.sparse_conv(net, sparse_layers[7], name="conv4_2")
            self.weight_tensors.append(w)
            net, w, b = L.sparse_conv(net, sparse_layers[8], name="conv4_3")
            self.weight_tensors.append(w)
            net = L.pool(net, name="pool4", kh=2, kw=2, dh=2, dw=2)

            # block 5 -- outputs 7x7x512
            net, w, b = L.sparse_conv(net, sparse_layers[9], name="conv5_1")
            self.weight_tensors.append(w)
            net, w, b = L.sparse_conv(net, sparse_layers[10], name="conv5_2")
            self.weight_tensors.append(w)
            net, w, b = L.sparse_conv(net, sparse_layers[11], name="conv5_3")
            self.weight_tensors.append(w)
            net = L.pool(net, name="pool5", kh=2, kw=2, dw=2, dh=2)

            # flatten
            flattened_shape = np.prod([s.value for s in net.get_shape()[1:]])
            net = tf.reshape(net, [-1, flattened_shape], name="flatten")

            # fully connected
            net, w, b = L.sparse_fully_connected(net,
                                                 sparse_layers[12],
                                                 name="fc6")
            self.weight_tensors.append(w)
            net, w, b = L.sparse_fully_connected(net,
                                                 sparse_layers[13],
                                                 name="fc7")
            self.weight_tensors.append(w)
            net, w, b = L.sparse_fully_connected(net,
                                                 sparse_layers[14],
                                                 name="fc8_2",
                                                 activation_fn=None)
            self.weight_tensors.append(w)

            return net
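
Counting the call sites, `sparse_layers` must carry 15 entries: indices 0-11 for the twelve convolutions (this VGG variant keeps only two convs in block 3) and 12-14 for fc6, fc7, and fc8_2. A guard at the top of the method would make that contract explicit, e.g.:

assert len(sparse_layers) == 15  # 12 sparse convs + 3 sparse FC layers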
Code example #13
File: vgg.py / Project: cwh1981/pixelrnn
def build(input_tensor, n_classes=1000, rgb_mean=None, training=True):
    # assuming 224x224x3 input_tensor

    # define image mean
    if rgb_mean is None:
        rgb_mean = np.array([116.779, 123.68, 103.939], dtype=np.float32)
    mu = tf.constant(rgb_mean, name="rgb_mean")
    keep_prob = 0.5

    # subtract image mean
    input_mean_centered = tf.subtract(input_tensor,
                                      mu,
                                      name="input_mean_centered")

    # block 1 -- outputs 112x112x64
    conv1_1 = L.conv(input_mean_centered, name="conv1_1", kh=3, kw=3, n_out=64)
    conv1_2 = L.conv(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64)
    pool1 = L.pool(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)

    # block 2 -- outputs 56x56x128
    conv2_1 = L.conv(pool1, name="conv2_1", kh=3, kw=3, n_out=128)
    conv2_2 = L.conv(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128)
    pool2 = L.pool(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)

    # # block 3 -- outputs 28x28x256
    conv3_1 = L.conv(pool2, name="conv3_1", kh=3, kw=3, n_out=256)
    conv3_2 = L.conv(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256)
    pool3 = L.pool(conv3_2, name="pool3", kh=2, kw=2, dh=2, dw=2)

    # block 4 -- outputs 14x14x512
    conv4_1 = L.conv(pool3, name="conv4_1", kh=3, kw=3, n_out=512)
    conv4_2 = L.conv(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512)
    conv4_3 = L.conv(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512)
    pool4 = L.pool(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)

    # block 5 -- outputs 7x7x512
    conv5_1 = L.conv(pool4, name="conv5_1", kh=3, kw=3, n_out=512)
    conv5_2 = L.conv(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512)
    conv5_3 = L.conv(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512)
    # pool5 = L.pool(conv5_3, name="pool5", kh=2, kw=2, dw=2, dh=2)

    pool2_max = L.pool(pool2, name="pool2_max", kh=2, kw=2, dw=2, dh=2)
    pool2_max_conv = L.conv(pool2_max,
                            name="pool2_max_conv",
                            kh=3,
                            kw=3,
                            n_out=512)

    pool3_conv = L.conv(pool3, name="pool3_conv", kh=3, kw=3, n_out=512)

    conv4_3_conv = L.conv(conv4_3, name="conv4_3_conv", kh=3, kw=3, n_out=512)

    conv5_3_conv = L.conv(conv5_3, name="conv5_3_conv", kh=3, kw=3, n_out=512)
    conv5_3_conv_upsampling = tf.image.resize_images(
        conv5_3_conv, [28, 28],
        method=tf.image.ResizeMethod.BILINEAR,
        align_corners=False)

    concatTensor = tf.concat([pool2_max_conv, pool3_conv], 0)
    concatTensor = tf.concat([concatTensor, conv4_3_conv], 0)
    concatTensor = tf.concat([concatTensor, conv5_3_conv_upsampling], 0)
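    # NOTE: these concats join along axis 0, the batch axis; fusing the
    # equally sized 28x28x512 feature maps would more conventionally use
    # axis 3 (channels).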
    concatTensor_conv = L.conv(concatTensor,
                               name="concatTensor_conv",
                               kh=3,
                               kw=3,
                               n_out=128)

    hidden_layer_size = 30
    input_size = 8
    target_size = 10
    rnn = LSTM_cell(input_size, hidden_layer_size, target_size)
    # scope = "conv_inputs"
    # logger.info("Building %s" % scope)
    # Getting all outputs from rnn
    outputs = rnn.get_outputs()

    # Getting final output through indexing after reversing
    last_output = outputs[-1]

    # main reccurent layers
    # l_hid = concatTensor_conv
    # for idx in range(conf.recurrent_length):
    #     scope = 'LSTM%d' % idx
    #     l[scope] = l_hid = diagonal_bilstm(l_hid, conf, scope=scope)

    # cell = tf.contrib.rnn.Conv2DLSTMCell(input_shape=[28, 28],
    #                                      # size of input feeding into network (needed for the zero state)
    #                                      kernel_shape=[3, 3],  # for a 3 by 3 conv
    #                                      output_channels=128)  # number of feature maps
    ##########################################################################
    # Now running the dynamic rnn
    ##########################################################################
    # (outputs, state) = tf.nn.dynamic_rnn(cell, concatTensor_conv, time_major=False, dtype=tf.float32)

    ##########################################################################
    # Now treat the hidden state out of the conv lstm as the new image
    ##########################################################################
    # x_image = state[0]  # `state` came from the dynamic_rnn call above,
    # which is commented out, so this line could not run as written
    # tf.contrib.rnn.ConvLSTMCell(conv_ndims,input_shape,output_channels, kernel_shape,use_bias=True,skip_connection=False,forget_bias=1.0,
    #                                 initializers=None,    name='conv_lstm_cell')
    # flatten
    # flattened_shape = np.prod([s.value for s in net.get_shape()[1:]])
    # net = tf.reshape(net, [-1, flattened_shape], name="flatten")

    # fully connected
    # net = L.fully_connected(net, name="fc6", n_out=4096)
    # net = tf.nn.dropout(net, keep_prob)
    # net = L.fully_connected(net, name="fc7", n_out=4096)
    # net = tf.nn.dropout(net, keep_prob)
    # net = L.fully_connected(net, name="fc8", n_out=n_classes)
    # The original returned `net`, which is never defined once the flatten /
    # fully-connected block above is commented out; return the last RNN
    # output so the function is at least well-formed.
    return last_output