Example 1
    def _discriminator(self, img, scope, reuse=False):

        with tf.variable_scope(scope, reuse=reuse):
            x = img
            h1 = conv2d(x,
                        128,
                        strides=2,
                        name='down1',
                        training=self.is_training,
                        use_bn=True)
            h2 = conv2d(h1,
                        256,
                        strides=2,
                        name='down2',
                        training=self.is_training,
                        use_bn=True)
            h3 = conv2d(h2,
                        512,
                        strides=2,
                        name='down3',
                        training=self.is_training,
                        use_bn=True)

            h4_flat = tf.layers.flatten(h3)

            logits = fc(h4_flat, 1, name='out', activation_fn=lambda x: x)

            return logits, tf.nn.sigmoid(logits)
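
Returning both the raw logits and their sigmoid lets the caller build the GAN loss on the numerically stabler logits. A minimal sketch of that wiring, assuming it lives inside the same class and that real_images / fake_images are hypothetical tensors:

import tensorflow as tf

logits_real, _ = self._discriminator(real_images, scope='disc')
logits_fake, _ = self._discriminator(fake_images, scope='disc', reuse=True)
d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits_real),
                                             logits=logits_real)
    + tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits_fake),
                                               logits=logits_fake))
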
Example 2
 def build_unet(self, x, training=False):
     dir_bins = self.cfg.dir_bins
     drop_rate = self.cfg.drop_rate
     batch_norm = self.cfg.batch_norm
     layers_per_block = self.cfg.layers_per_block
     # first conv layer:
     x = conv2d(x, 48)
     stack_skip = []
     # downsample (encoder):
     for i in range(self.n_block_layers):
         n = layers_per_block[str(i)]
         name = 'down_block_' + str(i)
         x_new = dense_block(x, name, n_layers=n, training=training)
         x = concat(x, x_new)
         stack_skip.append(x)
         x = transition_down(x, training)
     # bottleneck:
     n = layers_per_block[str(self.n_block_layers)]
     x_new = dense_block(x, name='bottleneck_block', n_layers=n,
                         training=training)
     rpn_inputs = {self.n_block_layers: concat(x, x_new)}
     # upsample (decoder):
     for i in reversed(range(self.n_block_layers)):
         n = layers_per_block[str(i)]
         name = 'up_block_' + str(i)
         x_skip = stack_skip[i]
         x = transition_up(x_new)
         x = concat(x, x_skip)
         x_new = dense_block(x, name, n_layers=n, training=training,
                             batch_norm=batch_norm, drop_rate=drop_rate)
         rpn_inputs[i] = concat(x, x_new)
     # last conv layer:
     mask_logits = conv2d(x_new, 1 + dir_bins, k_size=1)
     return mask_logits, rpn_inputs
Example 3
def generator(inputs, name='generator', skip=False, padding='REFLECT'):
    with tf.variable_scope(name):
        end_kernel = 7
        kernel = 3

        pad_input = tf.pad(
            inputs, [[0, 0], [kernel, kernel], [kernel, kernel], [0, 0]],
            padding)
        o_c1 = conv2d(pad_input, NUM_GEN_FLITER, end_kernel, 1, name='conv1')
        o_c2 = conv2d(o_c1,
                      NUM_GEN_FLITER * 2,
                      kernel,
                      2,
                      padding='SAME',
                      name='conv2')
        o_c3 = conv2d(o_c2,
                      NUM_GEN_FLITER * 4,
                      kernel,
                      2,
                      padding='SAME',
                      name='conv3')

        o_r1 = build_resnet_block(o_c3, NUM_GEN_FLITER * 4, 'res1', padding)
        o_r2 = build_resnet_block(o_r1, NUM_GEN_FLITER * 4, 'res2', padding)
        o_r3 = build_resnet_block(o_r2, NUM_GEN_FLITER * 4, 'res3', padding)
        o_r4 = build_resnet_block(o_r3, NUM_GEN_FLITER * 4, 'res4', padding)
        o_r5 = build_resnet_block(o_r4, NUM_GEN_FLITER * 4, 'res5', padding)
        o_r6 = build_resnet_block(o_r5, NUM_GEN_FLITER * 4, 'res6', padding)
        o_r7 = build_resnet_block(o_r6, NUM_GEN_FLITER * 4, 'res7', padding)
        o_r8 = build_resnet_block(o_r7, NUM_GEN_FLITER * 4, 'res8', padding)
        o_r9 = build_resnet_block(o_r8, NUM_GEN_FLITER * 4, 'res9', padding)

        o_c4 = conv2d_trans(o_r9,
                            [utils.BATCH_SIZE, 128, 128, NUM_GEN_FLITER * 2],
                            NUM_GEN_FLITER * 2,
                            kernel,
                            2,
                            padding='SAME',
                            name='conv4')
        o_c5 = conv2d_trans(o_c4, [utils.BATCH_SIZE, 256, 256, NUM_GEN_FLITER],
                            NUM_GEN_FLITER,
                            kernel,
                            2,
                            padding='SAME',
                            name='conv5')
        o_c6 = conv2d(o_c5,
                      utils.IMG_CHANNEL,
                      end_kernel,
                      1,
                      padding='SAME',
                      name='conv6',
                      normalize=False,
                      activation=False)

        if skip is True:
            outputs = tf.nn.tanh(inputs + o_c6, 'tanh')
        else:
            outputs = tf.nn.tanh(o_c6, 'tanh')

        return outputs
Example 4
def get_LeNet(batch_size=batch_size, img_shape=(1, 28, 28), dtype="float32"):
    data_shape = (batch_size, ) + img_shape
    data = relay.var("data", shape=data_shape, dtype=dtype)
    conv1_bias = relay.var("conv1_bias")
    conv1 = layers.conv2d(data,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          padding=(1, 1),
                          channels=6,
                          name="conv1")
    conv1 = relay.nn.bias_add(conv1, conv1_bias)
    maxpool1 = relay.nn.max_pool2d(conv1, (2, 2), (2, 2))
    conv2_bias = relay.var("conv2_bias")
    conv2 = layers.conv2d(maxpool1,
                          kernel_size=(5, 5),
                          strides=(1, 1),
                          padding=(0, 0),
                          channels=16,
                          name="conv2")
    conv2 = relay.nn.bias_add(conv2, conv2_bias)
    maxpool2 = relay.nn.max_pool2d(conv2, (2, 2), (2, 2))
    bf1 = relay.nn.batch_flatten(maxpool2)
    dense1 = layers.dense_without_bias(bf1, units=120, name="dense1")
    dense2 = layers.dense_without_bias(dense1, units=84, name="dense2")
    dense3 = layers.dense_without_bias(dense2, units=10, name="dense3")
    softmax = relay.nn.softmax(dense3)
    #label is from input
    label = relay.var("data2", shape=(batch_size, 10), dtype=dtype)
    loss = relay.nn.cross_entropy(softmax, label)
    args = relay.analysis.free_vars(loss)
    return relay.Function(args, loss)
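
The function returns a Relay Function whose body is the cross-entropy loss over the LeNet forward pass. A minimal sketch of wrapping it into a module and type-checking it, assuming the layers helpers and the module-level batch_size used above are importable:

import tvm
from tvm import relay

func = get_LeNet(batch_size=1)
mod = tvm.IRModule.from_expr(func)      # wrap the function so Relay passes can run on it
mod = relay.transform.InferType()(mod)  # annotate every expression with its type
print(mod)                              # inspect the typed LeNet + cross-entropy graph
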
Example 5
def discriminator(inputdisc, name="discriminator"):
    '''
    build the discriminator
    :param inputdisc: tensor
    :param name: operation name
    :return: tensor
    '''
    with tf.variable_scope(name):
        f = 3
        patch_input = tf.random_crop(inputdisc, [1, 70, 70, 3])
        o_c1 = conv2d(patch_input,
                      ndf,
                      f,
                      f,
                      2,
                      2,
                      "SAME",
                      "c1",
                      do_norm=False,
                      relufactor=0.2)
        o_c2 = conv2d(o_c1, ndf * 2, f, f, 2, 2, "SAME", "c2", relufactor=0.2)
        o_c3 = conv2d(o_c2, ndf * 4, f, f, 2, 2, "SAME", "c3", relufactor=0.2)
        o_c4 = conv2d(o_c3, ndf * 8, f, f, 1, 1, "SAME", "c4", relufactor=0.2)
        o_c5 = conv2d(o_c4,
                      1,
                      f,
                      f,
                      1,
                      1,
                      "SAME",
                      "c5",
                      do_norm=False,
                      do_relu=False)
        return o_c5
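
A minimal usage sketch, assuming the conv2d helper and the module-level ndf used above are in scope; tf.random_crop inside the function expects a batch of one, so the placeholder below uses batch size 1:

import tensorflow as tf

images = tf.placeholder(tf.float32, [1, 256, 256, 3], name='input_images')
patch_logits = discriminator(images, name='d_A')  # a 70x70 patch is cropped internally
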
Example 6
    def __init__(self, sess, input_shape, num_actions, reuse=False, is_training=True, name='train'):
        super().__init__(sess, reuse)
        self.initial_state = []
        with tf.name_scope(name + "policy_input"):
            self.X_input = tf.placeholder(tf.uint8, input_shape)
        with tf.variable_scope("policy", reuse=reuse):
            conv1 = conv2d('conv1', tf.cast(self.X_input, tf.float32) / 255., num_filters=32, kernel_size=(8, 8),
                           padding='VALID', stride=(4, 4),
                           initializer=orthogonal_initializer(np.sqrt(2)), activation=tf.nn.relu,
                           is_training=is_training)

            conv2 = conv2d('conv2', conv1, num_filters=64, kernel_size=(4, 4), padding='VALID', stride=(2, 2),
                           initializer=orthogonal_initializer(np.sqrt(2)), activation=tf.nn.relu,
                           is_training=is_training)

            conv3 = conv2d('conv3', conv2, num_filters=64, kernel_size=(3, 3), padding='VALID', stride=(1, 1),
                           initializer=orthogonal_initializer(np.sqrt(2)), activation=tf.nn.relu,
                           is_training=is_training)

            conv3_flattened = flatten(conv3)

            fc4 = dense('fc4', conv3_flattened, output_dim=512, initializer=orthogonal_initializer(np.sqrt(2)),
                        activation=tf.nn.relu, is_training=is_training)

            self.policy_logits = dense('policy_logits', fc4, output_dim=num_actions,
                                       initializer=orthogonal_initializer(np.sqrt(1.0)), is_training=is_training)

            self.value_function = dense('value_function', fc4, output_dim=1,
                                        initializer=orthogonal_initializer(np.sqrt(1.0)), is_training=is_training)

            with tf.name_scope('value'):
                self.value_s = self.value_function[:, 0]

            with tf.name_scope('action'):
                self.action_s = noise_and_argmax(self.policy_logits)
Example 7
    def encoder(self, x, training=True, reuse=None, name=None):

        # [None, 28, 28, 1]  -->  [None, 14, 14, 64]
        h = conv2d(x, 64, kernel_size=4, strides=2, activation=tf.nn.leaky_relu, reuse=reuse, name='e_conv_1')

        # [None, 14, 14, 64] -->  [None, 7, 7, 128]
        h = conv2d(h, 128, kernel_size=4, strides=2, reuse=reuse, name='e_conv_2')
        h = batch_norm(h, training=training, reuse=reuse, name='e_bn_1')
        h = tf.nn.leaky_relu(h)

        # [None, 7, 7, 128]  -->  [None, 7*7*128]
        h = tf.reshape(h, [-1, 7*7*128])

        # [None, 7*7*128] -->  [None, 1024]
        h = dense(h, 1024, reuse=reuse, name='e_dense_1')
        h = batch_norm(h, training=training, reuse=reuse, name='e_bn_2')
        h = tf.nn.leaky_relu(h)

        # [None, 1024] -->  [None, 2*self.z_dim]
        h = dense(h, 2*self.z_dim, reuse=reuse, name='e_dense_2')

        # Assign names to final outputs
        mean = tf.identity(h[:,:self.z_dim], name=name+"_mean")
        log_sigma = tf.identity(h[:,self.z_dim:], name=name+"_log_sigma")
        return mean, log_sigma
Example 8
    def __build(self):
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        with tf.name_scope('Preprocessing'):
            red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
            preprocessed_input = tf.concat([
                tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
                tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
                tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
            ], 3)
        x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                       stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                       batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training,
                       activation=tf.nn.relu, padding='VALID')
        padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
        max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool')
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID')

        logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes,
                                    kernel_size=(1, 1),
                                    l2_strength=self.args.l2_strength,
                                    bias=self.args.bias,
                                    is_training=self.is_training)
        self.logits = flatten(logits_unflattened)

        self.__init_output()
Example 9
    def __init__(self, learning_rate, input_shape,
                 BS):  #input_shape example: [BS,1,28,28]
        self.lr = learning_rate

        self.conv2d_1 = ly.conv2d(input_shape, [5, 5, 1, 32], [1, 1])
        self.relu_1 = ly.relu()
        self.max_pool_1 = ly.max_pooling(self.conv2d_1.output_shape,
                                         filter_shape=[2, 2],
                                         strides=[2, 2])

        self.conv2d_2 = ly.conv2d(self.max_pool_1.output_shape, [5, 5, 32, 64],
                                  [1, 1])
        self.relu_2 = ly.relu()
        self.max_pool_2 = ly.max_pooling(self.conv2d_2.output_shape,
                                         filter_shape=[2, 2],
                                         strides=[2, 2])

        self.flatter = ly.flatter()

        self.full_connect_1 = ly.full_connect(input_len=7 * 7 * 64,
                                              output_len=1024)
        self.relu_3 = ly.relu()
        self.dropout_1 = ly.dropout(1024)

        self.full_connect_2 = ly.full_connect(input_len=1024, output_len=10)
        self.loss_func = ly.softmax_cross_entropy_error()
Example 10
File: LeNet.py Project: iiharu/NN
    def build(self, input_shape=(28, 28, 1), classes=10):
        inputs = keras.Input(shape=input_shape)

        outputs = conv2d(filters=6, kernel_size=(6, 6))(inputs)
        outputs = max_pooling2d(pool_size=(2, 2), strides=(2, 2))(outputs)
        outputs = sigmoid()(outputs)

        outputs = conv2d(filters=16, kernel_size=(6, 6))(outputs)
        outputs = max_pooling2d(pool_size=(2, 2), strides=(2, 2))(outputs)
        outputs = sigmoid()(outputs)

        outputs = flatten()(outputs)

        outputs = dense(120)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(64)(outputs)
        outputs = sigmoid()(outputs)

        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        model = keras.Model(inputs, outputs)

        model.summary()

        return model
Example 11
    def bottleneck_layer(self, x, scope):
        with tf.variable_scope(scope):
            # x = layers.batch_normalization(x, training=self.training, name=scope + '_batch1')
            x = layers.selu(x)
            x = layers.conv2d(x,
                              filters=4 * self.filters,
                              kernel_size=[1, 1],
                              strides=[1, 1],
                              kernel_regularizer=layers.l2_regularizer(0.0005),
                              padding='same',
                              activation=None,
                              name=scope + '_conv1')
            x = layers.drop_out(x, rate=self.dropout, training=self.training)

            # x = layers.batch_normalization(x, training=self.training, name=scope + '_batch2')
            x = layers.selu(x)
            x = layers.conv2d(x,
                              filters=self.filters,
                              kernel_size=[3, 3],
                              strides=[1, 1],
                              kernel_regularizer=layers.l2_regularizer(0.0005),
                              padding='same',
                              activation=None,
                              name=scope + '_conv2')
            x = layers.drop_out(x, rate=self.dropout, training=self.training)

            return x
Example 12
def discriminator(x, reuse=False):
    '''
    :param:
        x: RGB face images, shape [batch, 128, 128, 3], value [-1,1].
    :return:
        pred_img: shape [batch, 2, 2, 1].
        pred_au: AU prediction, shape [batch, 17], value [0,1].
    '''
    with tf.variable_scope('discriminator') as scope:
        if reuse:
            scope.reuse_variables()

        for i in range(6):
            x = conv2d(x,
                       out_channels=64 * (2**i),
                       kernel_size=4,
                       strides=2,
                       use_bias=True,
                       name='Conv' + str(i + 1))
            x = tf.nn.leaky_relu(x, alpha=0.01, name='LReLU' + str(i + 1))

        pred_img = conv2d(x,
                          out_channels=1,
                          kernel_size=3,
                          strides=1,
                          name='PredImg')
        pred_au = conv2d(x,
                         out_channels=17,
                         kernel_size=2,
                         strides=1,
                         padding='valid',
                         name='ConvAU')
        pred_au = tf.squeeze(pred_au, [1, 2], name='PredAU')
        return pred_img, pred_au
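
A minimal usage sketch, assuming the conv2d helper above is importable; the second call passes reuse=True so both branches share the discriminator variables:

import tensorflow as tf

real_faces = tf.placeholder(tf.float32, [None, 128, 128, 3], name='real_faces')
fake_faces = tf.placeholder(tf.float32, [None, 128, 128, 3], name='fake_faces')  # e.g. a generator output
pred_img_real, pred_au_real = discriminator(real_faces)              # first call builds the variables
pred_img_fake, pred_au_fake = discriminator(fake_faces, reuse=True)  # second call shares them
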
Example 13
    def __init__(self, learning_rate,
                 input_shape):  #input_shape example: [BS,1,28,28]
        self.lr = learning_rate

        # conv1:(BS,1,28,28)->(BS,6,28,28)->(BS,6,14,14)
        self.conv2d_1 = ly.conv2d(input_shape, [5, 5, 1, 6], [1, 1], 'SAME')
        self.relu_1 = ly.relu()
        self.pool_1 = ly.max_pooling(self.conv2d_1.output_shape, [2, 2],
                                     [2, 2], 'SAME')

        # conv2:(BS,6,14,14)->(BS,10,14,14)->(BS,10,7,7)
        self.conv2d_2 = ly.conv2d(self.pool_1.output_shape, [5, 5, 6, 10],
                                  [1, 1], 'SAME')
        self.relu_2 = ly.relu()
        self.pool_2 = ly.max_pooling(self.conv2d_2.output_shape, [2, 2],
                                     [2, 2], 'SAME')

        # flat:(BS,10,7,7)->(BS,490)
        self.flatter = ly.flatter()

        # fc1:(BS,490)->(BS,84)
        self.full_connect_1 = ly.full_connect(490, 84)
        self.relu_3 = ly.relu()
        self.dropout = ly.dropout(lenth=84)

        # fc2:(BS,84)->(BS,10)
        self.full_connect_2 = ly.full_connect(84, 10)

        self.loss_func = ly.softmax_cross_entropy_error()
Example 14
 def _stochastic(self, x, dim, scope, ema):
     b_init_var = tf.constant_initializer(0. if self.is_log_var else 1.)
     x = self.activation(x)
     if isinstance(dim, int):
         flatten = tf.contrib.layers.flatten(x)
         mean = dense(flatten,
                      dim,
                      scope=scope + "_mean",
                      training=self.ph_is_training,
                      ema=ema,
                      init=self.init)
         var = dense(flatten,
                     dim,
                     scope=scope + "_var",
                     bias_initializer=b_init_var,
                     training=self.ph_is_training,
                     ema=ema,
                     init=self.init)
     else:
         mean = conv2d(x,
                       dim,
                       scope=scope + "_mean",
                       training=self.ph_is_training,
                       ema=ema,
                       init=self.init)
         var = conv2d(x,
                      dim,
                      scope=scope + "_var",
                      bias_initializer=b_init_var,
                      training=self.ph_is_training,
                      ema=ema,
                      init=self.init)
     var = tf.nn.softplus(var) + self.eps
     z = stochastic_gaussian(mean, var, is_log_var=self.is_log_var)
     return z, mean, var
Example 15
def build_graph(x, dropout_rate):

    with tf.name_scope('transpose'):
        x_trans = tf.transpose(x, perm=[0, 2, 3, 1])

    is_training = tf.placeholder(tf.bool, name='is_training')

    with tf.name_scope('convolution_layers'):
        conv1 = conv2d(x_trans, filters=16, training=is_training, name='convolution1')
        conv2 = conv2d(conv1, filters=32, training=is_training, name='convolution2')
        max_pooling1 = tf.layers.max_pooling2d(conv2, pool_size=[2, 2], strides=2, name='max_pooling1')

        conv3 = conv2d(max_pooling1, filters=64, training=is_training, name='convolution3')
        conv4 = conv2d(conv3, filters=128, training=is_training, name='convolution4')
        max_pooling2 = tf.layers.max_pooling2d(conv4, pool_size=[2, 2], strides=2, name='max_pooling2')

    with tf.name_scope('fully_connected'):
        flatten = tf.layers.flatten(max_pooling2)
        fc = tf.layers.dense(flatten, units=100, activation=tf.nn.relu, name='fc',
                             kernel_initializer=tf.random_uniform_initializer(minval=-0.2, maxval=0.2))

        fc_drop = tf.layers.dropout(fc, rate=dropout_rate, training=is_training, name='dropout')

        logits = tf.layers.dense(fc_drop, units=10, name='logits',
                                 kernel_initializer=tf.random_uniform_initializer(minval=-0.2, maxval=0.2))

    return logits, is_training
Example 16
def deepmind_CNN(state, output_size=128):
    initializer = tf.truncated_normal_initializer(0, 0.1)
    activation_fn = tf.nn.relu

    state = tf.transpose(state, [0, 2, 3, 1])

    l1 = conv2d(state,
                32, [8, 8], [4, 4],
                initializer,
                activation_fn,
                'NHWC',
                name='l1')
    l2 = conv2d(l1,
                64, [4, 4], [2, 2],
                initializer,
                activation_fn,
                'NHWC',
                name='l2')
    l3 = conv2d(l2,
                64, [3, 3], [1, 1],
                initializer,
                activation_fn,
                'NHWC',
                name='l3')

    shape = l3.get_shape().as_list()
    l3_flat = tf.reshape(l3, [-1, reduce(lambda x, y: x * y, shape[1:])])

    embedding = linear(l3_flat,
                       output_size,
                       activation_fn=activation_fn,
                       name='l4')

    # Returns the network output (the embedding)
    return embedding
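
A minimal usage sketch, assuming the conv2d/linear helpers above; the NCHW Atari-style input shape is an assumption (the function transposes it to NHWC itself):

import tensorflow as tf

state = tf.placeholder(tf.float32, [None, 4, 84, 84], name='state')  # stacked frames, channels first
embedding = deepmind_CNN(state, output_size=128)                     # [None, 128] feature vector
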
Example 17
def residual(inputres, dim, name="resnet"):
    with tf.variable_scope(name):
        out_res = tf.pad(inputres, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        _, out_res = conv2d(out_res,
                            dim,
                            3,
                            3,
                            1,
                            1,
                            0.02,
                            "VALID",
                            "c1",
                            relufactor=0.2)
        out_res = tf.pad(out_res, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        _, out_res = conv2d(out_res,
                            dim,
                            3,
                            3,
                            1,
                            1,
                            0.02,
                            "VALID",
                            "c2",
                            do_relu=False)

        return lrelu(out_res + inputres)
Example 18
def ResnetAdaILNBlock(input, dim, gamma, beta, name):
    pad_input = fluid.layers.pad2d(input, [1, 1, 1, 1], mode="reflect")
    conv1 = conv2d(pad_input,
                   dim,
                   3,
                   1,
                   0.02,
                   "VALID",
                   name=name + "rab_c1",
                   norm=False,
                   relu=False)
    norm1 = adaILN(conv1, dim, gamma, beta)
    norm1 = fluid.layers.relu(norm1)
    norm1 = fluid.layers.pad2d(norm1, [1, 1, 1, 1], mode="reflect")
    conv2 = conv2d(norm1,
                   dim,
                   3,
                   1,
                   0.02,
                   "VALID",
                   name=name + "rab_c2",
                   norm=False,
                   relu=False)
    norm2 = adaILN(conv2, dim, gamma, beta)
    return norm2
Example 19
def build_resnet_block(inputs, out_dim, name='resnet', padding='REFLECT'):
    """build a single block of resnet.
  :param inputres: inputs
  :param out_dim: output dim
  :param name: name
  :param padding: for tensorflow version use REFLECT; for pytorch version use
   CONSTANT
  :return: a single block of resnet.
  """
    kernel = 3
    stride = 1
    with tf.variable_scope(name):
        outputs = tf.pad(inputs, [[0, 0], [1, 1], [1, 1], [0, 0]], padding)
        outputs = conv2d(outputs,
                         out_dim,
                         kernel,
                         stride,
                         padding='VALID',
                         name='conv1')
        outputs = tf.pad(outputs, [[0, 0], [1, 1], [1, 1], [0, 0]], padding)
        outputs = conv2d(outputs,
                         out_dim,
                         kernel,
                         stride,
                         padding='VALID',
                         name='conv2',
                         activation=False)
        return tf.nn.relu(outputs + inputs)
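
A minimal usage sketch, assuming the conv2d helper above; because the block ends with outputs + inputs, out_dim has to equal the input depth (64 below is an arbitrary choice):

import tensorflow as tf

features = tf.placeholder(tf.float32, [1, 64, 64, 64], name='features')
h = build_resnet_block(features, 64, name='res1')
h = build_resnet_block(h, 64, name='res2')  # residual blocks preserve spatial size and depth
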
Example 20
def res_layer(inp, num_features1, stride):
    num_features2 = num_features1 * 4
    shape = inp.get_shape()
    [seq_len, inp_width, num_channels] = [int(shape[i]) for i in [1, 2, 4]]
    #[_, seq_len, inp_width, _, num_channels] = [int(i) for i in list(inp.get_shape())]

    inputs = tf.reshape(inp, [-1, inp_width, inp_width, num_channels])

    if num_channels == num_features2:
        o_l = inputs
    else:
        b_l = bias(num_features2, 0.2)
        w_l = weights([1, 1, num_channels, num_features2], 0.04)
        o_l = conv2d(inputs, b_l, w_l, stride)

    b1_r = bias(num_features1, 0.2)
    w1_r = weights([1, 1, num_channels, num_features1], 0.04)
    conv1_r = tf.nn.relu(batch_norm(conv2d(inputs, b1_r, w1_r, stride)))

    b2_r = bias(num_features1, 0.2)
    w2_r = weights([3, 3, num_features1, num_features1], 0.04)
    conv2_r = tf.nn.relu(batch_norm(conv2d(conv1_r, b2_r, w2_r, 1)))

    b3_r = bias(num_features2, 0.2)
    w3_r = weights([1, 1, num_features1, num_features2], 0.04)
    conv3_r = conv2d(conv2_r, b3_r, w3_r, 1)

    out = tf.nn.relu(batch_norm(tf.add(o_l, conv3_r)))

    shape = out.get_shape()
    [out_width, out_features] = [int(shape[i]) for i in [1, 3]]
    #[_, out_width, _, out_features] = [int(i) for i in list(out.get_shape())]

    return tf.reshape(out, [-1, seq_len, out_width, out_width, out_features])
Example 21
 def discriminator(self, img, const_init=False, trainable=True, reuse=False):
     # (n, 1, 28, 28)
     h0 = layers.conv2d(
         img,
         64,
         5,
         name="d_conv1",
         const_init=const_init,
         trainable=trainable,
         reuse=reuse,
     )
     h0 = flow.nn.leaky_relu(h0, 0.3)
     h0 = flow.nn.dropout(h0, rate=0.3)
     # (n, 64, 14, 14)
     h1 = layers.conv2d(
         h0,
         128,
         5,
         name="d_conv2",
         const_init=const_init,
         trainable=trainable,
         reuse=reuse,
     )
     h1 = flow.nn.leaky_relu(h1, 0.3)
     h1 = flow.nn.dropout(h1, rate=0.3)
     # (n, 128 * 7 * 7)
     out = flow.reshape(h1, (self.batch_size, -1))
     # (n, 1)
     out = layers.dense(
         out, 1, name="d_fc", const_init=const_init, trainable=trainable, reuse=reuse
     )
     return out
Example 22
 def encoder(self, x):
     out = conv2d(x, 20, 5, activation=tf.nn.relu)
     out = max_pool(out, 2, 2)
     out = conv2d(out, 50, 5, activation=tf.nn.relu)
     out = max_pool(out, 2, 2)
     out = tf.layers.flatten(out)
     out = dense(out, 500, activation=tf.nn.relu)
     return out
Example 23
def discriminator(inputdisc, name="discriminator"):
    with tf.variable_scope(name):
        f = 4

        _, o_c1 = conv2d(inputdisc,
                         ndf,
                         f,
                         f,
                         2,
                         2,
                         0.02,
                         "SAME",
                         "c1",
                         do_norm=False,
                         relufactor=0.2)
        _, o_c2 = conv2d(o_c1,
                         ndf * 2,
                         f,
                         f,
                         2,
                         2,
                         0.02,
                         "SAME",
                         "c2",
                         relufactor=0.2)
        _, o_c3 = conv2d(o_c2,
                         ndf * 4,
                         f,
                         f,
                         2,
                         2,
                         0.02,
                         "SAME",
                         "c3",
                         relufactor=0.2)
        _, o_c4 = conv2d(o_c3,
                         ndf * 8,
                         f,
                         f,
                         1,
                         1,
                         0.02,
                         "SAME",
                         "c4",
                         relufactor=0.2)
        _, o_c5 = conv2d(o_c4,
                         1,
                         f,
                         f,
                         1,
                         1,
                         0.02,
                         "SAME",
                         "c5",
                         do_norm=False,
                         do_relu=False)

        return o_c5
Example 24
    def discriminator2(self, x, reuse=None):
        with tf.variable_scope("discriminator2", reuse=tf.AUTO_REUSE):
            # second discriminator
            x_2 = lay.conv2d(x, f=64, name='d-conv2d-1')
            x_2 = lay.batch_norm(x_2)
            x_2 = tf.nn.leaky_relu(x_2, alpha=0.1)

            x_2 = lay.conv2d(x_2, f=128, name='d-conv2d-0')
            x_2 = lay.batch_norm(x_2)
            out = tf.nn.leaky_relu(x_2, alpha=0.1)

            return out
Example 25
 def build_rpn(self, x, anchors_per_cell):
     _, h, w, _ = x.get_shape().as_list()
     n = anchors_per_cell
     x = conv2d(x, 512)
     rpn_outputs = x
     x = tf.nn.relu(x)
     x = conv2d(x, n_filters=6 * n, k_size=1)
     x = tf.transpose(x, (0, 3, 1, 2))
     x = tf.reshape(x, [-1, 6, n * h * w])
     x = tf.transpose(x, (0, 2, 1))
     clf_logits, regs = tf.split(x, [2, 4], axis=2)
     return rpn_outputs, clf_logits, regs
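
For readability, a shape trace of the reshuffle above, assuming x is a [batch, h, w, C] feature map with n anchors per cell:

# conv2d(x, 6 * n, k_size=1)   -> [batch, h, w, 6n]
# transpose to (0, 3, 1, 2)    -> [batch, 6n, h, w]
# reshape to [-1, 6, n*h*w]    -> [batch, 6, n*h*w]
# transpose to (0, 2, 1)       -> [batch, n*h*w, 6]
# split([2, 4], axis=2)        -> clf_logits [batch, n*h*w, 2], regs [batch, n*h*w, 4]
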
Example 26
 def net(self, X, reuse=None):
     with tf.variable_scope('EyeNet', reuse=reuse):
         conv1 = conv2d(X, output_dims=20, k_h=5, k_w=5, s_h=1, s_w=1, padding='VALID', name='conv1')
         pool1 = max_pool(conv1, k_h=2, k_w=2, s_h=2, s_w=2, padding='SAME', name='pool1')
         conv2 = conv2d(pool1, output_dims=50, k_h=5, k_w=5, s_h=1, s_w=1, padding='VALID', name='conv2')
         pool2 = max_pool(conv2, k_h=2, k_w=2, s_h=2, s_w=2, padding='SAME', name='pool2')
         flatten = tf.reshape(pool2, [-1, pool2.get_shape().as_list()[1]
                                          * pool2.get_shape().as_list()[2]
                                          * pool2.get_shape().as_list()[3]], name='conv_reshape')
         fc1 = fc(flatten, output_dims=500, name='fc1')
         relu1 = relu(fc1, name='relu1')
         out = fc(relu1, output_dims=2, name='output')
         return out
Example 27
def build_resnet_block(inputres, dim, name="resnet"):
    out_res = fluid.layers.pad2d(inputres, [1, 1, 1, 1], mode="reflect")
    out_res = conv2d(out_res, dim, 3, 1, 0.02, "VALID", name + "_c1")
    out_res = fluid.layers.pad2d(out_res, [1, 1, 1, 1], mode="reflect")
    out_res = conv2d(out_res,
                     dim,
                     3,
                     1,
                     0.02,
                     "VALID",
                     name + "_c2",
                     relu=False)
    return fluid.layers.relu(out_res + inputres)
Example 28
    def _encoder(self, img, scope, reuse=False):

        with tf.variable_scope(scope, reuse=reuse):
            h1 = conv2d(img, 16, strides=2, name='down1', training=self.is_training, use_bn=True)

            h2 = conv2d(h1, 64, name='down2', training=self.is_training, use_bn=True)

            h3 = conv2d(h2, 128, strides=2, name='down3', training=self.is_training, use_bn=True)
            h3_flat = tf.layers.flatten(h3)

            z = fc(h3_flat, self._zdim, 'out', activation_fn=tf.tanh)

            return z
Example 29
    def __build(self):
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        with tf.name_scope('Preprocessing'):
            red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
            preprocessed_input = tf.concat([
                tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
                tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
                tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
            ], 3)
        x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]],
                          "CONSTANT")
        conv1 = conv2d('conv1',
                       x=x_padded,
                       w=None,
                       num_filters=self.output_channels['conv1'],
                       kernel_size=(3, 3),
                       stride=(2, 2),
                       l2_strength=self.args.l2_strength,
                       bias=self.args.bias,
                       batchnorm_enabled=self.args.batchnorm_enabled,
                       is_training=self.is_training,
                       activation=tf.nn.relu,
                       padding='VALID')
        padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
        max_pool = max_pool_2d(padded,
                               size=(3, 3),
                               stride=(2, 2),
                               name='max_pool')
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        global_pool = avg_pool_2d(stage4,
                                  size=(7, 7),
                                  stride=(1, 1),
                                  name='global_pool',
                                  padding='VALID')

        logits_unflattened = conv2d('fc',
                                    global_pool,
                                    w=None,
                                    num_filters=self.args.num_classes,
                                    kernel_size=(1, 1),
                                    l2_strength=self.args.l2_strength,
                                    bias=self.args.bias,
                                    is_training=self.is_training)
        self.logits = flatten(logits_unflattened)

        self.__init_output()
Example 30
    def discriminator(self, image, reuse=False):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            h0 = leaky_relu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = leaky_relu(
                self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv')))
            h2 = leaky_relu(
                self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv')))
            h3 = leaky_relu(
                self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return h4
Example 31
def generator(real_img, desired_au, reuse=False):
    '''
    :param:
        real_img: RGB face images, shape [batch, 128, 128, 3], value [-1,1].
        desired_au: AU value, shape [batch, 17], value [0,1].
    :return:
        fake_img: RGB generate face, shape [batch, 128, 128, 3], value [-1,1].
        fake_mask: face mask, shape [batch, 128, 128, 1], value [0,1].
    '''
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
            
        desired_au = tf.expand_dims(desired_au, axis=1, name='ExpandDims1')
        desired_au = tf.expand_dims(desired_au, axis=2, name='ExpandDims2')
        desired_au = tf.tile(desired_au, multiples=[1,128,128,1], name='Tile')
        x = tf.concat([real_img, desired_au], axis=3, name='Concat')
        
        x = conv2d(x, out_channels=64, kernel_size=7, strides=1, name='Conv1')
        x = instance_norm(x, name='InstNorm1')
        x = tf.nn.relu(x, name='ReLU1')

        x = conv2d(x, out_channels=128, kernel_size=4, strides=2, name='Conv2')
        x = instance_norm(x, name='InstNorm2')
        x = tf.nn.relu(x, name='ReLU2')

        x = conv2d(x, out_channels=256, kernel_size=4, strides=2, name='Conv3')
        x = instance_norm(x, name='InstNorm3')
        x = tf.nn.relu(x, name='ReLU3')

        for i in range(1, 7):
            x = res_block(x, out_channels=256, name='ResBlock'+str(i))

        x = deconv2d(x, out_channels=128, kernel_size=4, stride=2, name='Deconv1')
        x = instance_norm(x, name='InstNorm4')
        x = tf.nn.relu(x, name='ReLU4')

        x = deconv2d(x, out_channels=64, kernel_size=4, stride=2, name='Deconv2')
        x = instance_norm(x, name='InstNorm5')
        features = tf.nn.relu(x, name='ReLU5')

        x = conv2d(features, out_channels=3, kernel_size=7, strides=1, name='ConvImg')
        fake_img = tf.tanh(x, name='Tanh')

        x = conv2d(features, out_channels=1, kernel_size=7, strides=1, name='ConvMask')
        fake_mask = tf.sigmoid(x, name='Sigmoid')

        return fake_img, fake_mask
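
A minimal usage sketch, assuming conv2d, instance_norm, res_block and deconv2d are in scope; the mask-based blending on the last line is one common (GANimation-style) way to combine the two heads and is an assumption, not part of the snippet:

import tensorflow as tf

real_img = tf.placeholder(tf.float32, [None, 128, 128, 3], name='real_img')
target_au = tf.placeholder(tf.float32, [None, 17], name='target_au')
fake_img, fake_mask = generator(real_img, target_au)
output = fake_mask * real_img + (1.0 - fake_mask) * fake_img  # mask broadcasts over the RGB channels
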
Example 32
def simple_img_conv_pool(input,
                         num_filters,
                         filter_size,
                         pool_size,
                         pool_stride,
                         act,
                         pool_type='max',
                         main_program=None,
                         startup_program=None):
    conv_out = layers.conv2d(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        act=act,
        main_program=main_program,
        startup_program=startup_program)

    pool_out = layers.pool2d(
        input=conv_out,
        pool_size=pool_size,
        pool_type=pool_type,
        pool_stride=pool_stride,
        main_program=main_program,
        startup_program=startup_program)
    return pool_out
Example 33
 def __build(self):
     self.__init_global_epoch()
     self.__init_global_step()
     self.__init_input()
     # 0. Image preprocessing: subtract the per-channel mean and scale by the normalization factor
     with tf.name_scope('Preprocessing'):
         # split into the three colour channels
         red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
         # subtract the mean and scale each channel, then concat the channels back together
         preprocessed_input = tf.concat([
             tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
             tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
             tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
         ], 3)
     # 1. conv1: 3x3x3x24 convolution, stride 2, BN, ReLU
     # pad the borders
     x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
     ######## conv
     conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                    stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                    batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training,
                    activation=tf.nn.relu, padding='VALID')
     # 2. 3x3 max pooling, stride 2
     padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
     max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool')
     # 3. one stride-2 unit (non-grouped pointwise conv, channel concat), then 3 stride-1 units with additive (residual) merging
     stage2 = self.__stage(max_pool, stage=2, repeat=3)
     # 4. one stride-2 unit (grouped pointwise conv, channel concat), then 7 stride-1 units with additive (residual) merging
     stage3 = self.__stage(stage2, stage=3, repeat=7)
     # 5. one stride-2 unit (grouped pointwise conv, channel concat), then 3 stride-1 units with additive (residual) merging
     stage4 = self.__stage(stage3, stage=4, repeat=3)
     # 6. global average pooling, 7x7 kernel, stride 1
     global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID')
     # 7. 1x1 pointwise convolution producing one feature map per class
     logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes,
                                 kernel_size=(1, 1),  # 1x1 pointwise conv
                                 l2_strength=self.args.l2_strength,
                                 bias=self.args.bias,
                                 is_training=self.is_training)
     # 8. flatten to a 1-D vector
     self.logits = flatten(logits_unflattened)
     # 9. compute the loss and outputs
     self.__init_output()
Example 34
	def __call__(self, inputs):
		with tf.name_scope(self.name):
			with tf.name_scope('preprocessing'):
				pad_1 = tf.pad(inputs, np.array([[0,0],[2,2],[2,2],[0,0]]))
				conv_1 = conv2d(pad_1, 64, kernel_size=6, strides = 2, name = '256to128')
				res_1 = residual(conv_1, 128)
				pool_1 = tf.contrib.layers.max_pool2d(res_1, [2,2], [2,2], padding= 'VALID')
				res_2 = residual(pool_1, 128)
				res_3 = residual(res_2, self.nFeat)
			# Supervision Table
			hg = [None] * self.nbStack
			ll = [None] * self.nbStack
			ll_ = [None] * self.nbStack
			drop = [None] * self.nbStack
			out = [None] * self.nbStack
			out_ = [None] * self.nbStack
			sum_ = [None] * self.nbStack
			with tf.name_scope('stacks'):
				with tf.name_scope('hourglass.1'):
					hg[0] = self.hourglass(res_3, self.nLow, self.nFeat, 'hourglass')
					ll[0] = convBnrelu(hg[0], self.nFeat, name= 'conv_1')
					ll_[0] = conv2d(ll[0],self.nFeat,1,1,'VALID','ll')
					drop[0] = tf.layers.dropout(ll_[0], rate = 0.1, training = self.train)
					out[0] = conv2d(ll[0],self.outDim,1,1,'VALID','out')
					out_[0] = conv2d(out[0],self.nFeat,1,1,'VALID','out_')
					sum_[0] = tf.add_n([drop[0], out_[0], res_3])
				for i in range(1, self.nbStack-1):
					with tf.name_scope('hourglass.' + str(i+1)):
						hg[i] = self.hourglass(sum_[i-1], self.nLow, self.nFeat, 'hourglass')
						ll[i] = convBnrelu(hg[i], self.nFeat, name='conv_1')
						ll_[i] = conv2d(ll[i],self.nFeat,1,1,'VALID','ll')
						drop[i] = tf.layers.dropout(ll_[i],rate=0.1, training = self.train)
						out[i] = conv2d(ll[i],self.outDim,1,1,'VALID','out')
						out_[i] = conv2d(out[i],self.nFeat,1,1,'VALID','out_')
						sum_[i] = tf.add_n([drop[i], out_[i], sum_[i-1]])
				with tf.name_scope('hourglass.' + str(self.nbStack)):
					hg[self.nbStack-1] = self.hourglass(sum_[self.nbStack - 2], self.nLow, self.nFeat, 'hourglass')
					ll[self.nbStack-1] = convBnrelu(hg[self.nbStack - 1], self.nFeat, name='conv_1')
					drop[self.nbStack-1] = tf.layers.dropout(ll[self.nbStack-1], rate=0.1, training = self.train)
					out[self.nbStack-1] = conv2d(drop[self.nbStack-1],self.outDim,1,1,'VALID', 'out')
			return tf.stack(out, name = 'output')
Example 35
    def __call__(self, x, reuse=False, output_name=None):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            # Initial dense multiplication
            x = layers.linear(x, "G_FC1", self.nb_filters * 8 * 8)

            batch_size = tf.shape(x)[0]
            if FLAGS.data_format == "NHWC":
                target_shape = (batch_size, 8, 8, self.nb_filters)
            elif FLAGS.data_format == "NCHW":
                target_shape = (batch_size, self.nb_filters, 8, 8)

            x = layers.reshape(x, target_shape)
            # x = tf.contrib.layers.batch_norm(x, fused=True, data_format=FLAGS.data_format)
            x = tf.nn.elu(x)

            x = layers.dec_conv2d_block(x, "G_conv2D1", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "G_up1", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "G_conv2D2", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "G_up2", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "G_conv2D3", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "G_up3", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "G_conv2D4", self.nb_filters, 3, data_format=FLAGS.data_format)

            # Last conv
            x = layers.conv2d(x, "G_conv2D5", self.nb_filters, FLAGS.channels, 3, 1, "SAME", data_format=FLAGS.data_format)

            x = tf.nn.tanh(x, name=output_name)

            return x
Example 36
def img_conv_group(input,
                   conv_num_filter,
                   pool_size,
                   conv_padding=1,
                   conv_filter_size=3,
                   conv_act=None,
                   conv_with_batchnorm=False,
                   conv_batchnorm_drop_rate=None,
                   pool_stride=1,
                   pool_type=None,
                   main_program=None,
                   startup_program=None):
    """
    Image Convolution Group, Used for vgg net.
    """
    tmp = input
    assert isinstance(conv_num_filter, list) or \
        isinstance(conv_num_filter, tuple)

    def __extend_list__(obj):
        if not hasattr(obj, '__len__'):
            return [obj] * len(conv_num_filter)
        else:
            return obj

    conv_padding = __extend_list__(conv_padding)
    conv_filter_size = __extend_list__(conv_filter_size)
    conv_with_batchnorm = __extend_list__(conv_with_batchnorm)
    conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate)

    for i in xrange(len(conv_num_filter)):
        local_conv_act = conv_act
        if conv_with_batchnorm[i]:
            local_conv_act = None

        tmp = layers.conv2d(
            input=tmp,
            num_filters=conv_num_filter[i],
            filter_size=conv_filter_size[i],
            padding=conv_padding[i],
            act=local_conv_act,
            main_program=main_program,
            startup_program=startup_program)

        if conv_with_batchnorm[i]:
            tmp = layers.batch_norm(
                input=tmp,
                act=conv_act,
                main_program=main_program,
                startup_program=startup_program)
            drop_rate = conv_batchnorm_drop_rate[i]
            if abs(drop_rate) > 1e-5:
                tmp = layers.dropout(
                    x=tmp,
                    dropout_prob=drop_rate,
                    main_program=main_program,
                    startup_program=startup_program)

    pool_out = layers.pool2d(
        input=tmp,
        pool_size=pool_size,
        pool_type=pool_type,
        pool_stride=pool_stride,
        main_program=main_program,
        startup_program=startup_program)
    return pool_out
Example 37
    def __call__(self, x, reuse=False, output_name=None):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            ##################
            # Encoding part
            ##################

            # First conv
            x = layers.conv2d(x, "D_conv2D1", FLAGS.channels, self.nb_filters, 3, 1, "SAME", data_format=FLAGS.data_format)
            x = tf.nn.elu(x)

            # Conv blocks
            x = layers.enc_conv2d_block(x, "D_enc_conv2D2", self.nb_filters, 3, activation_fn=tf.nn.elu, data_format=FLAGS.data_format)
            x = layers.enc_conv2d_block(x, "D_enc_conv2D3", 2 * self.nb_filters, 3, activation_fn=tf.nn.elu, data_format=FLAGS.data_format)
            x = layers.enc_conv2d_block(x, "D_enc_conv2D4", 3 * self.nb_filters, 3, activation_fn=tf.nn.elu, data_format=FLAGS.data_format)
            x = layers.enc_conv2d_block(x, "D_enc_conv2D5", 4 * self.nb_filters, 3, activation_fn=tf.nn.elu, data_format=FLAGS.data_format, downsampling=False)

            # Flatten
            batch_size = tf.shape(x)[0]
            other_dims = x.get_shape().as_list()[1:]
            prod_dim = 1
            for d in other_dims:
                prod_dim *= d
            x = layers.reshape(x, (batch_size, prod_dim))

            # Linear
            x = layers.linear(x, "D_FC1", self.h_dim, activation_fn=None)

            ##################
            # Decoding part
            ##################

            x = layers.linear(x, "D_FC2", self.nb_filters * 8 * 8)

            batch_size = tf.shape(x)[0]
            if FLAGS.data_format == "NHWC":
                target_shape = (batch_size, 8, 8, self.nb_filters)
            elif FLAGS.data_format == "NCHW":
                target_shape = (batch_size, self.nb_filters, 8, 8)

            x = layers.reshape(x, target_shape)
            # x = tf.contrib.layers.batch_norm(x, fused=True, data_format=FLAGS.data_format)
            x = tf.nn.elu(x)

            x = layers.dec_conv2d_block(x, "D_dec_conv2D1", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "D_up1", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "D_dec_conv2D2", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "D_up2", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "D_dec_conv2D3", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "D_up3", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "D_dec_conv2D4", self.nb_filters, 3, data_format=FLAGS.data_format)

            # Last conv
            x = layers.conv2d(x, "D_dec_conv2D5", self.nb_filters, FLAGS.channels, 3, 1, "SAME", data_format=FLAGS.data_format)
            x = tf.nn.tanh(x, name=output_name)

            return x