Code example #1
File: model.py Project: zyw1218/OUCML
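The snippets below target the TensorFlow 1.x graph API together with TensorLayer 1.x layer classes. A minimal, assumed set of imports for running them would look like this sketch (not part of the original files):

# Assumed imports for these TensorFlow 1.x / TensorLayer 1.x snippets
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (InputLayer, Conv2d, DeConv2d, DepthwiseConv2d,
                                BatchNormLayer, DenseLayer, FlattenLayer, ReshapeLayer,
                                MaxPool2d, MeanPool2d, ConcatLayer, ElementwiseLayer,
                                SubpixelConv2d, UpSampling2dLayer)
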
def discriminator(inputs, is_train=True, reuse=False):
    df_dim = 64  # Dimension of discrim filters in first conv layer. [64]
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope("discriminator", reuse=reuse):

        net_in = InputLayer(inputs, name='d/in')
        net_h0 = Conv2d(net_in,
                        df_dim, (5, 5), (2, 2),
                        act=tf.nn.leaky_relu,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h0/conv2d')

        net_h1 = Conv2d(net_h0,
                        df_dim * 2, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h1/conv2d')
        net_h1 = BatchNormLayer(net_h1,
                                act=tf.nn.leaky_relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h1/batch_norm')

        net_h2 = Conv2d(net_h1,
                        df_dim * 4, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h2/conv2d')
        net_h2 = BatchNormLayer(net_h2,
                                act=tf.nn.leaky_relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h2/batch_norm')

        net_h3 = Conv2d(net_h2,
                        df_dim * 8, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h3/conv2d')
        net_h3 = BatchNormLayer(net_h3,
                                act=tf.nn.leaky_relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h3/batch_norm')

        net_h4 = FlattenLayer(net_h3, name='d/h4/flatten')
        net_h4 = DenseLayer(net_h4,
                            n_units=1,
                            act=tf.identity,
                            W_init=w_init,
                            name='d/h4/lin_sigmoid')
        logits = net_h4.outputs
        net_h4.outputs = tf.nn.sigmoid(net_h4.outputs)
    return net_h4, logits
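A typical way to wire this discriminator into a DCGAN-style loss is sketched below; real_images and fake_images are assumed input tensors, not names from the original project.

# Hypothetical usage sketch: the second call shares the discriminator variables via reuse=True
net_d_real, d_logits_real = discriminator(real_images, is_train=True, reuse=False)
net_d_fake, d_logits_fake = discriminator(fake_images, is_train=True, reuse=True)
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_real, labels=tf.ones_like(d_logits_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake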
Code example #2
def model(x, is_train):
    with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
        net = InputLayer(x, name='input')
        net = Conv2d(net,
                     64, (5, 5), (1, 1),
                     padding='SAME',
                     b_init=None,
                     name='cnn1')
        net = BatchNormLayer(net,
                             decay=0.99,
                             is_train=is_train,
                             act=tf.nn.relu,
                             name='batch1')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')

        net = Conv2d(net,
                     64, (5, 5), (1, 1),
                     padding='SAME',
                     b_init=None,
                     name='cnn2')
        net = BatchNormLayer(net,
                             decay=0.99,
                             is_train=is_train,
                             act=tf.nn.relu,
                             name='batch2')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net, 384, act=tf.nn.relu, name='d1relu')
        net = DenseLayer(net, 192, act=tf.nn.relu, name='d2relu')
        net = DenseLayer(net, 10, act=None, name='output')
    return net
Code example #3
def model_batch_norm(x_crop, y_, is_train, reuse):
    W_init = tf.truncated_normal_initializer(stddev=5e-2)
    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
    b_init2 = tf.constant_initializer(value=0.1)
    with tf.variable_scope("model", reuse=reuse):
        net = InputLayer(x_crop, name='input')
        net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn1')
        net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch1')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')

        net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn2')
        net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch2')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net, 384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')
        net = DenseLayer(net, 192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')
        net = DenseLayer(net, n_units=10, act=None, W_init=W_init2, name='output')
        y = net.outputs

        ce = tl.cost.cross_entropy(y, y_, name='cost')
        # L2 for the MLP, without this, the accuracy will be reduced by 15%.
        L2 = 0
        for p in tl.layers.get_variables_with_name('relu/W', True, True):
            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
        cost = ce + L2

        correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return net, cost, acc
Code example #4
 def depthwise_conv_block(cls, n, n_filter, strides=(1, 1), is_train=False, name="depth_block"):
     with tf.variable_scope(name):
         n = DepthwiseConv2d(n, (3, 3), strides, b_init=None, name='depthwise')
         n = BatchNormLayer(n, act=tf.nn.relu6, is_train=is_train, name='batchnorm1')
         n = Conv2d(n, n_filter, (1, 1), (1, 1), b_init=None, name='conv')
         n = BatchNormLayer(n, act=tf.nn.relu6, is_train=is_train, name='batchnorm2')
     return n
Code example #5
 def depthwise_conv_block(n,
                          n_filter,
                          filter_size=(3, 3),
                          strides=(1, 1),
                          name="depth_block"):
     with tf.variable_scope(name):
         n = DepthwiseConv2d(n,
                             filter_size,
                             strides,
                             W_init=W_init,
                             b_init=None,
                             name='depthwise')
         n = BatchNormLayer(n,
                            decay=decay,
                            act=tf.nn.relu6,
                            is_train=train_bn,
                            name='batchnorm1')
         n = Conv2d(n,
                    n_filter, (1, 1), (1, 1),
                    W_init=W_init,
                    b_init=None,
                    name='conv')
         n = BatchNormLayer(n,
                            decay=decay,
                            act=tf.nn.relu6,
                            is_train=train_bn,
                            name='batchnorm2')
     return n
Code example #6
def discriminator(inputs, is_train=True, reuse=False):
    dfs = 64
    gamma_init = tf.random_normal_initializer(1., 0.02)
    W_init = tf.random_normal_initializer(stddev=0.02)

    with tf.variable_scope('discriminator', reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        d = InputLayer(inputs, name='d/inputs')
        d = Conv2d(d,
                   dfs, (5, 5), (2, 2),
                   W_init=W_init,
                   act=lambda x: tl.act.lrelu(x, 0.2),
                   name='d/conv1')

        d = Conv2d(d,
                   dfs * 2, (5, 5), (2, 2),
                   W_init=W_init,
                   act=None,
                   name='d/conv2')
        d = BatchNormLayer(d,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='d/bn3')

        d = Conv2d(d,
                   dfs * 4, (5, 5), (2, 2),
                   W_init=W_init,
                   act=None,
                   name='d/conv4')
        d = BatchNormLayer(d,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='d/bn5')

        d = Conv2d(d,
                   dfs * 8, (5, 5), (2, 2),
                   W_init=W_init,
                   act=None,
                   name='d/conv6')
        d = BatchNormLayer(d,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='d/bn7')

        d = FlattenLayer(d, name='d/flt8')
        d = DenseLayer(d,
                       1,
                       act=tl.act.identity,
                       W_init=W_init,
                       name='d/output')

        logits = d.outputs
        d.outputs = tf.nn.sigmoid(d.outputs)
        return d, logits
Code example #7
def discriminator(inputs, is_train=True):
    with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
        net_in = InputLayer(inputs, name='din')

        #Conv2d is tf.nn.conv2d + tf.nn.relu
        dnet_c0 = Conv2d(net_in,
                         64, (8, 8), (2, 2),
                         act=tf.nn.relu,
                         padding='SAME',
                         name='dnet_c0')

        #Conv2d is tf.nn.conv2d
        #BatchNormLayer is tf.nn.batch_normalization + tf.nn.relu
        dnet_c1 = Conv2d(dnet_c0,
                         128, (8, 8), (2, 2),
                         act=None,
                         padding='SAME',
                         name='dnet_c1')
        dnet_b1 = BatchNormLayer(dnet_c1,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='dnet_b1')

        #    dnet_p1 = MaxPool2d(dnet_b1, (2, 2), name='pool2')   #Don't use pool layer, it is not good. But you can try.

        dnet_c2 = Conv2d(dnet_b1,
                         256, (8, 8), (2, 2),
                         act=None,
                         padding='SAME',
                         name='dnet_c2')
        dnet_b2 = BatchNormLayer(dnet_c2,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='dnet_b2')

        dnet_c3 = Conv2d(dnet_b2,
                         512, (8, 8), (2, 2),
                         act=None,
                         padding='SAME',
                         name='dnet_c3')
        dnet_b3 = BatchNormLayer(dnet_c3,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='dnet_b3')

        #FlattenLayer is tf.reshape
        dnet_f1 = FlattenLayer(dnet_b3, name='dnet_f1')
        #DenseLayer is tf.layers.dense, the full-connected
        dnet_d1 = DenseLayer(dnet_f1,
                             n_units=1,
                             act=tf.identity,
                             name='dnet_h4')
        logits = dnet_d1.outputs
        dnet_d1.outputs = tf.nn.sigmoid(dnet_d1.outputs)
    return dnet_d1, logits
Code example #8
File: model_u2_3d_d2.py Project: piccaSun/Research
def generator(inputs, is_train=True, reuse=False):
    image_size = 128
   
    gf_dim = 64    # Dimension of gen filters in first conv layer. [64]
    c_dim = 1    # n_color 1
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.name_scope("GENERATOR"):

        with tf.variable_scope("generator", reuse=reuse):


            with tf.name_scope("net_in"):
                net_in = InputLayer(inputs, name='g/in')
        #############################################################################
            with tf.name_scope("layer0"):
                net_h0 = DenseLayer(net_in, n_units=(gf_dim * 32 * 4 * 4), W_init=w_init,
                                    act=tf.identity, name='g/h0/lin')
                net_h0 = ReshapeLayer(net_h0, shape=[-1, 4, 4, gf_dim * 32], name='g/h0/reshape')
                net_h0 = BatchNormLayer(net_h0, decay=0.9, act=tf.nn.relu, is_train=is_train,
                                        gamma_init=gamma_init, name='g/h0/batch_norm')

            with tf.name_scope("layer1"):
                net_h1 = DeConv2d(net_h0, gf_dim * 8, (5, 5), strides=(2, 2),
                                  padding='SAME', act=None, W_init=w_init, name='g/h1/decon2d')
                net_h1 = BatchNormLayer(net_h1, decay=0.9, act=tf.nn.relu, is_train=is_train,
                                        gamma_init=gamma_init, name='g/h1/batch_norm')

            with tf.name_scope("layer2"):
                net_h2 = DeConv2d(net_h1, gf_dim * 4, (5, 5), strides=(2, 2),
                                  padding='SAME', act=None, W_init=w_init, name='g/h2/decon2d')
                net_h2 = BatchNormLayer(net_h2, decay=0.9, act=tf.nn.relu, is_train=is_train,
                                        gamma_init=gamma_init, name='g/h2/batch_norm')

            with tf.name_scope("layer3"):
                net_h3 = DeConv2d(net_h2, gf_dim * 2, (5, 5), strides=(2, 2),
                                  padding='SAME', act=None, W_init=w_init, name='g/h3/decon2d')
                net_h3 = BatchNormLayer(net_h3, decay=0.9, act=tf.nn.relu, is_train=is_train,
                                        gamma_init=gamma_init, name='g/h3/batch_norm')

            with tf.name_scope("layer4"):
                net_h4 = DeConv2d(net_h3, gf_dim, (5, 5), strides=(2, 2),
                                  padding='SAME', act=None, W_init=w_init, name='g/h4/decon2d')
                net_h4 = BatchNormLayer(net_h4, decay=0.9, act=tf.nn.relu, is_train=is_train,
                                        gamma_init=gamma_init, name='g/h4/batch_norm')

            with tf.name_scope("layer5"):
                net_h5 = DeConv2d(net_h4, c_dim, (5, 5), strides=(2, 2),
                                  padding='SAME', act=None, W_init=w_init, name='g/h5/decon2d')
                net_h5.outputs = tf.nn.tanh(net_h5.outputs)

        return net_h5
Code example #9
def ResBlockUp(inputs, input_size, batch_size, filters, scope_name, reuse,
               phase_train):
    with tf.variable_scope(scope_name, reuse=reuse):
        set_name_reuse(reuse)
        w_init = tf.truncated_normal_initializer(stddev=0.02)
        b_init = tf.constant_initializer(value=0.0)
        gamma_init = tf.random_normal_initializer(1., 0.02)
        input_layer = InputLayer(inputs, name='inputs')
        conv1 = DeConv2d(input_layer,
                         filters, (3, 3), (input_size * 2, input_size * 2),
                         (2, 2),
                         batch_size=batch_size,
                         act=None,
                         padding='SAME',
                         W_init=w_init,
                         b_init=b_init,
                         name="deconv1")
        conv1 = BatchNormLayer(conv1,
                               act=tf.nn.leaky_relu,
                               is_train=phase_train,
                               gamma_init=gamma_init,
                               name='bn1')
        conv2 = DeConv2d(conv1,
                         filters // 2, (3, 3), (input_size * 2, input_size * 2),
                         (1, 1),
                         act=None,
                         padding='SAME',
                         batch_size=batch_size,
                         W_init=w_init,
                         b_init=b_init,
                         name="deconv2")
        conv2 = BatchNormLayer(conv2,
                               act=tf.nn.leaky_relu,
                               is_train=phase_train,
                               gamma_init=gamma_init,
                               name='bn2')

        conv3 = DeConv2d(input_layer,
                         filters // 2, (3, 3), (input_size * 2, input_size * 2),
                         (2, 2),
                         act=None,
                         padding='SAME',
                         batch_size=batch_size,
                         W_init=w_init,
                         b_init=b_init,
                         name="conv3")
        conv3 = BatchNormLayer(conv3,
                               act=tf.nn.leaky_relu,
                               is_train=phase_train,
                               gamma_init=gamma_init,
                               name='bn3')

        conv_out = conv2.outputs + conv3.outputs
    return conv_out
Code example #10
File: model_u2_3d_d2.py Project: piccaSun/Research
def discriminator1(inputs, is_train=True, reuse=False):
    df_dim = 32   # Dimension of discrim filters in first conv layer. [64]
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x : tf.nn.leaky_relu(x, 0.2)

    with tf.name_scope("DISCRIMINATOR1"):
        with tf.variable_scope("discriminator", reuse=reuse):
        
            with tf.name_scope("net_in"):
                net_in = InputLayer(inputs, name='d/in')

            with tf.name_scope("layer0"):
                net_h0 = Conv2d(net_in, df_dim, (3, 3), (3, 3), act=lrelu,
                    padding='SAME', W_init=w_init, name='d/h0/conv2d')

            with tf.name_scope("layer1"):
                net_h1 = Conv2d(net_h0, df_dim*2, (3, 3), (3, 3), act=None,
                    padding='SAME', W_init=w_init, name='d/h1/conv2d')
                net_h1 = BatchNormLayer(net_h1, decay=0.9, act=lrelu,
                    is_train=is_train, gamma_init=gamma_init, name='d/h1/batch_norm')

            with tf.name_scope("layer2"):
                net_h2 = Conv2d(net_h1, df_dim*4, (3, 3), (3, 3), act=None,
                    padding='SAME', W_init=w_init, name='d/h2/conv2d')
                net_h2 = BatchNormLayer(net_h2, decay=0.9, act=lrelu,
                    is_train=is_train, gamma_init=gamma_init, name='d/h2/batch_norm')

            with tf.name_scope("layer3"):
                net_h3 = Conv2d(net_h2, df_dim*8, (3, 3), (3, 3), act=None,
                    padding='SAME', W_init=w_init, name='d/h3/conv2d')
                net_h3 = BatchNormLayer(net_h3, decay=0.9, act=lrelu,
                    is_train=is_train, gamma_init=gamma_init, name='d/h3/batch_norm')

            with tf.name_scope("layer4"):
                net_h4 = FlattenLayer(net_h3, name='d/h4/flatten')
                net_h4 = DenseLayer(net_h4, n_units=df_dim*8, act=tf.identity,
                    W_init = w_init, name='d/h4/lin_sigmoid')

            with tf.name_scope("layer5"):
                net_h5 = FlattenLayer(net_h4, name='d/h5/flatten')
                net_h5 = DenseLayer(net_h5, n_units=df_dim*8, act=tf.identity,
                    W_init = w_init, name='d/h5/lin_sigmoid')

            #net_h6 = FlattenLayer(net_h5, name='d/h6/flatten')
            with tf.name_scope("layer6"):
                net_h6 = DenseLayer(net_h5, n_units=2, act=tf.identity,
                    W_init=w_init, name='d/h6/lin_sigmoid')
                logits1 = net_h6.outputs
                net_h6.outputs = tf.nn.softplus(net_h6.outputs)
        return net_h6, logits1
Code example #11
def conv_bn_relu(name,
                 x,
                 output_channel,
                 phase='TRAIN',
                 reg=None,
                 dropout=None,
                 if_bn=True):
    is_train = (phase == 'TRAIN')
    input_channel = int(x.get_shape()[-1])
    assert len(x.get_shape()) == 4
    with tf.variable_scope(name + '_w'):
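        # msra_initializer is assumed to be a project-defined helper (He/MSRA-style initialization); reg is an optional regularizer.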
        w = tf.get_variable('w',
                            [3, 3, input_channel, output_channel], tf.float32,
                            msra_initializer(9.0 * output_channel), reg)
        b = tf.get_variable('b', [output_channel], tf.float32,
                            tf.zeros_initializer())
    with tf.name_scope(name):
        y = tf.nn.bias_add(tf.nn.conv2d(x, w, [1, 1, 1, 1], 'SAME'),
                           b,
                           name='conv')
        if if_bn:
            y = layers.batch_norm(y)
        y = tf.nn.relu(y, 'relu')
        if dropout is not None:
            if phase == 'TRAIN':
                y = tf.nn.dropout(y, 1.0 - dropout, name='dropout')
            else:
                y = tf.multiply(y, dropout, 'dropout')
    return y
Code example #12
 def bn(x, name):
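     # is_train, decay and data_format are assumed to come from the enclosing scope of this helper.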
     return BatchNormLayer(
         x,
         is_train=is_train,
         act=tf.nn.relu,
         decay=decay,
         name=name,
         # https://github.com/tensorlayer/tensorlayer/commit/4e6f768bd2d0c0f27c2385ce7f541b848deb7953
         data_format=data_format)
Code example #13
def batch_norm(layer,
               act=tf.identity,
               is_train=True,
               gamma_init=g_init,
               name='bn'):
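    # g_init is assumed to be a gamma initializer defined in the surrounding scope.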
    return BatchNormLayer(layer,
                          act=act,
                          is_train=is_train,
                          gamma_init=gamma_init,
                          name=name)
Code example #14
File: shufflenetv1.py Project: Waikkii/TL_ShuffleNet
 def shufflenet_unit(self, inputs, n_filter, filter_size, strides, groups, stage, bottleneck_ratio=0.25, name='_shufflenetunit'):
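     # self.group_conv and self.channel_shuffle are assumed to be other methods of this class (grouped 1x1 convolution and channel shuffle).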
     in_channels = inputs.outputs.get_shape()[3]
     #print("input", inputs.outputs.get_shape())
     bottleneck_channels = int(n_filter * bottleneck_ratio)
     if stage == 2:
         x = Conv2d(inputs, n_filter=bottleneck_channels, filter_size=filter_size, strides=(1, 1),
                    padding='SAME', name=name+'_Conv2d1')
         #print("conv", x.outputs.get_shape())
     else:
         x = self.group_conv(inputs, groups, bottleneck_channels, (1, 1), (1, 1), name=name+'_groupconv1')
     x = BatchNormLayer(x, act=tf.nn.leaky_relu, name=name+'_Batch1')
     #print("batch", x.outputs.get_shape())
     x = self.channel_shuffle(x, groups, name=name+'_channelshuffle')
     #print("shuffle", x.outputs.get_shape())
     #x = PadLayer(x, [[0, 0], [4, 4], [4, 4], [0, 0]], "CONSTANT", name=name+'_pad')
     #print("pad", x.outputs.get_shape())
     x = DepthwiseConv2d(x, shape=filter_size, strides=strides, depth_multiplier=1,
                         padding='SAME', name=name+'_DepthwiseConv2d')
     #print("deep", x.outputs.get_shape())
     #x = Conv2d(x, n_filter=in_channels, filter_size=filter_size, strides=(1, 1),padding='SAME', name=name+'_Conv2d2')
     #print("conv", x.outputs.get_shape())
     x = BatchNormLayer(x, name=name+'_Batch2')
     #print("deep_batch", x.outputs.get_shape())
     if strides == (2, 2):
         x = self.group_conv(x, groups, n_filter - in_channels, (1, 1), (1, 1), name=name+'_groupconv2')  # n_filter - in_channels, so concatenation with the pooled input yields n_filter channels
         #print("gonv", x.outputs.get_shape())
         x = BatchNormLayer(x, name=name+'_Batch3')
         #print("batch", x.outputs.get_shape())
         avg = MeanPool2d(inputs, filter_size=(3, 3), strides=(2, 2), padding='SAME', name=name+'_AvePool')
         #print("avg", avg.outputs.get_shape())
         x = ConcatLayer([x, avg], concat_dim=-1, name=name+'_Concat')
         #print("x1out", x.outputs.get_shape())
     else:
         x = self.group_conv(x, groups, n_filter, (1, 1), (1, 1), name=name+'_groupconv3')
         #print("x", x.outputs.get_shape())
         x = BatchNormLayer(x, name=name+'_Batch4')
         if x.outputs.get_shape()[3] != inputs.outputs.get_shape()[3]:
             x = Conv2d(x, n_filter=in_channels, filter_size=filter_size, strides=(1, 1),
                        padding='SAME', name=name+'_Conv2d2')
         x = ElementwiseLayer([x, inputs], combine_fn=tf.add, name=name+'_Elementwise')
     return x
Code example #15
File: network.py Project: vincensChan/TC-GAN
    def generator(self, z, label_class, is_train=True, reuse=False):
        # NOTE: concatenating z & label might be wrong, need to test
        labels_one_hot = tf.one_hot(label_class, self.class_num)
        z_labels = tf.concat([z, labels_one_hot], 1)
        image_size = self.images_size
        s16 = image_size // 16
        gf_dim = 64    # Dimension of gen filters in first conv layer. [64]
        c_dim = self.channel    # n_color 3
        w_init = tf.glorot_normal_initializer()
        gamma_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope("generator", reuse=reuse):
            net_in = InputLayer(z_labels, name='g/in')
            net_h0 = DenseLayer(net_in, n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
                    act = tf.identity, name='g/h0/lin')
            net_h0 = ReshapeLayer(net_h0, shape=[-1, s16, s16, gf_dim*8], name='g/h0/reshape')
            net_h0 = BatchNormLayer(net_h0, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h0/batch_norm')

            net_h1 = DeConv2d(net_h0, gf_dim * 4, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h1/decon2d')
            net_h1 = BatchNormLayer(net_h1, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h1/batch_norm')

            net_h2 = DeConv2d(net_h1, gf_dim * 2, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h2/decon2d')
            net_h2 = BatchNormLayer(net_h2, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h2/batch_norm')

            net_h3 = DeConv2d(net_h2, gf_dim, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h3/decon2d')
            net_h3 = BatchNormLayer(net_h3, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h3/batch_norm')

            net_h4 = DeConv2d(net_h3, c_dim, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h4/decon2d')
            net_h4.outputs = tf.nn.tanh(net_h4.outputs)
        return net_h4
Code example #16
File: network.py Project: vincensChan/TC-GAN
    def discriminator(self, inputs, is_train=True, reuse=False):
        df_dim = image_size = self.images_size   # Dimension of discrim filters in first conv layer. [64]
        w_init = tf.glorot_normal_initializer()
        gamma_init = tf.random_normal_initializer(1., 0.02)
        lrelu = lambda x : tf.nn.leaky_relu(x, 0.2)
        with tf.variable_scope("discriminator", reuse=reuse):
            
            net_in = InputLayer(inputs, name='d/in')
            net_h0 = Conv2d(net_in, df_dim, (5, 5), (2, 2), act=lrelu,
                    padding='SAME', W_init=w_init, name='d/h0/conv2d')

            net_h1 = Conv2d(net_h0, df_dim*2, (5, 5), (2, 2), act=None,
                    padding='SAME', W_init=w_init, name='d/h1/conv2d')
            net_h1 = BatchNormLayer(net_h1, decay=0.9, act=lrelu,
                    is_train=is_train, gamma_init=gamma_init, name='d/h1/batch_norm')

            net_h2 = Conv2d(net_h1, df_dim*4, (5, 5), (2, 2), act=None,
                    padding='SAME', W_init=w_init, name='d/h2/conv2d')
            net_h2 = BatchNormLayer(net_h2, decay=0.9, act=lrelu,
                    is_train=is_train, gamma_init=gamma_init, name='d/h2/batch_norm')

            net_h3 = Conv2d(net_h2, df_dim*8, (5, 5), (2, 2), act=None,
                    padding='SAME', W_init=w_init, name='d/h3/conv2d')
            net_h3 = BatchNormLayer(net_h3, decay=0.9, act=lrelu,
                    is_train=is_train, gamma_init=gamma_init, name='d/h3/batch_norm')

            net_h4 = FlattenLayer(net_h3, name='d/h4/flatten')
            # real or fake binary loss
            net_h4_1 = DenseLayer(net_h4, n_units=1, act=tf.identity,
                    W_init = w_init, name='d/h4_1/lin_sigmoid')
            # category loss
            net_h4_2 = DenseLayer(net_h4, n_units=self.class_num, act=tf.identity,
                    W_init = w_init, name='d/h4_2/lin_sigmoid')
            net_h4_1_logits = net_h4_1.outputs
            net_h4_2_logits = net_h4_2.outputs
            net_h4_1.outputs = tf.nn.sigmoid(net_h4_1.outputs)
            net_h4_2.outputs = tf.nn.sigmoid(net_h4_2.outputs)
        return net_h4_1, net_h4_1_logits, net_h4_2, net_h4_2_logits
Code example #17
def conv_block(n,
               n_filter,
               filter_size=(3, 3),
               strides=(1, 1),
               is_train=False,
               name='conv_block'):
    # ref: https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet.py
    with tf.variable_scope(name):
        n = Conv2d(n, n_filter, filter_size, strides, b_init=None, name='conv')
        n = BatchNormLayer(n,
                           act=tf.nn.relu6,
                           is_train=is_train,
                           name='batchnorm')
    return n
Code example #18
def vgg_net_model_bn(x, y_correct, reuse, is_train):
    """ Batch normalization should be placed before rectifier. """

    w_init = tf.truncated_normal_initializer(stddev=5e-2)
    w_init2 = tf.truncated_normal_initializer(stddev=0.04)
    b_init2 = tf.constant_initializer(value=0.1)

    with tf.variable_scope("vgg_net_model_fn", reuse=reuse):
        input_layer = InputLayer(x, name="input")

        # Conv block 1
        conv_1_1 = Conv2d(input_layer,
                          64, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_1_1')
        conv_1_2 = Conv2d(conv_1_1,
                          64, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_1_2')
        bn_1 = BatchNormLayer(conv_1_2, act=tf.nn.relu, is_train=is_train, name='bn_1')

        # Pooling 1
        pool_1 = MaxPool2d(bn_1, (3, 3), (2, 2), padding='SAME', name='lrn_1')

        # Conv block 2
        conv_2_1 = Conv2d(pool_1,
                          128, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_2_1')
        conv_2_2 = Conv2d(conv_2_1,
                          128, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_2_2')
        bn_2 = BatchNormLayer(conv_2_2, act=tf.nn.relu, is_train=is_train, name='bn_2')

        # Pooling 2
        pool_2 = MaxPool2d(bn_2, (3, 3), (2, 2), padding='SAME', name='pool_2')

        # Conv block 3
        conv_3_1 = Conv2d(pool_2,
                          256, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_3_1')
        conv_3_2 = Conv2d(conv_3_1,
                          256, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_3_2')
        conv_3_3 = Conv2d(conv_3_2,
                          256, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_3_3')
        bn_3 = BatchNormLayer(conv_3_3, act=tf.nn.relu, is_train=is_train, name='bn_3')

        # Pooling 3
        pool_3 = MaxPool2d(bn_3, (3, 3), (2, 2), padding='SAME', name='pool_3')

        # Conv block 4
        conv_4_1 = Conv2d(pool_3,
                          512, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_4_1')
        conv_4_2 = Conv2d(conv_4_1,
                          512, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_4_2')
        conv_4_3 = Conv2d(conv_4_2,
                          512, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_4_3')
        bn_4 = BatchNormLayer(conv_4_3, act=tf.nn.relu, is_train=is_train, name='bn_4')

        # Pooling 4
        pool_4 = MaxPool2d(bn_4, (3, 3), (2, 2), padding='SAME', name='pool_4')

        # Conv block 5
        conv_5_1 = Conv2d(pool_4,
                          512, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_5_1')
        conv_5_2 = Conv2d(conv_5_1,
                          512, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_5_2')
        conv_5_3 = Conv2d(conv_5_2,
                          512, (3, 3), (1, 1),
                          act=tf.nn.relu,
                          padding='SAME',
                          W_init=w_init,
                          name='conv_5_3')
        bn_5 = BatchNormLayer(conv_5_3, act=tf.nn.relu, is_train=is_train, name='bn_5')

        # Pooling 5
        pool_5 = MaxPool2d(bn_5, (3, 3), (2, 2), padding='SAME', name='pool_5')

        # Fully connected layers
        flatten_layer = FlattenLayer(pool_5, name='flatten')

        fc1 = DenseLayer(flatten_layer,
                         4096,
                         act=tf.nn.relu,
                         W_init=w_init2,
                         b_init=b_init2,
                         name='fc1')
        fc2 = DenseLayer(fc1,
                         4096,
                         act=tf.nn.relu,
                         W_init=w_init2,
                         b_init=b_init2,
                         name='fc2')
        fc3 = DenseLayer(fc2,
                         1000,
                         act=tf.nn.relu,
                         W_init=w_init2,
                         b_init=b_init2,
                         name='fc3')

        model = DenseLayer(fc3,
                           CLASSES_NUM,
                           act=None,
                           W_init=w_init2,
                           name='output')

        y_pred = model.outputs

        ce = tl.cost.cross_entropy(y_pred, y_correct, name='_cost')
        # l2 for the MLP, without this, the ACCURACY will be reduced by 15%.
        l2 = 0
        for p in tl.layers.get_variables_with_name('relu/W', True, True):
            l2 += tf.contrib.layers.l2_regularizer(0.004)(p)
        cost = ce + l2

        correct_prediction = tf.equal(tf.argmax(y_pred, 1), y_correct)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return model, cost, accuracy
Code example #19
 def bn(in_layer, name):
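     # train_mode and gamma_init are assumed to come from the enclosing scope of this helper.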
     return BatchNormLayer(in_layer,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=train_mode,
                           gamma_init=gamma_init,
                           name=name)
Code example #20
def generator(input_placeholder, train_mode, image_size, reuse=False):
    s2, s4, s8, s16 = int(image_size / 2), int(image_size / 4), int(
        image_size / 8), int(image_size / 16)
    gf_dim = 32

    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope("decoder", reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        input_layer = InputLayer(input_placeholder, name='dec/input')
        lin_layer = DenseLayer(input_layer,
                               n_units=gf_dim * 8 * s16 * s16,
                               W_init=w_init,
                               act=tf.identity,
                               name='dec/lin')
        # lin_layer.shape = (batch_size,256*4*4)
        resh1_layer = ReshapeLayer(lin_layer,
                                   shape=[-1, s16, s16, gf_dim * 8],
                                   name='decoder/reshape')
        # resh1_layer.shape = (batch_size, 4, 4, 256)
        in_bn_layer = BatchNormLayer(resh1_layer,
                                     act=lambda x: tl.act.lrelu(x, 0.2),
                                     is_train=train_mode,
                                     gamma_init=gamma_init,
                                     name='dec/in_bn')

        # upsampling
        up1_layer = UpSampling2dLayer(in_bn_layer,
                                      size=[s8, s8],
                                      is_scale=False,
                                      method=ResizeMethod.NEAREST_NEIGHBOR,
                                      align_corners=False,
                                      name='dec/up1')
        conv1_layer = Conv2d(up1_layer,
                             gf_dim * 4, (3, 3), (1, 1),
                             padding='SAME',
                             W_init=w_init,
                             name='dec/conv1')
        bn1_layer = BatchNormLayer(conv1_layer,
                                   act=lambda x: tl.act.lrelu(x, 0.2),
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='dec/bn1')
        # bn1_layer.shape = (batch_size,8,8,128)

        up2_layer = UpSampling2dLayer(bn1_layer,
                                      size=[s4, s4],
                                      is_scale=False,
                                      method=ResizeMethod.NEAREST_NEIGHBOR,
                                      align_corners=False,
                                      name='dec/up2')
        conv2_layer = Conv2d(up2_layer,
                             gf_dim * 2, (3, 3), (1, 1),
                             padding='SAME',
                             W_init=w_init,
                             name='dec/conv2')
        bn2_layer = BatchNormLayer(conv2_layer,
                                   act=lambda x: tl.act.lrelu(x, 0.2),
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='dec/bn2')
        # bn2_layer.shape = (batch_size,16,16,64)

        up3_layer = UpSampling2dLayer(bn2_layer,
                                      size=[s2, s2],
                                      is_scale=False,
                                      method=ResizeMethod.NEAREST_NEIGHBOR,
                                      align_corners=False,
                                      name='dec/up3')
        conv3_layer = Conv2d(up3_layer,
                             gf_dim, (3, 3), (1, 1),
                             padding='SAME',
                             W_init=w_init,
                             name='dec/conv3')
        bn3_layer = BatchNormLayer(conv3_layer,
                                   act=lambda x: tl.act.lrelu(x, 0.2),
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='dec/bn3_layer')
        # bn3_layer.shape = (batch_size,32,32,32)

        # no BN on last deconv
        up4_layer = UpSampling2dLayer(bn3_layer,
                                      size=[image_size, image_size],
                                      is_scale=False,
                                      method=ResizeMethod.NEAREST_NEIGHBOR,
                                      align_corners=False,
                                      name='dec/up4')
        conv4_layer = Conv2d(up4_layer,
                             3, (3, 3), (1, 1),
                             padding='SAME',
                             W_init=w_init,
                             name='dec/conv4')
        # conv4_layer.shape = (batch_size,64,64,3)
        logits = conv4_layer.outputs
        conv4_layer.outputs = tf.nn.tanh(conv4_layer.outputs)
    return conv4_layer, logits
Code example #21
def encoder(input_placeholder,
            z_dim,
            train_mode,
            conv_filters_num=32,
            reuse=False):

    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope("encoder", reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        input_layer = InputLayer(input_placeholder, name='enc/input')
        conv1_layer = Conv2d(input_layer,
                             n_filter=conv_filters_num,
                             filter_size=(4, 4),
                             strides=(2, 2),
                             act=None,
                             padding='SAME',
                             name='enc/conv1')

        bn1_layer = BatchNormLayer(conv1_layer,
                                   act=lambda x: tl.act.lrelu(x, 0.02),
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='enc/bn1')

        conv2_layer = Conv2d(bn1_layer,
                             n_filter=2 * conv_filters_num,
                             filter_size=(4, 4),
                             strides=(2, 2),
                             act=None,
                             padding='SAME',
                             name='enc/conv2')
        bn2_layer = BatchNormLayer(conv2_layer,
                                   act=lambda x: tl.act.lrelu(x, 0.02),
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='enc/bn2')

        conv3_layer = Conv2d(bn2_layer,
                             n_filter=4 * conv_filters_num,
                             filter_size=(4, 4),
                             strides=(2, 2),
                             act=None,
                             padding='SAME',
                             name='enc/conv3')
        bn3_layer = BatchNormLayer(conv3_layer,
                                   act=lambda x: tl.act.lrelu(x, 0.02),
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='enc/bn3')

        # mean of Z
        mean_flat_layer = FlattenLayer(bn3_layer, name='enc/mean_flatten')
        mean_out = DenseLayer(mean_flat_layer,
                              n_units=z_dim,
                              act=tf.identity,
                              W_init=w_init,
                              name='enc/mean_out_lin')
        mean_out = BatchNormLayer(mean_out,
                                  act=tf.identity,
                                  is_train=train_mode,
                                  gamma_init=gamma_init,
                                  name='enc/mean_out')

        # covariance of Z
        cov_flat_layer = FlattenLayer(bn3_layer, name='enc/cov_flatten')
        cov_out = DenseLayer(cov_flat_layer,
                             n_units=z_dim,
                             act=tf.identity,
                             W_init=w_init,
                             name='enc/cov_out_lin')
        cov_out = BatchNormLayer(cov_out,
                                 act=tf.identity,
                                 is_train=train_mode,
                                 gamma_init=gamma_init,
                                 name='enc/cov_out')
        z_mean, z_cov = mean_out.outputs, cov_out.outputs + 1e-6

    return mean_out, cov_out, z_mean, z_cov
Code example #22
def discriminator(input_placeholder, train_mode, reuse=False):
    filters_num = 64
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope("discriminator", reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        input_layer = InputLayer(input_placeholder, name="discr/in")

        conv1_layer = Conv2d(input_layer,
                             n_filter=filters_num,
                             filter_size=(5, 5),
                             strides=(2, 2),
                             act=lambda x: tl.act.lrelu(x, 0.2),
                             padding='SAME',
                             name='discr/conv1',
                             W_init=w_init)

        conv2_layer = Conv2d(conv1_layer,
                             n_filter=filters_num * 2,
                             filter_size=(5, 5),
                             strides=(2, 2),
                             act=None,
                             padding='SAME',
                             name='discr/conv2',
                             W_init=w_init)
        bn2_layer = BatchNormLayer(conv2_layer,
                                   act=lambda x: tl.act.lrelu(x, 0.2),
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='discr/bn2')

        conv3_layer = Conv2d(bn2_layer,
                             n_filter=filters_num * 4,
                             filter_size=(5, 5),
                             strides=(2, 2),
                             act=None,
                             padding='SAME',
                             name='discr/conv3',
                             W_init=w_init)
        bn3_layer = BatchNormLayer(conv3_layer,
                                   act=lambda x: tl.act.lrelu(x, 0.2),
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='discr/bn3')

        conv4_layer = Conv2d(bn3_layer,
                             n_filter=filters_num * 8,
                             filter_size=(5, 5),
                             strides=(2, 2),
                             act=None,
                             padding='SAME',
                             name='discr/conv4',
                             W_init=w_init)
        bn4_layer = BatchNormLayer(conv4_layer,
                                   act=lambda x: tl.act.lrelu(x, 0.2),
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='discr/bn4')

        flat_layer = FlattenLayer(bn4_layer, name='discr/flatten')

        out_layer = DenseLayer(flat_layer,
                               n_units=1,
                               W_init=w_init,
                               act=tf.identity,
                               name='discr/out')

        logits = out_layer.outputs

        out_layer.outputs = tf.nn.sigmoid(out_layer.outputs)

    return out_layer, logits
Code example #23
def SRGAN_g(t_image, is_train=False, reuse=False):
    '''
    Build the generator
    '''
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = tf.constant_initializer(value=0.0)
    g_init = tf.random_normal_initializer(1., 0.02)
    with tf.variable_scope("SRGAN_g", reuse=reuse) as vs:

        n = InputLayer(t_image, name='in')
        n = Conv2d(n,
                   64, (3, 3), (1, 1),
                   act=tf.nn.relu,
                   padding='SAME',
                   W_init=w_init,
                   name='n64s1/c')
        temp = n

        # 16 residual blocks
        for i in range(16):
            nn = Conv2d(n,
                        64, (3, 3), (1, 1),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='n64s1/c1/%s' % i)
            nn = BatchNormLayer(nn,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=g_init,
                                name='n64s1/b1/%s' % i)
            nn = Conv2d(nn,
                        64, (3, 3), (1, 1),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='n64s1/c2/%s' % i)
            nn = BatchNormLayer(nn,
                                is_train=is_train,
                                gamma_init=g_init,
                                name='n64s1/b2/%s' % i)
            nn = ElementwiseLayer([n, nn],
                                  tf.add,
                                  name='b_residual_add/%s' % i)
            n = nn

        n = Conv2d(n,
                   64, (3, 3), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   b_init=b_init,
                   name='n64s1/c/m')
        n = BatchNormLayer(n,
                           is_train=is_train,
                           gamma_init=g_init,
                           name='n64s1/b/m')
        n = ElementwiseLayer([n, temp], tf.add, name='add3')
        # 16 residual blocks end

        n = Conv2d(n,
                   256, (3, 3), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   b_init=b_init,
                   name='n256s1/1')
        n = SubpixelConv2d(n,
                           scale=2,
                           n_out_channel=None,
                           act=tf.nn.relu,
                           name='pixelshufflerx2/1')

        # n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, name='n256s1/2')
        # n = SubpixelConv2d(n, scale=2, n_out_channel=None, act=tf.nn.relu, name='pixelshufflerx2/2')

        n = Conv2d(n,
                   3, (1, 1), (1, 1),
                   act=tf.nn.tanh,
                   padding='SAME',
                   W_init=w_init,
                   b_init=b_init,
                   name='out')
        return n
Code example #24
def generator(inputs, is_train=True):
    with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
        net_in = InputLayer(inputs, name='gin')

        gnet_d0 = DenseLayer(net_in,
                             n_units=(16384),
                             act=tf.identity,
                             name='gnet_d0')
        gnet_r0 = ReshapeLayer(gnet_d0, shape=[-1, 4, 4, 1024], name='gnet_r0')
        gnet_b0 = BatchNormLayer(gnet_r0,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='gnet_b0')

        gnet_dc1 = DeConv2d(gnet_b0,
                            256, (8, 8),
                            strides=(2, 2),
                            padding='SAME',
                            act=None,
                            name='gnet_dc1')
        gnet_b1 = BatchNormLayer(gnet_dc1,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='gnet_b1')

        gnet_dc2 = DeConv2d(gnet_b1,
                            128, (8, 8),
                            strides=(2, 2),
                            padding='SAME',
                            act=None,
                            name='gnet_dc2')
        gnet_b2 = BatchNormLayer(gnet_dc2,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='gnet_b2')

        gnet_dc3 = DeConv2d(gnet_b2,
                            64, (8, 8),
                            strides=(2, 2),
                            padding='SAME',
                            act=None,
                            name='gnet_dc3')
        gnet_b3 = BatchNormLayer(gnet_dc3,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='gnet_b3')

        gnet_dc4 = DeConv2d(gnet_b3,
                            3, (8, 8),
                            strides=(2, 2),
                            padding='SAME',
                            act=None,
                            name='net_h4')

        #Based on the paper, we need to provide non-linearity to the generated image
        #TODO: Why?
        gnet_dc4.outputs = tf.nn.tanh(gnet_dc4.outputs)
    return gnet_dc4
Code example #25
def SRGAN_d(input_images, is_train=True, reuse=False):
    '''
    Build the discriminator
    '''
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = tf.constant_initializer(value=0.0)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    df_dim = 64
    lrelu = lambda x: tl.act.lrelu(x, 0.2)
    with tf.variable_scope("SRGAN_d", reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        net_in = InputLayer(input_images, name='input/images')
        net_h0 = Conv2d(net_in,
                        df_dim, (4, 4), (2, 2),
                        act=lrelu,
                        padding='SAME',
                        W_init=w_init,
                        name='h0/c')

        net_h1 = Conv2d(net_h0,
                        df_dim * 2, (4, 4), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='h1/c')
        net_h1 = BatchNormLayer(net_h1,
                                act=lrelu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='h1/bn')
        net_h2 = Conv2d(net_h1,
                        df_dim * 4, (4, 4), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='h2/c')
        net_h2 = BatchNormLayer(net_h2,
                                act=lrelu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='h2/bn')
        net_h3 = Conv2d(net_h2,
                        df_dim * 8, (4, 4), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='h3/c')
        net_h3 = BatchNormLayer(net_h3,
                                act=lrelu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='h3/bn')
        net_h4 = Conv2d(net_h3,
                        df_dim * 16, (4, 4), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='h4/c')
        net_h4 = BatchNormLayer(net_h4,
                                act=lrelu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='h4/bn')
        net_h5 = Conv2d(net_h4,
                        df_dim * 32, (4, 4), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='h5/c')
        net_h5 = BatchNormLayer(net_h5,
                                act=lrelu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='h5/bn')
        net_h6 = Conv2d(net_h5,
                        df_dim * 16, (1, 1), (1, 1),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='h6/c')
        net_h6 = BatchNormLayer(net_h6,
                                act=lrelu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='h6/bn')
        net_h7 = Conv2d(net_h6,
                        df_dim * 8, (1, 1), (1, 1),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='h7/c')
        net_h7 = BatchNormLayer(net_h7,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='h7/bn')

        net = Conv2d(net_h7,
                     df_dim * 2, (1, 1), (1, 1),
                     act=None,
                     padding='SAME',
                     W_init=w_init,
                     b_init=b_init,
                     name='res/c')
        net = BatchNormLayer(net,
                             act=lrelu,
                             is_train=is_train,
                             gamma_init=gamma_init,
                             name='res/bn')
        net = Conv2d(net,
                     df_dim * 2, (3, 3), (1, 1),
                     act=None,
                     padding='SAME',
                     W_init=w_init,
                     b_init=b_init,
                     name='res/c2')
        net = BatchNormLayer(net,
                             act=lrelu,
                             is_train=is_train,
                             gamma_init=gamma_init,
                             name='res/bn2')
        net = Conv2d(net,
                     df_dim * 8, (3, 3), (1, 1),
                     act=None,
                     padding='SAME',
                     W_init=w_init,
                     b_init=b_init,
                     name='res/c3')
        net = BatchNormLayer(net,
                             is_train=is_train,
                             gamma_init=gamma_init,
                             name='res/bn3')
        net_h8 = ElementwiseLayer([net_h7, net],
                                  combine_fn=tf.add,
                                  name='res/add')
        net_h8.outputs = tl.act.lrelu(net_h8.outputs, 0.2)

        net_ho = FlattenLayer(net_h8, name='ho/flatten')
        net_ho = DenseLayer(net_ho,
                            n_units=1,
                            act=tf.identity,
                            W_init=w_init,
                            name='ho/dense')
        logits = net_ho.outputs
        net_ho.outputs = tf.nn.sigmoid(net_ho.outputs)

    return net_ho, logits
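The (net_ho, logits) pair returned above is what GAN losses are normally built from: the raw logits feed sigmoid cross-entropy, while the sigmoid outputs are mainly useful for monitoring. A minimal wiring sketch, assuming the enclosing function is named `discriminator` with the same (inputs, is_train, reuse) signature as the other discriminators in this collection; the placeholder names and the 64x64 shape are illustrative only:

import tensorflow as tf

# Hypothetical input batches; in practice fake_images would be a generator output.
real_images = tf.placeholder(tf.float32, [None, 64, 64, 3], name='real_images')
fake_images = tf.placeholder(tf.float32, [None, 64, 64, 3], name='fake_images')

_, logits_real = discriminator(real_images, is_train=True, reuse=False)
_, logits_fake = discriminator(fake_images, is_train=True, reuse=True)

# Discriminator: real -> 1, fake -> 0. Generator: fake -> 1 (non-saturating loss).
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=logits_real, labels=tf.ones_like(logits_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=logits_fake, labels=tf.zeros_like(logits_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=logits_fake, labels=tf.ones_like(logits_fake)))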
Code example #26
File: model.py  Project: GAN-Challenger/CartoonGAN_TF
def generator(input, is_train=False, reuse=False):
    """
    Cartoon GAN generator neural network
    :param input: TF Tensor
        input tensor
    :param is_train: boolean
        train or test flag
    :param reuse: boolean
        whether to reuse the neural network
    :return:
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = None
    gamma_init = tf.random_normal_initializer(1.0, stddev=0.02)

    with tf.variable_scope('CartoonGAN_G', reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        n = InputLayer(input, name='g_input')
        n = Conv2d(n,
                   64, (7, 7), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   name='k7n64s1/c')
        n = BatchNormLayer(n,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='k7n64s1/b_r')

        with tf.variable_scope('down_conv'):
            n = Conv2d(n,
                       128, (3, 3), (2, 2),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n128s2/c1')
            n = Conv2d(n,
                       128, (3, 3), (1, 1),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n128s1/c2')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='k3n128/b_r')

            n = Conv2d(n,
                       256, (3, 3), (2, 2),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n256s2/c1')
            n = Conv2d(n,
                       256, (3, 3), (1, 1),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n256s1/cc')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='k3n256/b_r')

        with tf.variable_scope('residual_blocks'):
            for i in range(8):
                nn = Conv2d(n,
                            256, (3, 3), (1, 1),
                            act=None,
                            padding='SAME',
                            W_init=w_init,
                            b_init=b_init,
                            name='k3n256s1/c1/%s' % i)
                nn = BatchNormLayer(nn,
                                    act=tf.nn.relu,
                                    is_train=is_train,
                                    gamma_init=gamma_init,
                                    name='k3n256s1/b1/%s' % i)
                nn = Conv2d(nn,
                            256, (3, 3), (1, 1),
                            act=None,
                            padding='SAME',
                            W_init=w_init,
                            b_init=b_init,
                            name='k3n256s1/c2/%s' % i)
                nn = BatchNormLayer(nn,
                                    is_train=is_train,
                                    gamma_init=gamma_init,
                                    name='k3n256s1/b2/%s' % i)
                nn = ElementwiseLayer([n, nn],
                                      tf.add,
                                      name='b_residual_add/%s' % i)
                n = nn

        with tf.variable_scope('up_conv'):
            n = DeConv2d(n,
                         n_filter=128,
                         filter_size=(3, 3),
                         out_size=(128, 128),
                         strides=(2, 2),
                         padding='SAME',
                         W_init=w_init,
                         name='k3n128s05/c1')
            n = Conv2d(n,
                       128, (3, 3), (1, 1),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n128s1/c2')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='k3n128/b_r')

            n = DeConv2d(n,
                         n_filter=64,
                         filter_size=(3, 3),
                         out_size=(256, 256),
                         strides=(2, 2),
                         padding='SAME',
                         W_init=w_init,
                         name='k3n64s05/c1')
            n = Conv2d(n,
                       64, (3, 3), (1, 1),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n64s1/c2')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='k3n64/b_r')

        n = Conv2d(n,
                   3, (7, 7), (1, 1),
                   act=tf.nn.tanh,
                   padding='SAME',
                   W_init=w_init,
                   name='g_output')

    return n
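A minimal usage sketch for the generator above. The hard-coded DeConv2d out_size values (128, then 256) appear to assume 256x256 inputs, so the placeholder below uses that size; all names are illustrative:

import tensorflow as tf

photo = tf.placeholder(tf.float32, [None, 256, 256, 3], name='photo_input')
net_g_train = generator(photo, is_train=True, reuse=False)   # training graph
net_g_test = generator(photo, is_train=False, reuse=True)    # weight-sharing copy for inference
cartoon = net_g_test.outputs                                 # tanh output in [-1, 1]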
Code example #27
File: model.py  Project: GAN-Challenger/CartoonGAN_TF
def discriminator(input, is_train=False, reuse=False):
    """
    Cartoon GAN discriminator neural network
    :param input: TF Tensor
        input tensor
    :param is_train: boolean
        train or test flag
    :param reuse: boolean
        whether to reuse the discriminator neural network
    :return:
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1.0, stddev=0.02)
    leaky_relu = lambda x: tl.act.lrelu(x, 0.2)

    with tf.variable_scope('CartoonGAN_D', reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        n = InputLayer(input, name='d_input')
        n = Conv2d(n,
                   32, (3, 3), (1, 1),
                   act=leaky_relu,
                   padding='SAME',
                   W_init=w_init,
                   name='block1/c')

        n = Conv2d(n,
                   64, (3, 3), (2, 2),
                   act=leaky_relu,
                   padding='SAME',
                   W_init=w_init,
                   name='block2/c1')
        n = Conv2d(n,
                   128, (3, 3), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   name='block2/c2')
        n = BatchNormLayer(n,
                           act=leaky_relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='block2/b')

        n = Conv2d(n,
                   128, (3, 3), (2, 2),
                   act=leaky_relu,
                   padding='SAME',
                   W_init=w_init,
                   name='block3/c1')
        n = Conv2d(n,
                   256, (3, 3), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   name='block3/c2')
        n = BatchNormLayer(n,
                           act=leaky_relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='block3/b')

        n = Conv2d(n,
                   256, (3, 3), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   name='block4/c')
        n = BatchNormLayer(n,
                           act=leaky_relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='block4/b')

        n = Conv2d(n,
                   1, (3, 3), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   name='d_output')
        # Flatten and project to a single real/fake score; distinct layer names
        # avoid a name clash with the 'd_output' conv above.
        n = FlattenLayer(n, name='d_flatten')
        n = DenseLayer(n, n_units=1, W_init=w_init, name='d_dense')
        logits = n.outputs
        n.outputs = tf.nn.sigmoid(n.outputs)

    return n, logits, n.outputs
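Because the generator and discriminator above live in the 'CartoonGAN_G' and 'CartoonGAN_D' variable scopes, their parameters can be split into separate optimizer updates by scope name. A minimal sketch, assuming g_loss and d_loss are scalar loss tensors already built from the returned logits (for example as in the loss sketch earlier); the learning rate and beta1 values are illustrative:

import tensorflow as tf
import tensorlayer as tl

# g_loss and d_loss are assumed to exist (scalar GAN losses built from the logits).
g_vars = tl.layers.get_variables_with_name('CartoonGAN_G', train_only=True, printable=False)
d_vars = tl.layers.get_variables_with_name('CartoonGAN_D', train_only=True, printable=False)

# One optimizer per network so each update touches only its own scope's variables.
g_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(g_loss, var_list=g_vars)
d_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(d_loss, var_list=d_vars)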
Code example #28
def model(x,
          n_pos,
          mask_miss1,
          mask_miss2,
          is_train=False,
          train_bn=False,
          reuse=None,
          data_format='channels_last'):  # hao25
    def depthwise_conv_block(n,
                             n_filter,
                             filter_size=(3, 3),
                             strides=(1, 1),
                             name="depth_block"):
        with tf.variable_scope(name):
            n = DepthwiseConv2d(n,
                                filter_size,
                                strides,
                                W_init=W_init,
                                b_init=None,
                                name='depthwise')
            n = BatchNormLayer(n,
                               decay=decay,
                               act=tf.nn.relu6,
                               is_train=train_bn,
                               name='batchnorm1')
            n = Conv2d(n,
                       n_filter, (1, 1), (1, 1),
                       W_init=W_init,
                       b_init=None,
                       name='conv')
            n = BatchNormLayer(n,
                               decay=decay,
                               act=tf.nn.relu6,
                               is_train=train_bn,
                               name='batchnorm2')
        return n

    def stage(cnn,
              b1,
              b2,
              n_pos,
              maskInput1,
              maskInput2,
              is_train,
              name='stageX'):
        """Define the archuecture of stage 2 to 6."""
        with tf.variable_scope(name):
            net = ConcatLayer([cnn, b1, b2], -1, name='concat')
            with tf.variable_scope("branch1"):
                b1 = depthwise_conv_block(net,
                                          128,
                                          filter_size=(7, 7),
                                          name="c1")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(7, 7),
                                          name="c2")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(7, 7),
                                          name="c3")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(7, 7),
                                          name="c4")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(7, 7),
                                          name="c5")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(1, 1),
                                          name="c6")
                b1 = Conv2d(b1,
                            n_pos, (1, 1), (1, 1),
                            None,
                            'VALID',
                            W_init=W_init,
                            b_init=b_init2,
                            name='conf')
                if is_train:
                    b1.outputs = b1.outputs * maskInput1
            with tf.variable_scope("branch2"):
                b2 = depthwise_conv_block(net,
                                          128,
                                          filter_size=(7, 7),
                                          name="c1")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(7, 7),
                                          name="c2")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(7, 7),
                                          name="c3")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(7, 7),
                                          name="c4")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(7, 7),
                                          name="c5")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(1, 1),
                                          name="c6")
                b2 = Conv2d(b2,
                            38, (1, 1), (1, 1),
                            None,
                            'VALID',
                            W_init=W_init,
                            b_init=b_init2,
                            name='pafs')
                if is_train:
                    b2.outputs = b2.outputs * maskInput2
        return b1, b2

    if data_format != 'channels_last':
        # TODO: support NCHW
        print('data_format=%s is ignored' % data_format)

    b1_list = []
    b2_list = []
    with tf.variable_scope('model', reuse=reuse):
        x = x - 0.5
        n = InputLayer(x, name='in')
        n = Conv2d(n,
                   32, (3, 3), (1, 1),
                   None,
                   'SAME',
                   W_init=W_init,
                   b_init=b_init,
                   name='conv1_1')
        n = BatchNormLayer(n,
                           decay=decay,
                           is_train=train_bn,
                           act=tf.nn.relu,
                           name='bn1')
        n = depthwise_conv_block(n, 64, name="conv1_depth1")

        n = depthwise_conv_block(n, 128, strides=(2, 2), name="conv2_depth1")
        n = depthwise_conv_block(n, 128, name="conv2_depth2")
        n1 = n

        n = depthwise_conv_block(n, 256, strides=(2, 2), name="conv3_depth1")
        n = depthwise_conv_block(n, 256, name="conv3_depth2")
        n2 = n

        n = depthwise_conv_block(n, 512, strides=(2, 2), name="conv4_depth1")
        n = depthwise_conv_block(n, 512, name="conv4_depth2")
        n = depthwise_conv_block(n, 512, name="conv4_depth3")
        n = depthwise_conv_block(n, 512, name="conv4_depth4")
        cnn = depthwise_conv_block(n, 512, name="conv4_depth5")

        ## low-level features
        # n1 = MaxPool2d(n1, (2, 2), (2, 2), 'same', name='maxpool2d')
        n1 = depthwise_conv_block(n1, 128, strides=(2, 2), name="n1_down1")
        n1 = depthwise_conv_block(n1, 128, strides=(2, 2), name="n1_down2")
        ## mid-level features
        n2 = depthwise_conv_block(n2, 256, strides=(2, 2), name="n2_down1")
        ## combine features
        cnn = ConcatLayer([cnn, n1, n2], -1, name='cancat')

        ## stage1
        with tf.variable_scope("stage1/branch1"):
            b1 = depthwise_conv_block(cnn, 128, filter_size=(7, 7), name="c1")
            b1 = depthwise_conv_block(b1, 128, filter_size=(7, 7), name="c2")
            b1 = depthwise_conv_block(b1, 128, filter_size=(7, 7), name="c3")
            b1 = depthwise_conv_block(b1, 512, filter_size=(1, 1), name="c4")
            b1 = Conv2d(b1,
                        n_pos, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init,
                        name='confs')

            if is_train:
                b1.outputs = b1.outputs * mask_miss1
        with tf.variable_scope("stage1/branch2"):
            b2 = depthwise_conv_block(cnn, 128, filter_size=(7, 7), name="c1")
            b2 = depthwise_conv_block(b2, 128, filter_size=(7, 7), name="c2")
            b2 = depthwise_conv_block(b2, 128, filter_size=(7, 7), name="c3")
            b2 = depthwise_conv_block(b2, 512, filter_size=(1, 1), name="c4")
            b2 = Conv2d(b2,
                        38, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init2,
                        name='pafs')
            if is_train:
                b2.outputs = b2.outputs * mask_miss2
            b1_list.append(b1)
            b2_list.append(b2)

        ## other stages
        # for i in range(2, 7): # [2, 3, 4, 5, 6]
        # for i in [5, 6]:
        for i in [3, 4, 5, 6]:
            b1, b2 = stage(cnn,
                           b1_list[-1],
                           b2_list[-1],
                           n_pos,
                           mask_miss1,
                           mask_miss2,
                           is_train,
                           name='stage%d' % i)
            b1_list.append(b1)
            b2_list.append(b2)
        net = tl.layers.merge_networks([b1_list[-1], b2_list[-1]])
    return cnn, b1_list, b2_list, net
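The function above returns the per-stage branch outputs (b1_list for confidence maps, b2_list for part affinity fields) so that every stage can be supervised. A minimal training-loss sketch; the 368x368 input, 46x46 output grid, n_pos=19 and the mask/ground-truth shapes are illustrative assumptions, and the exact loss used with this model may differ:

import tensorflow as tf

n_pos = 19
x = tf.placeholder(tf.float32, [None, 368, 368, 3], name='image')
mask1 = tf.placeholder(tf.float32, [None, 46, 46, n_pos], name='mask_miss1')
mask2 = tf.placeholder(tf.float32, [None, 46, 46, 38], name='mask_miss2')
confs_gt = tf.placeholder(tf.float32, [None, 46, 46, n_pos], name='confs_gt')
pafs_gt = tf.placeholder(tf.float32, [None, 46, 46, 38], name='pafs_gt')

cnn, b1_list, b2_list, net = model(x, n_pos, mask1, mask2,
                                   is_train=True, train_bn=True)

# Sum an L2 loss over every stage's confidence-map and PAF branch.
stage_losses = []
for b1, b2 in zip(b1_list, b2_list):
    stage_losses.append(tf.nn.l2_loss(b1.outputs - confs_gt))
    stage_losses.append(tf.nn.l2_loss(b2.outputs - pafs_gt))
total_loss = tf.add_n(stage_losses)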
Code example #29
def generator(input_placeholder,
              train_mode,
              image_size,
              batch_size,
              reuse=False,
              filters_num=128):

    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    s2, s4, s8, s16 = int(image_size / 2), int(image_size / 4), int(
        image_size / 8), int(image_size / 16)

    with tf.variable_scope("generator", reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        input_layer = InputLayer(input_placeholder, name='gen/in')
        lin_layer = DenseLayer(input_layer,
                               n_units=filters_num * 8 * s16 * s16,
                               W_init=w_init,
                               act=tf.identity,
                               name='gen/lin')

        resh1_layer = ReshapeLayer(lin_layer,
                                   shape=[-1, s16, s16, filters_num * 8],
                                   name='gen/reshape')

        in_bn_layer = BatchNormLayer(resh1_layer,
                                     act=tf.nn.relu,
                                     is_train=train_mode,
                                     gamma_init=gamma_init,
                                     name='dec/in_bn')
        # in_bn_layer.shape = (batch_size, 4, 4, 1024)
        up1_layer = DeConv2d(in_bn_layer,
                             filters_num * 4, (5, 5),
                             out_size=(s8, s8),
                             strides=(2, 2),
                             padding='SAME',
                             batch_size=batch_size,
                             act=None,
                             W_init=w_init,
                             name='gen/up1')

        bn1_layer = BatchNormLayer(up1_layer,
                                   act=tf.nn.relu,
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='dec/bn1')

        # bn1_layer.shape = (batch_size, 8, 8, 512)
        up2_layer = DeConv2d(bn1_layer,
                             filters_num * 2, (5, 5),
                             out_size=(s4, s4),
                             strides=(2, 2),
                             padding='SAME',
                             batch_size=batch_size,
                             act=None,
                             W_init=w_init,
                             name='gen/up2')
        bn2_layer = BatchNormLayer(up2_layer,
                                   act=tf.nn.relu,
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='dec/bn2')
        # bn2_layer.shape = (batch_size, 16, 16, 256)

        up3_layer = DeConv2d(bn2_layer,
                             filters_num, (5, 5),
                             out_size=(s2, s2),
                             strides=(2, 2),
                             padding='SAME',
                             batch_size=batch_size,
                             act=None,
                             W_init=w_init,
                             name='gen/up3')
        bn3_layer = BatchNormLayer(up3_layer,
                                   act=tf.nn.relu,
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='dec/bn3')
        # bn3_layer.shape = (batch_size, 32, 32, 128)
        up4_layer = DeConv2d(bn3_layer,
                             3, (5, 5),
                             out_size=(image_size, image_size),
                             strides=(2, 2),
                             padding='SAME',
                             batch_size=batch_size,
                             act=None,
                             W_init=w_init,
                             name='gen/up4')

        up4_layer.outputs = tf.nn.tanh(up4_layer.outputs)

    return up4_layer, up4_layer.outputs
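A minimal usage sketch for the generator above. Because of the four stride-2 deconvolutions, image_size must be divisible by 16; the z_dim, image_size and batch_size values below are illustrative:

import tensorflow as tf

z_dim, image_size, batch_size = 100, 64, 32
z = tf.placeholder(tf.float32, [batch_size, z_dim], name='z')
net_g, fake_images = generator(z, train_mode=True, image_size=image_size,
                               batch_size=batch_size, reuse=False)
# fake_images: (batch_size, image_size, image_size, 3), tanh-scaled to [-1, 1]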
Code example #30
def model(x,
          n_pos,
          is_train=False,
          reuse=None,
          data_format='channels_last'):  # hao25
    if data_format != 'channels_last':
        # TODO: support NCHW
        print('data_format=%s is ignored' % data_format)

    b1_list = []
    b2_list = []
    with tf.variable_scope('model', reuse=reuse):
        x = x - 0.5
        n = InputLayer(x, name='in')
        n = Conv2d(n,
                   32, (3, 3), (1, 1),
                   None,
                   'SAME',
                   W_init=W_init,
                   b_init=b_init,
                   name='conv1_1')
        n = BatchNormLayer(n,
                           decay=decay,
                           is_train=is_train,
                           act=tf.nn.relu,
                           name='bn1')
        n = depthwise_conv_block(n, 64, is_train=is_train, name="conv1_depth1")

        n = depthwise_conv_block(n,
                                 128,
                                 strides=(2, 2),
                                 is_train=is_train,
                                 name="conv2_depth1")
        n = depthwise_conv_block(n,
                                 128,
                                 is_train=is_train,
                                 name="conv2_depth2")
        n1 = n

        n = depthwise_conv_block(n,
                                 256,
                                 strides=(2, 2),
                                 is_train=is_train,
                                 name="conv3_depth1")
        n = depthwise_conv_block(n,
                                 256,
                                 is_train=is_train,
                                 name="conv3_depth2")
        n2 = n

        n = depthwise_conv_block(n,
                                 512,
                                 strides=(2, 2),
                                 is_train=is_train,
                                 name="conv4_depth1")
        n = depthwise_conv_block(n,
                                 512,
                                 is_train=is_train,
                                 name="conv4_depth2")
        n = depthwise_conv_block(n,
                                 512,
                                 is_train=is_train,
                                 name="conv4_depth3")
        n = depthwise_conv_block(n,
                                 512,
                                 is_train=is_train,
                                 name="conv4_depth4")
        cnn = depthwise_conv_block(n,
                                   512,
                                   is_train=is_train,
                                   name="conv4_depth5")

        ## low-level features
        # n1 = MaxPool2d(n1, (2, 2), (2, 2), 'same', name='maxpool2d')
        n1 = depthwise_conv_block(n1,
                                  128,
                                  strides=(2, 2),
                                  is_train=is_train,
                                  name="n1_down1")
        n1 = depthwise_conv_block(n1,
                                  128,
                                  strides=(2, 2),
                                  is_train=is_train,
                                  name="n1_down2")
        ## mid-level features
        n2 = depthwise_conv_block(n2,
                                  256,
                                  strides=(2, 2),
                                  is_train=is_train,
                                  name="n2_down1")
        ## combine features
        cnn = ConcatLayer([cnn, n1, n2], -1, name='cancat')

        ## stage1
        with tf.variable_scope("stage1/branch1"):
            b1 = depthwise_conv_block(cnn,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c1")
            b1 = depthwise_conv_block(b1,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c2")
            b1 = depthwise_conv_block(b1,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c3")
            b1 = depthwise_conv_block(b1,
                                      512,
                                      filter_size=(1, 1),
                                      is_train=is_train,
                                      name="c4")
            b1 = Conv2d(b1,
                        n_pos, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init,
                        name='confs')

        with tf.variable_scope("stage1/branch2"):
            b2 = depthwise_conv_block(cnn,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c1")
            b2 = depthwise_conv_block(b2,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c2")
            b2 = depthwise_conv_block(b2,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c3")
            b2 = depthwise_conv_block(b2,
                                      512,
                                      filter_size=(1, 1),
                                      is_train=is_train,
                                      name="c4")
            b2 = Conv2d(b2,
                        38, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init2,
                        name='pafs')

            b1_list.append(b1)
            b2_list.append(b2)

        ## other stages
        # for i in range(2, 7): # [2, 3, 4, 5, 6]
        # for i in [5, 6]:
        for i in [3, 4, 5, 6]:
            b1, b2 = stage(cnn,
                           b1_list[-1],
                           b2_list[-1],
                           n_pos,
                           is_train,
                           name='stage%d' % i)
            b1_list.append(b1)
            b2_list.append(b2)
        net = tl.layers.merge_networks([b1_list[-1], b2_list[-1]])
    return cnn, b1_list, b2_list, net
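An inference-time sketch for the model above; the 368x368 input size and n_pos=19 are illustrative assumptions:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 368, 368, 3], name='image')
cnn, b1_list, b2_list, net = model(x, n_pos=19, is_train=False)
conf_maps = b1_list[-1].outputs   # final-stage key-point confidence maps
pafs = b2_list[-1].outputs        # final-stage part affinity fields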