Example 1
def get_G_cmnist(shape_z):
    # input: latent vector z of shape (100,)
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    nz = Input(shape_z)
    n = Dense(n_units=3136, act=tf.nn.relu, W_init=w_init)(nz)
    n = Reshape(shape=[-1, 14, 14, 16])(n)
    n = DeConv2d(64, (5, 5), strides=(2, 2), W_init=w_init,
                 b_init=None)(n)  # (1, 28, 28, 64)
    n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(32, (5, 5),
                 strides=(1, 1),
                 padding="VALID",
                 W_init=w_init,
                 b_init=None)(n)  # (1, 32, 32, 32)
    n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(flags.c_dim, (5, 5),
                 strides=(2, 2),
                 act=tf.nn.tanh,
                 W_init=w_init)(n)  # (1, 64, 64, 3)
    return tl.models.Model(inputs=nz, outputs=n, name='generator_CMNIST')
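A minimal usage sketch, assuming TensorLayer 2.x and that `flags.c_dim` is defined (e.g. 3); the batch and latent sizes are illustrative:

G = get_G_cmnist([None, 100])
G.eval()                        # put BatchNorm layers in inference mode
z = tf.random.normal([8, 100])
fake = G(z)                     # (8, 64, 64, flags.c_dim), values in [-1, 1]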
Example 2
def get_G(shape_z,
          gf_dim=64):  # Dimension of gen filters in first conv layer. [64]

    image_size = 32
    s16 = image_size // 16
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape_z)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
               b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(
        nn)  # [-1, 2, 2, gf_dim * 8]
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 4, 4, gf_dim * 4]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 8, 8, gf_dim * 2]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 16, 16, gf_dim]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh,
                  W_init=w_init)(nn)  # [-1, 32, 32, 3]

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example 3
def get_generator(
        shape,
        gf_dim=64,  # Dimension of gen filters in first conv layer. [64]
        o_size=32,
        o_channel=3):
    image_size = o_size
    s4 = image_size // 4
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    print(ni.shape)
    nn = Dense(n_units=(gf_dim * 4 * s4 * s4))(ni)
    print(nn.shape)
    nn = Reshape(shape=(-1, s4, s4, gf_dim * 4))(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (1, 1))(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2))(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu)(nn)
    nn = DeConv2d(o_channel, (5, 5), (2, 2), act=tf.nn.tanh)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example 4
    def generator(self, z, label_class, is_train=True, reuse=False):
        # NOTE: concatenating z & the label this way might be wrong; needs testing
        labels_one_hot = tf.one_hot(label_class, self.class_num)
        z_labels = tf.concat([z, labels_one_hot], 1)
        image_size = self.images_size
        s16 = image_size // 16
        gf_dim = 64    # Dimension of gen filters in first conv layer. [64]
        c_dim = self.channel    # n_color 3
        w_init = tf.glorot_normal_initializer()
        gamma_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope("generator", reuse=reuse):
            net_in = InputLayer(z_labels, name='g/in')
            net_h0 = DenseLayer(net_in, n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
                    act = tf.identity, name='g/h0/lin')
            net_h0 = ReshapeLayer(net_h0, shape=[-1, s16, s16, gf_dim*8], name='g/h0/reshape')
            net_h0 = BatchNormLayer(net_h0, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h0/batch_norm')

            net_h1 = DeConv2d(net_h0, gf_dim * 4, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h1/decon2d')
            net_h1 = BatchNormLayer(net_h1, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h1/batch_norm')

            net_h2 = DeConv2d(net_h1, gf_dim * 2, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h2/decon2d')
            net_h2 = BatchNormLayer(net_h2, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h2/batch_norm')

            net_h3 = DeConv2d(net_h2, gf_dim, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h3/decon2d')
            net_h3 = BatchNormLayer(net_h3, decay=0.9, act=tf.nn.relu, is_train=is_train,
                    gamma_init=gamma_init, name='g/h3/batch_norm')

            net_h4 = DeConv2d(net_h3, c_dim, (5, 5), strides=(2, 2),
                    padding='SAME', act=None, W_init=w_init, name='g/h4/decon2d')
            net_h4.outputs = tf.nn.tanh(net_h4.outputs)
        return net_h4
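A TF1-style usage sketch for this conditional generator (hypothetical names; assumes `model` is an instance of the surrounding class with `class_num`, `images_size`, and `channel` configured):

z = tf.placeholder(tf.float32, [None, 100], name='z')
labels = tf.placeholder(tf.int32, [None], name='labels')
net_g = model.generator(z, labels, is_train=True, reuse=False)
fake_images = net_g.outputs  # tanh outputs in [-1, 1]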
Example 5
def get_generator(
        shape,
        gf_dim=128):  # Dimension of gen filters in first conv layer.
    image_size = FLAGS.output_size
    s16 = image_size // 16
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
               b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example 6
def get_generator(shape, gf_dim=64):
    image_size = 256
    s16 = image_size // 16
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 16 * s16 * s16), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 16])(nn)
    # NOTE: each residual add below sums a stride-2 deconv output with its
    # un-upsampled input, so the spatial shapes do not match (and channel
    # counts also differ after the first add); as written these adds will
    # fail a shape check.
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 16, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 8, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 8, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    # Non-local (self-attention) block; the original author flagged possible
    # bugs here. See the corrected sketch after this example.
    f = Conv2d(gf_dim // 4, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    f = MaxPool2d((2, 2), (2, 2))(f)
    g = Conv2d(gf_dim // 4, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    h = Conv2d(gf_dim, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    h = MaxPool2d((2, 2), (2, 2))(h)
    # NOTE: these reshapes index shape[1] (height) where the channel count
    # (shape[-1]) is intended.
    s = tf.matmul(Reshape(shape=[g.shape[0], -1, g.shape[1]])(g),
                  Reshape(shape=[f.shape[0], -1, f.shape[1]])(f),
                  transpose_b=True)
    beta = tf.nn.softmax(s)
    o = tf.matmul(beta, Reshape(shape=[h.shape[0], -1, h.shape[1]])(h))
    o = Reshape(shape=[nn.shape[0], nn.shape[1], nn.shape[2], gf_dim])(o)
    o = Conv2d(gf_dim * 2, (1, 1), (1, 1), W_init=w_init, b_init=None)(o)
    # BUG: gamma_init is an initializer object, not a tensor; SAGAN uses a
    # trainable scalar gamma (initialised to 0) to scale the attention map.
    nn = nn + gamma_init * o
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = Conv2d(3, (3, 3), (1, 1), act=tf.nn.tanh, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
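The non-local block above approximates SAGAN self-attention but confuses tensor axes and scales by an initializer object. A minimal corrected sketch in plain TensorFlow 2, assuming the standard SAGAN formulation (the class name, the `channels // 8` key width, and dropping the max-pooled keys/values are my choices, not the original code):

import tensorflow as tf

class SelfAttention(tf.keras.layers.Layer):
    """SAGAN-style self-attention: y = x + gamma * attend(x).
    Assumes x has exactly `channels` channels so the residual add is valid."""

    def __init__(self, channels):
        super().__init__()
        self.f = tf.keras.layers.Conv2D(channels // 8, 1)  # queries
        self.g = tf.keras.layers.Conv2D(channels // 8, 1)  # keys
        self.h = tf.keras.layers.Conv2D(channels, 1)       # values
        self.gamma = tf.Variable(0.0, trainable=True)      # learned scalar, starts at 0

    def call(self, x):
        shape = tf.shape(x)
        b, hw = shape[0], shape[1] * shape[2]
        f = tf.reshape(self.f(x), [b, hw, -1])             # (B, HW, C//8)
        g = tf.reshape(self.g(x), [b, hw, -1])             # (B, HW, C//8)
        h = tf.reshape(self.h(x), [b, hw, -1])             # (B, HW, C)
        beta = tf.nn.softmax(tf.matmul(f, g, transpose_b=True))  # (B, HW, HW)
        o = tf.reshape(tf.matmul(beta, h), shape)          # back to (B, H, W, C)
        return x + self.gamma * o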
Example 7
def u_net_2d_64_2048_deconv(x, n_out=2):
    from tensorlayer.layers import InputLayer, Conv2d, MaxPool2d, DeConv2d, ConcatLayer
    # batch_size = int(x.get_shape()[0])
    nx = int(x.get_shape()[1])
    ny = int(x.get_shape()[2])
    nz = int(x.get_shape()[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))

    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    inputs = InputLayer(x, name='inputs')

    conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')

    conv2 = Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')

    conv3 = Conv2d(pool2, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_2')
    pool3 = MaxPool2d(conv3, (2, 2), padding='SAME', name='pool3')

    conv4 = Conv2d(pool3, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_2')
    pool4 = MaxPool2d(conv4, (2, 2), padding='SAME', name='pool4')

    conv5 = Conv2d(pool4, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_1')
    conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_2')
    pool5 = MaxPool2d(conv5, (2, 2), padding='SAME', name='pool5')

    conv6 = Conv2d(pool5, 2048, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_1')
    conv6 = Conv2d(conv6, 2048, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_2')

    print(" * After conv: %s" % conv6.outputs)

    up5 = DeConv2d(conv6, 1024, (3, 3), out_size=(nx // 16, ny // 16), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv5')
    up5 = ConcatLayer([up5, conv5], concat_dim=3, name='concat5')
    conv5 = Conv2d(up5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv5_1')
    conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv5_2')

    up4 = DeConv2d(conv5, 512, (3, 3), out_size=(nx // 8, ny // 8), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv4')
    up4 = ConcatLayer([up4, conv4], concat_dim=3, name='concat4')
    conv4 = Conv2d(up4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_2')

    up3 = DeConv2d(conv4, 256, (3, 3), out_size=(nx // 4, ny // 4), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv3')
    up3 = ConcatLayer([up3, conv3], concat_dim=3, name='concat3')
    conv3 = Conv2d(up3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_2')

    up2 = DeConv2d(conv3, 128, (3, 3), out_size=(nx // 2, ny // 2), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv2')
    up2 = ConcatLayer([up2, conv2] ,concat_dim=3, name='concat2')
    conv2 = Conv2d(up2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_2')

    up1 = DeConv2d(conv2, 64, (3, 3), out_size=(nx, ny), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv1')
    up1 = ConcatLayer([up1, conv1] ,concat_dim=3, name='concat1')
    conv1 = Conv2d(up1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_2')

    conv1 = Conv2d(conv1, n_out, (1, 1), act=None, name='uconv1')
    print(" * Output: %s" % conv1.outputs)
    outputs = tl.act.pixel_wise_softmax(conv1.outputs)
    return conv1, outputs
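A TF1 graph-mode usage sketch (the 256x256 single-channel shape is illustrative; height and width should be divisible by 32 so the decoder's out_size values line up with the skip connections):

x = tf.placeholder(tf.float32, [None, 256, 256, 1])
network, probs = u_net_2d_64_2048_deconv(x, n_out=2)  # final layer + pixel-wise softmax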
Example 8
def generator(input_placeholder,
              train_mode,
              image_size,
              batch_size,
              reuse=False,
              filters_num=128):

    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    s2, s4, s8, s16 = int(image_size / 2), int(image_size / 4), int(
        image_size / 8), int(image_size / 16)

    with tf.variable_scope("generator", reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        input_layer = InputLayer(input_placeholder, name='gen/in')
        lin_layer = DenseLayer(input_layer,
                               n_units=filters_num * 8 * s16 * s16,
                               W_init=w_init,
                               act=tf.identity,
                               name='gen/lin')

        resh1_layer = ReshapeLayer(lin_layer,
                                   shape=[-1, s16, s16, filters_num * 8],
                                   name='gen/reshape')

        in_bn_layer = BatchNormLayer(resh1_layer,
                                     act=tf.nn.relu,
                                     is_train=train_mode,
                                     gamma_init=gamma_init,
                                     name='dec/in_bn')
        # in_bn_layer.shape = (batch_size, 4, 4, 1024)
        up1_layer = DeConv2d(in_bn_layer,
                             filters_num * 4, (5, 5),
                             out_size=(s8, s8),
                             strides=(2, 2),
                             padding='SAME',
                             batch_size=batch_size,
                             act=None,
                             W_init=w_init,
                             name='gen/up1')

        bn1_layer = BatchNormLayer(up1_layer,
                                   act=tf.nn.relu,
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='dec/bn1')

        # bn1_layer.shape = (batch_size, 8, 8, 512)
        up2_layer = DeConv2d(bn1_layer,
                             filters_num * 2, (5, 5),
                             out_size=(s4, s4),
                             strides=(2, 2),
                             padding='SAME',
                             batch_size=batch_size,
                             act=None,
                             W_init=w_init,
                             name='gen/up2')
        bn2_layer = BatchNormLayer(up2_layer,
                                   act=tf.nn.relu,
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='dec/bn2')
        # bn2_layer.shape = (batch_size, 16, 16, 256)

        up3_layer = DeConv2d(bn2_layer,
                             filters_num, (5, 5),
                             out_size=(s2, s2),
                             strides=(2, 2),
                             padding='SAME',
                             batch_size=batch_size,
                             act=None,
                             W_init=w_init,
                             name='gen/up3')
        bn3_layer = BatchNormLayer(up3_layer,
                                   act=tf.nn.relu,
                                   is_train=train_mode,
                                   gamma_init=gamma_init,
                                   name='dec/bn3')
        # bn3_layer.shape = (batch_size, 32, 32, 128)
        up4_layer = DeConv2d(bn3_layer,
                             3, (5, 5),
                             out_size=(image_size, image_size),
                             strides=(2, 2),
                             padding='SAME',
                             batch_size=batch_size,
                             act=None,
                             W_init=w_init,
                             name='gen/up4')

        up4_layer.outputs = tf.nn.tanh(up4_layer.outputs)

    return up4_layer, up4_layer.outputs
Example 9
def generator(inputs, is_train=True, reuse=False):
    image_size = 64
    s16 = image_size // 16
    gf_dim = 64  # Dimension of gen filters in first conv layer. [64]
    c_dim = FLAGS.c_dim  # n_color 3
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope("generator", reuse=reuse):

        net_in = InputLayer(inputs, name='g/in')
        net_h0 = DenseLayer(net_in,
                            n_units=(gf_dim * 8 * s16 * s16),
                            W_init=w_init,
                            act=tf.identity,
                            name='g/h0/lin')
        net_h0 = ReshapeLayer(net_h0,
                              shape=[-1, s16, s16, gf_dim * 8],
                              name='g/h0/reshape')
        net_h0 = BatchNormLayer(net_h0,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='g/h0/batch_norm')

        net_h1 = DeConv2d(net_h0,
                          gf_dim * 4, (5, 5),
                          strides=(2, 2),
                          padding='SAME',
                          act=None,
                          W_init=w_init,
                          name='g/h1/decon2d')
        net_h1 = BatchNormLayer(net_h1,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='g/h1/batch_norm')

        net_h2 = DeConv2d(net_h1,
                          gf_dim * 2, (5, 5),
                          strides=(2, 2),
                          padding='SAME',
                          act=None,
                          W_init=w_init,
                          name='g/h2/decon2d')
        net_h2 = BatchNormLayer(net_h2,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='g/h2/batch_norm')

        net_h3 = DeConv2d(net_h2,
                          gf_dim, (5, 5),
                          strides=(2, 2),
                          padding='SAME',
                          act=None,
                          W_init=w_init,
                          name='g/h3/decon2d')
        net_h3 = BatchNormLayer(net_h3,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='g/h3/batch_norm')

        net_h4 = DeConv2d(net_h3,
                          c_dim, (5, 5),
                          strides=(2, 2),
                          padding='SAME',
                          act=None,
                          W_init=w_init,
                          name='g/h4/decon2d')
        net_h4.outputs = tf.nn.tanh(net_h4.outputs)
    return net_h4
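In TF1 graph mode this generator is typically built twice so the sampling graph shares weights with the training graph via `reuse` (a sketch; the placeholder shape is illustrative):

z = tf.placeholder(tf.float32, [None, 100], name='z_noise')
net_g = generator(z, is_train=True, reuse=False)   # training graph
net_g2 = generator(z, is_train=False, reuse=True)  # sampling graph, shared weights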
Example 10
def generator(inputs, is_train=True, reuse=False):
    image_size = 128
    #s32 = image_size // 32
    gf_dim = 64  # Dimension of gen filters in first conv layer. [64]
    c_dim = 1  # n_color 1
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.name_scope("GENERATOR"):

        with tf.variable_scope("generator", reuse=reuse):

            with tf.name_scope("net_in"):
                net_in = InputLayer(inputs, name='g/in')
        #############################################################################
            with tf.name_scope("layer0"):
                net_h0 = DenseLayer(net_in,
                                    n_units=(gf_dim * 32 * 4 * 4),
                                    W_init=w_init,
                                    act=tf.identity,
                                    name='g/h0/lin')
                net_h0 = ReshapeLayer(net_h0,
                                      shape=[-1, 4, 4, gf_dim * 32],
                                      name='g/h0/reshape')
                net_h0 = BatchNormLayer(net_h0,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h0/batch_norm')

            with tf.name_scope("layer1"):
                net_h1 = DeConv2d(net_h0,
                                  gf_dim * 8, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h1/decon2d')
                net_h1 = BatchNormLayer(net_h1,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h1/batch_norm')

            with tf.name_scope("layer2"):
                net_h2 = DeConv2d(net_h1,
                                  gf_dim * 4, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h2/decon2d')
                net_h2 = BatchNormLayer(net_h2,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h2/batch_norm')

            with tf.name_scope("layer3"):
                net_h3 = DeConv2d(net_h2,
                                  gf_dim * 2, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h3/decon2d')
                net_h3 = BatchNormLayer(net_h3,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h3/batch_norm')

            with tf.name_scope("layer4"):
                net_h4 = DeConv2d(net_h3,
                                  gf_dim, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h4/decon2d')
                net_h4 = BatchNormLayer(net_h4,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h4/batch_norm')

            with tf.name_scope("layer5"):
                net_h5 = DeConv2d(net_h4,
                                  c_dim, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h5/decon2d')
                net_h5.outputs = tf.nn.tanh(net_h5.outputs)

        return net_h5
Example 11
def u_net(inputs, refine=False):
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tl.act.lrelu(x, 0.2)

    # ENCODER
    conv1 = Conv2d(64, (4, 4), (2, 2), padding='SAME', W_init=w_init)(inputs)
    conv1 = InstanceNorm2d(act=lrelu, gamma_init=g_init)(conv1)

    conv2 = Conv2d(128, (4, 4), (2, 2), padding='SAME', W_init=w_init)(conv1)
    conv2 = InstanceNorm2d(act=lrelu, gamma_init=g_init)(conv2)

    conv3 = Conv2d(256, (4, 4), (2, 2), padding='SAME', W_init=w_init)(conv2)
    conv3 = InstanceNorm2d(act=lrelu, gamma_init=g_init)(conv3)

    conv4 = Conv2d(512, (4, 4), (2, 2), padding='SAME', W_init=w_init)(conv3)
    conv4 = InstanceNorm2d(act=lrelu, gamma_init=g_init)(conv4)

    conv5 = Conv2d(512, (4, 4), (2, 2), padding='SAME', W_init=w_init)(conv4)
    conv5 = InstanceNorm2d(act=lrelu, gamma_init=g_init)(conv5)

    conv6 = Conv2d(512, (4, 4), (2, 2), padding='SAME', W_init=w_init)(conv5)
    conv6 = InstanceNorm2d(act=lrelu, gamma_init=g_init)(conv6)

    conv7 = Conv2d(512, (4, 4), (2, 2), padding='SAME', W_init=w_init)(conv6)
    conv7 = InstanceNorm2d(act=lrelu, gamma_init=g_init)(conv7)

    conv8 = Conv2d(512, (4, 4), (2, 2), padding='SAME', W_init=w_init)(conv7)
    conv8 = InstanceNorm2d(act=lrelu, gamma_init=g_init)(conv8)

    # DECODER
    d0 = DeConv2d(n_filter=512, filter_size=(4, 4))(conv8)
    d0 = Dropout(0.5)(d0)
    d0 = Concat()(
        [InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d0), conv7])

    d1 = DeConv2d(n_filter=512, filter_size=(4, 4))(d0)
    d1 = Dropout(0.5)(d1)
    d1 = Concat()(
        [InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d1), conv6])

    d2 = DeConv2d(n_filter=512, filter_size=(4, 4))(d1)
    d2 = Dropout(0.5)(d2)
    d2 = Concat()(
        [InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d2), conv5])

    d3 = DeConv2d(n_filter=512, filter_size=(4, 4))(d2)
    d3 = Concat()(
        [InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d3), conv4])

    d4 = DeConv2d(n_filter=256, filter_size=(4, 4))(d3)
    d4 = Concat()(
        [InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d4), conv3])

    d5 = DeConv2d(n_filter=128, filter_size=(4, 4))(d4)
    d5 = Concat()(
        [InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d5), conv2])

    d6 = DeConv2d(n_filter=64, filter_size=(4, 4))(d5)
    d6 = Concat()(
        [InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d6), conv1])

    d7 = DeConv2d(n_filter=64, filter_size=(4, 4))(d6)
    d7 = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d7)

    nn = Conv2d(1, (1, 1), (1, 1),
                act=tf.nn.tanh,
                padding='SAME',
                W_init=w_init)(d7)

    if refine:
        nn = RampElementwise(tf.add, act=tl.act.ramp, v_min=-1)([nn, inputs])

    return nn
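These functional layers compose into a TensorLayer 2.x model; a minimal sketch, assuming a single-channel 256x256 input (the shape is illustrative and should be divisible by 256 so the eight stride-2 encoder convs reach 1x1):

ni = Input([None, 256, 256, 1])
M = tl.models.Model(inputs=ni, outputs=u_net(ni), name='unet')
M.eval()  # inference mode (disables Dropout)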
Example 12
def unet(ni, out_channel, is_tanh, out_size=flags.img_size_h):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ngf = 64
    conv1 = Conv2d(ngf, (3, 3), (1, 1), W_init=w_init, act=lrelu)(ni)

    conv2 = Conv2d(ngf, (4, 4), (2, 2), W_init=w_init, act=None,
                   b_init=None)(conv1)
    conv2 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv2)

    conv3 = Conv2d(ngf * 2, (4, 4), (1, 1),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv2)
    conv3 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv3)
    conv4 = Conv2d(ngf * 2, (4, 4), (2, 2),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv3)
    conv4 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv4)

    conv5 = Conv2d(ngf * 4, (4, 4), (1, 1),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv4)
    conv5 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv5)
    conv6 = Conv2d(ngf * 4, (4, 4), (2, 2),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv5)
    conv6 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv6)

    conv7 = Conv2d(ngf * 8, (4, 4), (1, 1),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv6)
    conv7 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv7)
    conv8 = Conv2d(ngf * 8, (4, 4), (2, 2),
                   act=lrelu,
                   W_init=w_init,
                   b_init=None)(conv7)
    # (8, 8, 512); now start upsampling

    c_size = conv8.shape[-2]
    ##############################################################################################
    no = None
    # Single-pass loop used as a breakable block: upsampling stops via
    # `break` once the feature map reaches out_size.
    for _ in range(1):
        up8 = DeConv2d(ngf * 8, (4, 4), (2, 2), W_init=w_init,
                       b_init=None)(conv8)
        up8 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up8)
        up7 = Concat(concat_dim=3)([up8, conv7])
        up7 = DeConv2d(ngf * 8, (4, 4), (1, 1), W_init=w_init,
                       b_init=None)(up7)
        up7 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up7)
        c_size = c_size * 2
        if c_size == out_size:
            no = up7
            break
        up6 = Concat(concat_dim=3)([up7, conv6])
        up6 = DeConv2d(ngf * 4, (4, 4), (2, 2), W_init=w_init,
                       b_init=None)(up6)
        up6 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up6)
        up5 = Concat(concat_dim=3)([up6, conv5])
        up5 = DeConv2d(ngf * 4, (4, 4), (1, 1), W_init=w_init,
                       b_init=None)(up5)
        up5 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up5)
        c_size = c_size * 2
        if c_size == out_size:
            no = up5
            break
        up4 = Concat(concat_dim=3)([up5, conv4])
        up4 = DeConv2d(ngf * 2, (4, 4), (2, 2), W_init=w_init,
                       b_init=None)(up4)
        up4 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up4)
        up3 = Concat(concat_dim=3)([up4, conv3])
        up3 = DeConv2d(ngf * 2, (4, 4), (1, 1), W_init=w_init,
                       b_init=None)(up3)
        up3 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up3)
        c_size = c_size * 2
        if c_size == out_size:
            no = up3
            break
        up2 = Concat(concat_dim=3)([up3, conv2])
        up2 = DeConv2d(ngf * 1, (4, 4), (2, 2), W_init=w_init,
                       b_init=None)(up2)
        up2 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up2)
        up1 = Concat(concat_dim=3)([up2, conv1])
        up1 = DeConv2d(ngf * 1, (4, 4), (1, 1), W_init=w_init,
                       b_init=None)(up1)
        up1 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up1)
        c_size = c_size * 2
        if c_size == out_size:
            no = up1
            break
    if is_tanh:
        up0 = DeConv2d(out_channel, (3, 3), (1, 1),
                       W_init=w_init,
                       act=tf.nn.tanh)(no)
    else:
        up0 = DeConv2d(out_channel, (3, 3), (1, 1),
                       W_init=w_init,
                       b_init=None,
                       act=None)(no)

    return up0
Example 13
def generator(inputs, is_train=True, reuse=False):
    img_size = CFG.img_size
    s2, s4, s8, s16 = [int(img_size / i) for i in [2, 4, 8, 16]]
    gfs = 64
    channels = CFG.channels
    batch_size = CFG.batch_size

    W_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope('generator', reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        g = InputLayer(inputs, name='g/inputs')
        g = DenseLayer(g,
                       gfs * 8 * s16 * s16,
                       W_init=W_init,
                       act=tl.act.identity,
                       name='g/fc1')
        g = ReshapeLayer(g, shape=(-1, s16, s16, gfs * 8), name='g/reshape2')
        g = BatchNormLayer(g,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='g/bn3')

        g = DeConv2d(g,
                     gfs * 4, (5, 5),
                     out_size=(s8, s8),
                     strides=(2, 2),
                     batch_size=batch_size,
                     act=None,
                     W_init=W_init,
                     name='g/dconv4')
        g = BatchNormLayer(g,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='g/bn5')

        g = DeConv2d(g,
                     gfs * 2, (5, 5),
                     out_size=(s4, s4),
                     strides=(2, 2),
                     batch_size=batch_size,
                     act=None,
                     W_init=W_init,
                     name='g/dconv6')
        g = BatchNormLayer(g,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='g/bn7')

        g = DeConv2d(g,
                     gfs, (5, 5),
                     out_size=(s2, s2),
                     strides=(2, 2),
                     batch_size=batch_size,
                     act=None,
                     W_init=W_init,
                     name='g/dconv8')
        g = BatchNormLayer(g,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='g/bn9')

        g = DeConv2d(g,
                     channels, (5, 5),
                     out_size=(img_size, img_size),
                     strides=(2, 2),
                     batch_size=batch_size,
                     act=None,
                     W_init=W_init,
                     name='g/dconv10')

        logits = g.outputs                 # pre-activation (pre-tanh) outputs
        g.outputs = tf.nn.tanh(g.outputs)  # bound generated images to [-1, 1]
    return g, logits
Example 14
def build_generator_9blocks(name="generator", skip=False):
    with tf.compat.v1.variable_scope(name):
        inputgen = Input(shape=[None, IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS],
                         dtype=tf.float32)
        f = 7
        ks = 3
        padding = "CONSTANT"
        padgen = PadLayer([[0, 0], [ks, ks], [ks, ks], [0, 0]],
                          padding)(inputgen)

        o_c1 = Conv2d(n_filter=ngf,
                      filter_size=(f, f),
                      strides=(1, 1),
                      padding="VALID",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(padgen)
        o_c1 = InstanceNorm2d(act=tf.nn.relu)(o_c1)

        o_c2 = Conv2d(n_filter=ngf * 2,
                      filter_size=(ks, ks),
                      strides=(2, 2),
                      padding="SAME",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(o_c1)
        o_c2 = InstanceNorm2d(act=tf.nn.relu)(o_c2)

        o_c3 = Conv2d(n_filter=ngf * 4,
                      filter_size=(ks, ks),
                      strides=(2, 2),
                      padding="SAME",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(o_c2)
        o_c3 = InstanceNorm2d(act=tf.nn.relu)(o_c3)

        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

        o_c4 = DeConv2d(n_filter=ngf * 2,
                        filter_size=(ks, ks),
                        strides=(2, 2),
                        padding="SAME",
                        act=None,
                        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                        b_init=tf.constant_initializer(0.0))(o_r9)
        o_c4 = InstanceNorm2d(act=tf.nn.relu)(o_c4)

        o_c5 = DeConv2d(n_filter=ngf,
                        filter_size=(ks, ks),
                        strides=(2, 2),
                        padding="SAME",
                        act=None,
                        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                        b_init=tf.constant_initializer(0.0))(o_c4)
        o_c5 = InstanceNorm2d(act=tf.nn.relu)(o_c5)

        o_c6 = Conv2d(n_filter=IMG_CHANNELS,
                      filter_size=(f, f),
                      strides=(1, 1),
                      padding="SAME",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(o_c5)

        if skip:
            tmp = Elementwise(combine_fn=tf.add)([inputgen, o_c6])
            out_gen = Lambda(tf.nn.tanh)(tmp)
        else:
            out_gen = Lambda(tf.nn.tanh)(o_c6)

        return Model(inputs=inputgen, outputs=out_gen)
Example 15
def generator(input, is_train=False, reuse=False):
    """
    Cartoon GAN generator neural network
    :param input: TF Tensor
        input tensor
    :param is_train: boolean
        train or test flag
    :param reuse: boolean
        whether to reuse the neural network
    :return: the output layer of the generator network
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = None
    gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.02)

    with tf.variable_scope('CartoonGAN_G', reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        n = InputLayer(input, name='g_input')
        n = Conv2d(n,
                   64, (7, 7), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   name='k7n64s1/c')
        n = BatchNormLayer(n,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='k7n64s1/b_r')

        with tf.variable_scope('down_conv'):
            n = Conv2d(n,
                       128, (3, 3), (2, 2),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n128s2/c1')
            n = Conv2d(n,
                       128, (3, 3), (1, 1),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n128s1/c2')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='k3n128/b_r')

            n = Conv2d(n,
                       256, (3, 3), (2, 2),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n256s2/c1')
            n = Conv2d(n,
                       256, (3, 3), (1, 1),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n256s1/cc')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='k3n256/b_r')

        with tf.variable_scope('residual_blocks'):
            for i in range(8):
                nn = Conv2d(n,
                            256, (3, 3), (1, 1),
                            act=None,
                            padding='SAME',
                            W_init=w_init,
                            b_init=b_init,
                            name='k3n256s1/c1/%s' % i)
                nn = BatchNormLayer(nn,
                                    act=tf.nn.relu,
                                    is_train=is_train,
                                    gamma_init=gamma_init,
                                    name='k3n256s1/b1/%s' % i)
                nn = Conv2d(nn,
                            256, (3, 3), (1, 1),
                            act=None,
                            padding='SAME',
                            W_init=w_init,
                            b_init=b_init,
                            name='k3n256s1/c2/%s' % i)
                nn = BatchNormLayer(nn,
                                    is_train=is_train,
                                    gamma_init=gamma_init,
                                    name='k3n256s1/b2/%s' % i)
                nn = ElementwiseLayer([n, nn],
                                      tf.add,
                                      name='b_residual_add/%s' % i)
                n = nn

        with tf.variable_scope('up_conv'):
            n = DeConv2d(n,
                         n_filter=128,
                         filter_size=(3, 3),
                         out_size=(128, 128),
                         strides=(2, 2),
                         padding='SAME',
                         W_init=w_init,
                         name='k3n128s05/c1')
            n = Conv2d(n,
                       128, (3, 3), (1, 1),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n128s1/c2')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='k3n128/b_r')

            n = DeConv2d(n,
                         n_filter=64,
                         filter_size=(3, 3),
                         out_size=(256, 256),
                         strides=(2, 2),
                         padding='SAME',
                         W_init=w_init,
                         name='k3n64s05/c1')
            n = Conv2d(n,
                       64, (3, 3), (1, 1),
                       padding='SAME',
                       W_init=w_init,
                       name='k3n64s1/c2')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='k3n64/b_r')

        n = Conv2d(n,
                   3, (7, 7), (1, 1),
                   act=tf.nn.tanh,
                   padding='SAME',
                   W_init=w_init,
                   name='g_output')

    return n
Example 16
def generator(inputs, is_train=True):
    with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
        net_in = InputLayer(inputs, name='gin')

        gnet_d0 = DenseLayer(net_in,
                             n_units=(16384),
                             act=tf.identity,
                             name='gnet_d0')
        gnet_r0 = ReshapeLayer(gnet_d0, shape=[-1, 4, 4, 1024], name='gnet_r0')
        gnet_b0 = BatchNormLayer(gnet_r0,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='gnet_b0')

        gnet_dc1 = DeConv2d(gnet_b0,
                            256, (8, 8),
                            strides=(2, 2),
                            padding='SAME',
                            act=None,
                            name='gnet_dc1')
        gnet_b1 = BatchNormLayer(gnet_dc1,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='gnet_b1')

        gnet_dc2 = DeConv2d(gnet_b1,
                            128, (8, 8),
                            strides=(2, 2),
                            padding='SAME',
                            act=None,
                            name='gnet_dc2')
        gnet_b2 = BatchNormLayer(gnet_dc2,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='gnet_b2')

        gnet_dc3 = DeConv2d(gnet_b2,
                            64, (8, 8),
                            strides=(2, 2),
                            padding='SAME',
                            act=None,
                            name='gnet_dc3')
        gnet_b3 = BatchNormLayer(gnet_dc3,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='gnet_b3')

        gnet_dc4 = DeConv2d(gnet_b3,
                            3, (8, 8),
                            strides=(2, 2),
                            padding='SAME',
                            act=None,
                            name='net_h4')

        # tanh bounds the generated image to [-1, 1] so it matches training
        # images normalised to that range; it is an output squashing rather
        # than extra modelling capacity.
        gnet_dc4.outputs = tf.nn.tanh(gnet_dc4.outputs)
    return gnet_dc4
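A sketch of mapping the tanh output back to an 8-bit image for viewing (TF1 session; the names `sess`, `z`, and `z_batch` are illustrative):

net_g = generator(z, is_train=False)
fake = sess.run(net_g.outputs, feed_dict={z: z_batch})  # values in [-1, 1]
imgs = ((fake + 1.0) * 127.5).astype('uint8')           # rescale to [0, 255]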
Example 17
    def batch_normal_u_net(self):
        # built encoder
        inputs = Input(self.in_shape, name='inputs')
        conv1 = Conv2d(64, (4, 4), (2, 2), name='conv1')(inputs)

        conv2 = Conv2d(128, (4, 4), (2, 2), name='conv2')(conv1)
        conv2 = BatchNorm(act=tl.act.lrelu, is_train=self.train_bn,
                          name='bn2')(conv2)

        conv3 = Conv2d(256, (4, 4), (2, 2), name='conv3')(conv2)
        conv3 = BatchNorm(act=tl.act.lrelu, is_train=self.train_bn,
                          name='bn3')(conv3)

        conv4 = Conv2d(512, (4, 4), (2, 2), name='conv4')(conv3)
        conv4 = BatchNorm(act=tl.act.lrelu, is_train=self.train_bn,
                          name='bn4')(conv4)

        conv5 = Conv2d(512, (4, 4), (2, 2), name='conv5')(conv4)
        conv5 = BatchNorm(act=tl.act.lrelu, is_train=self.train_bn,
                          name='bn5')(conv5)

        conv6 = Conv2d(512, (4, 4), (2, 2), name='conv6')(conv5)
        conv6 = BatchNorm(act=tl.act.lrelu, is_train=self.train_bn,
                          name='bn6')(conv6)

        conv7 = Conv2d(512, (4, 4), (2, 2), name='conv7')(conv6)
        conv7 = BatchNorm(act=tl.act.lrelu, is_train=self.train_bn,
                          name='bn7')(conv7)

        conv8 = Conv2d(512, (4, 4), (2, 2), act=tl.act.lrelu,
                       name='conv8')(conv7)

        # built decoder
        up7 = DeConv2d(512, (4, 4), name='deconv7')(conv8)
        up7 = BatchNorm(act=tf.nn.relu, is_train=self.train_bn,
                        name='dbn7')(up7)

        up6 = Concat(concat_dim=3, name='concat6')([up7, conv7])
        up6 = DeConv2d(1024, (4, 4), name='deconv6')(up6)
        up6 = BatchNorm(act=tf.nn.relu, is_train=self.train_bn,
                        name='dbn6')(up6)

        up5 = Concat(concat_dim=3, name='concat5')([up6, conv6])
        up5 = DeConv2d(1024, (4, 4), name='deconv5')(up5)
        up5 = BatchNorm(act=tf.nn.relu, is_train=self.train_bn,
                        name='dbn5')(up5)

        up4 = Concat(concat_dim=3, name='concat4')([up5, conv5])
        up4 = DeConv2d(1024, (4, 4), name='deconv4')(up4)
        up4 = BatchNorm(act=tf.nn.relu, is_train=self.train_bn,
                        name='dbn4')(up4)

        up3 = Concat(concat_dim=3, name='concat3')([up4, conv4])
        up3 = DeConv2d(256, (4, 4), name='deconv3')(up3)
        up3 = BatchNorm(act=tf.nn.relu, is_train=self.train_bn,
                        name='dbn3')(up3)

        up2 = Concat(concat_dim=3, name='concat2')([up3, conv3])
        up2 = DeConv2d(128, (4, 4), name='deconv2')(up2)
        up2 = BatchNorm(act=tf.nn.relu, is_train=self.train_bn,
                        name='dbn2')(up2)

        up1 = Concat(concat_dim=3, name='concat1')([up2, conv2])
        up1 = DeConv2d(64, (4, 4), name='deconv1')(up1)
        up1 = BatchNorm(act=tf.nn.relu, is_train=self.train_bn,
                        name='dbn1')(up1)

        up0 = Concat(concat_dim=3, name='concat0')([up1, conv1])
        up0 = DeConv2d(64, (4, 4), name='deconv0')(up0)
        up0 = BatchNorm(act=tf.nn.relu, is_train=self.train_bn,
                        name='dbn0')(up0)

        outs = Conv2d(3, (1, 1), act=tf.nn.sigmoid, name='out')(up0)
        return Model(inputs=inputs, outputs=outs, name="BN_U_Net")
Example 18
def build_generator_9blocks(inputgen, name="generator", skip=False):
    f = 7
    ks = 3
    padding = "CONSTANT"
    # Keep the unpadded input around: the skip connection at the end must add
    # the original-resolution image, not the padded tensor.
    padgen = PadLayer([[0, 0], [ks, ks], [ks, ks], [0, 0]], mode=padding)(inputgen)

    o_c1 = Conv2d(
        n_filter=ngf,
        filter_size=(f, f),
        strides=(1, 1),
        padding="VALID",
        act=None,
        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
        b_init=tf.constant_initializer(0.0)
    )(padgen)
    o_c1 = InstanceNorm2d(act=tf.nn.relu)(o_c1)

    o_c2 = Conv2d(
        n_filter=ngf * 2,
        filter_size=(ks, ks),
        strides=(2, 2),
        padding="SAME",
        act=None,
        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
        b_init=tf.constant_initializer(0.0)
    )(o_c1)
    o_c2 = InstanceNorm2d(act=tf.nn.relu)(o_c2)

    o_c3 = Conv2d(
        n_filter=ngf * 4,
        filter_size=(ks, ks),
        strides=(2, 2),
        padding="SAME",
        act=None,
        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
        b_init=tf.constant_initializer(0.0)
    )(o_c2)
    o_c3 = InstanceNorm2d(act=tf.nn.relu)(o_c3)

    o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)
    o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
    o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
    o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
    o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
    o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
    o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
    o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
    o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

    o_c4 = DeConv2d(
        n_filter=ngf * 2,
        filter_size=(ks, ks),
        strides=(2, 2),
        padding="SAME",
        act=None,
        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
        b_init=tf.constant_initializer(0.0)
    )(o_r9)
    o_c4 = InstanceNorm2d(act=tf.nn.relu)(o_c4)

    o_c5 = DeConv2d(
        n_filter=ngf,
        filter_size=(ks, ks),
        strides=(2, 2),
        padding="SAME",
        act=None,
        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
        b_init=tf.constant_initializer(0.0)
    )(o_c4)
    o_c5 = InstanceNorm2d(act=tf.nn.relu)(o_c5)

    o_c6 = Conv2d(
        n_filter=IMG_CHANNELS,
        filter_size=(f, f),
        strides=(1, 1),
        padding="SAME",
        act=None,
        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
        b_init=tf.constant_initializer(0.0)
    )(o_c5)

    if skip:
        out_gen = tf.nn.tanh(inputgen + o_c6, name="t1")
    else:
        out_gen = tf.nn.tanh(o_c6, name="t1")

    return out_gen
Example 19
def a2net(x, is_train=True, reuse=False):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope('a2net', reuse=reuse):
        net_in = InputLayer(x, name='input')
        inputY = InputLayer(x[:, :, :, :1], name='inputY')
        inputUV = InputLayer(x[:, :, :, 1:], name='inputUV')

        # Encoder

        conv1 = Conv2d(net_in,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv1')
        conv1 = BatchNormLayer(conv1,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn1')
        conv2 = Conv2d(conv1,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv2')
        conv2 = BatchNormLayer(conv2,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn2')

        concat1 = ConcatLayer([conv1, conv2],
                              concat_dim=-1,
                              name='encoder/concat1')
        aggregation1 = Conv2d(concat1,
                              32, (4, 4), (2, 2),
                              act=None,
                              W_init=w_init,
                              b_init=None,
                              name='encoder/aggregation1')
        aggregation1 = BatchNormLayer(aggregation1,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='encoder/bn3')

        conv3 = Conv2d(aggregation1,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv3')
        conv3 = BatchNormLayer(conv3,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn4')

        concat2 = ConcatLayer([aggregation1, conv3],
                              concat_dim=-1,
                              name='encoder/concat2')
        aggregation2 = Conv2d(concat2,
                              32, (4, 4), (2, 2),
                              act=None,
                              W_init=w_init,
                              b_init=None,
                              name='encoder/aggregation2')
        aggregation2 = BatchNormLayer(aggregation2,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='encoder/bn5')

        conv4 = Conv2d(aggregation2,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv4')
        conv4 = BatchNormLayer(conv4,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn6')

        concat3 = ConcatLayer([aggregation2, conv4],
                              concat_dim=-1,
                              name='encoder/concat3')
        aggregation3 = Conv2d(concat3,
                              32, (4, 4), (2, 2),
                              act=None,
                              W_init=w_init,
                              b_init=None,
                              name='encoder/aggregation3')
        aggregation3 = BatchNormLayer(aggregation3,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='encoder/bn7')

        # DecoderY
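        # Luma (Y) decoder: mirrors the encoder with 2x2 stride-2 transposed
        # convs and U-Net style skips from conv4, conv3 and conv2 at each
        # scale, ending in a single-channel tanh output.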

        convY_1 = Conv2d(aggregation3,
                         32, (3, 3), (1, 1),
                         act=None,
                         W_init=w_init,
                         b_init=None,
                         name='decoderY/conv1')
        convY_1 = BatchNormLayer(convY_1,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 gamma_init=gamma_init,
                                 name='decoderY/bn1')

        concatY_1 = ConcatLayer([aggregation3, convY_1],
                                concat_dim=-1,
                                name='decoderY/concat1')
        aggregationY_1 = DeConv2d(concatY_1,
                                  32, (2, 2), (2, 2),
                                  act=None,
                                  W_init=w_init,
                                  b_init=None,
                                  name='decoderY/aggregation1')
        aggregationY_1 = BatchNormLayer(aggregationY_1,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='decoderY/bn2')

        copyY_1 = ConcatLayer([conv4, aggregationY_1],
                              concat_dim=-1,
                              name='decoderY/copy1')
        convY_2 = Conv2d(copyY_1,
                         32, (3, 3), (1, 1),
                         act=None,
                         W_init=w_init,
                         b_init=None,
                         name='decoderY/conv2')
        convY_2 = BatchNormLayer(convY_2,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 gamma_init=gamma_init,
                                 name='decoderY/bn3')

        concatY_2 = ConcatLayer([copyY_1, convY_2],
                                concat_dim=-1,
                                name='decoderY/concat2')
        aggregationY_2 = DeConv2d(concatY_2,
                                  32, (2, 2), (2, 2),
                                  act=None,
                                  W_init=w_init,
                                  b_init=None,
                                  name='decoderY/aggregation2')
        aggregationY_2 = BatchNormLayer(aggregationY_2,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='decoderY/bn4')

        copyY_2 = ConcatLayer([conv3, aggregationY_2],
                              concat_dim=-1,
                              name='decoderY/copy2')
        convY_3 = Conv2d(copyY_2,
                         32, (3, 3), (1, 1),
                         act=None,
                         W_init=w_init,
                         b_init=None,
                         name='decoderY/conv3')
        convY_3 = BatchNormLayer(convY_3,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 gamma_init=gamma_init,
                                 name='decoderY/bn5')

        concatY_3 = ConcatLayer([copyY_2, convY_3],
                                concat_dim=-1,
                                name='decoderY/concat3')
        aggregationY_3 = DeConv2d(concatY_3,
                                  32, (2, 2), (2, 2),
                                  act=None,
                                  W_init=w_init,
                                  b_init=None,
                                  name='decoderY/aggregation3')
        aggregationY_3 = BatchNormLayer(aggregationY_3,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='decoderY/bn6')

        copyY_3 = ConcatLayer([conv2, aggregationY_3],
                              concat_dim=-1,
                              name='decoderY/copy3')

        outputY = Conv2d(copyY_3,
                         1, (3, 3), (1, 1),
                         act=tf.nn.tanh,
                         name='decoderY/output')

        # DecoderUV
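        # Chroma (UV) decoder: same topology as DecoderY, but with UV_SIZE
        # channels per layer and a two-channel tanh output for U and V.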

        convUV_1 = Conv2d(aggregation3,
                          UV_SIZE, (3, 3), (1, 1),
                          act=None,
                          W_init=w_init,
                          b_init=None,
                          name='decoderUV/conv1')
        convUV_1 = BatchNormLayer(convUV_1,
                                  act=tf.nn.relu,
                                  is_train=is_train,
                                  gamma_init=gamma_init,
                                  name='decoderUV/bn1')

        concatUV_1 = ConcatLayer([aggregation3, convUV_1],
                                 concat_dim=-1,
                                 name='decoderUV/concat1')
        aggregationUV_1 = DeConv2d(concatUV_1,
                                   UV_SIZE, (2, 2), (2, 2),
                                   act=None,
                                   W_init=w_init,
                                   b_init=None,
                                   name='decoderUV/aggregation1')
        aggregationUV_1 = BatchNormLayer(aggregationUV_1,
                                         act=tf.nn.relu,
                                         is_train=is_train,
                                         gamma_init=gamma_init,
                                         name='decoderUV/bn2')

        copyUV_1 = ConcatLayer([conv4, aggregationUV_1],
                               concat_dim=-1,
                               name='decoderUV/copy1')
        convUV_2 = Conv2d(copyUV_1,
                          UV_SIZE, (3, 3), (1, 1),
                          act=None,
                          W_init=w_init,
                          b_init=None,
                          name='decoderUV/conv2')
        convUV_2 = BatchNormLayer(convUV_2,
                                  act=tf.nn.relu,
                                  is_train=is_train,
                                  gamma_init=gamma_init,
                                  name='decoderUV/bn3')

        concatUV_2 = ConcatLayer([copyUV_1, convUV_2],
                                 concat_dim=-1,
                                 name='decoderUV/concat2')
        aggregationUV_2 = DeConv2d(concatUV_2,
                                   UV_SIZE, (2, 2), (2, 2),
                                   act=None,
                                   W_init=w_init,
                                   b_init=None,
                                   name='decoderUV/aggregation2')
        aggregationUV_2 = BatchNormLayer(aggregationUV_2,
                                         act=tf.nn.relu,
                                         is_train=is_train,
                                         gamma_init=gamma_init,
                                         name='decoderUV/bn4')

        copyUV_2 = ConcatLayer([conv3, aggregationUV_2],
                               concat_dim=-1,
                               name='decoderUV/copy2')
        convUV_3 = Conv2d(copyUV_2,
                          UV_SIZE, (3, 3), (1, 1),
                          act=None,
                          W_init=w_init,
                          b_init=None,
                          name='decoderUV/conv3')
        convUV_3 = BatchNormLayer(convUV_3,
                                  act=tf.nn.relu,
                                  is_train=is_train,
                                  gamma_init=gamma_init,
                                  name='decoderUV/bn5')

        concatUV_3 = ConcatLayer([copyUV_2, convUV_3],
                                 concat_dim=-1,
                                 name='decoderUV/concat3')
        aggregationUV_3 = DeConv2d(concatUV_3,
                                   UV_SIZE, (2, 2), (2, 2),
                                   act=None,
                                   W_init=w_init,
                                   b_init=None,
                                   name='decoderUV/aggregation3')
        aggregationUV_3 = BatchNormLayer(aggregationUV_3,
                                         act=tf.nn.relu,
                                         is_train=is_train,
                                         gamma_init=gamma_init,
                                         name='decoderUV/bn6')

        copyUV_3 = ConcatLayer([conv2, aggregationUV_3],
                               concat_dim=-1,
                               name='decoderUV/copy3')

        outputUV = Conv2d(copyUV_3,
                          2, (3, 3), (1, 1),
                          act=tf.nn.tanh,
                          name='decoderUV/output')
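
        # Residual blending: each decoder output is mixed with the matching
        # input plane using a fixed weight BETA (a module-level constant
        # defined elsewhere), so the network learns a correction to the input.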

        outY_plus_Y = ElementwiseLambdaLayer(
            [outputY, inputY],
            fn=lambda x, y: BETA * x + (1 - BETA) * y,
            name='outY_plus_Y')

        outUV_plus_UV = ElementwiseLambdaLayer(
            [outputUV, inputUV],
            fn=lambda x, y: BETA * x + (1 - BETA) * y,
            name='outUV_plus_UV')

        net_out = ConcatLayer([outY_plus_Y, outUV_plus_UV],
                              concat_dim=-1,
                              name='net_out')

        return outY_plus_Y, outUV_plus_UV, net_out
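
# A minimal usage sketch for a2net, assuming TensorLayer 1.x graph mode. The
# constants BETA and UV_SIZE live elsewhere in the original file; the values
# and the 256x256 input shape below are illustrative assumptions only (spatial
# dims must be divisible by 8 for the three stride-2 encoder stages).
import numpy as np
import tensorflow as tf
import tensorlayer as tl

BETA = 0.5      # residual mixing weight (assumed)
UV_SIZE = 32    # channel width of the chroma decoder (assumed)

x = tf.placeholder(tf.float32, [None, 256, 256, 3], name='yuv_input')
outY, outUV, net_out = a2net(x, is_train=False, reuse=False)

with tf.Session() as sess:
    tl.layers.initialize_global_variables(sess)
    batch = np.zeros([1, 256, 256, 3], dtype=np.float32)  # dummy YUV batch
    enhanced = sess.run(net_out.outputs, feed_dict={x: batch})
    print(enhanced.shape)  # (1, 256, 256, 3): Y plane + UV planes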