# Imports assumed by most of the snippets below (TensorFlow 2.x + TensorLayer 2.x static-model API).
# Anything not imported here (e.g. `flags`, `SpectralNormConv2d`) is imported or defined
# elsewhere in the original projects these examples come from.
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (Input, Dense, Reshape, Flatten, Concat, Conv2d, DeConv2d,
                                UpSampling2d, MaxPool2d, GlobalMeanPool2d,
                                BatchNorm, BatchNorm2d, InstanceNorm2d)
from tensorlayer.models import Model


def get_generator(shape, gf_dim=64):  # Dimension of gen filters in first conv layer. [64]
    image_size = flags.output_size
    s16 = image_size // 8
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x : tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 16 * s16 * s16), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 16])(nn)  # project z to an s16 x s16 map with gf_dim*16 channels
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    #nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim * 8, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    # nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim * 4, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    # nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim * 2, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)

    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)


    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
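
A minimal usage sketch for the generator above (an illustration, not part of the original snippet); treating `flags.z_dim` as the latent size is an assumption:

G = get_generator([None, flags.z_dim])   # assumes flags.z_dim is the latent vector size
G.eval()                                 # switch the static TensorLayer model to inference mode
z = tf.random.normal([16, flags.z_dim])
fake_images = G(z)                       # NHWC batch in [-1, 1] from the final tanh DeConv2d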
Example #2
def get_z_D(shape_z):
    w_init = tf.random_normal_initializer(stddev=0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    nz = Input(shape_z)
    print(nz.shape)
    # 8 8 128
    n = SpectralNormConv2d(128, (3, 3), (1, 1),
                           act=lrelu,
                           W_init=w_init,
                           padding='VALID')(nz)
    print(n.shape)
    # 6 6 128
    n = SpectralNormConv2d(128, (3, 3), (1, 1), act=lrelu, W_init=w_init)(n)
    print(n.shape)
    # 6 6 128
    n = SpectralNormConv2d(256, (3, 3), (1, 1),
                           act=lrelu,
                           W_init=w_init,
                           padding='VALID')(n)
    print(n.shape)
    # 4 4 256
    n = SpectralNormConv2d(512, (4, 4), (1, 1),
                           act=lrelu,
                           W_init=w_init,
                           padding='VALID')(n)
    print(n.shape)
    # 1 1 512
    n = Reshape(shape=[-1, 512])(n)
    n = Dense(n_units=1, act=tf.identity, W_init=w_init, b_init=None)(n)
    print(n.shape)
    return tl.models.Model(inputs=nz, outputs=n)
Example #3
def get_D(x_shape=(None, flags.img_size_h, flags.img_size_w, flags.c_dim),
          name=None):
    # ref: Image-to-Image Translation with Conditional Adversarial Networks
    # input: (batch_size_train, 256, 256, 3)
    # output: (batch_size_train, )
    w_init = tf.random_normal_initializer(stddev=0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    ch = 64
    n_layer = 8
    tch = ch
    ni = Input(x_shape)
    n = SpectralNormConv2d(ch, (3, 3), (2, 2), act=lrelu, W_init=w_init)(ni)
    for i in range(1, n_layer - 1):
        n = SpectralNormConv2d(tch * 2, (3, 3), (2, 2),
                               act=lrelu,
                               W_init=w_init)(n)
        tch *= 2
    n = SpectralNormConv2d(tch * 2, (3, 3), (2, 2), act=lrelu,
                           W_init=w_init)(n)
    tch *= 2
    n = SpectralNormConv2d(1, (1, 1), (1, 1),
                           act=None,
                           padding='VALID',
                           W_init=w_init)(n)
    n = Reshape([-1, 1])(n)
    M = Model(inputs=ni, outputs=n, name=name)
    return M
def get_generator(shape=[None, flags.z_dim], gf_dim=64, name=None):
    image_size = 64
    s16 = image_size // 16
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
               b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)

    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)

    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)

    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)

    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name=name)
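
A sketch of how a generator/discriminator pair like `get_generator` and `get_D` in this block could be updated with the standard non-saturating GAN loss. This is an illustration, not code from the original source; the optimizers, the batch of real images, and `z_dim` are assumptions.

# Setup (assumed): G = get_generator(); D = get_D(); G.train(); D.train()
# g_optimizer = tf.optimizers.Adam(2e-4, beta_1=0.5); d_optimizer = tf.optimizers.Adam(2e-4, beta_1=0.5)
def train_step(G, D, real_images, z_dim, g_optimizer, d_optimizer):
    # One adversarial update: D sees real and fake logits, G tries to fool D.
    bce = lambda logits, labels: tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    z = tf.random.normal([tf.shape(real_images)[0], z_dim])
    with tf.GradientTape(persistent=True) as tape:
        fake = G(z)
        d_real = D(real_images)
        d_fake = D(fake)
        d_loss = bce(d_real, tf.ones_like(d_real)) + bce(d_fake, tf.zeros_like(d_fake))
        g_loss = bce(d_fake, tf.ones_like(d_fake))
    d_optimizer.apply_gradients(zip(tape.gradient(d_loss, D.trainable_weights), D.trainable_weights))
    g_optimizer.apply_gradients(zip(tape.gradient(g_loss, G.trainable_weights), G.trainable_weights))
    del tape
    return d_loss, g_loss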
Example #5
def get_generator(
        shape,
        gf_dim=64):  # Dimension of gen filters in first conv layer. [64]
    image_size = 64
    s16 = image_size // 16

    w_init = tf.random_normal_initializer(0.0, 0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
               b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)
    nn = BatchNorm2d(decay=0.9,
                     act=tf.nn.relu,
                     gamma_init=gamma_init,
                     name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example #6
def get_z_G(shape_z):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    # lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    nz = Input(shape_z)
    print(nz.shape)
    n = Dense(n_units=4 * 4 * 256, W_init=w_init, b_init=None, act=None)(nz)
    print(n.shape)
    n = Reshape(shape=[-1, 4, 4, 256])(n)
    n = BatchNorm2d(decay=0.9,
                    act=tf.nn.relu,
                    gamma_init=gamma_init,
                    name=None)(n)
    print(n.shape)

    n = DeConv2d(128, (3, 3), (1, 1),
                 W_init=w_init,
                 padding='VALID',
                 b_init=None)(n)
    n = BatchNorm2d(decay=0.9,
                    act=tf.nn.relu,
                    gamma_init=gamma_init,
                    name=None)(n)
    print(n.shape)

    n = DeConv2d(128, (3, 3), (1, 1), W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(decay=0.9,
                    act=tf.nn.relu,
                    gamma_init=gamma_init,
                    name=None)(n)
    print(n.shape)

    n = DeConv2d(128, (3, 3), (1, 1), W_init=w_init, padding='VALID')(n)
    print(n.shape)
    return tl.models.Model(inputs=nz, outputs=n)
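
An illustrative pairing (an assumption, not from the source): the 8x8x128 feature maps produced by `get_z_G` match the input that `get_z_D` from Example #2 expects, so the two models can be composed directly.

G_z = get_z_G([None, 128])        # a 128-dimensional latent size is an assumed placeholder
D_z = get_z_D([None, 8, 8, 128])
G_z.eval()
D_z.eval()
z = tf.random.normal([4, 128])
logits = D_z(G_z(z))              # shape (4, 1): one realism logit per latent feature map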
Example #7
def get_G(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(2 * 2 * 448), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, 2, 2, 448])(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=256,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=128,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  act=tf.nn.relu,
                  W_init=w_init)(nn)
    nn = DeConv2d(n_filter=64,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  act=tf.nn.relu,
                  W_init=w_init)(nn)
    nn = DeConv2d(n_filter=3,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  act=tf.nn.tanh,
                  W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='G')
Example #8
def get_G(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(1024), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, 1024])(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = Dense(n_units=(8 * 8 * 256), W_init=w_init, b_init=None)(nn)
    nn = Reshape(shape=[-1, 8, 8, 256])(nn)  # 8*8*256 vector -> 8x8 feature map with 256 channels
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(n_filter=256,
                  filter_size=(4, 4),
                  strides=(1, 1),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=256,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=128,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=64,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=1,
                  filter_size=(4, 4),
                  strides=(1, 1),
                  act=tf.nn.sigmoid,
                  W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='G')
Example #9
def get_generator(shape, gf_dim=64):
    image_size = 256
    s16 = image_size // 16
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 16 * s16 * s16), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 16])(nn)
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 16, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 8, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 8, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    # non-local (self-attention) block -- may still contain bugs here
    f = Conv2d(gf_dim // 4, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    f = MaxPool2d((2, 2), (2, 2))(f)
    g = Conv2d(gf_dim // 4, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    h = Conv2d(gf_dim, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    h = MaxPool2d((2, 2), (2, 2))(h)
    # attention map: flatten the spatial dimensions, keep channels last
    s = tf.matmul(Reshape(shape=[g.shape[0], -1, g.shape[-1]])(g),
                  Reshape(shape=[f.shape[0], -1, f.shape[-1]])(f), transpose_b=True)
    beta = tf.nn.softmax(s)
    o = tf.matmul(beta, Reshape(shape=[h.shape[0], -1, h.shape[-1]])(h))
    o = Reshape(shape=[nn.shape[0], nn.shape[1], nn.shape[2], gf_dim])(o)
    o = Conv2d(gf_dim * 2, (1, 1), (1, 1), W_init=w_init, b_init=None)(o)
    nn = nn + o  # residual connection for the attention output
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + nn
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = Conv2d(3, (3, 3), (1, 1), act=tf.nn.tanh, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example #10
def get_discriminator(shape, df_dim=64):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ni = Input(shape)
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(ni)
    n1 = Conv2d(df_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = n1 + ni
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = Conv2d(df_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = nn + n1
    # non-local (self-attention) block, mirroring the generator above
    f = Conv2d(df_dim // 4, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    f = MaxPool2d((2, 2), (2, 2))(f)
    g = Conv2d(df_dim // 4, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    h = Conv2d(df_dim, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)
    h = MaxPool2d((2, 2), (2, 2))(h)
    s = tf.matmul(Reshape(shape=[g.shape[0], -1, g.shape[-1]])(g),
                  Reshape(shape=[f.shape[0], -1, f.shape[-1]])(f), transpose_b=True)
    beta = tf.nn.softmax(s)
    o = tf.matmul(beta, Reshape(shape=[h.shape[0], -1, h.shape[-1]])(h))
    o = Reshape(shape=[nn.shape[0], nn.shape[1], nn.shape[2], df_dim])(o)
    o = Conv2d(df_dim * 2, (1, 1), (1, 1), W_init=w_init, b_init=None)(o)
    nn = nn + o  # residual connection for the attention output
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = Conv2d(df_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = nn + n1
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = Conv2d(df_dim * 8, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = nn + n1
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = Conv2d(df_dim * 8, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = nn + n1
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    n1 = Conv2d(df_dim * 16, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    nn = nn + n1
    n1 = Conv2d(df_dim * 16, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    n1 = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n1)
    nn = nn + n1
    nn = tf.reduce_sum(nn, axis=[1, 2])  # global sum pooling over the spatial dimensions
    nn = Dense(n_units=1, W_init=w_init, act=tf.identity)(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name='discriminator')
def get_G(shape_z,
          ngf=64):  # Dimension of gen filters in first conv layer. [64]
    # w_init = tf.glorot_normal_initializer()
    print("for G")
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    n_extra_layers = flags.n_extra_layers
    isize = 64
    cngf, tisize = ngf // 2, 4
    while tisize != isize:
        cngf = cngf * 2
        tisize = tisize * 2
    ni = Input(shape_z)
    nn = Reshape(shape=[-1, 1, 1, 128])(ni)
    nn = DeConv2d(cngf, (4, 4), (1, 1),
                  W_init=w_init,
                  b_init=None,
                  padding='VALID')(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    print(nn.shape)

    csize, cndf = 4, cngf
    while csize < isize // 2:
        cngf = cngf // 2
        nn = DeConv2d(cngf, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(decay=0.9,
                       act=tf.nn.relu,
                       gamma_init=gamma_init,
                       name=None)(nn)
        print(nn.shape)
        csize = csize * 2

    for t in range(n_extra_layers):
        nn = DeConv2d(cngf, (3, 3), (1, 1), W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(decay=0.9,
                       act=tf.nn.relu,
                       gamma_init=gamma_init,
                       name=None)(nn)
        print(nn.shape)

    nn = DeConv2d(3, (4, 4), (2, 2),
                  act=tf.nn.tanh,
                  W_init=w_init,
                  b_init=None)(nn)
    print(nn.shape)

    return tl.models.Model(inputs=ni, outputs=nn)
Example #12
def get_D_content(c_shape=(None, flags.c_shape[0], flags.c_shape[1],
                           flags.c_shape[2])):
    # reference: DRIT source code -- PyTorch implementation
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    ni = Input(c_shape)
    n = Conv2d(256, (7, 7), (2, 2), act=None, W_init=w_init)(ni)
    n = InstanceNorm2d(act=lrelu, gamma_init=g_init)(n)
    n = Conv2d(256, (7, 7), (2, 2), act=None, W_init=w_init)(n)
    n = InstanceNorm2d(act=lrelu, gamma_init=g_init)(n)
    n = Conv2d(256, (7, 7), (2, 2), act=None, W_init=w_init)(n)
    n = InstanceNorm2d(act=lrelu, gamma_init=g_init)(n)
    n = Conv2d(256, (4, 4), (1, 1), act=None, padding='VALID',
               W_init=w_init)(n)
    n = InstanceNorm2d(act=lrelu, gamma_init=g_init)(n)
    n = Conv2d(1, (5, 5), (5, 5), padding='VALID', W_init=w_init)(n)
    n = Reshape(shape=[-1, 1])(n)
    return tl.models.Model(inputs=ni, outputs=n, name=None)
Example #13
def get_dwG(
        shape_z=(None, 100),
        shape_h=(None, 16)):
    s16 = flags.img_size_h // 16
    gf_dim = 64  # Dimension of gen filters in first conv layer. [64]
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    n_z = Input(shape_z)
    n_h = Input(shape_h)
    n = Concat(-1)([n_z, n_h])

    n = Dense(n_units=(gf_dim * 8 * s16 * s16),
              W_init=w_init,
              act=tf.identity,
              b_init=None)(n)

    n = Reshape(shape=[-1, s16, s16, gf_dim * 8])(n)

    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)

    n = DeConv2d(gf_dim * 4, (5, 5),
                 strides=(2, 2),
                 act=None,
                 W_init=w_init,
                 b_init=None)(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)

    n = DeConv2d(gf_dim * 2, (5, 5),
                 strides=(2, 2),
                 act=None,
                 W_init=w_init,
                 b_init=None)(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)

    n = DeConv2d(gf_dim, (5, 5),
                 strides=(2, 2),
                 act=None,
                 W_init=w_init,
                 b_init=None)(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)

    n = DeConv2d(flags.c_dim, (5, 5),
                 strides=(2, 2),
                 act=tf.nn.tanh,
                 W_init=w_init)(n)
    return tl.models.Model(inputs=[n_z, n_h], outputs=n, name='generator')
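
A minimal sampling sketch for the conditional generator above (illustration only); the 16-dimensional `h` stands in for whatever conditioning vector the original project uses, which is an assumption here.

G_dw = get_dwG(shape_z=(None, 100), shape_h=(None, 16))
G_dw.eval()
z = tf.random.normal([8, 100])
h = tf.random.uniform([8, 16])     # placeholder conditioning vectors
fake = G_dw([z, h])                # (8, flags.img_size_h, flags.img_size_h, flags.c_dim), tanh range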
Example #14
def get_G_zc(shape_z=(None, flags.zc_dim), gf_dim=64):
    # reference: DCGAN generator
    output_size = 64
    s16 = output_size // 16
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape_z)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
               b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)
    nn = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
    nn = DeConv2d(256, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='Generator_zc')
Example #15
def get_generator(
        shape,
        gf_dim=64,
        o_size=32,
        o_channel=3):  # Dimension of gen filters in first conv layer. [64]
    image_size = o_size
    s4 = image_size // 4
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 4 * s4 * s4))(ni)
    nn = Reshape(shape=(-1, s4, s4, gf_dim * 4))(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (1, 1))(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2))(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu)(nn)
    nn = DeConv2d(o_channel, (5, 5), (2, 2), act=tf.nn.tanh)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
def get_E(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ngf = 64
    isize = 64
    n_extra_layers = flags.n_extra_layers
    print(" for E")
    ni = Input(shape)
    nn = Conv2d(ngf, (4, 4), (2, 2), act=None, W_init=w_init, b_init=None)(ni)
    print(nn.shape)
    isize = isize // 2

    for t in range(n_extra_layers):
        nn = Conv2d(ngf, (3, 3), (1, 1), W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(decay=0.9,
                       act=tf.nn.relu,
                       gamma_init=gamma_init,
                       name=None)(nn)
        print(nn.shape)

    while isize > 4:
        ngf = ngf * 2
        nn = Conv2d(ngf, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(decay=0.9,
                       act=tf.nn.relu,
                       gamma_init=gamma_init,
                       name=None)(nn)
        print(nn.shape)
        isize = isize // 2

    nn = Conv2d(flags.z_dim, (4, 4), (1, 1),
                act=None,
                W_init=w_init,
                b_init=None,
                padding='VALID')(nn)
    print(nn.shape)
    nz = Reshape(shape=[-1, 128])(nn)

    return tl.models.Model(inputs=ni, outputs=nz)
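
A sketch (an assumption, not from the source) chaining the encoder above with the `get_G(shape_z, ngf=64)` generator defined earlier in this file (the one that prints "for G" and reshapes its input to [-1, 1, 1, 128]) as a 64x64 autoencoder; `flags.z_dim = 128` is implied by the two `Reshape` calls but remains an assumption.

E = get_E([None, 64, 64, 3])
G = get_G([None, 128])                             # the generator reshapes its input to [-1, 1, 1, 128]
E.eval()
G.eval()
x = tf.random.uniform([4, 64, 64, 3], -1.0, 1.0)   # stand-in batch of images in tanh range
x_rec = G(E(x))                                    # encode to a 128-d code, decode back to 64x64x3
rec_loss = tf.reduce_mean(tf.abs(x - x_rec))       # e.g. an L1 reconstruction loss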
Example #17
def get_G_cmnist(
        shape_z):  # Dimension of gen filters in first conv layer. [64]
    # input: (100,)
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    nz = Input(shape_z)
    n = Dense(n_units=3136, act=tf.nn.relu, W_init=w_init)(nz)
    n = Reshape(shape=[-1, 14, 14, 16])(n)
    n = DeConv2d(64, (5, 5), strides=(2, 2), W_init=w_init,
                 b_init=None)(n)  # (1, 28, 28, 64)
    n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(32, (5, 5),
                 strides=(1, 1),
                 padding="VALID",
                 W_init=w_init,
                 b_init=None)(n)  # (1, 32, 32, 32)
    n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(flags.c_dim, (5, 5),
                 strides=(2, 2),
                 act=tf.nn.tanh,
                 W_init=w_init)(n)  # (1, 64, 64, 3)
    return tl.models.Model(inputs=nz, outputs=n, name='generator_CMNIST')
def get_G(shape_z,
          gf_dim=64):  # Dimension of gen filters in first conv layer. [64]
    # # input: (100,)
    # w_init = tf.random_normal_initializer(stddev=0.02)
    # gamma_init = tf.random_normal_initializer(1., 0.02)
    # nz = Input(shape_z)
    # n = Dense(n_units=3136, act=tf.nn.relu, W_init=w_init)(nz)
    # n = Reshape(shape=[-1, 14, 14, 16])(n)
    # n = DeConv2d(64, (5, 5), strides=(2, 2), W_init=w_init, b_init=None)(n) # (1, 28, 28, 64)
    # n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    # n = DeConv2d(flags.c_dim, (5, 5), strides=(1, 1), padding="VALID", W_init=w_init, b_init=None)(n) # (1, 32, 32, 3)
    # return tl.models.Model(inputs=nz, outputs=n, name='generator')

    image_size = 32
    s16 = image_size // 16
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape_z)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
               b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(
        nn)  # [-1, 2, 2, gf_dim * 8]
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 4, 4, gf_dim * 4]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 8, 8, gf_dim * 2]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 16, 16, gf_dim *]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh,
                  W_init=w_init)(nn)  # [-1, 32, 32, 3]

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example #19
def MobileNetV1(pretrained=False, end_with='out', name=None):
    """Pre-trained MobileNetV1 model (static mode). Input shape [?, 224, 224, 3], value range [0, 1].

    Parameters
    ----------
    pretrained : boolean
        Whether to load pretrained weights. Default False.
    end_with : str
        The end point of the model [conv, depth1, depth2 ... depth13, globalmeanpool, out]. Default ``out`` i.e. the whole model.
    name : None or str
        Name for this model.

    Examples
    ---------
    Classify ImageNet classes, see `tutorial_models_mobilenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_mobilenetv1.py>`__

    >>> # get the whole model with pretrained weights
    >>> mobilenetv1 = tl.models.MobileNetV1(pretrained=True)
    >>> # use for inference
    >>> output = mobilenetv1(img1, is_train=False)
    >>> prob = tf.nn.softmax(output)[0].numpy()

    Extract features and train a classifier with 100 classes

    >>> # get model without the last layer
    >>> cnn = tl.models.MobileNetV1(pretrained=True, end_with='reshape').as_layer()
    >>> # add one more layer and build new model
    >>> ni = Input([None, 224, 224, 3], name="inputs")
    >>> nn = cnn(ni)
    >>> nn = Conv2d(100, (1, 1), (1, 1), name='out')(nn)
    >>> nn = Flatten(name='flatten')(nn)
    >>> model = tl.models.Model(inputs=ni, outputs=nn)
    >>> # train your own classifier (only update the last layer)
    >>> train_params = model.get_layer('out').trainable_weights

    Returns
    -------
        static MobileNetV1.
    """
    ni = Input([None, 224, 224, 3], name="input")

    for i in range(len(layer_names)):
        if i == 0:
            n = conv_block(ni,
                           n_filters[i],
                           strides=(2, 2),
                           name=layer_names[i])
        elif layer_names[i] in ['depth2', 'depth4', 'depth6', 'depth12']:
            n = depthwise_conv_block(n,
                                     n_filters[i],
                                     strides=(2, 2),
                                     name=layer_names[i])
        elif layer_names[i] == 'globalmeanpool':
            n = GlobalMeanPool2d(name='globalmeanpool')(n)
        elif layer_names[i] == 'reshape':
            n = Reshape([-1, 1, 1, 1024], name='reshape')(n)
        elif layer_names[i] == 'out':
            n = Conv2d(1000, (1, 1), (1, 1), name='out')(n)
            n = Flatten(name='flatten')(n)
        else:
            n = depthwise_conv_block(n, n_filters[i], name=layer_names[i])

        if layer_names[i] == end_with:
            break

    network = Model(inputs=ni, outputs=n, name=name)

    if pretrained:
        restore_params(network)

    return network