Example #1
 def __init__(self,
              n_filter,
              in_channels,
              strides=(1, 1),
              is_down_sample=False,
              data_format="channels_first"):
     super().__init__()
     self.data_format = data_format
     self.is_down_sample = is_down_sample
     self.main_block = LayerList([
         Conv2d(n_filter=n_filter,
                in_channels=in_channels,
                filter_size=(3, 3),
                strides=strides,
                b_init=None,
                data_format=self.data_format),
         BatchNorm2d(decay=0.9,
                     act=tf.nn.relu,
                     is_train=True,
                     num_features=n_filter,
                     data_format=self.data_format),
         Conv2d(n_filter=n_filter,
                in_channels=n_filter,
                filter_size=(3, 3),
                strides=(1, 1),
                b_init=None,
                data_format=self.data_format),
         BatchNorm2d(decay=0.9,
                     is_train=True,
                     num_features=n_filter,
                     data_format=self.data_format),
     ])
     if (self.is_down_sample):
         self.down_sample = LayerList([
             Conv2d(n_filter=n_filter,
                    in_channels=in_channels,
                    filter_size=(3, 3),
                    strides=strides,
                    b_init=None,
                    data_format=self.data_format),
             BatchNorm2d(decay=0.9,
                         is_train=True,
                         num_features=n_filter,
                         data_format=self.data_format)
         ])
Example #2
def build_generator(image_size=128, gf_dim=64):
  s16 = image_size // 16
  w_init = tf.random_normal_initializer(stddev=0.02)
  gamma_init = tf.random_normal_initializer(1., 0.02)

  ni = Input([None, Z_DIM])
  nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init, b_init=None)(ni)
  nn = Reshape(shape=[-1, s16, s16, gf_dim*8])(nn)
  nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
  nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
  nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
  nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
  nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
  nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
  nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
  nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

  return tl.models.Model(inputs=ni, outputs=nn)
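A minimal usage sketch for the generator above, assuming it lives in the same module and that the global Z_DIM it references equals 100 (both are assumptions, not part of the original example):

import tensorflow as tf

Z_DIM = 100  # assumed latent size; build_generator reads this module-level global
G = build_generator(image_size=128, gf_dim=64)
G.eval()  # put the BatchNorm2d layers into inference mode before the forward pass
z = tf.random.normal([4, Z_DIM])
fake = G(z)
print(fake.shape)  # expected (4, 128, 128, 3): four stride-2 DeConv2d stages upsample 8x8 to 128x128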
Example #3
def get_discriminator(shape, df_dim=64): # Dimension of discrim filters in first conv layer. [64]
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x : tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    nn = Conv2d(df_dim, (5, 5), (2, 2), act=lrelu, W_init=w_init)(ni)
    nn = Conv2d(df_dim*2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(df_dim*4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(df_dim*8, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Flatten()(nn)
    nn = Dense(n_units=1, act=tf.identity, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='discriminator')
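A hedged sketch of one discriminator update using this model with the standard DCGAN loss; the 64x64 input size, batch size, and optimizer settings are illustrative assumptions, and fake_images would normally come from a generator:

import tensorflow as tf
import tensorlayer as tl

D = get_discriminator([None, 64, 64, 3], df_dim=64)
D.train()  # training mode so the BatchNorm2d layers update their statistics
d_optimizer = tf.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)

real_images = tf.random.uniform([8, 64, 64, 3], -1., 1.)  # stand-in for a real batch
fake_images = tf.random.uniform([8, 64, 64, 3], -1., 1.)  # stand-in for G(z)

with tf.GradientTape() as tape:
    logits_real = D(real_images)
    logits_fake = D(fake_images)
    d_loss = tl.cost.sigmoid_cross_entropy(logits_real, tf.ones_like(logits_real)) + \
             tl.cost.sigmoid_cross_entropy(logits_fake, tf.zeros_like(logits_fake))
grads = tape.gradient(d_loss, D.trainable_weights)
d_optimizer.apply_gradients(zip(grads, D.trainable_weights))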
Example #4
def get_G(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(1024), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, 1024])(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = Dense(n_units=(8 * 8 * 256), W_init=w_init, b_init=None)(nn)
    nn = Reshape(shape=[-1, 8, 8, 256])(nn)  # ???
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(n_filter=256,
                  filter_size=(4, 4),
                  strides=(1, 1),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=256,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=128,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=64,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=1,
                  filter_size=(4, 4),
                  strides=(1, 1),
                  act=tf.nn.sigmoid,
                  W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='G')
Example #5
def get_G(input_shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)

    nin = Input(input_shape)
    n = Conv2d(64, (3, 3), (1, 1),
               act=tf.nn.relu,
               padding='SAME',
               W_init=w_init)(nin)
    temp = n

    # B residual blocks
    for i in range(16):
        nn = Conv2d(64, (3, 3), (1, 1),
                    padding='SAME',
                    W_init=w_init,
                    b_init=None)(n)
        nn = BatchNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(64, (3, 3), (1, 1),
                    padding='SAME',
                    W_init=w_init,
                    b_init=None)(nn)
        nn = BatchNorm2d(gamma_init=g_init)(nn)
        nn = Elementwise(tf.add)([n, nn])
        n = nn

    n = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init,
               b_init=None)(n)
    n = BatchNorm(gamma_init=g_init)(n)
    n = Elementwise(tf.add)([n, temp])
    # B residual blocks end

    n = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)

    n = Conv2d(256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)

    nn = Conv2d(3, (1, 1), (1, 1),
                act=tf.nn.tanh,
                padding='SAME',
                W_init=w_init)(n)
    G = Model(inputs=nin, outputs=nn, name="generator")
    return G
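Inference sketch for this SRGAN-style generator: the two scale-2 SubpixelConv2d stages should turn a low-resolution patch into one 4x larger. The 96x96 patch size is an assumption for illustration:

import tensorflow as tf

G = get_G([1, 96, 96, 3])
G.eval()
lr_patch = tf.random.uniform([1, 96, 96, 3], -1., 1.)  # stand-in for a normalized LR image
sr_patch = G(lr_patch)
print(sr_patch.shape)  # expected (1, 384, 384, 3)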
Example #6
 def __init__(self,
              n_filter=512,
              in_channels=3,
              data_format="channels_first"):
     super().__init__()
     self.data_format = data_format
     self.out_channels = n_filter
     self.conv1 = Conv2d(n_filter=64,
                         in_channels=in_channels,
                         filter_size=(7, 7),
                         strides=(2, 2),
                         b_init=None,
                         data_format=self.data_format)
     self.bn1 = BatchNorm2d(decay=0.9,
                            act=tf.nn.relu,
                            is_train=True,
                            num_features=64,
                            data_format=self.data_format)
     self.maxpool = MaxPool2d(filter_size=(3, 3),
                              strides=(2, 2),
                              data_format=self.data_format)
     self.res_block_2_1 = self.Res_block(n_filter=64,
                                         in_channels=64,
                                         strides=(1, 1),
                                         is_down_sample=False,
                                         data_format=self.data_format)
     self.res_block_2_2 = self.Res_block(n_filter=64,
                                         in_channels=64,
                                         strides=(1, 1),
                                         is_down_sample=False,
                                         data_format=self.data_format)
     self.res_block_3_1 = self.Res_block(n_filter=128,
                                         in_channels=64,
                                         strides=(2, 2),
                                         is_down_sample=True,
                                         data_format=self.data_format)
     self.res_block_3_2 = self.Res_block(n_filter=128,
                                         in_channels=128,
                                         strides=(1, 1),
                                         is_down_sample=False,
                                         data_format=self.data_format)
     self.res_block_4_1 = self.Res_block(n_filter=256,
                                         in_channels=128,
                                         strides=(2, 2),
                                         is_down_sample=True,
                                         data_format=self.data_format)
     self.res_block_4_2 = self.Res_block(n_filter=256,
                                         in_channels=256,
                                         strides=(1, 1),
                                         is_down_sample=False,
                                         data_format=self.data_format)
     self.res_block_5_1 = self.Res_block(n_filter=n_filter,
                                         in_channels=256,
                                         strides=(2, 2),
                                         is_down_sample=True,
                                         data_format=self.data_format)
Example #7
def get_D(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    def lrelu(x):
        return tf.nn.leaky_relu(x, flags.leaky_rate)

    ni = Input(shape)
    nn = Conv2d(n_filter=64,
                filter_size=(4, 4),
                strides=(2, 2),
                act=lrelu,
                W_init=w_init)(ni)
    nn = Conv2d(n_filter=128,
                filter_size=(4, 4),
                strides=(2, 2),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(n_filter=256,
                filter_size=(4, 4),
                strides=(2, 2),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(n_filter=256,
                filter_size=(4, 4),
                strides=(1, 1),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(n_filter=256,
                filter_size=(4, 4),
                strides=(1, 1),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Flatten()(nn)
    nn = Dense(n_units=1024, W_init=w_init)(nn)
    nn = BatchNorm(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    mid = nn
    d = Dense(n_units=1, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni, outputs=[d, mid], name='D')
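This discriminator returns two tensors: the final logits d and the 1024-unit hidden activation mid, which can serve as a feature-matching target. A small forward-pass sketch, assuming the module-level flags object defines leaky_rate (e.g. 0.2) and that 64x64 inputs are used:

import tensorflow as tf

D = get_D([None, 64, 64, 3])  # assumes flags.leaky_rate is defined for the lrelu closure above
D.eval()
x = tf.random.uniform([2, 64, 64, 3], -1., 1.)
logits, feats = D(x)  # multi-output models return the outputs in order
print(logits.shape, feats.shape)  # expected (2, 1) and (2, 1024)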
Example #8
def get_generator(
        shape,
        gf_dim=64,
        o_size=32,
        o_channel=3):  # Dimension of gen filters in first conv layer. [64]
    image_size = o_size
    s4 = image_size // 4
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 4 * s4 * s4))(ni)
    nn = Reshape(shape=(-1, s4, s4, gf_dim * 4))(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (1, 1))(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2))(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu)(nn)
    nn = DeConv2d(o_channel, (5, 5), (2, 2), act=tf.nn.tanh)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example #9
def get_generator(shape, gf_dim=64): # Dimension of gen filters in first conv layer. [64]
    image_size = 64
    s16 = image_size // 16
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim*8])(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example #10
def get_E(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    ni = Input(shape)  # (1, 64, 64, 3)
    n = Conv2d(3, (5, 5), (2, 2), act=None, W_init=w_init,
               b_init=None)(ni)  # (1, 16, 16, 3)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(32, (5, 5), (1, 1),
               padding="VALID",
               act=None,
               W_init=w_init,
               b_init=None)(n)  # (1, 12, 12, 32)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(64, (5, 5), (2, 2), act=None, W_init=w_init,
               b_init=None)(n)  # (1, 6, 6, 64)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Flatten(name='flatten')(n)
    nz = Dense(n_units=flags.z_dim, act=None, W_init=w_init)(n)
    return tl.models.Model(inputs=ni, outputs=nz, name='encoder')
Example #11
def get_discriminator(shape=[None, flags.output_size, flags.output_size, flags.c_dim],
                      df_dim=64, name=None):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    nn = Conv2d(df_dim, (5, 5), (2, 2), act=lrelu, W_init=w_init)(ni)
    nn = Conv2d(df_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)

    nn = Conv2d(df_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)

    nn = Conv2d(df_dim * 8, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)

    nn = Flatten()(nn)
    nn = Dense(n_units=1, act=tf.identity, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name=name)
Example #12
def get_G_cifar_10(
        shape_z):  # Dimension of gen filters in first conv layer. [64]
    # input: (flags.z_dim,)
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)  # gamma initializer used by the BatchNorm2d layers below
    ni = Input(shape_z)
    n = Dense(n_units=128 * 4 * 4, act=tf.nn.relu, W_init=w_init)(ni)
    n = Reshape(shape=[-1, 4, 4, 128])(n)  # reshape the dense output to a 4x4x128 feature map for the conv blocks
    # res blocks
    nn = Conv2d(128, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(n)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=g_init)(nn)
    nn = Conv2d(128, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=None, gamma_init=g_init)(nn)
    n = Elementwise(tf.add)([n, nn])
    # res blocks
    nn = Conv2d(128, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(n)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=g_init)(nn)
    nn = Conv2d(128, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=None, gamma_init=g_init)(nn)
    n = Elementwise(tf.add)([n, nn])
    # res blocks
    nn = Conv2d(128, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(n)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=g_init)(nn)
    nn = Conv2d(128, (3, 3), (1, 1), act=None, W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=None, gamma_init=g_init)(nn)
    n = Elementwise(tf.add)([n, nn])
    n = Conv2d(3, (3, 3), (1, 1), act=tf.nn.relu, W_init=w_init)(n)
    return tl.models.Model(inputs=ni, outputs=n, name='generator_CIFAR10')
Example #13
def generator_I(shape, nc=3, ngf=64, nz=60):

    ni = Input(shape)
    nn = DeConv2d(in_channels=nz,
                  n_filter=ngf * 8,
                  filter_size=(6, 6),
                  strides=(1, 1),
                  padding='VALID',
                  b_init=None)(ni)
    nn = BatchNorm2d(num_features=ngf * 8, act=tf.nn.relu)(nn)
    nn = DeConv2d(in_channels=ngf * 8,
                  n_filter=ngf * 4,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  padding='SAME',
                  b_init=None)(nn)
    nn = BatchNorm2d(num_features=ngf * 4, act=tf.nn.relu)(nn)
    nn = DeConv2d(in_channels=ngf * 4,
                  n_filter=ngf * 2,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  padding='SAME',
                  b_init=None)(nn)
    nn = BatchNorm2d(num_features=ngf * 2, act=tf.nn.relu)(nn)
    nn = DeConv2d(in_channels=ngf * 2,
                  n_filter=ngf,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  padding='SAME',
                  b_init=None)(nn)
    nn = BatchNorm2d(num_features=ngf, act=tf.nn.relu)(nn)
    out = DeConv2d(in_channels=ngf,
                   n_filter=nc,
                   filter_size=(4, 4),
                   strides=(2, 2),
                   padding='SAME',
                   b_init=None,
                   act=tf.nn.tanh)(nn)

    return Model(inputs=ni, outputs=out, name='generator_I')
Example #14
def get_G_cmnist(
        shape_z):  # Dimension of gen filters in first conv layer. [64]
    # input: (100,)
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    nz = Input(shape_z)
    n = Dense(n_units=3136, act=tf.nn.relu, W_init=w_init)(nz)
    n = Reshape(shape=[-1, 14, 14, 16])(n)
    n = DeConv2d(64, (5, 5), strides=(2, 2), W_init=w_init,
                 b_init=None)(n)  # (1, 28, 28, 64)
    n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(32, (5, 5),
                 strides=(1, 1),
                 padding="VALID",
                 W_init=w_init,
                 b_init=None)(n)  # (1, 32, 32, 32)
    n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    n = DeConv2d(flags.c_dim, (5, 5),
                 strides=(2, 2),
                 act=tf.nn.tanh,
                 W_init=w_init)(n)  # (1, 64, 64, 3)
    return tl.models.Model(inputs=nz, outputs=n, name='generator_CMNIST')
Example #15
def get_G(shape_z,
          gf_dim=64):  # Dimension of gen filters in first conv layer. [64]
    # # input: (100,)
    # w_init = tf.random_normal_initializer(stddev=0.02)
    # gamma_init = tf.random_normal_initializer(1., 0.02)
    # nz = Input(shape_z)
    # n = Dense(n_units=3136, act=tf.nn.relu, W_init=w_init)(nz)
    # n = Reshape(shape=[-1, 14, 14, 16])(n)
    # n = DeConv2d(64, (5, 5), strides=(2, 2), W_init=w_init, b_init=None)(n) # (1, 28, 28, 64)
    # n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    # n = DeConv2d(flags.c_dim, (5, 5), strides=(1, 1), padding="VALID", W_init=w_init, b_init=None)(n) # (1, 32, 32, 3)
    # return tl.models.Model(inputs=nz, outputs=n, name='generator')

    image_size = 32
    s16 = image_size // 16
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape_z)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
               b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(
        nn)  # [-1, 2, 2, gf_dim * 8]
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 4, 4, gf_dim * 4]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 8, 8, gf_dim * 2]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 16, 16, gf_dim]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh,
                  W_init=w_init)(nn)  # [-1, 32, 32, 3]

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example #16
    def __init__(self, nc=3, ndf=64):
        super(discriminator_I, self).__init__()

        self.conv1 = Conv2d(in_channels=nc,
                            n_filter=ndf,
                            filter_size=(4, 4),
                            strides=(2, 2),
                            padding='SAME',
                            b_init=None,
                            act=tf.nn.leaky_relu)
        self.conv2 = Conv2d(in_channels=ndf,
                            n_filter=ndf * 2,
                            filter_size=(4, 4),
                            strides=(2, 2),
                            padding='SAME',
                            b_init=None)
        self.batch1 = BatchNorm2d(num_features=ndf * 2, act=tf.nn.leaky_relu)
        self.conv3 = Conv2d(in_channels=ndf * 2,
                            n_filter=ndf * 4,
                            filter_size=(4, 4),
                            strides=(2, 2),
                            padding='SAME',
                            b_init=None)
        self.batch2 = BatchNorm2d(num_features=ndf * 4, act=tf.nn.leaky_relu)
        self.conv4 = Conv2d(in_channels=ndf * 4,
                            n_filter=ndf * 8,
                            filter_size=(4, 4),
                            strides=(2, 2),
                            padding='SAME',
                            b_init=None)
        self.batch3 = BatchNorm2d(num_features=ndf * 8, act=tf.nn.leaky_relu)
        self.conv5 = Conv2d(in_channels=ndf * 8,
                            n_filter=1,
                            filter_size=(6, 6),
                            strides=(1, 1),
                            padding='VALID',
                            b_init=None,
                            act=tf.nn.sigmoid)
Example #17
def dw_conv_block(n_filter,
                  in_channels,
                  filter_size=(3, 3),
                  strides=(1, 1),
                  dilation_rate=(1, 1),
                  W_init=initializer,
                  b_init=initializer,
                  data_format="channels_first"):
    layer_list = []
    layer_list.append(
        DepthwiseConv2d(filter_size=filter_size,
                        strides=strides,
                        in_channels=in_channels,
                        dilation_rate=dilation_rate,
                        W_init=W_init,
                        b_init=None,
                        data_format=data_format))
    layer_list.append(
        BatchNorm2d(decay=0.99,
                    act=tf.nn.relu,
                    num_features=in_channels,
                    data_format=data_format,
                    is_train=True))
    layer_list.append(
        Conv2d(n_filter=n_filter,
               filter_size=(1, 1),
               strides=(1, 1),
               in_channels=in_channels,
               W_init=W_init,
               b_init=None,
               data_format=data_format))
    layer_list.append(
        BatchNorm2d(decay=0.99,
                    act=tf.nn.relu,
                    num_features=n_filter,
                    data_format=data_format,
                    is_train=True))
    return layers.LayerList(layer_list)
Example #18
 def __init__(self,parts=CocoPart,limbs=CocoLimb,colors=None,K_size=18,L_size=17,win=384,hin=384,wout=12,hout=12,wnei=9,hnei=9\
     ,lmd_rsp=0.25,lmd_iou=1,lmd_coor=5,lmd_size=5,lmd_limb=0.5,backbone=None,data_format="channels_first"):
     super().__init__()
     #construct params
     self.parts=parts
     self.limbs=limbs
     self.colors=colors
     self.K=K_size
     self.L=L_size
     self.win=win
     self.hin=hin
     self.wout=wout
     self.hout=hout
     self.hnei=hnei
     self.wnei=wnei
     self.n_pos=K_size
     self.lmd_rsp=lmd_rsp
     self.lmd_iou=lmd_iou
     self.lmd_coor=lmd_coor
     self.lmd_size=lmd_size
     self.lmd_limb=lmd_limb
     self.data_format=data_format
     
     self.output_dim=6*self.K+self.hnei*self.wnei*self.L
     #construct networks
     if(backbone==None):
         self.backbone=self.Resnet_18(n_filter=512,in_channels=3,data_format=data_format)
     else:
         self.backbone=backbone(scale_size=32,data_format=self.data_format)
     self.add_layer_1=LayerList([
         Conv2d(n_filter=512,in_channels=self.backbone.out_channels,filter_size=(3,3),strides=(1,1),data_format=self.data_format),
         BatchNorm2d(decay=0.9,act=lambda x:tl.act.leaky_relu(x,alpha=0.1),is_train=True,num_features=512,data_format=self.data_format)
     ])
     self.add_layer_2=LayerList([
         Conv2d(n_filter=512,in_channels=512,filter_size=(3,3),strides=(1,1),data_format=self.data_format),
         BatchNorm2d(decay=0.9,act=lambda x:tl.act.leaky_relu(x,alpha=0.1),is_train=True,num_features=512,data_format=self.data_format)
     ])
     self.add_layer_3=Conv2d(n_filter=self.output_dim,in_channels=512,filter_size=(1,1),strides=(1,1),data_format=self.data_format)
Example #19
 def __init__(self,
              in_channels=64,
              n_filter=64,
              strides=(1, 1),
              data_format="channels_first"):
     super().__init__()
     self.in_channels = in_channels
     self.n_filter = n_filter
     self.strides = strides
     self.data_format = data_format
     self.downsample = None
     if (self.strides != (1, 1) or self.in_channels != self.n_filter):
         self.downsample=LayerList([
             Conv2d(n_filter=self.n_filter,in_channels=self.in_channels,filter_size=(1,1),strides=self.strides,b_init=None,\
                 data_format=self.data_format),
             BatchNorm2d(is_train=True,num_features=self.n_filter,data_format=self.data_format)
             ])
     self.main_block = LayerList([
         Conv2d(n_filter=self.n_filter,
                in_channels=self.in_channels,
                filter_size=(3, 3),
                strides=self.strides,
                b_init=None,
                data_format=self.data_format),
         BatchNorm2d(is_train=True,
                     num_features=self.n_filter,
                     act=tf.nn.relu,
                     data_format=self.data_format),
         Conv2d(n_filter=self.n_filter,
                in_channels=self.n_filter,
                filter_size=(3, 3),
                b_init=None,
                data_format=self.data_format),
         BatchNorm2d(is_train=True,
                     num_features=self.n_filter,
                     data_format=self.data_format)
     ])
Example #20
 def conv_block(self,
                n_filter=32,
                in_channels=3,
                filter_size=(3, 3),
                strides=(1, 1),
                padding="SAME"):
     layer_list = []
     layer_list.append(Conv2d(n_filter=n_filter,in_channels=in_channels,filter_size=filter_size,strides=strides,\
         data_format=self.data_format,padding=padding))
     layer_list.append(
         BatchNorm2d(num_features=n_filter,
                     is_train=True,
                     act=tf.nn.relu,
                     data_format=self.data_format))
     return LayerList(layer_list)
Example #21
def get_D(input_shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    df_dim = 8
    lrelu = lambda x: tl.act.lrelu(x, 0.2)

    nin = Input(input_shape)
    n = Conv2d(df_dim, (4, 4), (2, 2), act=lrelu, padding='SAME', W_init=w_init)(nin)

    n = Conv2d(df_dim * 2, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 4, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 16, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 32, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 16, (1, 1), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (1, 1), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    nn = BatchNorm2d(gamma_init=gamma_init)(n)

    n = Conv2d(df_dim * 2, (1, 1), (1, 1), padding='SAME', W_init=w_init, b_init=None)(nn)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 2, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(gamma_init=gamma_init)(n)
    n = Elementwise(combine_fn=tf.add, act=lrelu)([n, nn])

    n = Flatten()(n)
    no = Dense(n_units=1, W_init=w_init)(n)
    D = Model(inputs=nin, outputs=no)
    return D
Example #22
def convolutional(input_layer,
                  filters_shape,
                  downsample=False,
                  activate=True,
                  bn=True,
                  activate_type='leaky',
                  name=None):
    if downsample:
        input_layer = ZeroPad2d(((1, 0), (1, 0)))(input_layer)
        padding = 'VALID'
        strides = 2
    else:
        strides = 1
        padding = 'SAME'

    if bn:
        b_init = None
    else:
        b_init = tl.initializers.constant(value=0.0)

    conv = Conv2d(n_filter=filters_shape[-1],
                  filter_size=(filters_shape[0], filters_shape[1]),
                  strides=(strides, strides),
                  padding=padding,
                  b_init=b_init,
                  name=name)(input_layer)

    if bn:
        if activate:
            if activate_type == 'leaky':
                conv = BatchNorm2d(act='lrelu0.1')(conv)
            elif activate_type == 'mish':
                conv = BatchNorm2d(act=mish)(conv)
        else:
            conv = BatchNorm2d()(conv)
    return conv
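A brief sketch of wiring this helper into a functional model (the input resolution and filter shapes are illustrative assumptions; filters_shape is (k_h, k_w, in_channels, n_filter), and only its first two and last entries are used by the helper):

import tensorlayer as tl
from tensorlayer.layers import Input

ni = Input([None, 416, 416, 3])
n = convolutional(ni, filters_shape=(3, 3, 3, 32))                   # 3x3 conv + BN + leaky relu
n = convolutional(n, filters_shape=(3, 3, 32, 64), downsample=True)  # zero-pad then stride-2 conv
model = tl.models.Model(inputs=ni, outputs=n, name='conv_stack')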
Example #23
 def __init__(self):
     super(get_G,self).__init__()
     w_init = tf.random_normal_initializer(stddev=0.02)
     g_init = tf.random_normal_initializer(1., 0.02)
     self.conv1 = Conv2d(n_filter=64,filter_size=(3, 3),strides=(1, 1),in_channels=3, act=tf.nn.relu, padding='SAME', W_init=w_init,b_init=None)
     self.res = [0]*16
     for i in range(16):
         self.res[i] = ResBlock()
     self.conv2 = Conv2d(n_filter=64,filter_size=(3, 3),strides=(1, 1),in_channels=64, padding='SAME', W_init=w_init,b_init=None)
     self.bn1 = BatchNorm2d(num_features = 64,gamma_init=g_init, act=tf.nn.relu)
     self.conv3 = Conv2d(n_filter=256,filter_size=(3, 3),strides=(1, 1),in_channels=64, padding='SAME', W_init=w_init,b_init=None)
     self.conv4 = Conv2d(n_filter=256,filter_size=(3, 3),strides=(1, 1),in_channels=64, padding='SAME', W_init=w_init,b_init=None)
     self.conv5 = Conv2d(n_filter=3,filter_size=(3, 3),strides=(1, 1),in_channels=64, act=tf.nn.tanh, padding='SAME', W_init=w_init,b_init=None)  
     self.subconv1 = SubpixelConv2d(scale=2, n_out_channels=256,in_channels=256, act=tf.nn.relu)
     self.add1 = Elementwise(tf.add)
Example #24
def get_G(input_shape):
    """Get a Generator model with randomly inizialized weights.

    Args:
        input_shape : tuple
            Input shape of the Input layer of the model.
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)

    nin = Input(input_shape)
    n = Conv2d(64, (3, 3), (1, 1), act=tf.nn.relu,
               padding='SAME', W_init=w_init)(nin)
    temp = n

    def get_G_res_block(n, w_init, g_init):
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME',
                    W_init=w_init, b_init=None)(n)
        nn = BatchNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME',
                    W_init=w_init, b_init=None)(nn)
        nn = BatchNorm2d(gamma_init=g_init)(nn)
        nn = Elementwise(tf.add)([n, nn])
        return nn

    def get_conv_block(n, w_init):
        n = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(n)
        n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)
        return n

    # B residual blocks
    for _ in range(16):
        n = get_G_res_block(n, w_init, g_init)

    n = Conv2d(64, (3, 3), (1, 1), padding='SAME',
               W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(gamma_init=g_init)(n)
    n = Elementwise(tf.add)([n, temp])
    # B residual blocks end

    for _ in range(2):
        n = get_conv_block(n, w_init)

    nn = Conv2d(3, (1, 1), (1, 1), act=tf.nn.tanh,
                padding='SAME', W_init=w_init)(n)
    G = Model(inputs=nin, outputs=nn)  # , name="generator"
    return G
Example #25
def separable_block(n_filter=32,
                    in_channels=3,
                    filter_size=(3, 3),
                    strides=(1, 1),
                    act=tf.nn.relu,
                    padding="SAME",
                    data_format="channels_first"):
    layer_list = []
    layer_list.append(SeparableConv2d(n_filter=n_filter,in_channels=in_channels,filter_size=filter_size,strides=strides,act=act,\
        data_format=data_format,padding=padding))
    layer_list.append(
        BatchNorm2d(num_features=n_filter,
                    decay=0.999,
                    is_train=True,
                    act=act,
                    data_format=data_format))
    return LayerList(layer_list)
Example #26
 def __init__(self,
              in_channels=3,
              n_filter=64,
              scale_size=8,
              data_format="channels_first"):
     super().__init__()
     self.in_channels = in_channels
     self.n_filter = n_filter
     self.scale_size = scale_size
     self.data_format = data_format
     self.out_channels = 2048
     self.conv1 = Conv2d(n_filter=64,
                         in_channels=self.in_channels,
                         filter_size=(7, 7),
                         strides=(2, 2),
                         padding="SAME",
                         b_init=None,
                         data_format=self.data_format)
     self.bn1 = BatchNorm2d(is_train=True,
                            num_features=64,
                            data_format=self.data_format,
                            act=tf.nn.relu)
     self.maxpool1 = MaxPool2d(filter_size=(3, 3),
                               strides=(2, 2),
                               data_format=self.data_format)
     self.layer1 = self.Basic_block(in_channels=64,
                                    n_filter=64,
                                    strides=(1, 1),
                                    data_format=self.data_format)
     self.layer2 = self.Basic_block(in_channels=256,
                                    n_filter=128,
                                    strides=(2, 2),
                                    data_format=self.data_format)
     if (self.scale_size == 8):
         strides = (1, 1)
     else:
         strides = (2, 2)
     self.layer3 = self.Basic_block(in_channels=512,
                                    n_filter=256,
                                    strides=strides,
                                    data_format=self.data_format)
     self.layer4 = self.Basic_block(in_channels=1024,
                                    n_filter=512,
                                    strides=strides,
                                    data_format=self.data_format)
Example #27
def get_generator(shape,gf_dim=64):
    image_size=256
    s16=image_size//16
    w_init=tf.random_normal_initializer(stddev=0.02)
    gamma_init=tf.random_normal_initializer(1.,0.02)
    ni=Input(shape)
    nn=Dense(n_units=(gf_dim*16*s16*s16),W_init=w_init,b_init=None)(ni)
    nn=Reshape(shape=[-1,s16,s16,gf_dim*16])(nn)
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=DeConv2d(gf_dim*16,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=n1+nn
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=DeConv2d(gf_dim*8,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=n1+nn
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=DeConv2d(gf_dim*8,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=n1+nn
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=DeConv2d(gf_dim*4,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=n1+nn
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=DeConv2d(gf_dim*2,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=n1+nn
    #non local block maybe bugs here
    f=Conv2d(gf_dim//4,(1,1),(1,1),W_init=w_init,b_init=None)(nn)
    f=MaxPool2d((2,2),(2,2))(f)
    g=Conv2d(gf_dim//4,(1,1),(1,1),W_init=w_init,b_init=None)(nn)
    h=Conv2d(gf_dim,(1,1),(1,1),W_init=w_init,b_init=None)(nn)
    h=MaxPool2d((2,2),(2,2))(h)
    s = tf.matmul(Reshape(shape=[g.shape[0],-1,g.shape[1]])(g), Reshape(shape=[f.shape[0],-1,f.shape[1]])(f), transpose_b=True)
    beta=tf.nn.softmax(s)
    o=tf.matmul(beta,Reshape(shape=[h.shape[0],-1,h.shape[1]])(h))
    o=Reshape(shape=[nn.shape[0],nn.shape[1],nn.shape[2],gf_dim])(o)
    o=Conv2d(gf_dim*2,(1,1),(1,1),W_init=w_init,b_init=None)(o)
    nn=nn+gamma_init*o
    #
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=DeConv2d(gf_dim,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=n1+nn
    nn=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    nn=Conv2d(3, (3, 3), (1, 1), act=tf.nn.tanh, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni,outputs=nn,name='generator')
Example #28
def get_discriminator(shape,df_dim=64):
    w_init=tf.random_normal_initializer(stddev=0.02)
    gamma_init=tf.random_normal_initializer(1.,0.02)
    ni=Input(shape)
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(ni)
    n1=Conv2d(df_dim,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=n1+ni
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=Conv2d(df_dim*2,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=nn+n1;
    f=Conv2d(df_dim//4,(1,1),(1,1),W_init=w_init,b_init=None)(nn)
    f=MaxPool2d((2,2),(2,2))(f)
    g=Conv2d(df_dim//4,(1,1),(1,1),W_init=w_init,b_init=None)(nn)
    h=Conv2d(df_dim,(1,1),(1,1),W_init=w_init,b_init=None)(nn)
    h=MaxPool2d((2,2),(2,2))(h)
    s = tf.matmul(Reshape(shape=[g.shape[0],-1,g.shape[1]])(g), Reshape(shape=[f.shape[0],-1,f.shape[1]])(f), transpose_b=True)
    beta=tf.nn.softmax(s)
    o=tf.matmul(beta,Reshape(shape=[h.shape[0],-1,h.shape[1]])(h))
    o=Reshape(shape=[nn.shape[0],nn.shape[1],nn.shape[2],df_dim])(o)
    o=Conv2d(df_dim*2,(1,1),(1,1),W_init=w_init,b_init=None)(o)
    nn=nn+gamma_init*o
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=Conv2d(df_dim*4,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=nn+n1;
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=Conv2d(df_dim*8,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=nn+n1;
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=Conv2d(df_dim*8,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=nn+n1;
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(nn)
    n1=Conv2d(df_dim*16,(5,5),(2,2),W_init=w_init,b_init=None)(n1);
    nn=nn+n1;
    n1=Conv2d(df_dim*16,(5,5),(2,2),W_init=w_init,b_init=None)(nn);
    n1=BatchNorm2d(decay=0.9,act=tf.nn.relu,gamma_init=gamma_init,name=None)(n1)
    nn=nn+n1;
    nn=tf.reduce_sum(nn,axis=[1,2])
    nn=Dense(n_units=1,W_init=w_init,act=tf.identity)(nn)
    return tl.models.Model(inputs=ni,outputs=nn,name='discriminator')
Example #29
def unet(ni, out_channel, is_tanh, out_size=flags.img_size_h):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ngf = 64
    conv1 = Conv2d(ngf, (3, 3), (1, 1), W_init=w_init, act=lrelu)(ni)

    conv2 = Conv2d(ngf, (4, 4), (2, 2), W_init=w_init, act=None,
                   b_init=None)(conv1)
    conv2 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv2)

    conv3 = Conv2d(ngf * 2, (4, 4), (1, 1),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv2)
    conv3 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv3)
    conv4 = Conv2d(ngf * 2, (4, 4), (2, 2),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv3)
    conv4 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv4)

    conv5 = Conv2d(ngf * 4, (4, 4), (1, 1),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv4)
    conv5 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv5)
    conv6 = Conv2d(ngf * 4, (4, 4), (2, 2),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv5)
    conv6 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv6)

    conv7 = Conv2d(ngf * 8, (4, 4), (1, 1),
                   W_init=w_init,
                   act=None,
                   b_init=None)(conv6)
    conv7 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init,
                        name=None)(conv7)
    conv8 = Conv2d(ngf * 8, (4, 4), (2, 2),
                   act=lrelu,
                   W_init=w_init,
                   b_init=None)(conv7)
    # 8 8 512 now start upsample

    c_size = conv8.shape[-2]
    ##############################################################################################
    no = None
    for _ in range(1):
        up8 = DeConv2d(ngf * 8, (4, 4), (2, 2), W_init=w_init,
                       b_init=None)(conv8)
        up8 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up8)
        up7 = Concat(concat_dim=3)([up8, conv7])
        up7 = DeConv2d(ngf * 8, (4, 4), (1, 1), W_init=w_init,
                       b_init=None)(up7)
        up7 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up7)
        c_size = c_size * 2
        if c_size == out_size:
            no = up7
            break
        up6 = Concat(concat_dim=3)([up7, conv6])
        up6 = DeConv2d(ngf * 4, (4, 4), (2, 2), W_init=w_init,
                       b_init=None)(up6)
        up6 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up6)
        up5 = Concat(concat_dim=3)([up6, conv5])
        up5 = DeConv2d(ngf * 4, (4, 4), (1, 1), W_init=w_init,
                       b_init=None)(up5)
        up5 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up5)
        c_size = c_size * 2
        if c_size == out_size:
            no = up5
            break
        up4 = Concat(concat_dim=3)([up5, conv4])
        up4 = DeConv2d(ngf * 2, (4, 4), (2, 2), W_init=w_init,
                       b_init=None)(up4)
        up4 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up4)
        up3 = Concat(concat_dim=3)([up4, conv3])
        up3 = DeConv2d(ngf * 2, (4, 4), (1, 1), W_init=w_init,
                       b_init=None)(up3)
        up3 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up3)
        c_size = c_size * 2
        if c_size == out_size:
            no = up3
            break
        up2 = Concat(concat_dim=3)([up3, conv2])
        up2 = DeConv2d(ngf * 1, (4, 4), (2, 2), W_init=w_init,
                       b_init=None)(up2)
        up2 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up2)
        up1 = Concat(concat_dim=3)([up2, conv1])
        up1 = DeConv2d(ngf * 1, (4, 4), (1, 1), W_init=w_init,
                       b_init=None)(up1)
        up1 = BatchNorm2d(decay=0.9,
                          act=lrelu,
                          gamma_init=gamma_init,
                          name=None)(up1)
        c_size = c_size * 2
        if c_size == out_size:
            no = up1
            break
    if is_tanh:
        up0 = DeConv2d(out_channel, (3, 3), (1, 1),
                       W_init=w_init,
                       act=tf.nn.tanh)(no)
    else:
        up0 = DeConv2d(out_channel, (3, 3), (1, 1),
                       W_init=w_init,
                       b_init=None,
                       act=None)(no)

    return up0
Example #30
 def __init__(self, scale_size=8, data_format="channels_last"):
     super().__init__()
     self.data_format = data_format
     self.scale_size = scale_size
     self.out_channels = 320
     if (self.scale_size == 8):
         strides = (1, 1)
     else:
         strides = (2, 2)
     #block_1 n=1
     self.block_1_1 = Conv2d(n_filter=32,
                             in_channels=3,
                             filter_size=(3, 3),
                             strides=(2, 2),
                             data_format=self.data_format)
     self.block_1_2 = BatchNorm2d(num_features=32,
                                  is_train=True,
                                  act=tf.nn.relu6,
                                  data_format=self.data_format)
     #block_2 n=1
     self.block_2_1 = self.InvertedResidual(n_filter=16,
                                            in_channels=32,
                                            strides=(1, 1),
                                            exp_ratio=1,
                                            data_format=self.data_format)
     #block_3 n=2
     self.block_3_1 = self.InvertedResidual(n_filter=24,
                                            in_channels=16,
                                            strides=(2, 2),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_3_2 = self.InvertedResidual(n_filter=24,
                                            in_channels=24,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     #block_4 n=3
     self.block_4_1 = self.InvertedResidual(n_filter=32,
                                            in_channels=24,
                                            strides=(2, 2),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_4_2 = self.InvertedResidual(n_filter=32,
                                            in_channels=32,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_4_3 = self.InvertedResidual(n_filter=32,
                                            in_channels=32,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     #block_5 n=4
     self.block_5_1 = self.InvertedResidual(n_filter=64,
                                            in_channels=32,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_5_2 = self.InvertedResidual(n_filter=64,
                                            in_channels=64,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_5_3 = self.InvertedResidual(n_filter=64,
                                            in_channels=64,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_5_4 = self.InvertedResidual(n_filter=64,
                                            in_channels=64,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     #block_6 n=3
     self.block_6_1 = self.InvertedResidual(n_filter=96,
                                            in_channels=64,
                                            strides=strides,
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_6_2 = self.InvertedResidual(n_filter=96,
                                            in_channels=96,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_6_3 = self.InvertedResidual(n_filter=96,
                                            in_channels=96,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     #block_7 n=3
     self.block_7_1 = self.InvertedResidual(n_filter=160,
                                            in_channels=96,
                                            strides=strides,
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_7_2 = self.InvertedResidual(n_filter=160,
                                            in_channels=160,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     self.block_7_3 = self.InvertedResidual(n_filter=160,
                                            in_channels=160,
                                            strides=(1, 1),
                                            exp_ratio=6,
                                            data_format=self.data_format)
     #block_8 n=1
     self.block_8 = self.InvertedResidual(n_filter=320,
                                          in_channels=160,
                                          strides=(1, 1),
                                          exp_ratio=6,
                                          data_format=self.data_format)