Example #1
def generator(input_shape):
    inputs = k.Input(input_shape)
    # b1: first encoder block (64 filters), then a stride-2 downsample
    x = compose(Conv2DNormLReLU(64), Conv2DNormLReLU(64))(inputs)
    x = kl.Add()([SeparableConv2D(128, strides=2)(x), Downsample(128)(x)])
    # b2: second encoder block (128 filters), then a stride-2 downsample
    x = compose(Conv2DNormLReLU(128), SeparableConv2D(128))(x)
    x = kl.Add()([SeparableConv2D(256, strides=2)(x), Downsample(256)(x)])

    # m: middle section, eight 256-filter inverted residual blocks
    x = Conv2DNormLReLU(256)(x)
    x = InvertedRes_block(x, alpha=2, filters=256, strides=1)  # r1
    x = InvertedRes_block(x, alpha=2, filters=256, strides=1)  # r2
    x = InvertedRes_block(x, alpha=2, filters=256, strides=1)  # r3
    x = InvertedRes_block(x, alpha=2, filters=256, strides=1)  # r4
    x = InvertedRes_block(x, alpha=2, filters=256, strides=1)  # r5
    x = InvertedRes_block(x, alpha=2, filters=256, strides=1)  # r6
    x = InvertedRes_block(x, alpha=2, filters=256, strides=1)  # r7
    x = InvertedRes_block(x, alpha=2, filters=256, strides=1)  # r8

    # u2: first decoder block, upsample to 1/2 resolution
    x = compose(Unsample(128), SeparableConv2D(128), Conv2DNormLReLU(128))(x)
    # u1: second decoder block, upsample to full resolution
    x = compose(Unsample(128), Conv2DNormLReLU(64), Conv2DNormLReLU(64))(x)
    # out: 1x1 conv to 3 channels, tanh activation
    x = compose(Conv2D(filters=3, kernel_size=1, strides=1),
                kl.Activation(tf.nn.tanh, dtype=tf.float32))(x)
    return k.Model(inputs, x)
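All of these snippets rely on a compose helper that chains single-argument layer callables from left to right; it is not shown in the examples themselves. A minimal sketch of the usual functools.reduce-based implementation (an assumption, not necessarily this repository's exact code):

from functools import reduce

def compose(*funcs):
    # Chain callables left to right: compose(f, g)(x) == g(f(x)).
    # Assumed implementation; the repository may define it differently.
    if funcs:
        return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
    raise ValueError('Composition of empty sequence not supported.')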
Example #2
    def __init__(self, num_features, eps=1e-5):
        super(SoftAdaLIN, self).__init__()
        self.norm = adaLIN(num_features, eps)

        self.w_gamma = tf.Variable(tf.zeros((1, num_features)), trainable=True)
        self.w_beta = tf.Variable(tf.zeros((1, num_features)), trainable=True)

        self.c_gamma = compose(kl.Dense(num_features),
                               kl.LeakyReLU(),
                               kl.Dense(num_features))
        self.c_beta = compose(kl.Dense(num_features),
                              kl.LeakyReLU(),
                              kl.Dense(num_features))
        self.s_gamma = kl.Dense(num_features)
        self.s_beta = kl.Dense(num_features)
Example #3
def Conv2DNormLReLU(filters,
                    kernel_size=3,
                    strides=1,
                    padding='valid',
                    use_bias=False):
    return compose(Conv2D(filters, kernel_size, strides, padding, use_bias),
                   InstanceNormalization(), kl.LeakyReLU(0.2))
Example #4
def InvertedRes_block(inputs, alpha, filters, strides, use_bias=False):

    # pw: 1x1 point-wise expansion
    bottleneck_filters = round(alpha * inputs.get_shape().as_list()[-1])
    x = Conv2DNormLReLU(bottleneck_filters, kernel_size=1,
                        use_bias=use_bias)(inputs)

    # dw: depth-wise 3x3 convolution
    x = compose(dwiseConv2D(), InstanceNormalization(), kl.LeakyReLU(0.2))(x)

    # pw & linear: 1x1 point-wise projection, no activation
    x = compose(Conv2D(filters, kernel_size=1), InstanceNormalization())(x)

    # element-wise residual add, only when channels match and strides == 1
    if (int(inputs.get_shape().as_list()[-1])
            == filters) and (strides == 1 or strides == (1, 1)):
        x = inputs + x

    return x
Example #5
def Unsample(filters, kernel_size=3):
    """
      An alternative to transposed convolution where we first resize, then convolve.
      See http://distill.pub/2016/deconv-checkerboard/
      For some reason the shape needs to be statically known for gradient propagation
      through tf.image.resize_images, but we only know that for fixed image size, so we
      plumb through a "training" argument
  """
    return compose(kl.UpSampling2D(interpolation='bilinear'),
                   SeparableConv2D(filters, kernel_size))
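A minimal usage sketch (the 32x32x256 input shape is only an assumed example): the bilinear UpSampling2D doubles height and width, and the padded 3x3 SeparableConv2D keeps that size while changing the channel count.

x = k.Input((32, 32, 256))
y = Unsample(128)(x)  # -> (None, 64, 64, 128): resized first, then convolved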
Example #6
def Conv2D(filters, kernel_size=3, strides=1, padding='valid', use_bias=False):
    f = []
    if kernel_size == 3 or kernel_size == (3, 3):
        f.append(kl.ZeroPadding2D())
    f.append(
        kl.Conv2D(filters,
                  kernel_size,
                  strides=strides,
                  padding=padding,
                  use_bias=use_bias))
    return compose(*f)
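For the 3x3 case, the manual ZeroPadding2D followed by a 'valid' convolution preserves the spatial size, i.e. it behaves like 'same' padding. A quick shape check (the 32x32x16 input is just an assumed example):

x = k.Input((32, 32, 16))
y = Conv2D(64)(x)  # ZeroPadding2D + 3x3 'valid' conv
print(y.shape)     # (None, 32, 32, 64): spatial size preserved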
Example #7
def Conv2DTransposeLReLU(filters,
                         kernel_size=2,
                         strides=2,
                         padding='same',
                         use_bias=False):
    return compose(
        kl.Conv2DTranspose(filters,
                           kernel_size,
                           strides,
                           padding,
                           use_bias=use_bias), InstanceNormalization(),
        kl.LeakyReLU(0.2))
Example #8
    def __init__(self, dim_out):
        super(ConvBlock, self).__init__()
        self.dim_out = dim_out

        self.ConvBlock1 = compose(
            InstanceNormalization(), kl.LeakyReLU(),
            ReflectionPadding2D((1, 1)),
            kl.Conv2D(dim_out // 2, kernel_size=3, strides=1, use_bias=False))

        self.ConvBlock2 = compose(
            InstanceNormalization(), kl.LeakyReLU(),
            ReflectionPadding2D((1, 1)),
            kl.Conv2D(dim_out // 4, kernel_size=3, strides=1, use_bias=False))

        self.ConvBlock3 = compose(
            InstanceNormalization(), kl.LeakyReLU(),
            ReflectionPadding2D((1, 1)),
            kl.Conv2D(dim_out // 4, kernel_size=3, strides=1, use_bias=False))

        self.ConvBlock4 = compose(
            InstanceNormalization(), kl.LeakyReLU(),
            kl.Conv2D(dim_out, kernel_size=1, strides=1, use_bias=False))
Example #9
    def __init__(self, dim_in, dim_out, use_res=True):
        super(HourGlass, self).__init__()
        self.use_res = use_res

        self.HG = compose(
            HourGlassBlock(dim_in, dim_out), ConvBlock(dim_out),
            kl.Conv2D(dim_out, kernel_size=1, strides=1, use_bias=False),
            InstanceNormalization(), kl.LeakyReLU())

        self.Conv1 = kl.Conv2D(3, kernel_size=1, strides=1)

        if self.use_res:
            self.Conv2 = kl.Conv2D(dim_out, kernel_size=1, strides=1)
            self.Conv3 = kl.Conv2D(dim_out, kernel_size=1, strides=1)
Example #10
def dwiseConv2D(kernel_size=3,
                strides=1,
                padding='valid',
                depth_multiplier=1,
                use_bias=False):
    f = []
    if kernel_size == 3 or kernel_size == (3, 3):
        f.append(kl.ZeroPadding2D())
    f.append(
        kl.DepthwiseConv2D(kernel_size,
                           strides,
                           padding,
                           depth_multiplier,
                           use_bias=use_bias))
    return compose(*f)
Example #11
def SeparableConv2D(filters,
                    kernel_size=3,
                    strides=1,
                    padding='valid',
                    use_bias=True):
    f = []
    if (kernel_size == 3 or kernel_size == (3, 3)) and (strides == 1
                                                        or strides == (1, 1)):
        f.append(kl.ZeroPadding2D())
    if (strides == 2 or strides == (2, 2)):
        f.append(kl.ZeroPadding2D())
    f.extend([
        kl.SeparableConv2D(filters,
                           kernel_size,
                           strides,
                           padding,
                           use_bias=use_bias),
        InstanceNormalization(),
        kl.LeakyReLU(0.2)
    ])
    return compose(*f)
Example #12
    def __init__(self, dim, use_bias=False):
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPadding2D((1, 1)),
            kl.Conv2D(dim,
                      kernel_size=3,
                      strides=1,
                      padding='valid',
                      use_bias=use_bias),
            kl.LeakyReLU()
        ]

        conv_block += [
            ReflectionPadding2D((1, 1)),
            kl.Conv2D(dim,
                      kernel_size=3,
                      strides=1,
                      padding='valid',
                      use_bias=use_bias),
        ]

        self.conv_block = compose(*conv_block)
Example #13
def Conv2DSN(filters,
             kernel_size,
             strides,
             padding='valid',
             use_bias=True,
             use_sn=False):
    f = []
    if use_sn:
        f.append(
            Conv2DSpectralNormal(filters,
                                 kernel_size,
                                 strides,
                                 padding=padding,
                                 use_bias=use_bias))
    else:
        f.append(
            kl.Conv2D(filters,
                      kernel_size,
                      strides,
                      padding=padding,
                      use_bias=use_bias))

    return compose(*f)
Example #14
def discriminator(input_shape: list, filters: int, nblocks: int,
                  use_sn: bool) -> k.Model:
    """
  
  Args:
      input_shape (list): 
      filters (int): filters
      nblocks (int): blocks numbers
      use_sn (bool): weather use SpectralNormalization
  
  Returns:
      k.Model: discriminator
  """
    inputs = k.Input(input_shape)
    inner_filters = filters // 2
    f = [
        Conv2DSN(inner_filters,
                 kernel_size=3,
                 strides=1,
                 padding='same',
                 use_bias=False,
                 use_sn=use_sn),
        kl.LeakyReLU(0.2)
    ]

    for i in range(1, nblocks):
        f.extend([
            Conv2DSN(inner_filters * 2,
                     kernel_size=3,
                     strides=2,
                     padding='same',
                     use_bias=False,
                     use_sn=use_sn),
            kl.LeakyReLU(0.2),
            # kl.Dropout(0.2),
            Conv2DSN(inner_filters * 4,
                     kernel_size=3,
                     strides=1,
                     padding='same',
                     use_bias=False,
                     use_sn=use_sn),
            InstanceNormalization(),
            kl.LeakyReLU(0.2)
        ])

        inner_filters *= 2
    f.extend([
        Conv2DSN(inner_filters * 2,
                 kernel_size=3,
                 strides=1,
                 padding='same',
                 use_bias=False,
                 use_sn=use_sn),
        InstanceNormalization(),
        kl.LeakyReLU(0.2),
        # kl.Dropout(0.2),
        Conv2DSN(1,
                 kernel_size=3,
                 strides=1,
                 padding='same',
                 use_bias=False,
                 use_sn=use_sn),
        kl.Activation('linear', dtype=tf.float32)
    ])

    x = compose(*f)(inputs)
    return k.Model(inputs, x)
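A usage sketch under assumed hyper-parameters (256x256 RGB input, 64 base filters, 3 blocks): the network ends in a 1-channel linear activation map, so it scores overlapping patches rather than emitting a single scalar per image.

disc = discriminator([256, 256, 3], filters=64, nblocks=3, use_sn=True)
disc.summary()  # final layer: 1-channel linear map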
Example #15
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Discriminator, self).__init__()
        # input_nc is kept for API compatibility; Keras layers infer the input
        # channel count, and tfa.layers.SpectralNormalization (TensorFlow
        # Addons) stands in for torch's nn.utils.spectral_norm.
        model = [
            ReflectionPadding2D((1, 1)),
            tfa.layers.SpectralNormalization(
                kl.Conv2D(ndf,
                          kernel_size=4,
                          strides=2,
                          padding='valid',
                          use_bias=True)),
            kl.LeakyReLU(0.2)
        ]

        for i in range(1, n_layers - 2):
            mult = 2**(i - 1)
            model += [
                ReflectionPadding2D((1, 1)),
                tfa.layers.SpectralNormalization(
                    kl.Conv2D(ndf * mult * 2,
                              kernel_size=4,
                              strides=2,
                              padding='valid',
                              use_bias=True)),
                kl.LeakyReLU(0.2)
            ]

        mult = 2**(n_layers - 2 - 1)
        model += [
            ReflectionPadding2D((1, 1)),
            tfa.layers.SpectralNormalization(
                kl.Conv2D(ndf * mult * 2,
                          kernel_size=4,
                          strides=1,
                          padding='valid',
                          use_bias=True)),
            kl.LeakyReLU(0.2)
        ]

        # Class Activation Map
        mult = 2**(n_layers - 2)
        self.gap_fc = tfa.layers.SpectralNormalization(
            kl.Dense(1, use_bias=False))
        self.gmp_fc = tfa.layers.SpectralNormalization(
            kl.Dense(1, use_bias=False))
        self.conv1x1 = kl.Conv2D(ndf * mult,
                                 kernel_size=1,
                                 strides=1,
                                 use_bias=True)
        self.leaky_relu = kl.LeakyReLU(0.2)

        self.pad = ReflectionPadding2D((1, 1))
        self.conv = tfa.layers.SpectralNormalization(
            kl.Conv2D(1,
                      kernel_size=4,
                      strides=1,
                      padding='valid',
                      use_bias=False))

        self.model = compose(*model)
Example #16
def Downsample(filters=256, kernel_size=3):
    return compose(
        kl.Lambda(lambda x: tf.image.resize(
            x, (tf.shape(x)[1] // 2, tf.shape(x)[2] // 2))),
        SeparableConv2D(filters, kernel_size))
Example #17
    def __init__(self, ngf=64, img_size=256, light=False):
        super(ResnetGenerator, self).__init__()
        self.light = light

        self.ConvBlock1 = compose(
            ReflectionPadding2D((3, 3)),
            kl.Conv2D(ngf,
                      kernel_size=7,
                      strides=1,
                      padding='valid',
                      use_bias=False), InstanceNormalization(), kl.LeakyReLU())

        self.HourGlass1 = HourGlass(ngf, ngf)
        self.HourGlass2 = HourGlass(ngf, ngf)

        # Down-Sampling
        self.DownBlock1 = compose(
            ReflectionPadding2D((1, 1)),
            kl.Conv2D(ngf * 2,
                      kernel_size=3,
                      strides=2,
                      padding='valid',
                      use_bias=False), InstanceNormalization(), kl.LeakyReLU())

        self.DownBlock2 = compose(
            ReflectionPadding2D((1, 1)),
            kl.Conv2D(ngf * 4,
                      kernel_size=3,
                      strides=2,
                      padding='valid',
                      use_bias=False), InstanceNormalization(), kl.LeakyReLU())

        # Encoder Bottleneck
        self.EncodeBlock1 = ResnetBlock(ngf * 4)
        self.EncodeBlock2 = ResnetBlock(ngf * 4)
        self.EncodeBlock3 = ResnetBlock(ngf * 4)
        self.EncodeBlock4 = ResnetBlock(ngf * 4)

        # Class Activation Map
        self.gap_fc = kl.Dense(1)
        self.gmp_fc = kl.Dense(1)
        self.conv1x1 = kl.Conv2D(ngf * 4, kernel_size=1, strides=1)
        self.relu = kl.LeakyReLU()

        # Gamma, Beta block
        if self.light:
            self.FC = compose(kl.Dense(ngf * 4), kl.LeakyReLU(),
                              kl.Dense(ngf * 4), kl.LeakyReLU())
        else:
            self.FC = compose(kl.Dense(ngf * 4), kl.LeakyReLU(),
                              kl.Dense(ngf * 4), kl.LeakyReLU())

        # Decoder Bottleneck
        self.DecodeBlock1 = ResnetSoftAdaLINBlock(ngf * 4)
        self.DecodeBlock2 = ResnetSoftAdaLINBlock(ngf * 4)
        self.DecodeBlock3 = ResnetSoftAdaLINBlock(ngf * 4)
        self.DecodeBlock4 = ResnetSoftAdaLINBlock(ngf * 4)

        # Up-Sampling
        self.UpBlock1 = compose(
            kl.UpSampling2D((2, 2)), ReflectionPadding2D((1, 1)),
            kl.Conv2D(ngf * 2,
                      kernel_size=3,
                      strides=1,
                      padding='valid',
                      use_bias=False), LIN(ngf * 2), kl.LeakyReLU())

        self.UpBlock2 = compose(
            kl.UpSampling2D((2, 2)), ReflectionPadding2D((1, 1)),
            kl.Conv2D(ngf,
                      kernel_size=3,
                      strides=1,
                      padding='valid',
                      use_bias=False), LIN(ngf), kl.LeakyReLU())

        self.HourGlass3 = HourGlass(ngf, ngf)
        self.HourGlass4 = HourGlass(ngf, ngf, False)

        self.ConvBlock2 = compose(
            ReflectionPadding2D((3, 3)),
            kl.Conv2D(3,
                      kernel_size=7,
                      strides=1,
                      padding='valid',
                      use_bias=False), kl.Activation('tanh'))