Example #1
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=None,
                 ksize=3,
                 pad="SAME",
                 downsample=False,
                 activation=None):
        """Residual block with an optional 2x average-pool downsample.

        Args:
            in_channels: number of input feature channels.
            out_channels: number of output feature channels.
            hidden_channels: channels of the intermediate conv; defaults to
                ``in_channels`` when None.
            ksize: kernel size of the two main spectrally-normalized convs.
            pad: padding mode passed to the main convs.
            downsample: if True, the block reduces spatial resolution by 2.
            activation: activation layer; defaults to a fresh ReLU per
                instance. (The original default ``tf.keras.layers.ReLU()``
                was evaluated once at ``def`` time, so one layer object was
                shared by every Block instance — the mutable-default-argument
                pitfall.)
        """
        super(Block, self).__init__()
        # Build the default activation per instance instead of sharing one.
        if activation is None:
            activation = tf.keras.layers.ReLU()
        initializer = tf.keras.initializers.glorot_uniform()
        self.activation = activation
        self.downsample = downsample
        # A learnable 1x1 shortcut is needed whenever the channel count
        # changes or the spatial resolution is reduced.
        self.learnable_sc = (in_channels != out_channels) or downsample
        hidden_channels = in_channels if hidden_channels is None else hidden_channels

        self.c1 = SNConv2D(hidden_channels,
                           kernel_size=ksize,
                           padding=pad,
                           kernel_initializer=initializer)
        self.c2 = SNConv2D(out_channels,
                           kernel_size=ksize,
                           padding=pad,
                           kernel_initializer=initializer)
        self.avg_pool = tf.keras.layers.AveragePooling2D(pool_size=2,
                                                         strides=2,
                                                         padding="SAME")

        if self.learnable_sc:
            # 1x1 conv to project the shortcut to out_channels.
            self.c_sc = SNConv2D(out_channels,
                                 kernel_size=1,
                                 padding="VALID",
                                 kernel_initializer=initializer)
    def __init__(self, n_channels):
        """Self-attention (non-local) block built from spectrally
        normalized 1x1 convolutions over ``n_channels`` feature maps."""
        super(SNNonLocalBlock, self).__init__()

        # Query/key projections compress the channel dimension by 8x.
        self.theta = SNConv2D(n_channels // 8, 1, strides=1,
                              padding='SAME', use_bias=False)
        self.phi = SNConv2D(n_channels // 8, 1, strides=1,
                            padding='SAME', use_bias=False)

        # Spatial subsampling applied on the key/value path.
        self.max_pool = MaxPool2D(pool_size=2, strides=2)

        # Value projection keeps half the channels.
        self.g = SNConv2D(n_channels // 2, 1, strides=1,
                          padding='SAME', use_bias=False)

        # Scalar gate on the attention output; zero-initialized so the
        # block initially contributes nothing (identity behavior).
        self.sigma = self.add_weight(shape=(), name="sigma",
                                     initializer='zeros', trainable=True)

        # Final 1x1 conv restoring the full channel count.
        self.conv = SNConv2D(filters=n_channels, kernel_size=1,
                             padding='VALID', strides=1)
Example #3
0
    def __init__(self, ch=64, activation=None):
        """PatchGAN discriminator from spectrally-normalized ResNet blocks.

        Args:
            ch: base channel width; deeper stages use multiples of it.
            activation: activation layer; defaults to a fresh ReLU per
                instance (the original default was a single ReLU object
                created at ``def`` time and shared by all instances —
                mutable-default-argument pitfall).
        """
        super(SNResNetPatchGanDiscriminator, self).__init__()
        if activation is None:
            activation = tf.keras.layers.ReLU()
        self.activation = activation
        initializer = tf.keras.initializers.glorot_uniform()

        self.block1 = OptimizedBlock(ch * 2, ksize=4)
        self.block2 = BlockDown(ch * 2,
                                ch * 4,
                                ksize=4,
                                activation=activation,
                                downsample=True)
        self.block3 = BlockDown(ch * 4,
                                ch * 8,
                                ksize=4,
                                activation=activation,
                                downsample=True)
        # Self-attention after the deepest downsampling stage.
        self.self_atten = SNNonLocalBlock(ch * 8)

        self.bn = tf.keras.layers.BatchNormalization()
        # bias disabled here because batch norm follows this conv.
        self.c1 = SNConv2D(filters=ch * 8,
                           kernel_size=4,
                           strides=1,
                           padding="VALID",
                           kernel_initializer=initializer,
                           use_bias=False)

        self.pad = tf.keras.layers.ZeroPadding2D()
        # Final 1-channel map of per-patch real/fake logits.
        self.c2 = SNConv2D(1,
                           4,
                           strides=1,
                           padding="VALID",
                           kernel_initializer=initializer)
Example #4
0
    def __init__(self, out_channels, ksize=3, pad="SAME", activation=None):
        """First (input) discriminator block: two convs plus a shortcut conv.

        Args:
            out_channels: output feature channels.
            ksize: kernel size of the two main convolutions.
            pad: padding mode of the two main convolutions.
            activation: activation layer; defaults to a fresh ReLU per
                instance (fixes the shared-default-instance pitfall of
                ``activation=tf.keras.layers.ReLU()`` in the signature).
        """
        super(OptimizedBlock, self).__init__()
        if activation is None:
            activation = tf.keras.layers.ReLU()
        initializer = tf.keras.initializers.glorot_uniform()
        self.activation = activation

        self.c1 = SNConv2D(out_channels, kernel_size=ksize, padding=pad, kernel_initializer=initializer)
        self.c2 = SNConv2D(out_channels, kernel_size=ksize, padding=pad, kernel_initializer=initializer)
        # 1x1 shortcut projection; padding is irrelevant for a 1x1 kernel.
        self.c_sc = SNConv2D(out_channels, kernel_size=1, padding="VALID", kernel_initializer=initializer)
        self.avg_pool = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2, padding="VALID")
Example #5
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=None,
                 kernel_size=3,
                 padding='SAME',
                 activation=None,
                 upsample=False,
                 n_classes=0):
        """Generator residual block with optional upsampling and
        (optionally class-conditional) batch normalization.

        Args:
            in_channels: input feature channels.
            out_channels: output feature channels.
            hidden_channels: channels of the intermediate conv; defaults to
                ``out_channels`` when None.
            kernel_size: kernel size of the two main convolutions.
            padding: padding mode of the two main convolutions.
            activation: activation layer; defaults to a fresh ReLU per
                instance (the original default shared one ReLU object across
                all instances — mutable-default-argument pitfall).
            upsample: if True, the block upsamples spatially by 2.
            n_classes: >0 selects class-conditional batch normalization.
        """
        super(Block, self).__init__()
        if activation is None:
            activation = tf.keras.layers.ReLU()
        initializer = tf.keras.initializers.Orthogonal()
        kernel_regularizer = conv_orthogonal_regularizer(0.0001)

        self.activation = activation
        self.upsample = upsample
        # Learnable shortcut whenever channels change or resolution grows.
        self.learnable_sc = in_channels != out_channels or upsample
        hidden_channels = out_channels if hidden_channels is None else hidden_channels
        self.n_classes = n_classes
        self.unpooling_2d = tf.keras.layers.UpSampling2D()

        self.c1 = SNConv2D(hidden_channels,
                           kernel_size=kernel_size,
                           padding=padding,
                           kernel_initializer=initializer,
                           kernel_regularizer=kernel_regularizer)

        self.c2 = SNConv2D(out_channels,
                           kernel_size=kernel_size,
                           padding=padding,
                           kernel_initializer=initializer,
                           kernel_regularizer=kernel_regularizer)
        if n_classes > 0:
            # Class-conditional BN when a label set is provided.
            self.b1 = ConditionalBatchNorm(num_categories=n_classes)
            self.b2 = ConditionalBatchNorm(num_categories=n_classes)
        else:
            self.b1 = tf.keras.layers.BatchNormalization()
            self.b2 = tf.keras.layers.BatchNormalization()
        if self.learnable_sc:
            # 1x1 conv shortcut to match the output channel count.
            self.c_sc = SNConv2D(out_channels,
                                 kernel_size=1,
                                 padding="VALID",
                                 kernel_initializer=initializer,
                                 kernel_regularizer=kernel_regularizer)
Example #6
0
    def __init__(self,
                 ch=64,
                 dim_z=128,
                 bottom_width=4,
                 activation=None,
                 n_classes=0,
                 distribution="normal"):
        """ResNet generator with self-attention and orthogonal
        initialization/regularization on every weight layer.

        Args:
            ch: base channel width; stages use multiples of it.
            dim_z: latent vector dimensionality.
            bottom_width: spatial size of the first feature map.
            activation: activation layer; defaults to a fresh ReLU per
                instance (avoids one ReLU object being shared by every
                generator via the mutable default argument).
            n_classes: >0 enables class-conditional residual blocks.
            distribution: name of the latent distribution, stored for the
                sampling code.
        """
        super(ResNetGenerator, self).__init__()
        if activation is None:
            activation = tf.keras.layers.ReLU()
        initializer = tf.keras.initializers.Orthogonal()
        kernel_regularizer = conv_orthogonal_regularizer(0.0001)
        dense_regularizer = dense_orthogonal_regularizer(0.0001)
        self.bottom_width = bottom_width
        self.activation = activation
        self.distribution = distribution
        self.dim_z = dim_z
        self.n_classes = n_classes

        # Projects z to a bottom_width x bottom_width x (16*ch) tensor.
        self.l1 = SNDense(units=(bottom_width**2) * ch * 16,
                          kernel_initializer=initializer,
                          kernel_regularizer=dense_regularizer)
        self.block2 = Block(ch * 16,
                            ch * 8,
                            activation=activation,
                            upsample=True,
                            n_classes=n_classes)
        self.block3 = Block(ch * 8,
                            ch * 8,
                            activation=activation,
                            upsample=True,
                            n_classes=n_classes)
        self.block4 = Block(ch * 8,
                            ch * 4,
                            activation=activation,
                            upsample=True,
                            n_classes=n_classes)
        # Self-attention at mid resolution.
        self.self_atten = SNNonLocalBlock(
            ch * 4, kernel_regularizer=kernel_regularizer)
        self.block5 = Block(ch * 4,
                            ch * 2,
                            activation=activation,
                            upsample=True,
                            n_classes=n_classes)
        self.block6 = Block(ch * 2,
                            ch,
                            activation=activation,
                            upsample=True,
                            n_classes=n_classes)
        self.b6 = tf.keras.layers.BatchNormalization()
        # Final conv to a 3-channel image.
        self.l6 = SNConv2D(3,
                           kernel_size=3,
                           strides=1,
                           padding="SAME",
                           kernel_initializer=initializer,
                           kernel_regularizer=kernel_regularizer)
Example #7
0
    def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=None,
                 n_classes=0, distribution="normal"):
        """ResNet generator (Glorot-initialized variant, no self-attention).

        Args:
            ch: base channel width.
            dim_z: latent vector dimensionality.
            bottom_width: spatial size of the first feature map.
            activation: activation layer; defaults to a fresh ReLU per
                instance (fixes the shared-default-ReLU-object pitfall of
                the original signature).
            n_classes: >0 enables class-conditional residual blocks.
            distribution: latent distribution name, stored for sampling.
        """
        super(ResNetGenerator, self).__init__()
        if activation is None:
            activation = tf.keras.layers.ReLU()
        initializer = tf.keras.initializers.glorot_uniform()
        self.bottom_width = bottom_width
        self.activation = activation
        self.distribution = distribution
        self.dim_z = dim_z
        self.n_classes = n_classes

        # Projects z to a bottom_width x bottom_width x (16*ch) tensor.
        self.l1 = SNDense(units=(bottom_width ** 2) * ch * 16, kernel_initializer=initializer)
        self.block2 = Block(ch * 16, ch * 8, activation=activation, upsample=True, n_classes=n_classes)
        self.block3 = Block(ch * 8, ch * 4, activation=activation, upsample=True, n_classes=n_classes)
        self.block4 = Block(ch * 4, ch * 2, activation=activation, upsample=True, n_classes=n_classes)
        self.block5 = Block(ch * 2, ch, activation=activation, upsample=True, n_classes=n_classes)
        self.b6 = tf.keras.layers.BatchNormalization()
        # Final conv to a 3-channel image.
        self.l6 = SNConv2D(3, kernel_size=3, strides=1, padding="SAME", kernel_initializer=initializer)
Example #8
0
    def __init__(self,
                 out_filters,
                 kernel_size,
                 activation,
                 apply_batchnorm=True):
        """Stride-2 spectrally-normalized conv stage with optional batch norm."""
        super(DownSample, self).__init__()

        self.activation = activation
        self.apply_batchnorm = apply_batchnorm

        # The conv bias is redundant when batch norm immediately follows it.
        use_bias = not apply_batchnorm
        self.conv = SNConv2D(out_filters,
                             kernel_size,
                             strides=2,
                             padding='SAME',
                             kernel_initializer=tf.keras.initializers.glorot_uniform(),
                             use_bias=use_bias)

        if apply_batchnorm:
            self.bn = tf.keras.layers.BatchNormalization()