Example #1
    def __init__(self, filters, norm='instancenorm', **kwargs):
        super(ResidualBlock, self).__init__(**kwargs)
        self.filters = filters
        self.input_spec = [InputSpec(ndim=4)]
        self.norm_type = norm

        self.conv_1 = Conv2D(filters=self.filters,
                             kernel_size=(3, 3),
                             strides=1,
                             padding='same')

        if self.norm_type == 'instancenorm':
            self.norm_1 = InstanceNormalization(axis=-1,
                                                center=False,
                                                scale=False)
            self.norm_2 = InstanceNormalization(axis=-1,
                                                center=False,
                                                scale=False)
        elif self.norm_type == 'batchnorm':
            self.norm_1 = BatchNormalization()
            self.norm_2 = BatchNormalization()

        self.acti_1 = Activation('relu')
        self.conv_2 = Conv2D(filters=self.filters,
                             kernel_size=(3, 3),
                             strides=1,
                             padding='same')
        self.adder = Add()
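The __init__ above only builds the sub-layers; ResidualBlock still needs a call method before it can be used in a model. A possible sketch, assuming the standard Keras call signature, the attribute names defined above, and that norm is one of the two supported values (hypothetical, not part of the source):

    # Hypothetical call() for ResidualBlock, sketched from __init__ above:
    def call(self, inputs):
        x = self.conv_1(inputs)
        x = self.norm_1(x)          # assumes norm_1 was created in __init__
        x = self.acti_1(x)
        x = self.conv_2(x)
        x = self.norm_2(x)
        return self.adder([x, inputs])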
Example #2
    def build_discriminator(self):

        model = Sequential()
        model.add(
            Conv2D(64,
                   kernel_size=4,
                   strides=2,
                   padding='same',
                   input_shape=self.img_shape))
        model.add(LeakyReLU(alpha=0.8))
        model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())

        model.summary()

        img = Input(shape=self.img_shape)
        features = model(img)

        validity = Conv2D(1, kernel_size=4, strides=1,
                          padding='same')(features)

        label = Flatten()(features)
        label = Dense(self.num_classes + 1, activation="softmax")(label)

        return Model(img, [validity, label])
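Because build_discriminator returns a two-output Model (a patch validity map and a class label), it is compiled with one loss per output. A hedged usage sketch; the loss functions, weights, and optimizer settings are illustrative and assume Adam has been imported:

    # Illustrative usage, e.g. inside the class's __init__ (hypothetical):
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(
        loss=['mse', 'categorical_crossentropy'],  # validity, label
        loss_weights=[0.5, 0.5],
        optimizer=Adam(0.0002, 0.5))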
Example #3
 def conv2d(layer_input, filters, f_size=4):
     """Layers used during downsampling"""
     d = Conv2D(filters, kernel_size=f_size, strides=2,
                padding='same')(layer_input)
     d = LeakyReLU(alpha=0.2)(d)
     d = InstanceNormalization()(d)
     return d
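A short usage sketch: chaining the helper builds a strided-convolution encoder, halving the spatial resolution at each step. The input shape and filter counts are illustrative:

    img = Input(shape=(128, 128, 3))
    d1 = conv2d(img, filters=32)    # 64x64
    d2 = conv2d(d1, filters=64)     # 32x32
    d3 = conv2d(d2, filters=128)    # 16x16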
Example #4
    def test_instancenorm_flat(self):
        # Check basic usage of instancenorm

        model = self._create_and_fit_Sequential_model(InstanceNormalization(),
                                                      (64,))
        self.assertTrue(hasattr(model.layers[0], 'gamma'))
        self.assertTrue(hasattr(model.layers[0], 'beta'))
Example #5
 def downsampling2d(layer_input, filters: int):
     """Layers used in the encoder"""
     d = Conv2D(filters=filters, kernel_size=4, strides=2,
                padding='same')(layer_input)
     d = LeakyReLU(alpha=0.2)(d)
     d = InstanceNormalization()(d)
     return d
Example #6
 def d_layer(layer_input, filters, f_size=4, normalization=True):
     """Discriminator layer"""
     d = Conv2D(filters, kernel_size=f_size, strides=2,
                padding='same')(layer_input)
     d = LeakyReLU(alpha=0.2)(d)
     if normalization:
         d = InstanceNormalization()(d)
     return d
Example #7
    def _residual_block(self, input_layer, n_filters):
        short_circuit = input_layer
        x = Conv2D(filters=n_filters,
                   kernel_size=(3, 3),
                   strides=1,
                   padding='same')(input_layer)
        x = InstanceNormalization(axis=-1, center=False, scale=False)(x)
        x = Activation('relu')(x)

        x = Conv2D(filters=n_filters,
                   kernel_size=(3, 3),
                   strides=1,
                   padding='same')(x)
        x = InstanceNormalization(axis=-1, center=False, scale=False)(x)

        x = Add()([short_circuit, x])
        return x
Example #8
 def conv2d(x, filters, kernel_size, strides, padding):
     x = ZeroPadding2D(padding=padding)(x)
     x = Conv2D(filters,
                kernel_size,
                strides,
                padding='valid',
                use_bias=False)(x)
     x = ReLU()(x)
     x = InstanceNormalization(axis=-1)(x)
     return x
Example #9
 def deconv2d(x, filters, kernel_size, strides, padding):
     x = UpSampling2D(2)(x)
     x = Conv2D(filters,
                kernel_size,
                strides,
                padding='same',
                use_bias=False)(x)
     x = ReLU()(x)
     x = InstanceNormalization(axis=-1)(x)
     return x
Example #10
def test_weights():
    # Check if weights get initialized correctly
    layer = GroupNormalization(groups=1, scale=False, center=False)
    layer.build((None, 3, 4))
    assert len(layer.trainable_weights) == 0
    assert len(layer.weights) == 0

    layer = InstanceNormalization()
    layer.build((None, 3, 4))
    assert len(layer.trainable_weights) == 2
    assert len(layer.weights) == 2
Example #11
    def test_weights(self):
        # Check if weights get initialized correctly
        layer = GroupNormalization(groups=1, scale=False, center=False)
        layer.build((None, 3, 4))
        self.assertEqual(len(layer.trainable_weights), 0)
        self.assertEqual(len(layer.weights), 0)

        layer = InstanceNormalization()
        layer.build((None, 3, 4))
        self.assertEqual(len(layer.trainable_weights), 2)
        self.assertEqual(len(layer.weights), 2)
Example #12
# The (center, scale) arguments imply a parametrized pytest test; the
# decorators below are a reconstruction (assumes pytest and tensorflow
# are imported, with InstanceNormalization from tensorflow_addons).
@pytest.mark.parametrize("center", [True, False])
@pytest.mark.parametrize("scale", [True, False])
def test_instance_norm_compute_output_shape(center, scale):
    target_variables_len = [center, scale].count(True)
    target_trainable_variables_len = [center, scale].count(True)
    layer1 = InstanceNormalization(groups=2, center=center, scale=scale)
    layer1.build(input_shape=[8, 28, 28, 16])  # build()
    assert len(layer1.variables) == target_variables_len
    assert len(layer1.trainable_variables) == target_trainable_variables_len

    layer2 = InstanceNormalization(groups=2, center=center, scale=scale)
    layer2.compute_output_shape(input_shape=[8, 28, 28, 16])  # compute_output_shape()
    assert len(layer2.variables) == target_variables_len
    assert len(layer2.trainable_variables) == target_trainable_variables_len

    layer3 = InstanceNormalization(groups=2, center=center, scale=scale)
    layer3(tf.random.normal(shape=[8, 28, 28, 16]))  # call()
    assert len(layer3.variables) == target_variables_len
    assert len(layer3.trainable_variables) == target_trainable_variables_len
Example #13
 def upsampling2d(layer_input, skip_input, filters: int):
     """
     Layers used in the decoder
     :param layer_input: input layer
     :param skip_input: another input from the corresponding encoder block
     :param filters: number of filters
     """
     u = UpSampling2D(size=2)(layer_input)
     u = Conv2D(filters=filters,
                kernel_size=4,
                strides=1,
                padding='same',
                activation='relu')(u)
     u = InstanceNormalization()(u)
     u = Concatenate()([u, skip_input])
     return u
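Paired with the downsampling2d helper from Example #5, this gives a U-Net-style skip connection: each decoder stage doubles the spatial resolution and concatenates the result with the matching encoder output. A hedged sketch with illustrative shapes:

    img = Input(shape=(128, 128, 3))
    d1 = downsampling2d(img, filters=32)              # 64x64
    d2 = downsampling2d(d1, filters=64)               # 32x32
    u1 = upsampling2d(d2, skip_input=d1, filters=32)  # 64x64, concat with d1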
Example #14
    def __init__(self, filters, norm='instancenorm', **kwargs):
        super(DownsamplingBlock, self).__init__(**kwargs)
        self.filters = filters
        self.norm_type = norm
        self.input_spec = [InputSpec(ndim=4)]

        self.conv_1 = Conv2D(filters=self.filters,
                             kernel_size=(3, 3),
                             strides=2,
                             padding='same')
        if norm == 'instancenorm':
            self.norm_layer = InstanceNormalization(axis=-1,
                                                    center=False,
                                                    scale=False)
        elif norm == 'batchnorm':
            self.norm_layer = BatchNormalization()
        self.acti = Activation('relu')
Example #15
 def deconv2d(layer_input,
              skip_input,
              filters,
              f_size=4,
              dropout_rate=0):
     """Layers used during upsampling"""
     u = UpSampling2D(size=2)(layer_input)
     u = Conv2D(filters,
                kernel_size=f_size,
                strides=1,
                padding='same',
                activation='relu')(u)
     if dropout_rate:
         u = Dropout(dropout_rate)(u)
     u = InstanceNormalization()(u)
     u = Concatenate()([u, skip_input])
     return u
Example #16
    def __init__(self,
                 filters,
                 norm='instancenorm',
                 use_conv_trans=True,
                 **kwargs):
        """
        If use_conv_trans is False, the block upsamples with
        UpSampling2D followed by a Conv2D instead of a
        Conv2DTranspose.
        """
        super(UpsamplingBlock, self).__init__(**kwargs)
        self.filters = filters
        self.use_conv_trans = use_conv_trans
        self.norm_type = norm
        self.input_spec = [InputSpec(ndim=4)]

        if use_conv_trans:
            self.convtr_1 = Conv2DTranspose(filters=self.filters,
                                            kernel_size=(3, 3),
                                            strides=2,
                                            padding='same')
        else:
            self.upsampling_1 = UpSampling2D()
            self.conv_1 = Conv2D(filters=self.filters,
                                 kernel_size=3,
                                 strides=1,
                                 padding='same')

        if self.norm_type == 'instancenorm':
            self.norm_1 = InstanceNormalization(axis=-1,
                                                center=False,
                                                scale=False)
        elif self.norm_type == 'batchnorm':
            self.norm_1 = BatchNormalization()

        self.activ = Activation('relu')
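As with ResidualBlock in Example #1, the source stops at __init__; a call method would have to branch on use_conv_trans. A possible sketch (hypothetical, assuming norm is one of the two supported values so that norm_1 exists):

    def call(self, inputs):
        if self.use_conv_trans:
            x = self.convtr_1(inputs)
        else:
            x = self.conv_1(self.upsampling_1(inputs))
        x = self.norm_1(x)
        return self.activ(x)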
Example #17
def test_groups_after_init():
    layers = InstanceNormalization()
    assert layers.groups == -1
Example #18
def test_instancenorm_flat():
    # Check basic usage of instancenorm
    model = _create_and_fit_sequential_model(InstanceNormalization(), (64, ))
    assert hasattr(model.layers[0], "gamma")
    assert hasattr(model.layers[0], "beta")
Example #19
 def test_groups_after_init(self):
     layers = InstanceNormalization()
     self.assertEqual(layers.groups, -1)