Esempio n. 1
0
 def conv3d(layer_input,
            filters,
            axis=-1,
            se_res_block=True,
            se_ratio=16,
            down_sizing=True):
     """3D encoder block: optional 2x max-pool downsampling, two
     Conv3D + InstanceNormalization layers, and an optional
     squeeze-and-excitation (SE) gated residual connection.

     :param layer_input: 5D input tensor.
     :param filters: number of output feature maps.
     :param axis: normalization axis passed to InstanceNormalization.
     :param se_res_block: if truthy, apply SE channel gating plus a
         projected residual shortcut around the two convolutions.
     :param se_ratio: channel-reduction ratio of the SE bottleneck.
     :param down_sizing: if truthy, MaxPool the input by 2 in every
         spatial dimension first.
     :return: output tensor after the final LeakyReLU.
     """
     # `== True` comparisons replaced with plain truthiness (PEP 8).
     if down_sizing:
         layer_input = MaxPooling3D(pool_size=(2, 2, 2))(layer_input)
     d = Conv3D(filters, (3, 3, 3), use_bias=False,
                padding='same')(layer_input)
     d = InstanceNormalization(axis=axis)(d)
     d = LeakyReLU(alpha=0.3)(d)
     d = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(d)
     d = InstanceNormalization(axis=axis)(d)
     if se_res_block:
         # Squeeze-and-excitation: global pool -> bottleneck -> sigmoid gate.
         se = GlobalAveragePooling3D()(d)
         se = Dense(filters // se_ratio, activation='relu')(se)
         se = Dense(filters, activation='sigmoid')(se)
         se = Reshape([1, 1, 1, filters])(se)
         d = Multiply()([d, se])
         # Projected shortcut so channel counts match before the add.
         shortcut = Conv3D(filters, (3, 3, 3),
                           use_bias=False,
                           padding='same')(layer_input)
         shortcut = InstanceNormalization(axis=axis)(shortcut)
         d = add([d, shortcut])
     d = LeakyReLU(alpha=0.3)(d)
     return d
def res_block(x, n_filters):
    """Residual block: 3x3 conv then dilated 3x3 conv on the main path,
    1x1 projection on the shortcut, merged with an element-wise Add.

    :param x: 4D input tensor.
    :param n_filters: number of feature maps for every convolution.
    :return: merged residual output tensor.
    """
    res = Conv2D(filters=n_filters,
                 kernel_size=(3, 3),
                 strides=(1, 1),
                 padding='same')(x)
    res = InstanceNormalization(axis=3)(res)
    res = LeakyReLU(0.2)(res)

    # Dilated convolution (rate 2) widens the receptive field with no
    # extra parameters.  Conv2D(dilation_rate=...) replaces the removed
    # Keras 1 AtrousConvolution2D API (atrous_rate/border_mode).
    res = Conv2D(filters=n_filters,
                 kernel_size=(3, 3),
                 dilation_rate=(2, 2),
                 padding='same')(res)
    res = InstanceNormalization(axis=3)(res)
    res = Activation('relu')(res)

    # 1x1 projection so the shortcut matches the main path's channels.
    x = Conv2D(filters=n_filters,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='same')(x)

    out = Add()([x, res])

    return out
Esempio n. 3
0
    def bottleneck_layer(self, input_tensor, growth_k):
        """DenseNet bottleneck: IN-ReLU-Conv(1x1x1, 4k) then IN-ReLU-Conv(3x3x3, k)."""
        x = InstanceNormalization(axis=self.concat_axis,
                                  center=True)(input_tensor)
        x = Activation('relu')(x)
        # 1x1x1 bottleneck expands to 4 * growth_k channels.
        x = Convolution3D(filters=growth_k * 4,
                          kernel_size=(1, 1, 1),
                          strides=(1, 1, 1),
                          padding='same',
                          use_bias=True,
                          kernel_initializer='he_normal',
                          kernel_regularizer=None)(x)

        x = InstanceNormalization(axis=self.concat_axis,
                                  center=True)(x)
        x = Activation('relu')(x)
        # 3x3x3 convolution compresses back down to growth_k channels.
        return Convolution3D(filters=growth_k,
                             kernel_size=(3, 3, 3),
                             strides=(1, 1, 1),
                             padding='same',
                             use_bias=True,
                             kernel_initializer='he_normal',
                             kernel_regularizer=None)(x)
Esempio n. 4
0
    def deconv3d(layer_input, skip_input, filters, axis=-1, se_res_block=True, se_ratio=16):
        """Decoder block: upsample via transposed conv, crop-concatenate the
        skip connection, apply two Conv3D + InstanceNorm + SELU layers and an
        optional squeeze-and-excitation gated residual branch.

        :param layer_input: tensor from the previous decoder stage.
        :param skip_input: encoder tensor concatenated after upsampling.
        :param filters: number of feature maps for every convolution.
        :param axis: normalization axis for InstanceNormalization.
        :param se_res_block: if truthy, apply SE gating plus a projected
            residual shortcut around the two convolutions.
        :param se_ratio: channel-reduction ratio of the SE bottleneck.
        :return: output tensor after the final SELU activation.
        """
        u1 = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(layer_input)
        u1 = Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='same')(u1)
        u1 = InstanceNormalization(axis=axis)(u1)
        u1 = Activation('selu')(u1)
        u1 = CropToConcat3D()([u1, skip_input])
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
        u2 = InstanceNormalization(axis=axis)(u2)
        u2 = Activation('selu')(u2)
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u2)
        u2 = InstanceNormalization(axis=axis)(u2)
        # `== True` replaced with truthiness; keyword spacing normalized.
        if se_res_block:
            se = GlobalAveragePooling3D()(u2)
            se = Dense(filters // se_ratio, activation='relu')(se)
            se = Dense(filters, activation='sigmoid')(se)
            se = Reshape([1, 1, 1, filters])(se)
            u2 = Multiply()([u2, se])
            shortcut = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
            shortcut = InstanceNormalization(axis=axis)(shortcut)
            u2 = layers.add([u2, shortcut])
        u2 = Activation('selu')(u2)
        return u2
def recurrent_block(x, n_filters, t=2):
    """Recurrent convolution block (R2U-Net style).

    A 1x1 projection of the input is re-added before each of `t`
    3x3 conv + InstanceNorm + LeakyReLU steps.

    Fix: the initial 3x3 conv used to sit inside the loop behind
    `if i == 0`, so `t == 0` raised UnboundLocalError on `x1`.  It is
    hoisted out; the built graph is identical for t >= 1.

    :param x: 4D input tensor.
    :param n_filters: number of feature maps for every convolution.
    :param t: number of recurrent refinement steps.
    :return: output tensor of the last refinement step.
    """
    padding = 'same'

    # 1x1 projection of the input, re-added at every recurrent step.
    x_ = Conv2D(filters=n_filters,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding=padding)(x)

    # Initial feature map (previously the i == 0 branch inside the loop).
    x1 = Conv2D(filters=n_filters,
                kernel_size=(3, 3),
                strides=(1, 1),
                padding=padding)(x)
    x1 = InstanceNormalization()(x1)
    x1 = LeakyReLU(0.2)(x1)

    for _ in range(t):
        a = Add()([x_, x1])
        x1 = Conv2D(filters=n_filters,
                    kernel_size=(3, 3),
                    strides=(1, 1),
                    padding=padding)(a)
        x1 = InstanceNormalization()(x1)
        x1 = LeakyReLU(0.2)(x1)

    return x1
Esempio n. 6
0
def resnet_block(x):
    """Two-conv residual block (256 filters) with an identity shortcut."""
    shortcut = x
    y = Conv2D(filters=256, kernel_size=3, strides=1, padding="same")(x)
    y = InstanceNormalization(axis=1)(y)
    y = Activation('relu')(y)
    y = Conv2D(filters=256, kernel_size=3, strides=1, padding="same")(y)
    y = InstanceNormalization(axis=1)(y)
    return Add()([y, shortcut])
Esempio n. 7
0
def conv_block(feat_maps_out, prev):
    """Two pre-activation stages: InstanceNorm -> ReLU -> 3x3 Conv2D."""
    out = InstanceNormalization(gamma_initializer=gamma_init)(prev, training=1)  # specifying the axis and mode allows for later merging
    out = Activation('relu')(out)  # possibly a better result with ReLU?
    out = Conv2D(feat_maps_out, (3, 3), padding='same',
                 kernel_initializer=conv_init)(out)
    out = InstanceNormalization(gamma_initializer=gamma_init)(out, training=1)  # specifying the axis and mode allows for later merging
    out = Activation('relu')(out)
    out = Conv2D(feat_maps_out, (3, 3), padding='same',
                 kernel_initializer=conv_init)(out)
    return out
Esempio n. 8
0
def build_generator():
    """
    Create a generator network using the hyperparameter values defined below
    """
    input_shape = (128, 128, 3)
    residual_blocks = 6
    input_layer = Input(shape=input_shape)

    # Down-sampling stack: (filters, kernel, stride) per convolution block.
    x = input_layer
    for n_filters, kernel, stride in ((32, 7, 1), (64, 3, 2), (128, 3, 2)):
        x = Conv2D(filters=n_filters, kernel_size=kernel, strides=stride,
                   padding="same")(x)
        x = InstanceNormalization(axis=1)(x)
        x = Activation("relu")(x)

    # Residual transformation stage.
    for _ in range(residual_blocks):
        x = residual_block(x)

    # Up-sampling stack mirrors the down-sampling filter counts.
    for n_filters in (64, 32):
        x = Conv2DTranspose(filters=n_filters,
                            kernel_size=3,
                            strides=2,
                            padding='same',
                            use_bias=False)(x)
        x = InstanceNormalization(axis=1)(x)
        x = Activation("relu")(x)

    # Project back to RGB and squash into [-1, 1].
    x = Conv2D(filters=3, kernel_size=7, strides=1, padding="same")(x)
    output = Activation('tanh')(x)

    return Model(inputs=[input_layer], outputs=[output])
    def build_generator(self):
        """Build the conditional 1D generator model.

        Combines a latent noise vector with a label embedding via an
        element-wise multiply, then decodes the product through stacked
        UpSampling1D + Conv1D layers into a tanh-bounded signal.

        :return: Keras Model mapping [noise, label] -> generated signal.
        """
        model = Sequential()

        model.add(Dense(128 * 125, input_dim=self.latent_dim))
        model.add(Reshape((125, 128)))
        # 5
        model.add(UpSampling1D())
        model.add(Conv1D(128, kernel_size=3, strides=2, padding="same"))
        model.add(InstanceNormalization())
        model.add(LeakyReLU(alpha=0.2))

        model.add(UpSampling1D())
        model.add(Conv1D(64, kernel_size=3, strides=2, padding="same"))
        model.add(InstanceNormalization())
        model.add(LeakyReLU(alpha=0.2))
        # 7
        model.add(UpSampling1D())
        model.add(Conv1D(64, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        # 8
        model.add(UpSampling1D())
        model.add(Conv1D(64, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        # 9
        model.add(UpSampling1D())
        model.add(Conv1D(64, kernel_size=3, strides=1, padding="same"))
        model.add(LeakyReLU(alpha=0.2))

        model.add(UpSampling1D())
        model.add(Conv1D(self.channels, kernel_size=3, padding="same"))
        # change to LeakyRelu since a lot of signal exceeding 1
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        # img = model(noise)

        label = Input(shape=(4, ))

        # label_embedding = Flatten()(Embedding(4,np.prod(self.signal_shape)//4,input_length=4)(label))

        # label_embedding = Flatten()(Embedding(4,np.prod(self.signal_shape)//4)(label))
        # NOTE(review): the 4-dim label is densely projected, repeated
        # prod(signal_shape)//4 times and flattened, giving a vector of
        # length prod(signal_shape); the multiply below presumably
        # requires that to equal self.latent_dim — verify with callers.
        label_embedding = Flatten()(RepeatVector(
            np.prod(self.signal_shape) // 4)(Dense(4)(label)))
        model_input = multiply([noise, label_embedding])
        # label_embedding = Flatten()(RepeatVector(self.latent_dim)(label))
        # print("@",label.shape,label_embedding.shape,noise.shape)
        # model_input = concatenate([noise, label])
        # model_input = noise
        img = model(model_input)

        return Model([noise, label], img)
Esempio n. 10
0
    def last_label_layer(self, input_tensor):
        """Pre-activation head: two IN-ReLU-Conv3D stages, then subpixel 8x upsampling to a single channel."""
        x = InstanceNormalization(axis=self.concat_axis, center=True)(input_tensor)
        x = Activation('relu')(x)
        x = Convolution3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same',
                          use_bias=True, kernel_initializer='he_normal',
                          kernel_regularizer=None)(x)

        x = InstanceNormalization(axis=self.concat_axis, center=True)(x)
        x = Activation('relu')(x)
        x = Convolution3D(filters=512, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same',
                          use_bias=True, kernel_initializer='he_normal',
                          kernel_regularizer=None)(x)
        # Subpixel shuffle to one output channel.
        return subpixel_8_3D_wChannel(out_c=1)(x)
Esempio n. 11
0
def test_instancenorm_perchannel_correctness():
    """InstanceNormalization with axis=None normalizes per instance only;
    with axis=1 it normalizes each channel independently."""
    # Three channels drawn from clearly different distributions.
    x = np.random.normal(loc=5.0, scale=2.0, size=(10, 1, 4, 4))
    y = np.random.normal(loc=10.0, scale=3.0, size=(10, 1, 4, 4))
    z = np.random.normal(loc=-5.0, scale=5.0, size=(10, 1, 4, 4))
    batch = np.concatenate((x, y, z), axis=1)

    # First model: no normalization axis provided.
    model = Sequential()
    norm = InstanceNormalization(axis=None,
                                 input_shape=(3, 4, 4),
                                 center=False,
                                 scale=False)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')
    model.fit(batch, batch, epochs=4, verbose=0)
    out = model.predict(batch)

    for instance in range(10):
        # Individual channels are NOT normalized ...
        for channel in range(3):
            activations = out[instance, channel]
            assert abs(activations.mean()) > 1e-2
            assert abs(activations.std() - 1.0) > 1e-6
        # ... but each instance as a whole still is.
        activations = out[instance]
        assert_allclose(activations.mean(), 0.0, atol=1e-1)
        assert_allclose(activations.std(), 1.0, atol=1e-1)

    # Second model: the channel dimension is the normalization axis.
    model = Sequential()
    norm = InstanceNormalization(axis=1,
                                 input_shape=(3, 4, 4),
                                 center=False,
                                 scale=False)
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')

    model.fit(batch, batch, epochs=4, verbose=0)
    out = model.predict(batch)

    # Now every channel is normalized on its own.
    for instance in range(10):
        for channel in range(3):
            activations = out[instance, channel]
            assert_allclose(activations.mean(), 0.0, atol=1e-1)
            assert_allclose(activations.std(), 1.0, atol=1e-1)
Esempio n. 12
0
    def build_discriminator(self):
        """Build the conditional 1D discriminator model.

        Multiplies a flattened input signal with a per-class label
        embedding, then classifies the product with strided Conv1D
        blocks ending in a sigmoid validity score.

        :return: Keras Model mapping [img, label] -> validity in (0, 1).
        """
        model = Sequential()
        model.add(Reshape((self.time_length, self.channels)))

        model.add(
            Conv1D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.signal_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Conv1D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding1D(padding=0))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(InstanceNormalization())
        # model.add(BatchNormalization(momentum=0.8))
        model.add(Conv1D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(InstanceNormalization())
        # model.add(BatchNormalization(momentum=0.8))

        model.add(Conv1D(256, kernel_size=3, strides=1, padding="same"))
        # model.add(InstanceNormalization())
        # model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        # model.summary()

        img = Input(shape=self.signal_shape)

        label = Input(shape=(1, ))
        # reason is referred as before
        # Embedding maps each class id to a vector as long as the
        # flattened signal so it can gate the signal element-wise.
        label_embedding = Flatten()(Embedding(self.num_classes,
                                              np.prod(
                                                  self.signal_shape))(label))
        # label_embedding = Flatten()(RepeatVector(np.prod(self.signal_shape))(label))

        flat_img = Flatten()(img)

        model_input = multiply([flat_img, label_embedding])

        validity = model(model_input)

        return Model([img, label], validity)
Esempio n. 13
0
def resnet_block(n_filters, input_layer):
    """Residual-style block whose output CONCATENATES (not adds) the input."""
    init = RandomNormal(stddev=0.02)  # weight initialization
    # First convolutional layer.
    y = Conv2D(n_filters, (3, 3), padding='same',
               kernel_initializer=init)(input_layer)
    y = InstanceNormalization(axis=-1)(y)
    y = Activation('relu')(y)
    # Second convolutional layer.
    y = Conv2D(n_filters, (3, 3), padding='same', kernel_initializer=init)(y)
    y = InstanceNormalization(axis=-1)(y)
    # Merge channel-wise with the block input.
    return Concatenate()([y, input_layer])
Esempio n. 14
0
    def build_generator(self):
        """Build the conditional 1D generator model.

        Multiplies a latent noise vector with a per-class label
        embedding and decodes the product through UpSampling1D +
        Conv1D + LeakyReLU + InstanceNormalization blocks into a
        tanh-bounded signal.

        :return: Keras Model mapping [noise, label] -> generated signal.
        """
        model = Sequential()

        model.add(
            Dense(128 * 125, activation=LeakyReLU(),
                  input_dim=self.latent_dim))
        model.add(Reshape((125, 128)))

        model.add(UpSampling1D())
        model.add(Conv1D(128, kernel_size=3, padding="same"))
        model.add(LeakyReLU())
        model.add(InstanceNormalization())
        # model.add(BatchNormalization(momentum=0.8))

        model.add(UpSampling1D())
        model.add(Conv1D(128, kernel_size=3, padding="same"))
        model.add(LeakyReLU())
        model.add(InstanceNormalization())

        model.add(UpSampling1D())
        model.add(Conv1D(256, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU())
        model.add(InstanceNormalization())
        # model.add(BatchNormalization(momentum=0.8))

        model.add(UpSampling1D())
        model.add(Conv1D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU())
        model.add(InstanceNormalization())

        model.add(Conv1D(self.channels, kernel_size=3, padding="same"))
        # change to LeakyRelu since a lot of signal exceeding 1
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        # img = model(noise)

        label = Input(shape=(1, ))
        # Embedding gives each class a vector as long as the flattened
        # signal; presumably np.prod(self.signal_shape) equals
        # self.latent_dim so the multiply below is valid — verify.
        label_embedding = Flatten()(Embedding(self.num_classes,
                                              np.prod(
                                                  self.signal_shape))(label))
        # label_embedding = Flatten()(RepeatVector(self.latent_dim)(label))
        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img)
Esempio n. 15
0
def define_generator_network(num_resnet_blocks=9):
    """Generator: conv down-sampling, resnet transformation blocks, transposed-conv up-sampling, tanh RGB output."""
    input_size = (128, 128, 3)

    # Input RGB image.
    input_layer = Input(shape=input_size)

    # Down-sampling using Conv2D: (filters, kernel, stride) per block.
    x = input_layer
    for n_filters, kernel, stride in ((64, 7, 1), (128, 3, 2), (256, 3, 2)):
        x = Conv2D(filters=n_filters, kernel_size=kernel, strides=stride,
                   padding="same")(x)
        x = InstanceNormalization(axis=1)(x)
        x = Activation("relu")(x)

    # Transforming the hidden representation using the resnet blocks.
    for _ in range(num_resnet_blocks):
        x = resnet_block(x)

    # Upsampling to recover the transformed image.
    # Conv2DTranspose with a stride 2 works like Conv2D with stride 1/2.
    for n_filters in (128, 64):
        x = Conv2DTranspose(filters=n_filters,
                            kernel_size=3,
                            strides=2,
                            padding='same',
                            use_bias=False)(x)
        x = InstanceNormalization(axis=1)(x)
        x = Activation("relu")(x)

    # tanh activation to get a normalised output image.
    x = Conv2D(filters=3, kernel_size=7, strides=1, padding="same")(x)
    output = Activation('tanh')(x)

    return Model(inputs=[input_layer], outputs=[output])
 def conv2d(layer_input, filters, f_size=4):
     """Layers used during downsampling"""
     out = Conv2D(filters, kernel_size=f_size, strides=2,
                  padding='same')(layer_input)
     out = LeakyReLU(alpha=0.2)(out)
     return InstanceNormalization()(out)
Esempio n. 17
0
def test_instancenorm_perinstancecorrectness():
    """Each instance of a bimodal batch is normalized independently."""
    model = Sequential()
    norm = InstanceNormalization(input_shape=(10, ))
    model.add(norm)
    model.compile(loss='mse', optimizer='sgd')

    # Two instances from each mode of a bimodal distribution.
    z = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    y = np.random.normal(loc=-5.0, scale=17.0, size=(2, 10))
    x = np.vstack((z, y))
    model.fit(x, x, epochs=4, batch_size=4, verbose=1)

    # Undo the learned affine transform before checking statistics.
    out = model.predict(x)
    out -= K.eval(norm.beta)
    out /= K.eval(norm.gamma)

    # Verify that each instance in the batch is individually normalized.
    for i in range(4):
        assert_allclose(out[i].mean(), 0.0, atol=1e-1)
        assert_allclose(out[i].std(), 1.0, atol=1e-1)

    # If each instance is normalized, so should the batch be.
    assert_allclose(out.mean(), 0.0, atol=1e-1)
    assert_allclose(out.std(), 1.0, atol=1e-1)
Esempio n. 18
0
def build_discriminator():
    """
    Create a discriminator network using the hyperparameter values defined below
    """
    input_shape = (128, 128, 3)
    hidden_layers = 3

    input_layer = Input(shape=input_shape)

    # Entry block: conv + LeakyReLU, no normalization.
    x = ZeroPadding2D(padding=(1, 1))(input_layer)
    x = Conv2D(filters=64, kernel_size=4, strides=2, padding="valid")(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = ZeroPadding2D(padding=(1, 1))(x)

    # Hidden blocks double the filter count each layer: 128, 256, 512.
    for i in range(1, hidden_layers + 1):
        x = Conv2D(filters=64 * 2**i,
                   kernel_size=4,
                   strides=2,
                   padding="valid")(x)
        x = InstanceNormalization(axis=1)(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = ZeroPadding2D(padding=(1, 1))(x)

    # Sigmoid patch output with real/fake probabilities.
    output = Conv2D(filters=1, kernel_size=4, strides=1,
                    activation="sigmoid")(x)

    return Model(inputs=[input_layer], outputs=[output])
Esempio n. 19
0
 def transition_layer(self, input_tensor, theta=0.5):
     """DenseNet transition: InstanceNorm then a 1x1x1 conv compressing channels by factor theta."""
     nb_channel = int(input_tensor.shape[-1])
     normed = InstanceNormalization(axis=self.concat_axis, center=True)(input_tensor)
     return Convolution3D(filters=int(nb_channel * theta), kernel_size=(1, 1, 1), strides=(1, 1, 1),
                          padding='same', use_bias=True, kernel_initializer='he_normal',
                          kernel_regularizer=None)(normed)
Esempio n. 20
0
def _normalization(inputs, norm='bn'):
    if norm == 'bn':
        return BatchNormalization()(inputs)
    elif norm == 'in':
        return InstanceNormalization()(inputs)
    elif norm == 'gn':
        return GroupNormalization()(inputs)
Esempio n. 21
0
def create_convolution_block(input_layer, n_filters, batch_normalization=False, kernel=(3, 3, 3), activation=None,
                             padding='same', strides=(1, 1, 1), instance_normalization=False):
    """
    Conv3D followed by optional normalization and an activation.

    :param input_layer: tensor fed into the convolution.
    :param n_filters: number of convolution filters.
    :param batch_normalization: apply BatchNormalization (takes priority over instance_normalization).
    :param kernel: convolution kernel size.
    :param activation: Keras activation layer to use. (default is 'relu')
    :param padding: convolution padding mode.
    :param strides: convolution strides.
    :param instance_normalization: apply InstanceNormalization (requires keras_contrib).
    :return: activated output tensor.
    """
    layer = Conv3D(n_filters, kernel, padding=padding, strides=strides)(input_layer)
    if batch_normalization:
        layer = BatchNormalization(axis=1)(layer)
    elif instance_normalization:
        try:
            from keras_contrib.layers import InstanceNormalization
        except ImportError:
            raise ImportError("Install keras_contrib in order to use instance normalization."
                              "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git")
        layer = InstanceNormalization(axis=1)(layer)
    # Default to ReLU when no activation layer was supplied.
    act = Activation('relu') if activation is None else activation()
    return act(layer)
Esempio n. 22
0
    def __init__(self,
                 base_filters=32,
                 lrelu_alpha=0.2,
                 pad_type="reflect",
                 norm_type="batch"):
        """Configure the discriminator's layer stack.

        :param base_filters: filter count of the first convolution;
            later convs use multiples of it.
        :param lrelu_alpha: negative slope for every LeakyReLU.
        :param pad_type: "reflect" or "constant" padding.
        :param norm_type: "instance" or "batch" normalization.
        :raises ValueError: on an unrecognized pad_type or norm_type.
        """
        super(Discriminator, self).__init__(name="Discriminator")
        if pad_type == "reflect":
            self.flat_pad = ReflectionPadding2D()
        elif pad_type == "constant":
            self.flat_pad = ZeroPadding2D()
        else:
            raise ValueError(f"pad_type not recognized {pad_type}")

        self.flat_conv = Conv2D(base_filters, 3)
        self.flat_lru = LeakyReLU(lrelu_alpha)
        self.strided_conv1 = StridedConv(base_filters * 2,
                                         lrelu_alpha,
                                         pad_type,
                                         norm_type)
        self.strided_conv2 = StridedConv(base_filters * 4,
                                         lrelu_alpha,
                                         pad_type,
                                         norm_type)
        self.conv2 = Conv2D(base_filters * 8, 3)

        if norm_type == "instance":
            self.norm = InstanceNormalization()
        elif norm_type == "batch":
            self.norm = BatchNormalization()
        else:
            # Fail fast like pad_type above; previously an unknown
            # norm_type silently left self.norm unset, deferring the
            # failure to an AttributeError at call time.
            raise ValueError(f"norm_type not recognized {norm_type}")

        self.lrelu = LeakyReLU(lrelu_alpha)

        self.final_conv = Conv2D(1, 3)
Esempio n. 23
0
def define_discriminator_network():
    """PatchGAN-style discriminator: strided Conv2D blocks with InstanceNorm and a sigmoid patch output."""
    input_size = (128, 128, 3)
    num_hidden_layers = 3
    input_layer = Input(shape=input_size)

    # Entry block without normalization.
    x = ZeroPadding2D(padding=(1, 1))(input_layer)
    x = Conv2D(filters=64, kernel_size=4, strides=2, padding="valid")(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = ZeroPadding2D(padding=(1, 1))(x)

    # Hidden blocks: filter counts double each layer (128, 256, 512).
    for i in range(1, num_hidden_layers + 1):
        x = Conv2D(filters=64 * 2**i,
                   kernel_size=4,
                   strides=2,
                   padding="valid")(x)
        x = InstanceNormalization(axis=1)(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = ZeroPadding2D(padding=(1, 1))(x)

    # Sigmoid activation normalises output values between 0 and 1,
    # used to train real-or-fake labels; this is the patch output.
    output = Conv2D(filters=1, kernel_size=4, strides=1,
                    activation="sigmoid")(x)

    return Model(inputs=[input_layer], outputs=[output])
Esempio n. 24
0
def convt(x, filters, kernel_size=(3,3), strides=(2,2), relu=True, normalization="instance"):
    """Transposed convolution, optional instance normalization, optional ReLU."""
    out = Conv2DTranspose(filters, kernel_size, strides=strides, padding="same")(x)
    if normalization == "instance":
        out = InstanceNormalization(axis=-1)(out)
    if not relu:
        return out
    return Activation("relu")(out)
        def res_block(_input: Layer, filters: int, kernel_size: int, dropout: bool,
                      activation: Optional[str], name_prefix: str) -> Layer:
            """Two conv + instance-norm stages (optional permanent dropout) with an identity skip."""
            def stage(tensor: Layer, idx: int) -> Layer:
                # One Conv2D -> (DropoutPermanent) -> InstanceNormalization stage.
                tensor = Conv2D(filters, kernel_size=kernel_size, strides=1, padding='same', name=f'{name_prefix}conv{idx}')(tensor)
                if dropout:
                    tensor = DropoutPermanent(rate=0.5, name=f'{name_prefix}dropout{idx}')(tensor)
                return InstanceNormalization(axis=-1, epsilon=1e-05, name=f'{name_prefix}norm{idx}')(tensor)

            out = stage(_input, 1)
            # Only the first stage gets an activation between the convs.
            if activation == 'lrelu':
                out = LeakyReLU(alpha=0.2, name=f'{name_prefix}{activation}1')(out)
            else:
                out = Activation(activation, name=f'{name_prefix}{activation}1')(out)
            out = stage(out, 2)
            return Add(name=f'{name_prefix}add')([_input, out])
Esempio n. 26
0
def get_norm(norm_type):
    """Return a fresh normalization layer for 'instance' or 'batch'."""
    if norm_type not in ("instance", "batch"):
        raise ValueError(f"Unrecognized norm_type {norm_type}")
    return InstanceNormalization() if norm_type == "instance" else BatchNormalization()
Esempio n. 27
0
def normalise(norm=None, **kwargs):
    """Return an InstanceNormalization, BatchNormalization, or identity Lambda layer."""
    if norm == 'instance':
        return InstanceNormalization(**kwargs)
    if norm == 'batch':
        return BatchNormalization()
    # Anything else (including None) is a pass-through.
    return Lambda(lambda x: x)
Esempio n. 28
0
 def d_layer(layer_input, filters, f_size=4, normalization=True):
     """Discriminator layer"""
     out = Conv3D(filters, kernel_size=f_size, strides=2,
                  padding='same')(layer_input)
     out = LeakyReLU(alpha=0.2)(out)
     return InstanceNormalization()(out) if normalization else out
Esempio n. 29
0
    def DenseNet_v3(self, input):
        """Assemble the 3D DenseNet-style network.

        Initial convs downsample once, then three dense blocks separated
        by strided transition convs shrink the volume; three parallel
        label heads are upsampled back and fused with the early
        full-resolution features into a 3-channel output.

        :param input: 5D input tensor (the spatial comments suggest
            200 x 200 x 200 volumes — confirm with callers).
        :return: final 3-channel Convolution3D output tensor.
        """
        # growth_k = 8
        growth_k = 4
        theta = 0.5  # transition-layer channel compression factor
        init_conv_0 = Convolution3D(filters=growth_k * 2, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same',
                                    use_bias=True, kernel_initializer='he_normal',
                                    kernel_regularizer=None)(input)  # 200 x 200 x 200
        init_bn_0 = InstanceNormalization(axis=self.concat_axis, center=False)(init_conv_0)
        init_relu_0 = Activation('relu')(init_bn_0)
        # Strided 7x7x7 conv halves every spatial dimension.
        init_conv_1 = Convolution3D(filters=growth_k * 2, kernel_size=(7, 7, 7), strides=(2, 2, 2), padding='same',
                                    use_bias=True, kernel_initializer='he_normal',
                                    kernel_regularizer=None)(init_relu_0)  # 100 x 100 x 100

        nb_layers = 12
        denseblk_1 = self.dense_block(init_conv_1, nb_layers=nb_layers, growth_k=growth_k)
        transit_1 = self.transition_layer(denseblk_1, theta=theta)
        # Strided conv (instead of pooling) performs the downsampling.
        bsize, zz, yy, xx, c = transit_1.get_shape().as_list()
        transit_1 = Convolution3D(filters=c, kernel_size=(3, 3, 3), strides=(2, 2, 2), padding='same',
                                  use_bias=True, kernel_initializer='he_normal',
                                  kernel_regularizer=None)(transit_1)  # 50 x 50 x50

        nb_layers = 24
        denseblk_2 = self.dense_block(transit_1, nb_layers=nb_layers, growth_k=growth_k)
        transit_2 = self.transition_layer(denseblk_2, theta=theta)
        bsize, zz, yy, xx, c = transit_2.get_shape().as_list()
        transit_2 = Convolution3D(filters=c, kernel_size=(3, 3, 3), strides=(2, 2, 2), padding='same',
                                  use_bias=True, kernel_initializer='he_normal',
                                  kernel_regularizer=None)(transit_2)  # 25 x 25 x 25

        nb_layers = 18
        denseblk_3 = self.dense_block(transit_2, nb_layers=nb_layers, growth_k=growth_k)

        # Three independent heads from the same features; presumably
        # each subpixel-upsamples back to full resolution — verify
        # against last_label_layer.
        label_0 = self.last_label_layer(denseblk_3)
        label_1 = self.last_label_layer(denseblk_3)
        label_2 = self.last_label_layer(denseblk_3)
        # Fuse the heads with the stride-1 early features.
        concat_1 = Concatenate()([label_0, label_1, label_2, init_relu_0])
        last_conv_0 = Convolution3D(filters=growth_k, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same',
                                    use_bias=True, kernel_initializer='he_normal',
                                    kernel_regularizer=None)(concat_1)  # 200 x 200 x 200
        last_bn_0 = InstanceNormalization(axis=self.concat_axis, center=False)(last_conv_0)
        last_relu_0 = Activation('relu')(last_bn_0)
        last_conv_1 = Convolution3D(filters=3, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same',
                                    use_bias=True, kernel_initializer='he_normal',
                                    kernel_regularizer=None)(last_relu_0)

        return last_conv_1
Esempio n. 30
0
def conv_block(x, num_filters, kernel_size=3, strides=2, padding='same'):
    """Conv2D -> InstanceNormalization -> LeakyReLU(0.2)."""
    out = Conv2D(filters=num_filters,
                 kernel_size=kernel_size,
                 strides=strides,
                 padding=padding)(x)
    out = InstanceNormalization()(out)
    return LeakyReLU(alpha=.2)(out)