Example #1
 def atrous(layer_input, filters, f_size=4, bn=True):
     a_list = []
     for rate in [2, 4, 8]:
         # AtrousConvolution2D(..., atrous_rate=..., border_mode=...) is the removed Keras 1 API;
         # the Keras 2 equivalent is Conv2D with dilation_rate (compare Example #9):
         a = Conv2D(filters,
                    kernel_size=f_size,
                    dilation_rate=rate,
                    padding='same')(layer_input)
         a_list.append(a)
     a = Concatenate()(a_list)
     a = LeakyReLU(alpha=0.2)(a)
     if bn:
         # a = BatchNormalization(momentum=0.8)(a)
         a = InstanceNormalization()(a)
     return a
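Note: InstanceNormalization is not part of core Keras, so every snippet on this page assumes it has been imported separately. A minimal sketch of the two usual sources (keras_contrib, as shown explicitly in Example #22, or tensorflow_addons for tf.keras users; the latter is an assumption about your environment):

# keras_contrib provides the layer used throughout these examples
from keras_contrib.layers import InstanceNormalization
# alternatively (assumption: the tensorflow_addons package is installed):
# import tensorflow_addons as tfa
# InstanceNormalization = tfa.layers.InstanceNormalization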
Example #2
 def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
     #layers for upsampling
     u = UpSampling2D(size=2)(layer_input)
     u = Conv2D(filters,
                kernel_size=f_size,
                strides=1,
                padding='same',
                activation='relu')(u)
     if dropout_rate:
         u = Dropout(dropout_rate)(u)
     u = InstanceNormalization()(u)
     u = Concatenate()([u, skip_input])  # skip connection
     return u
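A minimal sketch of how a helper like deconv2d is typically wired into a U-Net-style generator, pairing each upsampling step with the matching encoder activation. The conv2d downsampling helper below is hypothetical and exists only for this sketch; the layers used inside deconv2d (UpSampling2D, Conv2D, Dropout, InstanceNormalization, Concatenate) are assumed to already be in scope.

from keras.layers import Input, Conv2D, LeakyReLU
from keras.models import Model

def conv2d(layer_input, filters, f_size=4):
    # hypothetical downsampling counterpart, used only for this sketch
    d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
    return LeakyReLU(alpha=0.2)(d)

inp = Input(shape=(128, 128, 3))
d1 = conv2d(inp, 32)        # 64x64
d2 = conv2d(d1, 64)         # 32x32
u1 = deconv2d(d2, d1, 32)   # upsample back to 64x64 and concatenate with d1
model = Model(inp, u1)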
Example #3
    def build_discriminator(self):

        img = Input(shape=self.color_img_shape)

        # Apply normalization after the activation: this ordering is commonly believed to
        # outperform the reverse, even though the author of the deep color model places
        # batch normalization before the ReLU activation.
        model = Sequential()
        model.add(
            Conv2D(self.df,
                   kernel_size=5,
                   strides=2,
                   padding='same',
                   input_shape=self.color_img_shape))
        model.add(LeakyReLU(alpha=0.2))
        # here output is (128 * 128 * self.df)
        model.add(Conv2D(self.df * 2, kernel_size=5, strides=2,
                         padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        # here output is (64 * 64 * self.df * 2)
        model.add(Conv2D(self.df * 4, kernel_size=5, strides=2,
                         padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        # here output is (32 * 32 * self.df * 4)
        model.add(Conv2D(self.df * 8, kernel_size=5, strides=2,
                         padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        # here output is (16 * 16 * self.df * 8)
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        validity = model(img)

        return Model(img, validity)
Example #4
def g_upsamplinglayer(inputlayer, filter_dim=64):

    #   use_resize_convolution
    #   inputlayer = UpSampling2D(size=(2,2))(inputlayer)
    #   inputlayer = ReflectionPadding2D((1,1))(inputlayer)
    #   inputlayer = Conv2D(filters=filter_dim, kernel_size=3, strides=1, padding='valid')(inputlayer)
    inputlayer = Conv2DTranspose(filters=filter_dim,
                                 kernel_size=3,
                                 strides=2,
                                 padding='same')(inputlayer)
    inputlayer = InstanceNormalization(axis=3, center=True,
                                       epsilon=1e-5)(inputlayer, training=True)
    inputlayer = Activation('relu')(inputlayer)

    return inputlayer
Example #5
 def disc_layer(in_image,
                out_channels,
                strides=(2, 2),
                instance_norm=True,
                initializer=init):
     'Layer for building Discriminator'
     d = Conv2D(out_channels,
                kernel_size=(4, 4),
                strides=strides,
                padding='same',
                kernel_initializer=initializer)(in_image)
     if instance_norm:
         d = InstanceNormalization(axis=-1)(d)
     d = LeakyReLU(alpha=0.2)(d)
     return d
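A sketch of stacking disc_layer into a PatchGAN-style discriminator. The 256x256 input shape and the final 1-channel convolution are illustrative assumptions, and init stands in for the module-level initializer the default argument refers to.

from keras.initializers import RandomNormal
from keras.layers import Input, Conv2D
from keras.models import Model

init = RandomNormal(stddev=0.02)

in_image = Input(shape=(256, 256, 3))
d = disc_layer(in_image, 64, instance_norm=False, initializer=init)   # 128x128
d = disc_layer(d, 128, initializer=init)                              # 64x64
d = disc_layer(d, 256, initializer=init)                              # 32x32
d = disc_layer(d, 512, strides=(1, 1), initializer=init)              # stays 32x32
patch_out = Conv2D(1, (4, 4), padding='same', kernel_initializer=init)(d)  # patch map
discriminator = Model(in_image, patch_out)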
Example #6
def decoder_layer(inputs,
                  paired_inputs,
                  filters=16,
                  kernel_size=3,
                  strides=1,
                  activation='relu',
                  instance_norm=False):

    conv = Conv2DTranspose(filters=filters,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding='same')
    x = inputs
    x1 = InstanceNormalization()(x)
    x1 = LeakyReLU(alpha=0.2)(x1)
    x1 = conv(x1)
    x2 = InstanceNormalization()(x1)
    x2 = LeakyReLU(alpha=0.2)(x2)
    x2 = Conv2DTranspose(filters=filters,
                         kernel_size=kernel_size,
                         strides=strides,
                         padding='same')(x2)
    x = concatenate([x1, x2, paired_inputs])
    return x
Example #7
def trans_conv2d_bn(x,
                    filters,
                    num_row,
                    num_col,
                    padding='same',
                    strides=(2, 2),
                    name=None):

    x = Conv2DTranspose(filters, (num_row, num_col),
                        strides=strides,
                        padding=padding)(x)

    x = InstanceNormalization()(x)

    return x
Example #8
 def deconv_block(self, x, filters, size):
     x = Conv2DTranspose(filters,
                         kernel_size=size,
                         strides=2,
                         padding='same',
                         use_bias=False,
                         kernel_initializer=RandomNormal(0, 0.02))(x)
     x = InstanceNormalization(axis=1)(x)
     x = Activation('relu')(x)
     #        u = UpSampling2D(size=2)(layer_input)
     #        u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
     #        u = InstanceNormalization()(u)
     #        u = Concatenate()([u, skip_input])
     return x
Example #9
def atrous(layer_input, filters, f_size=4, bn=True):
    a_list = []

    for rate in [2, 4, 8]:
        # a = AtrousConvolution2D(filters, f_size, atrous_rate=rate, border_mode='same')(layer_input)
        a = Conv2D(filters,
                   kernel_size=f_size,
                   dilation_rate=rate,
                   padding='same')(layer_input)
        a_list.append(a)
    a = Concatenate()(a_list)
    a = LeakyReLU(alpha=0.2)(a)
    if bn:
        a = InstanceNormalization()(a)
    return a
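A minimal sketch of calling this block on its own, assuming the Conv2D, Concatenate, LeakyReLU and InstanceNormalization imports used by the example are already in scope:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(64, 64, 3))
out = atrous(inp, filters=32)   # three dilation rates concatenated -> 96 feature maps
model = Model(inp, out)
model.summary()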
Example #10
        def upsample(layer_input,
                     skip_input,
                     filters,
                     f_size=4,
                     dropout_rate=0):
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1,
                       padding='same')(u)
            u = InstanceNormalization(axis=-1, center=False, scale=False)(u)
            u = Activation('relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)

            u = Concatenate()([u, skip_input])
            return u
Example #11
    def deconv2d(self,
                 layer_input,
                 skip_input,
                 filters,
                 f_size=4,
                 dropout_rate=0):
        """アップサンプリング中に使われる層##"""
        u = UpSampling2D(size=2)(layer_input)
        u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same')(u)
        u = LeakyReLU(alpha=0.2)(u)
        if dropout_rate:
            u = Dropout(dropout_rate)(u)
        u = InstanceNormalization()(u)
        u = Concatenate()([u, skip_input])

        return u
Example #12
def encode_block(input_layer,
                 num_filters,
                 kernel_size=(3, 3, 3),
                 dropout_rate=0.1,
                 downsample=True):
    x = Conv3D(num_filters, kernel_size, padding='same')(input_layer)
    x = InstanceNormalization(axis=3, center=True, epsilon=1e-5)(x,
                                                                 training=True)
    x = Activation('relu')(x)
    x = Dropout(dropout_rate)(x)
    #x = Conv3D(num_filters, kernel_size, padding='same')(x)
    #x = InstanceNormalization(axis=3, center=True, epsilon=1e-5)(x, training=True)
    #x = Activation('relu')(x)
    if downsample:
        x = MaxPooling3D((2, 2, 2), padding='same')(x)
    return x
Example #13
 def define_generator(self, n_resnet=9):
     ''' defines a generator function'''
     # weight initialization
     init = keras.initializers.RandomNormal(stddev=0.02)
     # image input
     input_image = keras.Input(shape=self.image_shape)
     # c7s1-64
     g_layer = keras.layers.Conv2D(64, (7, 7),
                                   padding='same',
                                   kernel_initializer=init)(input_image)
     g_layer = InstanceNormalization(axis=-1)(g_layer)
     g_layer = keras.layers.Activation('relu')(g_layer)
     # d128
     g_layer = keras.layers.Conv2D(128, (3, 3),
                                   strides=(2, 2),
                                   padding='same',
                                   kernel_initializer=init)(g_layer)
     g_layer = InstanceNormalization(axis=-1)(g_layer)
     g_layer = keras.layers.Activation('relu')(g_layer)
     # d256
     g_layer = keras.layers.Conv2D(256, (3, 3),
                                   strides=(2, 2),
                                   padding='same',
                                   kernel_initializer=init)(g_layer)
     g_layer = InstanceNormalization(axis=-1)(g_layer)
     g_layer = keras.layers.Activation('relu')(g_layer)
     # R256
     for _ in range(n_resnet):
         #print('running through loop!')
         g_layer = self.resnet_block(256, g_layer)
     # u128
     g_layer = keras.layers.Conv2DTranspose(
         128, (3, 3),
         strides=(2, 2),
         padding='same',
         kernel_initializer=init)(g_layer)
     g_layer = InstanceNormalization(axis=-1)(g_layer)
     g_layer = keras.layers.Activation('relu')(g_layer)
     # u64
     g_layer = keras.layers.Conv2DTranspose(
         64, (3, 3),
         strides=(2, 2),
         padding='same',
         kernel_initializer=init)(g_layer)
     g_layer = InstanceNormalization(axis=-1)(g_layer)
     g_layer = keras.layers.Activation('relu')(g_layer)
     # c7s1-3
     g_layer = keras.layers.Conv2D(3, (7, 7),
                                   padding='same',
                                   kernel_initializer=init)(g_layer)
     g_layer = InstanceNormalization(axis=-1)(g_layer)
     out_image = keras.layers.Activation('tanh')(g_layer)
     # define model
     model = keras.Model(input_image, out_image)
     return model
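Examples #13, #15 and #30 call a resnet_block helper (a method in Example #13) that is not shown on this page. Below is a minimal sketch of a CycleGAN-style residual block with InstanceNormalization, written as a plain function with the signature assumed from those calls; keras and InstanceNormalization are assumed to be imported as in Example #13, and the concatenated skip shown here is only one common variant (an additive skip is equally common).

def resnet_block(n_filters, input_layer):
    # assumption: two 3x3 convolutions with instance normalization, plus a skip connection
    init = keras.initializers.RandomNormal(stddev=0.02)
    g = keras.layers.Conv2D(n_filters, (3, 3), padding='same', kernel_initializer=init)(input_layer)
    g = InstanceNormalization(axis=-1)(g)
    g = keras.layers.Activation('relu')(g)
    g = keras.layers.Conv2D(n_filters, (3, 3), padding='same', kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = keras.layers.Concatenate()([g, input_layer])
    return g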
Example #14
 def conv_block(self, x, filters, size, stride=(2, 2),
                has_norm_instance=True,
                padding='valid',
                has_activation_layer=True,
                use_leaky_relu=False):
     x = Conv2D(filters, size, strides=stride, padding=padding,
                kernel_initializer=RandomNormal(0, 0.02))(x)
     
     if has_norm_instance:
         x = InstanceNormalization(axis=1)(x)
         
     if has_activation_layer:
         if use_leaky_relu:
             x = LeakyReLU(alpha=0.2)(x)
         else:
             x = Activation('relu')(x)
     return x
Example #15
def define_generator(image_shape, encoder, n_resnet=3):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # image input
    in_image = Input(shape=image_shape)
    in_image_t = Input(shape=image_shape)
    # c7s1-64
    g = Conv2D(64, (7, 7), padding='same', kernel_initializer=init)(in_image)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d128
    g = Conv2D(128, (3, 3),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d256
    g = Conv2D(256, (3, 3),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # R256
    for _ in range(n_resnet):
        g = resnet_block(256, g)

    label = encoder(in_image_t)
    label = Lambda(lambda x: K.expand_dims(x, axis=1))(label)
    label = Lambda(lambda x: K.expand_dims(x, axis=1))(label)
    label = Lambda(lambda x: K.tile(x, [1, 16, 16, 1]))(label)
    g = Concatenate(axis=3)([g, label])
    # u128
    g = Conv2DTranspose(128, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # u64
    g = Conv2DTranspose(64, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # c7s1-3
    g = Conv2D(1, (7, 7), padding='same', kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    out_image = Activation('tanh')(g)
    # define model
    model = Model([in_image, in_image_t], out_image)
    return model
Example #16
def define_generator(image_shape, output_shape):
	# weight initialization
	init = RandomNormal(stddev=0.02)
	# image input
	in_image = Input(shape=image_shape)
	channels = int(output_shape[-1])
	e1 = Conv2D(32, (3,3), padding='same', kernel_initializer=init)(in_image)
	e1 = InstanceNormalization(axis=-1)(e1)
	e1 = LeakyReLU(alpha=0.2)(e1)
	# d64
	e2 = Conv2D(64, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(e1)
	e2 = InstanceNormalization(axis=-1)(e2)
	e2 = LeakyReLU(alpha=0.2)(e2)
	# d128
	e3 = Conv2D(128, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(e2)
	e3 = InstanceNormalization(axis=-1)(e3)
	e3 = LeakyReLU(alpha=0.2)(e3)
	# d256
	e4 = Conv2D(256, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(e3)
	e4 = InstanceNormalization(axis=-1)(e4)
	e4 = LeakyReLU(alpha=0.2)(e4)
	# u128
	d1 = Conv2DTranspose(128, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(e4)
	d1 = Concatenate()([d1, e3])
	d1 = InstanceNormalization(axis=-1)(d1)
	d1 = LeakyReLU(alpha=0.2)(d1)
	# u64
	d2 = Conv2DTranspose(64, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(d1)
	d2 = Concatenate()([d2, e2])
	d2 = InstanceNormalization(axis=-1)(d2)
	d2 = LeakyReLU(alpha=0.2)(d2)
	#u32
	d3 = Conv2DTranspose(32, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(d2)
	d3 = Concatenate()([d3, e1])
	d3 = InstanceNormalization(axis=-1)(d3)
	d3 = LeakyReLU(alpha=0.2)(d3)
	# output projection
	d4 = Conv2DTranspose(channels, (3,3), padding='same', kernel_initializer=init)(d3)
	d4 = InstanceNormalization(axis=-1)(d4)
	out_image = Activation('sigmoid')(d4)
	# define model
	model = Model(in_image, out_image)
	print(model.summary())
	return model
Example #17
def conv2d_bn(x,
              filters,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              activation='relu',
              name=None):
    x = Conv2D(filters, (num_row, num_col),
               strides=strides,
               padding=padding,
               use_bias=False)(x)
    x = InstanceNormalization()(x)
    if activation is None:
        return x
    x = LeakyReLU(alpha=0.2)(x)
    return x
Example #18
 def deconv2d(layer_input,
              skip_input,
              filters,
              f_size=f_size,
              dropout_rate=0,
              output_padding=None):
     """Layers used during upsampling"""
     u = Conv2DTranspose(filters=filters,
                         kernel_size=f_size,
                         strides=2,
                         activation='relu',
                         output_padding=output_padding)(layer_input)
     if dropout_rate:
         u = Dropout(dropout_rate)(u)
     u = InstanceNormalization()(u)
     u = Concatenate()([u, skip_input])
     return u
Example #19
def create_convolution_block(input_layer,
                             n_filters,
                             batch_normalization=False,
                             kernel=(3, 3),
                             activation=None,
                             padding="same",
                             strides=(1, 1),
                             instance_normalization=False):
    layer = Conv2D(n_filters, kernel, padding=padding,
                   strides=strides)(input_layer)

    # note: despite the flag name, this variant applies InstanceNormalization
    if batch_normalization:
        layer = InstanceNormalization(axis=-1)(layer)
    if activation is None:
        return Activation("relu")(layer)
    else:
        return activation()(layer)
Example #20
 def __deconv2d(layer_input,
                skip_input,
                filters,
                f_size=f_size,
                dropout_rate=0):
     """Layers used during upsampling"""
     u = UpSampling2D(size=2)(layer_input)
     u = Conv2D(filters,
                kernel_size=f_size,
                strides=1,
                padding='valid',
                activation='relu')(u)
     if dropout_rate:
         u = Dropout(dropout_rate)(u)
     u = InstanceNormalization()(u)
     u = Concatenate()([u, skip_input])
     return u
Example #21
 def d_layer(layer_input,
             filters,
             kernel_size=(4, 4, 2),
             strides=(2, 2, 2),
             bn=True):
     """Discriminator layer"""
     init = RandomNormal(stddev=0.02)
     d = Conv3D(filters,
                kernel_size=kernel_size,
                strides=strides,
                padding='same',
                kernel_initializer=init)(layer_input)
     d = LeakyReLU(alpha=0.2)(d)
     if bn:
         #d = BatchNormalization(momentum=0.8)(d)
         d = InstanceNormalization()(d)
     return d
Example #22
def create_convolution_block(input_layer,
                             n_filters,
                             batch_normalization=False,
                             kernel=(3, 3, 3),
                             activation=None,
                             padding='same',
                             strides=(1, 1, 1),
                             instance_normalization=False,
                             layer_depth=None):
    """

    :param strides:
    :param input_layer:
    :param n_filters:
    :param batch_normalization:
    :param kernel:
    :param activation: Keras activation layer to use. (default is 'relu')
    :param padding:
    :return:
    """
    layer = Conv3D(n_filters,
                   kernel,
                   padding=padding,
                   strides=strides,
                   name="depth_" + str(layer_depth) + "_conv")(input_layer)
    if batch_normalization:
        layer = BatchNormalization(axis=1,
                                   name="depth_" + str(layer_depth) +
                                   "_bn")(layer)
    elif instance_normalization:
        try:
            from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
        except ImportError:
            raise ImportError(
                "Install keras_contrib in order to use instance normalization."
                "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git"
            )
        layer = InstanceNormalization(axis=1,
                                      name="depth_" + str(layer_depth) +
                                      "_in")(layer)
    if activation is None:
        return Activation('relu',
                          name="depth_" + str(layer_depth) + "_relu")(layer)
    else:
        return activation()(layer)
Example #23
def MultiResBlock(U, inp, alpha=1.67):

    W = alpha * U

    shortcut = inp

    shortcut = conv2d_bn(shortcut,
                         int(W * 0.167) + int(W * 0.333) + int(W * 0.5),
                         1,
                         1,
                         activation=None,
                         padding='same')

    conv3x3 = conv2d_bn(inp,
                        int(W * 0.167),
                        3,
                        3,
                        activation='relu',
                        padding='same')

    conv5x5 = conv2d_bn(conv3x3,
                        int(W * 0.333),
                        3,
                        3,
                        activation='relu',
                        padding='same')

    conv7x7 = conv2d_bn(conv5x5,
                        int(W * 0.5),
                        3,
                        3,
                        activation='relu',
                        padding='same')

    out = concatenate([conv3x3, conv5x5, conv7x7], axis=3)

    out = BatchNormalization(axis=3)(out)

    out = add([shortcut, out])

    out = Activation('relu')(out)

    out = InstanceNormalization()(out)

    return out
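MultiResBlock builds on the conv2d_bn helper from Example #17. A minimal usage sketch, assuming the keras.layers imports used above (Conv2D, BatchNormalization, InstanceNormalization, concatenate, add, Activation) are in scope; the input shape is illustrative.

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(128, 128, 32))
out = MultiResBlock(32, inp)   # U controls the combined width of the three conv paths
model = Model(inp, out)
model.summary()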
Example #24
    def unet_upsample(self,
                      layer_input,
                      skip_input,
                      filters,
                      f_size=4,
                      dropout_rate=0):
        """U-Net up sampling layer"""
        model = UpSampling2D(size=2)(layer_input)
        model = Conv2D(filters, kernel_size=f_size, strides=1,
                       padding='same')(model)
        model = InstanceNormalization(axis=-1, center=False,
                                      scale=False)(model)
        model = Activation('relu')(model)
        if dropout_rate:
            model = Dropout(dropout_rate)(model)

        model = Concatenate()([model, skip_input])
        return model
Example #25
 def block(self, x, f, down=True, bn=True, dropout=False, leaky=True):
     if leaky:
         x = LeakyReLU(0.2)(x)
     else:
         x = layers.Activation('relu')(x)
     if down:
         x = layers.ZeroPadding2D()(x)
         x = layers.Conv2D(f, kernel_size=4, strides=2, use_bias=False)(x)
     else:
         x = layers.Conv2DTranspose(f,
                                    kernel_size=4,
                                    strides=2,
                                    use_bias=False)(x)
         x = layers.Cropping2D((1, 1))(x)
     if bn:
         x = InstanceNormalization()(x)
     if dropout:
         x = layers.Dropout(0.5)(x)
     return x
Example #26
def upsample(x,
             filters,
             activation,
             kernel_size=(3, 3),
             strides=(2, 2),
             padding="same"):
    """
    docstring
    """
    x = Conv2DTranspose(filters,
                        kernel_size,
                        strides=strides,
                        kernel_initializer=kernel_init,
                        padding=padding)(x)
    x = InstanceNormalization(axis=-1)(x)
    if activation:
        x = activation(x)

    return x
Example #27
def define_encoder(image_shape, n_resnet=3):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # image input
    in_image = Input(shape=image_shape)
    # c7s1-64
    g = Conv2D(64, (7, 7), padding='same', kernel_initializer=init)(in_image)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d128 64x64
    g = Conv2D(128, (3, 3),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d256 32x32
    g = Conv2D(256, (3, 3),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d512 16x16
    g = Conv2D(512, (3, 3),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d512 8x8
    g = Conv2D(512, (3, 3),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d1024 4x4
    g = Conv2D(1024, (3, 3),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # encoder output

    model = Model(in_image, g)
    print('encoder:')
    print(model.summary())
    return model
Example #28
def create_convolution_block_up(input_layer,
                                skip_conn,
                                n_filters,
                                batch_normalization=True,
                                kernel_size=(4, 4, 4),
                                activation='relu',
                                padding='same',
                                strides=(2, 2, 2),
                                instance_normalization=False,
                                dropout=True):

    # 3DConv + Normalization + Activation
    # Instance Normalization is said to perform better than Batch Normalization

    init = RandomNormal(mean=0.0, stddev=0.02)  # new
    layer = Conv3DTranspose(n_filters,
                            kernel_size,
                            padding=padding,
                            kernel_initializer=init,
                            strides=strides)(input_layer)

    if batch_normalization:
        layer = BatchNormalization(axis=4)(layer)  # channel_last convention
    elif instance_normalization:
        try:
            from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
        except ImportError:
            raise ImportError(
                "Install keras_contrib in order to use instance normalization."
                "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git"
            )
        layer = InstanceNormalization(axis=4)(layer)

    if dropout:
        layer = SpatialDropout3D(rate=0.5)(layer)

    layer = concatenate([layer, skip_conn], axis=4)

    layer = Activation(activation)(layer)

    return layer
Example #29
def encoder_layer(inputs,
                  filters=16,
                  kernel_size=3,
                  strides=1,
                  activation='relu',
                  instance_norm=True):

    conv = Conv2D(filters=filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same')
    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    return x
Example #30
def define_generator(image_shape, n_resnet=9):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # image input
    in_image = Input(shape=image_shape)
    # c7s1-64
    g = Conv2D(64, (7, 7), padding='same', kernel_initializer=init)(in_image)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d128
    g = Conv2D(128, (3, 3),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d256
    g = Conv2D(256, (3, 3),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # R256
    for _ in range(n_resnet):
        g = resnet_block(256, g)

    # u128
    g = Conv2DTranspose(128, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # u64
    g = Conv2DTranspose(64, (3, 3),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # c7s1-3
    g = Conv2D(3, (7, 7), padding='same', kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    out_image = Activation('tanh')(g)
    # define model
    model = Model(in_image, out_image)
    return model