Example #1
    def build_block(self, filter_num, res, input_shape, is_base):
        input_tensor = layers.Input(shape=input_shape, name=f"g_{res}")
        noise = layers.Input(shape=(res, res, 1), name=f"noise_{res}")
        w = layers.Input(shape=(512,))
        x = input_tensor

        if not is_base:
            # Every block except the base block first doubles the resolution.
            x = layers.UpSampling2D((2, 2))(x)
            x = EqualizedConv(filter_num, 3)(x)

        x = AddNoise()([x, noise])
        x = layers.LeakyReLU(0.2)(x)
        x = InstanceNormalization()(x)
        x = AdaIN()([x, w])

        x = EqualizedConv(filter_num, 3)(x)
        x = AddNoise()([x, noise])
        x = layers.LeakyReLU(0.2)(x)
        x = InstanceNormalization()(x)
        x = AdaIN()([x, w])
        return keras.Model([input_tensor, w, noise], x, name=f"genblock_{res}x{res}")
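# The block above relies on custom layers (EqualizedConv, AddNoise, AdaIN)
# that are not part of this snippet. Since the block already applies
# InstanceNormalization, a minimal AdaIN only has to map the style vector w
# to a per-channel scale and bias; a sketch under that assumption:
from tensorflow.keras import layers

class AdaIN(layers.Layer):
    def build(self, input_shapes):
        x_shape, w_shape = input_shapes
        channels = x_shape[-1]
        self.to_scale = layers.Dense(channels)  # w -> per-channel scale
        self.to_bias = layers.Dense(channels)   # w -> per-channel bias

    def call(self, inputs):
        x, w = inputs
        scale = self.to_scale(w)[:, None, None, :]  # broadcast over H and W
        bias = self.to_bias(w)[:, None, None, :]
        return x * scale + bias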
Example #2
def _single_layer_call(x, layer, instance_norm, activation):
    y = layer(x)

    if instance_norm:
        if not isinstance(instance_norm, dict):
            instance_norm = {}
        y = InstanceNormalization(**instance_norm)(y)

    if activation:
        y = _as_activation(activation)(y)

    return y
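# A hypothetical call, assuming _as_activation resolves strings to Keras
# activation layers: a dict passed as instance_norm is forwarded to
# InstanceNormalization as keyword arguments, while any other truthy value
# uses its defaults.
y = _single_layer_call(x,
                       layers.Conv2D(64, 3, padding='same'),
                       instance_norm={'epsilon': 1e-5},
                       activation='relu')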
Example #3
def dk(k, use_instancenorm=True):
    # dk: a 3x3 stride-2 Conv-InstanceNorm-ReLU block with k filters
    # (CycleGAN paper naming).
    block = Sequential()
    block.add(
        Conv2D(k, (3, 3),
               strides=2,
               padding='same',
               kernel_initializer=weight_initializer))
    if use_instancenorm:
        block.add(InstanceNormalization(axis=-1))
    block.add(Activation('relu'))

    return block
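# Hypothetical usage: chaining dk blocks into a stride-2 encoder. The
# returned Sequential blocks are callable on tensors; weight_initializer is
# assumed to be defined alongside dk in the source module.
x = dk(64, use_instancenorm=False)(inputs)
x = dk(128)(x)
x = dk(256)(x)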
Example #4
    def conv3d(inputs: Tensor,
               filters: int,
               downsizing: bool = True,
               loop: int = 2) -> Tensor:

        if downsizing:
            inputs = MaxPool3D(pool_size=(2, 2, 2))(inputs)
        x = inputs
        for _ in range(loop):
            x = Conv3D(filters, (3, 3, 3), strides=(1, 1, 1), use_bias=False, padding='same')(x)
            x = InstanceNormalization()(x)
            x = Activation('relu')(x)
        return x
Example #5
    def __init__(self, dim, use_bias):
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2D(1),
            Conv2D(filters=dim,
                   kernel_size=3,
                   strides=1,
                   padding='valid',
                   use_bias=use_bias),
            InstanceNormalization(axis=3),
            ReLU()
        ]

        conv_block += [
            ReflectionPad2D(1),
            Conv2D(filters=dim,
                   kernel_size=3,
                   strides=1,
                   padding='valid',
                   use_bias=use_bias),
            InstanceNormalization(axis=3)
        ]
        self.conv_block = tf.keras.Sequential(conv_block)
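# The snippet shows only __init__; a matching call method (a sketch, assuming
# the standard residual formulation) adds the block output back to its input.
# The reflection padding and 'valid' convolutions keep the spatial dimensions
# unchanged, so the two shapes match.

    def call(self, x, training=False):
        return x + self.conv_block(x, training=training)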
Example #6
def se_block(inputs: tf.Tensor,
             block_input: tf.Tensor,
             filters: int,
             se_ratio: int = 16) -> tf.Tensor:

    se = GlobalAveragePooling3D()(inputs)
    se = Dense(filters // se_ratio, activation='relu')(se)
    se = Dense(filters, activation='sigmoid')(se)
    se = Reshape([1, 1, 1, filters])(se)
    x = Multiply()([inputs, se])
    shortcut = Conv3D(filters, (3, 3, 3), use_bias=False,
                      padding='same')(block_input)
    shortcut = InstanceNormalization()(shortcut)
    x = Add()([x, shortcut])
    return x
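# Hypothetical usage inside a 3D residual stage (cf. Example #11): the SE
# branch reweights the channels of inputs, while block_input feeds the
# projected shortcut. With filters=64 and se_ratio=16 the squeeze path
# bottlenecks to 4 units before re-expanding to 64.
x = Conv3D(64, (3, 3, 3), use_bias=False, padding='same')(block_input)
x = InstanceNormalization()(x)
x = se_block(x, block_input, filters=64)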
Example #7
    def down_sample(self, n_filters, kernel_size, add_norm=True):
        x = Sequential()
        x.add(
            Conv2D(n_filters,
                   kernel_size,
                   strides=2,
                   padding='same',
                   kernel_initializer=tf.random_normal_initializer(0., 0.02),
                   use_bias=False))
        if add_norm:
            x.add(
                InstanceNormalization(
                    gamma_initializer=RandomNormal(mean=0.0, stddev=0.02)))
        x.add(LeakyReLU())
        return x
Example #8
    def up_sample(self, n_filters, kernel_size, add_dropout=False):
        x = Sequential()
        x.add(
            Conv2DTranspose(n_filters,
                            kernel_size,
                            strides=2,
                            padding='same',
                            kernel_initializer=tf.random_normal_initializer(
                                0., 0.02),
                            use_bias=False))
        x.add(
            InstanceNormalization(
                gamma_initializer=RandomNormal(mean=0.0, stddev=0.02)))
        if add_dropout:
            x.add(Dropout(0.5))
        x.add(ReLU())
        return x
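# Hypothetical pairing of the two helpers in a U-Net style generator: mirror
# the down_sample stages with up_sample stages and concatenate the matching
# skip connections.
d1 = self.down_sample(64, 4, add_norm=False)(inputs)
d2 = self.down_sample(128, 4)(d1)
u1 = self.up_sample(64, 4)(d2)
u1 = Concatenate()([u1, d1])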
Example #9
def define_generator(image_shape, n_resnet=9):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # image input
    in_image = Input(shape=image_shape)
    # c7s1-64
    g = Conv2D(64, (7, 7), padding='same', kernel_initializer=init)(in_image)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d128
    g = Conv2D(128, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # d256
    g = Conv2D(256, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # R256
    for _ in range(n_resnet):
        g = resnet_block(256, g)
    # u128
    g = Conv2DTranspose(128, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # u64
    g = Conv2DTranspose(64, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # c7s1-3
    g = Conv2D(3, (7, 7), padding='same', kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)

    out_image = Activation('tanh')(g)
    out_image = Lambda(lambda x: (x + 1) / 2)(out_image)

    model = Model(in_image, out_image)
    return model
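# Hypothetical usage (assumes the resnet_block helper referenced above is
# defined elsewhere in the source, cf. the residual pattern in Example #5):
generator = define_generator(image_shape=(256, 256, 3))
generator.summary()
fake_images = generator.predict(real_images)  # outputs in [0, 1] after the Lambda rescale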
Example #10
def _conv_block(inputs, filters, kernel, strides):
    """Convolution Block
    This function defines a 2D convolution operation with instance normalization and relu6.
    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
    # Returns
        Output tensor.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)
    x = InstanceNormalization(axis=channel_axis)(x)
    return Activation(relu6)(x)
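# Hypothetical usage; relu6 is assumed to be imported alongside this
# MobileNet-style helper.
x = _conv_block(inputs, filters=32, kernel=(3, 3), strides=(2, 2))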
Example #11
def conv3d(inputs: tf.Tensor,
           filters: int,
           is_se_block: bool = True,
           se_ratio: int = 16,
           downsizing: bool = True,
           activation: bool = True,
           loop: int = 2) -> tf.Tensor:

    if downsizing:
        inputs = MaxPooling3D(pool_size=(2, 2, 2))(inputs)

    x = inputs
    for i in range(loop):
        x = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(x)
        x = InstanceNormalization()(x)
        if is_se_block and i > 0:
            x = se_block(x, inputs, filters, se_ratio=se_ratio)
        if activation:
            x = LeakyReLU(alpha=0.3)(x)

    return x
Example #12
def upsample(x: layers.Layer,
             filters: int,
             activation: layers.Activation,
             kernel_size: Tuple[int, int] = (3, 3),
             strides: Tuple[int, int] = (2, 2),
             padding: str = 'same',
             kernel_initializer: Initializer = None,
             gamma_initializer: Initializer = None,
             use_bias: bool = False) -> layers.Layer:
    x = layers.Conv2DTranspose(filters,
                               kernel_size,
                               strides=strides,
                               padding=padding,
                               kernel_initializer=kernel_initializer,
                               use_bias=use_bias)(x)
    x = InstanceNormalization(gamma_initializer=gamma_initializer)(x)

    if activation:
        x = activation(x)

    return x
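# Hypothetical usage with the 0.02-stddev initializers common in CycleGAN
# implementations:
init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
x = upsample(x, 128, activation=layers.ReLU(),
             kernel_initializer=init, gamma_initializer=init)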
Example #13
def downsample_block(incoming_layer, num_filters, kernel_size=4):
    """
    Downsampling block of layers used by U-Net generator. Block consists of:
    {Conv2D -> LeakyReLU -> InstanceNormalization}
    Parameters:
        incoming_layer:         type:tf.keras.Layer. Layer which will
                                pass its output to this block
        num_filters:            type:int. Number of filters for
                                the 2d Conv layer
        kernel_size:            type:int. Size of the kernel for
                                the 2d Conv layer
    Returns:
        type:tf.Tensor. Output tensor of the downsampling block.
    """
    downsample_layer = Conv2D(num_filters,
                              kernel_size=kernel_size,
                              strides=2,
                              padding='same')(incoming_layer)
    downsample_layer = LeakyReLU(alpha=0.2)(downsample_layer)
    downsample_layer = InstanceNormalization()(downsample_layer)
    return downsample_layer
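# Hypothetical usage: halving the spatial resolution while doubling the
# filter count down a U-Net encoder.
d1 = downsample_block(in_image, 64)
d2 = downsample_block(d1, 128)
d3 = downsample_block(d2, 256)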