Example #1
def residual_block(x: layers.Layer,
                   activation: layers.Activation,
                   kernel_size: Tuple[int, int] = (3, 3),
                   strides: Tuple[int, int] = (1, 1),
                   padding: str = 'valid',
                   kernel_initializer: Initializer = None,
                   gamma_initializer: Initializer = None,
                   use_bias: bool = False) -> layers.Layer:

    dim: int = x.shape[-1]
    input_tensor: layers.Layer = x

    x = ReflectionPadding2D()(input_tensor)
    x = layers.Conv2D(dim,
                      kernel_size,
                      strides=strides,
                      kernel_initializer=kernel_initializer,
                      padding=padding,
                      use_bias=use_bias)(x)
    x = InstanceNormalization(gamma_initializer=gamma_initializer)(x)
    x = activation(x)

    # The second conv block must consume the output of the first, not the raw input.
    x = ReflectionPadding2D()(x)
    x = layers.Conv2D(dim,
                      kernel_size,
                      strides=strides,
                      kernel_initializer=kernel_initializer,
                      padding=padding,
                      use_bias=use_bias)(x)
    x = InstanceNormalization(gamma_initializer=gamma_initializer)(x)

    return layers.add([input_tensor, x])
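Several of these snippets rely on a custom ReflectionPadding2D layer that core Keras does not provide. A minimal sketch of such a layer (my own assumption, not taken from any of the examples; it expects NHWC tensors and symmetric padding) could look like this:

import tensorflow as tf
from tensorflow.keras import layers

class ReflectionPadding2D(layers.Layer):
    """Reflection-pads the height and width of an NHWC tensor."""

    def __init__(self, padding=(1, 1), **kwargs):
        super().__init__(**kwargs)
        self.padding = tuple(padding)

    def call(self, inputs):
        pad_h, pad_w = self.padding
        # Pad only the spatial dimensions, mirroring the border pixels.
        return tf.pad(inputs,
                      [[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]],
                      mode='REFLECT')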
Example #2
    def Rk(self, x0):
        init = RandomNormal(stddev=0.02)

        k = int(x0.shape[-1])
        # first layer
        x = ReflectionPadding2D((1, 1))(x0)
        x = Conv2D(filters=k,
                   kernel_size=3,
                   strides=1,
                   padding='valid',
                   kernel_initializer=init)(x)
        x = InstanceNormalization(axis=3, center=True,
                                  epsilon=1e-5)(x, training=True)
        x = Activation('relu')(x)
        # second layer
        x = ReflectionPadding2D((1, 1))(x)
        x = Conv2D(filters=k,
                   kernel_size=3,
                   strides=1,
                   padding='valid',
                   kernel_initializer=init)(x)
        x = InstanceNormalization(axis=3, center=True,
                                  epsilon=1e-5)(x, training=True)
        # merge
        x = add([x, x0])
        return x
def define_discriminator(image_shape):
    # weight initialization (not defined in the original snippet; RandomNormal(stddev=0.02) matches the other examples)
    init = RandomNormal(stddev=0.02)
    # source image input
    input_image = Input(shape=image_shape)
    # C64
    d = Conv2D(64, (4, 4), strides=(2, 2), padding='same',
               kernel_initializer=init)(input_image)
    d = LeakyReLU(alpha=0.2)(d)
    # C128
    d = Conv2D(128, (4, 4), strides=(2, 2),
               padding='same', kernel_initializer=init)(d)
    d = InstanceNormalization(axis=-1)(d)
    d = LeakyReLU(alpha=0.2)(d)
    # C256
    d = Conv2D(256, (4, 4), strides=(2, 2),
               padding='same', kernel_initializer=init)(d)
    d = InstanceNormalization(axis=-1)(d)
    d = LeakyReLU(alpha=0.2)(d)
    # C512
    d = Conv2D(512, (4, 4), strides=(2, 2),
               padding='same', kernel_initializer=init)(d)
    d = InstanceNormalization(axis=-1)(d)
    d = LeakyReLU(alpha=0.2)(d)
    # last output layer
    d = Conv2D(512, (4, 4), padding='same', kernel_initializer=init)(d)
    d = InstanceNormalization(axis=-1)(d)
    d = LeakyReLU(alpha=0.2)(d)
    # patch output
    patch_out = Conv2D(1, (4, 4), padding='same', kernel_initializer=init)(d)
    # define model
    model = Model(input_image, patch_out)
    return model
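InstanceNormalization is not a core Keras layer; in snippets like the one above it is usually imported from TensorFlow Addons (or the older keras_contrib package). A typical import, assuming TensorFlow Addons is installed:

from tensorflow_addons.layers import InstanceNormalization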
Example #4
def build_discriminator(input_shape, k_init, gamma_init):
    
    inp = layers.Input(shape=input_shape)
    
    #C64 block - No instance norm as per original implementation
    x = layers.Conv2D(64, kernel_size=(4,4), kernel_initializer=k_init, strides=(2, 2), padding='same')(inp)
    x = layers.LeakyReLU(alpha=0.2)(x)

    #C128 block
    x = layers.Conv2D(128, kernel_size=(4,4), kernel_initializer=k_init, strides=(2,2), padding='same',
                    use_bias=False)(x)
    x = InstanceNormalization(axis=-1, gamma_initializer = gamma_init)(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    #C256 block
    x = layers.Conv2D(256, kernel_size=(4,4), kernel_initializer=k_init, strides=2, padding='same',
                    use_bias=False)(x)
    x = InstanceNormalization(axis=-1, gamma_initializer = gamma_init)(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    #C512 block
    x = layers.Conv2D(512, kernel_size=(4,4), padding='same', kernel_initializer=k_init,
                    use_bias=False)(x)
    x = InstanceNormalization(axis=-1, gamma_initializer = gamma_init)(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    #Patch output based on PatchGAN
    output = layers.Conv2D(1, kernel_size=(4,4),strides=(1,1), padding='same', kernel_initializer=k_init)(x)

    return tf.keras.models.Model(inputs=inp, outputs=output)
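A hypothetical call of build_discriminator, assuming the RandomNormal(0.0, 0.02) initializers commonly used for CycleGAN and a 256x256 RGB input:

k_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
gamma_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
disc = build_discriminator((256, 256, 3), k_init, gamma_init)
disc.summary()  # the PatchGAN output is a 2D grid of real/fake scores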
def residual_block(filters, previous, input):
    # Stacks nine conv blocks; each block's output is concatenated with the
    # running feature map (DenseNet-style skips) instead of being added.
    for i in range(9):
        x = Conv2D(filters,
                   3,
                   strides=1,
                   padding='same',
                   kernel_initializer=weight_initialzer)(input)
        x = InstanceNormalization(axis=-1)(x)
        x = ReLU()(x)
        x = Conv2D(filters,
                   3,
                   strides=1,
                   padding='same',
                   kernel_initializer=weight_initialzer)(x)
        x = InstanceNormalization(axis=-1)(x)

        if previous is not None:
            x_out = Concatenate()([previous, x])
            previous = x_out
            input = x_out

        else:
            previous = x
            input = x

    return x_out
Example #6
def discriminator(num_filters=64, num_downsamplings=3):
    num_filters_ = num_filters
    x_in = Input(shape=(None, None, 3))

    x = Conv2D(num_filters, kernel_size=4, padding='same')(x_in)
    x = LeakyReLU(alpha=0.2)(x)

    for _ in range(num_downsamplings - 1):
        num_filters = min(num_filters * 2, num_filters_ * 8)
        x = Conv2D(num_filters,
                   kernel_size=4,
                   strides=2,
                   padding='same',
                   use_bias=False)(x)
        x = InstanceNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)

    num_filters = min(num_filters * 2, num_filters_ * 8)
    x = Conv2D(num_filters,
               kernel_size=4,
               strides=1,
               padding='same',
               use_bias=False)(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Conv2D(1, kernel_size=4, strides=1, padding='same')(x)

    return Model(x_in, x)
Example #7
    def __init__(self, num_filters, kernel_init=None, gamma_init=None, use_1x1conv=False, strides=1):
        super(ResNetBlock, self).__init__()
        
        if kernel_init is None:
            self.kernel_init = tf.keras.initializers.RandomNormal(0.0, 0.02) # Used in the original implementation
        else:
            self.kernel_init = kernel_init
        
        if gamma_init is None:
            self.gamma_init = tf.keras.initializers.RandomNormal(0.0, 0.02) # Used in the original implementation
        else:
            self.gamma_init = gamma_init
        
        self.conv_1 = layers.Conv2D(256, kernel_size=(3,3), strides=(1, 1), padding='valid', 
                                    kernel_initializer = self.kernel_init, use_bias=False)
        self.conv_2 = layers.Conv2D(256, kernel_size=(3,3), strides=(1, 1), padding='valid', 
                                    kernel_initializer = self.kernel_init, use_bias=False)
        self.conv_3 = None

        if use_1x1conv:
            self.conv_3 = layers.Conv2D(256, kernel_size=(1,1), strides=1)
        
        # Normalization layers
        self.instance_norm_1 = InstanceNormalization(axis=-1, gamma_initializer = self.gamma_init)
        self.instance_norm_2 = InstanceNormalization(axis=-1, gamma_initializer = self.gamma_init)

        # Reflection padding layers
        self.reflect_pad1 = ReflectionPad2D()
        self.reflect_pad2 = ReflectionPad2D()
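The snippet above only shows the constructor. A forward pass for such a block could look like the following sketch, assuming each ReflectionPad2D adds one pixel per side so the 3x3 'valid' convolutions preserve the spatial size:

    def call(self, x, training=False):
        # Optional 1x1 projection of the shortcut path.
        shortcut = self.conv_3(x) if self.conv_3 is not None else x
        y = self.reflect_pad1(x)
        y = self.instance_norm_1(self.conv_1(y))
        y = tf.nn.relu(y)
        y = self.reflect_pad2(y)
        y = self.instance_norm_2(self.conv_2(y))
        return y + shortcut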
Example #8
    def __init__(self):
        super(TransformerNet, self).__init__()
        self.conv1 = ConvLayer(32, kernel_size=9, strides=1)
        # self.conv1 = ConvLayer(32, kernel_size=3, strides=1)
        self.in1 = InstanceNormalization()
        self.conv2 = ConvLayer(64, kernel_size=3, strides=2)
        self.in2 = InstanceNormalization()
        self.conv3 = ConvLayer(128, kernel_size=3, strides=2)
        self.in3 = InstanceNormalization()

        self.res1 = ResidualBlock(128)
        self.res2 = ResidualBlock(128)
        self.res3 = ResidualBlock(128)
        self.res4 = ResidualBlock(128)
        self.res5 = ResidualBlock(128)

        self.deconv1 = UpsampleConvLayer(64,
                                         kernel_size=3,
                                         strides=1,
                                         upsample=2)
        self.in4 = InstanceNormalization()
        self.deconv2 = UpsampleConvLayer(32,
                                         kernel_size=3,
                                         strides=1,
                                         upsample=2)
        self.in5 = InstanceNormalization()
        self.deconv3 = ConvLayer(3, kernel_size=9, strides=1)
        # self.deconv3 = ConvLayer(3, kernel_size=3, strides=1)

        self.relu = ReLU()
Example #9
def define_discriminator(image_shape):
	# weight initialization
	init = RandomNormal(stddev=0.02)
	# source image input
	in_image = Input(shape=image_shape)
	# C64
	d = Conv2D(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(in_image)
	d = LeakyReLU(alpha=0.2)(d)
	# C128
	d = Conv2D(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d)
	d = InstanceNormalization(axis=-1)(d)
	d = LeakyReLU(alpha=0.2)(d)
	# C256
	d = Conv2D(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d)
	d = InstanceNormalization(axis=-1)(d)
	d = LeakyReLU(alpha=0.2)(d)
	# C512
	d = Conv2D(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d)
	d = InstanceNormalization(axis=-1)(d)
	d = LeakyReLU(alpha=0.2)(d)
	# second last output layer
	d = Conv2D(512, (4, 4), padding='same', kernel_initializer=init)(d)
	d = InstanceNormalization(axis=-1)(d)
	d = LeakyReLU(alpha=0.2)(d)
	# patch output
	patch_out = Conv2D(1, (4, 4), padding='same', kernel_initializer=init)(d)
	# define model
	model = Model(in_image, patch_out)
	# compile model
	model.compile(loss='mse', optimizer=Adam(learning_rate=0.0002, beta_1=0.5), loss_weights=[0.5])
	return model
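A quick usage sketch (not part of the original snippet), assuming 256x256 RGB inputs: after four stride-2 convolutions the PatchGAN output is a 16x16 grid of scores rather than a single scalar.

d_model = define_discriminator(image_shape=(256, 256, 3))
print(d_model.output_shape)  # (None, 16, 16, 1)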
Example #10
def dnn_2(input_dim, nb_class):
    inputs = Input(shape=input_dim)
    x = Dense(256, kernel_initializer='he_uniform')(inputs)
    x = InstanceNormalization()(x)
    x = Activation(swish)(x)
    x = GaussianDropout(0.3)(x)
    x = Dense(128, kernel_initializer='he_uniform')(x)
    x = InstanceNormalization()(x)
    x = Activation(swish)(x)
    x = GaussianDropout(0.3)(x)
    x = Dense(64, kernel_initializer='he_uniform')(x)
    x = InstanceNormalization()(x)
    x = Activation(swish)(x)
    x = GaussianDropout(0.3)(x)
    x = Dense(32, kernel_initializer='he_uniform')(x)
    x = InstanceNormalization()(x)
    x = Activation(swish)(x)
    x = GaussianDropout(0.3)(x)

    output = Dense(1,
                   activation='sigmoid',
                   kernel_initializer='glorot_uniform')(x)
    model = Model(inputs=inputs, outputs=output)

    return model
Example #11
 def conv3d(layer_input,
            filters,
            axis=-1,
            se_res_block=True,
            se_ratio=16,
            down_sizing=True):
     if down_sizing:
         layer_input = MaxPooling3D(pool_size=(2, 2, 2))(layer_input)
     d = Conv3D(filters, (3, 3, 3), use_bias=False,
                padding='same')(layer_input)
     d = InstanceNormalization(axis=axis)(d)
     d = LeakyReLU(alpha=0.3)(d)
     d = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(d)
     d = InstanceNormalization(axis=axis)(d)
     if se_res_block:
         se = GlobalAveragePooling3D()(d)
         se = Dense(filters // se_ratio, activation='relu')(se)
         se = Dense(filters, activation='sigmoid')(se)
         se = Reshape([1, 1, 1, filters])(se)
         d = Multiply()([d, se])
         shortcut = Conv3D(filters, (3, 3, 3),
                           use_bias=False,
                           padding='same')(layer_input)
         shortcut = InstanceNormalization(axis=axis)(shortcut)
         d = add([d, shortcut])
     d = LeakyReLU(alpha=0.3)(d)
     return d
Example #12
    def _build_discriminator(self):

        input_layer = Input(shape=self.img_shape)
        x = input_layer

        # A CycleGAN discriminator is a series of convolutional layers, all with instance normalization (except the first layer).
        x = Conv2D(filters=32, kernel_size=4, strides=2, padding='same')(x)
        x = LeakyReLU(0.2)(x)

        x = Conv2D(filters=64, kernel_size=4, strides=2, padding='same')(x)
        x = InstanceNormalization(axis=-1, center=False, scale=False)(x)
        x = LeakyReLU(0.2)(x)

        x = Conv2D(filters=128, kernel_size=4, strides=2, padding='same')(x)
        x = InstanceNormalization(axis=-1, center=False, scale=False)(x)
        x = LeakyReLU(0.2)(x)

        x = Conv2D(filters=256, kernel_size=4, strides=1, padding='same')(x)
        x = InstanceNormalization(axis=-1, center=False, scale=False)(x)
        x = LeakyReLU(0.2)(x)

        # The final layer is a convolutional layer with only one filter and no activation.
        output_layer = Conv2D(filters=1,
                              kernel_size=4,
                              strides=1,
                              padding='same')(x)

        return Model(input_layer, output_layer)
Example #13
    def __init__(self, filters, block, z_dim, **kwargs):
        """
        :param filters: the number of convolution filters (fixed 3x3 size)
        :param block: the block number for naming
        :param z_dim: the z-dimension for mapping features to style vector
        """
        super(EncoderBlock, self).__init__(**kwargs)
        # Attributes
        self.filters = filters
        self.block = block
        self.z_dim = z_dim

        # Trainable Layers
        self.conv1 = Conv2DEQ(filters=filters,
                              kernel_size=(3, 3),
                              padding="same",
                              name=f"E_block_{block}_Conv_1")
        self.act1 = LeakyReLU(0.2, name=f"E_block_{block}_Act_1")
        self.msd = MeanAndStDev(name=f"E_block_{block}_msd")
        self.in1 = InstanceNormalization(name=f"E_block_{block}_IN_1",
                                         center=False,
                                         scale=False)
        self.in2 = InstanceNormalization(name=f"E_block_{block}_IN_2",
                                         center=False,
                                         scale=False)
        self.conv2 = Conv2DEQ(filters=filters,
                              kernel_size=(3, 3),
                              padding="same",
                              name=f"E_block_{block}_Conv_2")
        self.act2 = LeakyReLU(0.2, name=f"E_block_{block}_Act_2")
        self.downsample = AveragePooling2D(name=f"E_block_{block}_DownSample")
        self.mapStyle1 = DenseEQ(units=z_dim, name=f"E_block_{block}_style_1")
        self.mapStyle2 = DenseEQ(units=z_dim, name=f"E_block_{block}_style_2")
        self.flatten = Flatten(name=f"E_block_{block}_flatten")
    def __init__(self):
        super(Discriminator, self).__init__()
        # define layers here

        init = RandomNormal(stddev=0.02)
        self.architecture = []
        # C64
        self.architecture.append(Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init))
        self.architecture.append(LeakyReLU(alpha=0.2))
        # C128
        self.architecture.append(Conv2D(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init))
        self.architecture.append(InstanceNormalization(axis=-1))
        self.architecture.append(LeakyReLU(alpha=0.2))
        # C256
        self.architecture.append(Conv2D(256, (4,4), strides=(2,2), padding='same', kernel_initializer=init))
        self.architecture.append(InstanceNormalization(axis=-1))
        self.architecture.append(LeakyReLU(alpha=0.2))
        # C512
        self.architecture.append(Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init))
        self.architecture.append(InstanceNormalization(axis=-1))
        self.architecture.append(LeakyReLU(alpha=0.2))
        # second last output layer
        self.architecture.append(Conv2D(512, (4,4), padding='same', kernel_initializer=init))
        self.architecture.append(InstanceNormalization(axis=-1))
        self.architecture.append(LeakyReLU(alpha=0.2))
        # patch output
        self.architecture.append(Conv2D(1, (4,4), padding='same', kernel_initializer=init))
def generator_small(image_shape,
                    n_resBlocks=6,
                    norm_type='instancenorm',
                    channels_base=32):

    n_channels = image_shape[2]
    # weight initialization
    init = RandomNormal(stddev=0.02)

    in_image = Input(shape=image_shape)
    t = in_image
    # c7s1-32
    t = ReflectionPadding2D(padding=(3, 3))(t)
    t = Conv2D(channels_base, (7, 7), padding="valid",
               kernel_initializer=init)(t)
    t = InstanceNormalization(axis=-1)(t)
    t = Activation("relu")(t)
    # d64 x,y,64
    t = Conv2D(2 * channels_base, (3, 3),
               strides=(2, 2),
               padding="same",
               kernel_initializer=init)(t)
    t = InstanceNormalization(axis=-1)(t)
    t = Activation("relu")(t)
    # d128 x,y,128
    t = Conv2D(4 * channels_base, (3, 3),
               strides=(2, 2),
               padding="same",
               kernel_initializer=init)(t)
    t = InstanceNormalization(axis=-1)(t)
    t = Activation("relu")(t)

    # Resnet Blocks
    for _ in range(n_resBlocks):
        t = resnet_block(t, 4 * channels_base)

    # u64
    t = Conv2DTranspose(2 * channels_base, (3, 3),
                        strides=(2, 2),
                        padding="same",
                        kernel_initializer=init)(t)
    t = InstanceNormalization(axis=-1)(t)
    t = Activation("relu")(t)
    # u32
    t = Conv2DTranspose(channels_base, (3, 3),
                        strides=(2, 2),
                        padding="same",
                        kernel_initializer=init)(t)
    t = InstanceNormalization(axis=-1)(t)
    t = Activation("relu")(t)
    # c7s1-3
    t = ReflectionPadding2D(padding=(3, 3))(t)
    t = Conv2D(n_channels, (7, 7), padding="valid", kernel_initializer=init)(t)
    # t = InstanceNormalization(axis=-1)(t)
    output = Activation("tanh")(t)

    result = tf.keras.Model(inputs=in_image, outputs=output)
    return result
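A hypothetical call, assuming a compatible resnet_block helper (such as the one in Example #19) is defined in the same module:

gen = generator_small((256, 256, 3), n_resBlocks=6)
gen.summary()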
Example #16
    def __init__(self,
                 segmap_filters,
                 beta1=0.5,
                 beta2=0.999,
                 learning_rate=0.0004):
        super(Discriminator, self).__init__()
        # Padding, Stride, etc calculations
        KERNEL_SIZE = 4
        ALPHA_VAL = 0.2

        self.beta1 = beta1
        self.beta2 = beta2
        self.learning_rate = learning_rate
        self.optimizer = tf.keras.optimizers.Adam(
            learning_rate=self.learning_rate,
            beta_1=self.beta1,
            beta_2=self.beta2)

        # Initial first block
        self.glorot = tf.keras.initializers.GlorotNormal()
        # filters=64, stride=2
        self.conv1 = tf.Variable(
            self.glorot(
                shape=[KERNEL_SIZE, KERNEL_SIZE, segmap_filters + 3, 64]))
        self.bias1 = tf.Variable(self.glorot(shape=[64]))
        self.leaky1 = LeakyReLU(alpha=ALPHA_VAL)

        # Second block
        self.conv2 = tf.Variable(
            self.glorot(shape=[KERNEL_SIZE, KERNEL_SIZE, 64, 128]))
        self.bias2 = tf.Variable(self.glorot(shape=[128]))
        self.inorm1 = InstanceNormalization()
        self.leaky2 = LeakyReLU(alpha=ALPHA_VAL)

        # Third block
        self.conv3 = tf.Variable(
            self.glorot(shape=[KERNEL_SIZE, KERNEL_SIZE, 128, 256]))
        self.bias3 = tf.Variable(self.glorot(shape=[256]))
        self.inorm2 = InstanceNormalization()
        self.leaky3 = LeakyReLU(alpha=ALPHA_VAL)

        # Fourth block
        self.conv4 = tf.Variable(
            self.glorot(shape=[KERNEL_SIZE, KERNEL_SIZE, 256, 512]))
        self.bias4 = tf.Variable(self.glorot(shape=[512]))
        self.inorm3 = InstanceNormalization()
        self.leaky4 = LeakyReLU(alpha=ALPHA_VAL)

        # Final Convolutional Layer, as like PatchGAN implementation
        self.conv5 = tf.Variable(
            self.glorot(shape=[KERNEL_SIZE, KERNEL_SIZE, 512, 1]))
        self.bias5 = tf.Variable(self.glorot(shape=[1]))

        # In weird pytorch code
        self.inorm4 = InstanceNormalization()
        self.leaky5 = LeakyReLU(alpha=ALPHA_VAL)

        self.bce = tf.keras.losses.BinaryCrossentropy()
def Rk(input, k):
    block = Conv2D(k, (3, 3), padding='same', kernel_initializer=weight_initializer)(input)
    block = InstanceNormalization(axis=-1)(block)
    block = Activation('relu')(block)

    block = Conv2D(k, (3, 3), padding='same', kernel_initializer=weight_initializer)(block)
    block = InstanceNormalization(axis=-1)(block)

    return block + input
def build_discriminator(name="discriminator"):
    inp = Input((None, None, 3))

    net = Conv2D(filters=32,
                 kernel_size=(3, 3),
                 strides=(1, 1),
                 padding="SAME",
                 activation=None)(inp)
    net = LeakyReLU(0.2)(net)

    net = Conv2D(filters=64,
                 kernel_size=(3, 3),
                 strides=(2, 2),
                 padding="SAME",
                 activation=None)(net)
    net = LeakyReLU(0.2)(net)
    net = Conv2D(filters=128,
                 kernel_size=(3, 3),
                 strides=(1, 1),
                 padding="SAME",
                 activation=None)(net)
    net = InstanceNormalization()(net)
    net = LeakyReLU(0.2)(net)

    net = Conv2D(filters=128,
                 kernel_size=(3, 3),
                 strides=(2, 2),
                 padding="SAME",
                 activation=None)(net)
    net = LeakyReLU(0.2)(net)
    net = Conv2D(filters=256,
                 kernel_size=(3, 3),
                 strides=(1, 1),
                 padding="SAME",
                 activation=None)(net)
    net = InstanceNormalization()(net)
    net = LeakyReLU(0.2)(net)

    net = Conv2D(filters=256,
                 kernel_size=(3, 3),
                 strides=(1, 1),
                 padding="SAME",
                 activation=None)(net)
    net = InstanceNormalization()(net)
    net = LeakyReLU(0.2)(net)

    net = Conv2D(filters=1,
                 kernel_size=(3, 3),
                 strides=(1, 1),
                 padding="SAME",
                 activation=None)(net)

    return Model(inp, net, name=name)
Example #19
def resnet_block(input_layer, n_filters):
    # first layer convolutional layer
    g = Conv2D(n_filters, (3, 3), padding='same',
               kernel_initializer=init)(input_layer)
    g = InstanceNormalization(axis=-1)(g)
    g = Activation('relu')(g)
    # second convolutional layer
    g = Conv2D(n_filters, (3, 3), padding='same', kernel_initializer=init)(g)
    g = InstanceNormalization(axis=-1)(g)
    # concatenate merge channel-wise with input layer
    g = Concatenate()([g, input_layer])
    return g
Example #20
def res_block(x_in, num_filters):
    x = tf.pad(x_in, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
    x = Conv2D(num_filters, kernel_size=3, padding='valid', use_bias=False)(x)
    x = InstanceNormalization()(x)
    x = ReLU()(x)

    x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
    x = Conv2D(num_filters, kernel_size=3, padding='valid', use_bias=False)(x)
    x = InstanceNormalization()(x)
    x = Add()([x_in, x])

    return x
Example #21
        def encoder_step(layer, Nf, inorm=True):
            x = Conv3D(Nf, kernel_size=3, strides=2, kernel_initializer='he_normal', padding='same')(layer)
            x = InstanceNormalization()(x)
            x = LeakyReLU()(x)
            x = Dropout(0.2)(x)

            x = Conv3D(Nf * 2, kernel_size=3, kernel_initializer='he_normal', padding='same')(x)
            x = InstanceNormalization()(x)
            x = LeakyReLU()(x)
            x = Dropout(0.2)(x)

            return x
Example #22
        def decoder_step(layer, layer_to_concatenate, Nf):
            x = Conv3DTranspose(Nf, kernel_size=5, strides=2, padding='same', kernel_initializer='he_normal')(layer)
            x = InstanceNormalization()(x)
            x = LeakyReLU()(x)
            x = Concatenate()([x, layer_to_concatenate])
            x = Dropout(0.2)(x)

            x = Conv3D(Nf, kernel_size=3, kernel_initializer='he_normal', padding='same')(x)
            x = InstanceNormalization()(x)
            x = LeakyReLU()(x)
            x = Dropout(0.2)(x)
            return x
Example #23
        def residual(layer_input, filters):
            shortcut = layer_input
            y = ReflectionPadding2D(padding =(1,1))(layer_input)
            y = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='valid', kernel_initializer = self.weight_init)(y)
            y = InstanceNormalization(axis = -1, center = False, scale = False)(y)
            y = Activation('relu')(y)
            
            y = ReflectionPadding2D(padding =(1,1))(y)
            y = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='valid', kernel_initializer = self.weight_init)(y)
            y = InstanceNormalization(axis = -1, center = False, scale = False)(y)

            return add([shortcut, y])
def generator():

    x1 = Input(shape=(img_width, img_height, num_channels * num_stacked_imgs))

    x2 = Conv2D(128,
                kernel_size=7,
                strides=1,
                padding='same',
                kernel_initializer=weight_initialzer)(x1)

    x3 = Conv2D(128,
                kernel_size=3,
                strides=2,
                padding='same',
                kernel_initializer=weight_initialzer)(x2)
    x3 = InstanceNormalization(axis=-1)(x3)
    x3 = ReLU()(x3)

    x4 = Conv2D(256,
                kernel_size=3,
                strides=2,
                padding='same',
                kernel_initializer=weight_initialzer)(x3)
    x4 = InstanceNormalization(axis=-1)(x4)
    x4 = ReLU()(x4)

    x13 = residual_block(256, None, x4)

    x14 = Conv2DTranspose(128,
                          kernel_size=3,
                          strides=2,
                          padding='same',
                          kernel_initializer=weight_initialzer)(x13)
    x14 = InstanceNormalization(axis=-1)(x14)
    x14 = ReLU()(x14)

    x15 = Conv2DTranspose(256,
                          kernel_size=3,
                          strides=2,
                          padding='same',
                          kernel_initializer=weight_initialzer)(x14)
    x15 = InstanceNormalization(axis=-1)(x15)
    x15 = ReLU()(x15)

    x16 = Conv2D(3,
                 kernel_size=7,
                 strides=1,
                 padding='same',
                 kernel_initializer=weight_initialzer,
                 activation='tanh')(x15)

    return Model(x1, x16)
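The generator above relies on the residual_block helper defined earlier as well as several module-level names that are not shown in the snippet. A purely hypothetical setup (placeholder values, not taken from the original code):

img_width, img_height = 256, 256
num_channels, num_stacked_imgs = 3, 4
weight_initialzer = tf.keras.initializers.RandomNormal(stddev=0.02)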
Example #25
        def residual(input, filters):
            shortcut = input

            x = ReflectionPadding2D(padding=(1,1))(input)
            x = Conv2D(filters=filters, kernel_size=3, strides=1, padding='valid')(x)
            x = InstanceNormalization(axis=-1, center=False, scale=False)(x)
            x = Activation('relu')(x)
            
            x = ReflectionPadding2D(padding=(1,1))(x)
            x = Conv2D(filters, kernel_size=3, strides=1, padding='valid')(x)
            x = InstanceNormalization(axis=-1, center=False, scale=False)(x)

            return add([shortcut, x])
Example #26
def build_generator(input_shape, k_init, gamma_init):
    
    inp = layers.Input(shape=input_shape)
    
    x = ReflectionPad2D(padding=(3,3))(inp)
    # The 7x7 conv must consume the reflection-padded tensor (with 'valid' padding),
    # mirroring the final block below; applying it to the raw input discards the padding.
    x = layers.Conv2D(64, kernel_size=(7,7), kernel_initializer=k_init, strides=1, padding='valid',
                    use_bias=False)(x)
    x = InstanceNormalization(axis=-1, gamma_initializer = gamma_init)(x)
    x = layers.Activation('relu')(x)

    #Downsampling Layers
    x = layers.Conv2D(128, kernel_size=(3,3), kernel_initializer=k_init, strides=2, padding='same',
                    use_bias=False)(x)
    x = InstanceNormalization(axis=-1, gamma_initializer = gamma_init)(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(256, kernel_size=(3,3), kernel_initializer=k_init, strides=2, padding='same',
                    use_bias=False)(x)
    x = InstanceNormalization(axis=-1, gamma_initializer = gamma_init)(x)
    x = layers.Activation('relu')(x)

    #ResNet Blocks
    x = ResNetBlock(256, kernel_init=k_init)(x)
    x = ResNetBlock(256, kernel_init=k_init)(x)
    x = ResNetBlock(256, kernel_init=k_init)(x)
    x = ResNetBlock(256, kernel_init=k_init)(x)
    x = ResNetBlock(256, kernel_init=k_init)(x)
    x = ResNetBlock(256, kernel_init=k_init)(x)
    x = ResNetBlock(256, kernel_init=k_init)(x)
    x = ResNetBlock(256, kernel_init=k_init)(x)
    x = ResNetBlock(256, kernel_init=k_init)(x)

    #Upsampling layers
    x = layers.Conv2DTranspose(128, kernel_size=(3,3), kernel_initializer=k_init, strides=(2,2), padding='same',
                            use_bias=False)(x)
    x = InstanceNormalization(axis=-1, gamma_initializer = gamma_init)(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2DTranspose(64, kernel_size=(3,3), kernel_initializer=k_init, strides=(2,2), padding='same',
                            use_bias=False)(x)
    x = InstanceNormalization(axis=-1, gamma_initializer = gamma_init)(x)
    x = layers.Activation('relu')(x)

    # Final block 
    last_layer = ReflectionPad2D(padding=(3,3))(x)
    last_layer = layers.Conv2D(3, kernel_size=(7,7), padding='valid')(last_layer)
    
    # as with the original paper, the last activation is tanh rather than relu 
    last_layer = layers.Activation('tanh')(last_layer)
    
    return tf.keras.models.Model(inputs=inp, outputs=last_layer)
Example #27
    def __init__(self, out_channels, kernel_size=3, stride=1, upsample=2):
        super().__init__()

        # General
        self.upsample = tf.keras.layers.UpSampling2D(size=(upsample, upsample))

        # Right Side
        self.norm_r1 = InstanceNormalization()
        self.conv_r1 = ConvLayer(out_channels, kernel_size, stride)

        self.norm_r2 = InstanceNormalization()
        self.conv_r2 = ConvLayer(out_channels, kernel_size, stride)

        # Left Side
        self.conv_l = ConvLayer(out_channels, 1, 1)
Example #28
 def __init__(self,
              filters: int,
              kernel_size: Union[List, int],
              strides: int = 1,
              dilation_rate: float = 1,
              padding: str = "same",
              groups: int = 1,
              apply_activation: bool = True,
              apply_norm: bool = True,
              use_IN: bool = False,
              **kwargs):
     super(MyConv2D, self).__init__(**kwargs)
     self.conv2d = Conv2D(filters,
                          kernel_size,
                          strides,
                          dilation_rate=dilation_rate,
                          padding=padding,
                          kernel_initializer=tf.initializers.GlorotNormal(),
                          use_bias=False,
                          groups=groups)
     self.activation = ReLU()
     self.apply_activation = apply_activation
     self.apply_norm = apply_norm
     self.norm = InstanceNormalization(
         axis=3,
         center=True,
         scale=True,
         beta_initializer="random_uniform",
         gamma_initializer="random_uniform"
     ) if use_IN else BatchNormalization()
Example #29
def resnet_d(input_shape=(128, 128, 3), norm=None):
    if norm == "batch":
        norm = None
    _h, _w = input_shape[0], input_shape[1]
    h, w = 8, 8
    m = keras.Sequential(
        [
            # Conv2D(64, 7, 1, "same", kernel_initializer=W_INIT),
        ],
        name="resnet_d")
    if norm == "instance":
        m.add(InstanceNormalization())
        m.add(LeakyReLU(0.2))
    c = 16
    while True:
        strides = [1, 1]
        if _h > h:
            _h //= 2
            strides[0] = 2
        if _w > w:
            _w //= 2
            strides[1] = 2
        m.add(Conv2D(c, 3, strides, "same", kernel_initializer=W_INIT))
        m.add(ResBlock(filters=c, bottlenecks=2, norm=norm))
        c = min(int(2 * c), 128)
        if _w <= w and _h <= h:
            break
    # m.add(Conv2D(256, 3, 2, "same"))    # 4^4
    # if norm == "instance":
    #     m.add(InstanceNormalization())
    # m.add(LeakyReLU(0.2))
    # m.add(Flatten())
    return m
Example #30
 def conv2d(layer_input, filters, f_size=4):
     """Layers used during downsampling"""
     d = Conv2D(filters, kernel_size=f_size, strides=2,
                padding='same')(layer_input)
     d = LeakyReLU(alpha=0.2)(d)
     d = InstanceNormalization()(d)
     return d