def define_discriminator(image_shape):
	"""Build and compile a PatchGAN discriminator.

	Three stride-2 Conv2D blocks (32/64/128 filters) plus a stride-1
	256-filter block, LeakyReLU throughout and instance normalisation from
	the second block on; a final 1-filter conv yields a patch map of
	real/fake scores.  Compiled with mse (LSGAN) loss, weighted 0.5 so the
	discriminator updates at half the generator's pace.
	"""
	# source image input
	in_image = Input(shape=image_shape)
	# C32
	d = Conv2D(32, (3,3), strides=(2,2), padding='same')(in_image)
	d = LeakyReLU(alpha=0.2)(d)
	# C64
	d = Conv2D(64, (3,3), strides=(2,2), padding='same')(d)
	d = InstanceNormalization(axis=-1)(d)
	d = LeakyReLU(alpha=0.2)(d)
	# C128
	d = Conv2D(128, (3,3), strides=(2,2), padding='same')(d)
	d = InstanceNormalization(axis=-1)(d)
	d = LeakyReLU(alpha=0.2)(d)
	# second last output layer 256
	d = Conv2D(256, (3,3), padding='same')(d)
	d = InstanceNormalization(axis=-1)(d)
	d = LeakyReLU(alpha=0.2)(d)
	# patch output (no activation: LSGAN uses raw scores)
	patch_out = Conv2D(1, (3,3), padding='same')(d)
	# define model
	model = Model(in_image, patch_out)
	# compile model
	model.compile(loss='mse', optimizer=Adam(lr=0.0002, beta_1=0.5), loss_weights=[0.5]) #different
	# model.summary() prints the table itself and returns None, so the old
	# print(model.summary()) emitted a spurious trailing "None" line.
	model.summary()
	return model
Example #2
0
File: cyclegan.py  Project: beskyfil/mvi
def define_discriminator(image_shape):
	"""PatchGAN discriminator: maps a source image to a 1-channel patch map
	of real/fake scores.  Compiled with mse (LSGAN) loss, weighted 0.5 to
	slow discriminator updates relative to the generator."""
	weight_init = RandomNormal(stddev=0.02)
	in_image = Input(shape=image_shape)

	d = in_image
	# stride-2 conv blocks; instance normalisation everywhere except block 1
	for block_idx, n_filters in enumerate((32, 64, 128, 256)):
		d = Conv2D(n_filters, (3,3), strides=(2,2), padding='same', kernel_initializer=weight_init)(d)
		if block_idx > 0:
			d = InstanceNormalization(axis=-1)(d)
		d = LeakyReLU(alpha=0.2)(d)

	# single-channel patch output (raw scores, no activation)
	patch_out = Conv2D(1, (4,4), padding='same', kernel_initializer=weight_init)(d)
	model = Model(in_image, patch_out)
	model.compile(loss='mse', optimizer=Adam(lr=0.0002, beta_1=0.5), loss_weights=[0.5])
	return model
Example #3
0
def D1(input_shape):
    """Fully-convolutional discriminator D1 (all convs stride 1, so spatial
    size is preserved).  First block has no normalisation; later blocks apply
    an affine-free instance norm after the activation.  Returns an
    uncompiled Model producing a 1-channel score map."""
    weight_init = RandomNormal(stddev=0.02)
    image = Input(input_shape)

    out = Conv2D(64, 4, strides=1, padding='SAME', kernel_initializer=weight_init)(image)
    out = LeakyReLU(alpha=0.2)(out)

    # conv -> leaky relu -> instance norm (no learnable scale/offset)
    for n_filters in (128, 256, 512):
        out = Conv2D(n_filters, 4, strides=1, padding='SAME', kernel_initializer=weight_init)(out)
        out = LeakyReLU(alpha=0.2)(out)
        out = InstanceNormalization(axis=-1, center=False, scale=False)(out)

    out = Conv2D(1, 4, strides=1, padding='SAME', kernel_initializer=weight_init)(out)

    return Model(inputs=image, outputs=out, name="D1")
Example #4
0
def define_discriminator(image_shape):
	"""70x70-style PatchGAN discriminator (C64-C128-C256-C512 + a stride-1
	512 block), compiled with mse (LSGAN) loss at half weight."""
	weight_init = keras.initializers.RandomNormal(stddev=0.02)
	in_image = Input(shape=image_shape)

	d = in_image
	# stride-2 downsampling blocks; the first one skips instance norm
	for block_idx, n_filters in enumerate((64, 128, 256, 512)):
		d = Conv2D(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=weight_init)(d)
		if block_idx:
			d = InstanceNormalization(axis=-1)(d)
		d = LeakyReLU(alpha=0.2)(d)

	# second-last block keeps the spatial size (default stride 1)
	d = Conv2D(512, (4,4), padding='same', kernel_initializer=weight_init)(d)
	d = InstanceNormalization(axis=-1)(d)
	d = LeakyReLU(alpha=0.2)(d)

	# one-channel patch of real/fake scores
	patch_out = Conv2D(1, (4,4), padding='same', kernel_initializer=weight_init)(d)
	model = Model(in_image, patch_out)
	model.compile(loss='mse', optimizer=Adam(lr=0.0002, beta_1=0.5), loss_weights=[0.5])
	return model
    def downsample(self, x, channels, i_norm=False, fm=None):
        """Residual downsampling block (similar to BigGAN): two 3x3
        spectral-norm convs plus average pooling on the main path, a 1x1
        conv plus pooling on the shortcut, summed at the end.

        i_norm: insert InstanceNormalization after each conv (generator use).
        fm:     optional name for the first ReLU layer.
        """
        skip = x

        out = ReLU(name=fm)(x)
        # NOTE(review): this first conv uses the default initializer while the
        # later ones use 'he_normal' — possibly unintentional; left as-is.
        out = ConvSN2D(channels, (3, 3), strides=(1, 1), padding='same')(out)
        if i_norm:
            out = InstanceNormalization(axis=-1)(out)  # use in generator
        out = ReLU()(out)
        out = ConvSN2D(channels, (3, 3), strides=(1, 1), padding='same',
                       kernel_initializer='he_normal')(out)
        if i_norm:
            out = InstanceNormalization(axis=-1)(out)  # use in generator
        out = AveragePooling2D(pool_size=(2, 2))(out)

        # shortcut path: 1x1 projection, then the same 2x downsampling
        skip = ConvSN2D(channels, (1, 1), padding='same',
                        kernel_initializer='he_normal')(skip)
        skip = AveragePooling2D(pool_size=(2, 2))(skip)

        return Add()([out, skip])
Example #6
0
    def build_model(self, input_shape, nb_classes):
        """Build and compile a 1-D conv classifier with a soft-attention head.

        Three Conv1D blocks (128/256/512 filters, growing kernel sizes) with
        instance normalisation, PReLU and dropout; the 512-channel output of
        block 3 is split in half — one half becomes softmax attention weights
        over the other — before a dense + softmax classifier.

        Side effect: registers a ModelCheckpoint (monitoring training loss,
        writing to self.output_directory + 'best_model.hdf5') in
        self.callbacks.

        Returns the compiled keras Model.
        """
        input_layer = keras.layers.Input(input_shape)

        # conv block -1
        conv1 = keras.layers.Conv1D(filters=128,
                                    kernel_size=5,
                                    strides=1,
                                    padding='same')(input_layer)
        conv1 = InstanceNormalization()(conv1)
        conv1 = keras.layers.PReLU(shared_axes=[1])(conv1)
        conv1 = keras.layers.Dropout(rate=0.2)(conv1)
        conv1 = keras.layers.MaxPooling1D(pool_size=2)(conv1)
        # conv block -2
        conv2 = keras.layers.Conv1D(filters=256,
                                    kernel_size=11,
                                    strides=1,
                                    padding='same')(conv1)
        conv2 = InstanceNormalization()(conv2)
        conv2 = keras.layers.PReLU(shared_axes=[1])(conv2)
        conv2 = keras.layers.Dropout(rate=0.2)(conv2)
        conv2 = keras.layers.MaxPooling1D(pool_size=2)(conv2)
        # conv block -3
        conv3 = keras.layers.Conv1D(filters=512,
                                    kernel_size=21,
                                    strides=1,
                                    padding='same')(conv2)
        conv3 = InstanceNormalization()(conv3)
        conv3 = keras.layers.PReLU(shared_axes=[1])(conv3)
        conv3 = keras.layers.Dropout(rate=0.2)(conv3)
        # split for attention: first 256 channels are data, last 256 become
        # attention logits (relies on conv3 having 512 filters)
        attention_data = keras.layers.Lambda(lambda x: x[:, :, :256])(conv3)
        attention_softmax = keras.layers.Lambda(lambda x: x[:, :, 256:])(conv3)
        # attention mechanism
        attention_softmax = keras.layers.Softmax()(attention_softmax)
        multiply_layer = keras.layers.Multiply()(
            [attention_softmax, attention_data])
        # last layer
        dense_layer = keras.layers.Dense(units=256,
                                         activation='sigmoid')(multiply_layer)
        dense_layer = InstanceNormalization()(dense_layer)
        # output layer
        flatten_layer = keras.layers.Flatten()(dense_layer)
        output_layer = keras.layers.Dense(units=nb_classes,
                                          activation='softmax')(flatten_layer)

        model = keras.models.Model(inputs=input_layer, outputs=output_layer)

        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizers.Adam(0.00001),
                      metrics=['accuracy'])

        file_path = self.output_directory + 'best_model.hdf5'

        # monitor='loss' checkpoints on training loss (no validation here)
        model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path,
                                                           monitor='loss',
                                                           save_best_only=True)

        self.callbacks = [model_checkpoint]

        return model
Example #7
0
 def downsample2d(layer_inputs, filters, kernel_size, strides):
     """Strided conv downsampling with a gated linear unit: two parallel
     conv + instance-norm paths over the same input, one gating the other."""
     content = Conv2D(filters, kernel_size, strides=strides, padding='same')(layer_inputs)
     content = InstanceNormalization(epsilon=1e-06)(content)
     gates = Conv2D(filters, kernel_size, strides=strides, padding='same')(layer_inputs)
     gates = InstanceNormalization(epsilon=1e-06)(gates)
     return gated_liner_units(content, gates)
    def build_discriminator(self):
        """Build the discriminator: a shared Sequential conv feature
        extractor feeding two heads — a PatchGAN validity map and a
        (num_classes + 1)-way softmax label (the extra class presumably
        standing for "fake" — confirm against the training loop).

        Returns:
            Model mapping an image to [validity, label].
        """
        # (fix) the original created an unused `img = Input(...)` here that
        # was immediately shadowed below; the dead tensor has been removed.
        model = Sequential()
        model.add(
            Conv2D(64,
                   kernel_size=4,
                   strides=2,
                   padding='same',
                   input_shape=self.img_shape))
        # NOTE(review): alpha=0.8 differs from the 0.2 used everywhere else
        # in this file — possibly a typo; left unchanged to preserve behavior.
        model.add(LeakyReLU(alpha=0.8))
        model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())

        model.summary()

        img = Input(shape=self.img_shape)
        features = model(img)

        # patch validity head
        validity = Conv2D(1, kernel_size=4, strides=1,
                          padding='same')(features)

        # auxiliary classification head
        label = Flatten()(features)
        label = Dense(self.num_classes + 1, activation="softmax")(label)

        return Model(img, [validity, label])
Example #9
0
def ResPath(filters, length, inp):
    """Residual path: repeated [1x1 conv shortcut + 3x3 conv, add, relu,
    instance norm] stages.

    The original hand-unrolled the first stage and then looped length - 1
    more times; this folds the duplicate body into a single loop.  max(1,
    length) preserves the original behaviour exactly: one stage always ran,
    even for length <= 1.

    Args:
        filters: filter count for both convs of every stage.
        length: number of stages (minimum one is always applied).
        inp: input tensor.
    Returns:
        the output tensor of the final stage.
    """
    out = inp
    for _ in range(max(1, length)):
        # 1x1 projection shortcut computed from the stage input
        shortcut = conv2d_bn(out,
                             filters,
                             1,
                             1,
                             activation=None,
                             padding='same')
        out = conv2d_bn(out, filters, 3, 3, activation='relu', padding='same')
        out = add([shortcut, out])
        out = Activation('relu')(out)
        out = InstanceNormalization()(out)
    return out
        def adain_resblk(X, f, filters, stage, s):
            """Bottleneck residual block: 1x1 -> fxf -> 1x1 convs with
            instance normalisation + ReLU after the first two, and a 1x1
            strided conv on the shortcut so shapes match before the Add.

            Args:
                X: input tensor.
                f: kernel size of the middle conv.
                filters: (F1, F2, F3) filter counts for the three convs.
                stage: string used to build unique layer names.
                s: stride of the first conv and of the shortcut conv.
            """
            # (fix) leftover debug print() calls removed
            conv_name_base = 'adain_resblk_' + stage
            F1, F2, F3 = filters

            X_shortcut = X

            X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a',
                       kernel_initializer=glorot_uniform(seed=0))(X)
            # beta_2 / gamma regularizers come from the enclosing scope
            X = InstanceNormalization(beta_regularizer=beta_2, gamma_regularizer=gamma)(X)
            X = ReLU()(X)

            X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b',
                       kernel_initializer=glorot_uniform(seed=0))(X)
            X = InstanceNormalization(beta_regularizer=beta_2, gamma_regularizer=gamma)(X)
            X = ReLU()(X)

            # last conv of the main path: no norm/activation before the merge
            X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c',
                       kernel_initializer=glorot_uniform(seed=0))(X)

            X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid',
                                name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)

            X = Add()([X, X_shortcut])
            X = ReLU()(X)

            return X
Example #11
0
    def residual_block(self, previous_layer):
        """Two reflection-padded 3x3 conv + instance-norm + relu stages,
        added back onto the input (identity skip).  Filter count matches the
        input's channel dimension so the Add is shape-compatible."""
        n_filters = int(previous_layer.shape[-1])

        def _conv_stage(tensor):
            # reflection padding keeps the 'valid' conv size-preserving
            tensor = ReflectionPadding2D((1, 1))(tensor)
            tensor = Conv2D(filters=n_filters,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='valid')(tensor)
            # training=True is forced on every call — presumably deliberate;
            # confirm against how the model is used at inference time
            tensor = InstanceNormalization(axis=3, epsilon=1e-5,
                                           center=True)(tensor, training=True)
            return Activation('relu')(tensor)

        out = _conv_stage(previous_layer)
        out = _conv_stage(out)

        return add([out, previous_layer])
Example #12
0
    def build_generator(self):
        """Generator network (StarGAN-style).

        Tiles the c_dim domain-label vector across every spatial location,
        concatenates it with the input image, then applies a 7x7 stem conv,
        two stride-2 down-sampling convs, g_repeat_num residual blocks, two
        up-sampling stages, and a final 7x7 tanh conv producing a 3-channel
        image.

        Returns:
            Model mapping [image, label] -> translated image.
        """
        # Input tensors
        inp_c = Input(shape=(self.c_dim, ))
        inp_img = Input(shape=(self.image_size, self.image_size, 3))

        # Replicate spatially and concatenate domain information:
        # repeat the label image_size**2 times, fold into an HxWxC map
        c = Lambda(lambda x: K.repeat(x, self.image_size**2))(inp_c)
        c = Reshape((self.image_size, self.image_size, self.c_dim))(c)
        x = Concatenate()([inp_img, c])

        # First Conv2D
        x = Conv2D(filters=self.g_conv_dim,
                   kernel_size=7,
                   strides=1,
                   padding='same',
                   use_bias=False)(x)
        x = InstanceNormalization(axis=-1)(x)
        x = ReLU()(x)

        # Down-sampling layers: channels double, spatial size halves, twice
        curr_dim = self.g_conv_dim
        for i in range(2):
            x = ZeroPadding2D(padding=1)(x)
            x = Conv2D(filters=curr_dim * 2,
                       kernel_size=4,
                       strides=2,
                       padding='valid',
                       use_bias=False)(x)
            x = InstanceNormalization(axis=-1)(x)
            x = ReLU()(x)
            curr_dim = curr_dim * 2

        # Bottleneck layers.
        for i in range(self.g_repeat_num):
            x = self.ResidualBlock(x, curr_dim)

        # Up-sampling layers: mirror of the down-sampling path
        for i in range(2):
            x = UpSampling2D(size=2)(x)
            x = Conv2D(filters=curr_dim // 2,
                       kernel_size=4,
                       strides=1,
                       padding='same',
                       use_bias=False)(x)
            x = InstanceNormalization(axis=-1)(x)
            x = ReLU()(x)
            curr_dim = curr_dim // 2

        # Last Conv2D: tanh keeps outputs in [-1, 1]
        x = ZeroPadding2D(padding=3)(x)
        out = Conv2D(filters=3,
                     kernel_size=7,
                     strides=1,
                     padding='valid',
                     activation='tanh',
                     use_bias=False)(x)

        return Model(inputs=[inp_img, inp_c], outputs=out)
 def residual_block(layer_input, filters):
     """Standard ResNet-style block: two 3x3 convs with instance norm and a
     relu between them, summed with the input."""
     out = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)
     out = InstanceNormalization()(out)
     out = Activation('relu')(out)
     out = Conv2D(filters, kernel_size=3, strides=1, padding='same')(out)
     out = InstanceNormalization()(out)
     return Add()([out, layer_input])
Example #14
0
 def upsample2d(layer_inputs, filters, kernel_size, strides):
     """Conv + sub-pixel (x2) upsampling with a gated linear unit: two
     parallel conv / pixel-shuffle / instance-norm paths, one gating the
     other."""
     content = Conv2D(filters, kernel_size, strides=strides, padding='same')(layer_inputs)
     content = SubPixelUpscaling(scale_factor=2)(content)
     content = InstanceNormalization(epsilon=1e-06)(content)
     gates = Conv2D(filters, kernel_size, strides=strides, padding='same')(layer_inputs)
     gates = SubPixelUpscaling(scale_factor=2)(gates)
     gates = InstanceNormalization(epsilon=1e-06)(gates)
     return gated_liner_units(content, gates)
Example #15
0
 def ResidualBlock(self, inp, dim_out):
     """Residual Block with instance normalization: two zero-padded 3x3
     convs (no bias), ReLU after the first, identity skip at the end."""
     out = ZeroPadding2D(padding=1)(inp)
     out = Conv2D(filters=dim_out, kernel_size=3, strides=1, padding='valid', use_bias=False)(out)
     out = InstanceNormalization(axis=-1)(out)
     out = ReLU()(out)
     out = ZeroPadding2D(padding=1)(out)
     out = Conv2D(filters=dim_out, kernel_size=3, strides=1, padding='valid', use_bias=False)(out)
     out = InstanceNormalization(axis=-1)(out)
     return Add()([inp, out])
Example #16
0
 def residual1d(layer_inputs, filters, kernel_size, strides):
     """1-D gated residual block: GLU of two parallel conv paths, projected
     down to filters // 2 channels and added back onto the input."""
     content = Conv1D(filters, kernel_size, strides=strides, padding='same')(layer_inputs)
     content = InstanceNormalization(epsilon=1e-06)(content)
     gates = Conv1D(filters, kernel_size, strides=strides, padding='same')(layer_inputs)
     gates = InstanceNormalization(epsilon=1e-06)(gates)
     gated = gated_liner_units(content, gates)
     # project so the skip connection's channel counts line up
     projected = Conv1D(filters//2, kernel_size, strides=strides, padding='same')(gated)
     projected = InstanceNormalization(epsilon=1e-06)(projected)
     return Add()([layer_inputs, projected])
Example #17
0
 def decoder(layer_input, skip_input, channel, last_block=False):
     """U-Net style decoder stage: concatenate the skip connection (unless
     this is the last block), instance-normalise, then 1x1 and 3x3 relu
     convs."""
     if last_block:
         normed = InstanceNormalization()(layer_input)
     else:
         merged = Concatenate(axis=-1)([layer_input, skip_input])
         normed = InstanceNormalization()(merged)
     squeezed = Conv2D(channel, 1, activation='relu', padding='same')(normed)
     squeezed = InstanceNormalization()(squeezed)
     return Conv2D(channel, 3, activation='relu', padding='same')(squeezed)
Example #18
0
    def get_chowder(self):
        """Build and compile a CHOWDER-style MIL classifier over local features.

        Three l2-regularised Dense / instance-norm / LeakyReLU / Dropout
        stages; max-pooled global features from stages 1 and 2 are tiled back
        onto stage 3; a softmax attention over instances weights the raw
        input before the final self.n_class softmax.

        Side effect: stores the compiled model in self.model (returns None).
        """

        # NOTE(review): `local_feature_size` is used bare here and at the
        # Expand_dims call, but as `self.local_feature_size` in the pooling
        # below — confirm which is intended (a module-level constant may
        # shadow the attribute).
        data_input = Input(shape=(
            local_feature_size[0],
            local_feature_size[1],
        ),
                           dtype='float32',
                           name='input')
        fc1 = Dense(128, kernel_regularizer=l2(0.005), name='fc1')(data_input)
        fc1 = InstanceNormalization(axis=-1)(fc1)
        fc1 = LeakyReLU(alpha=0.2)(fc1)
        fc1 = Dropout(rate=0.5)(fc1)

        fc2 = Dense(64, kernel_regularizer=l2(0.005), name='fc2')(fc1)
        fc2 = InstanceNormalization(axis=-1)(fc2)
        fc2 = LeakyReLU(alpha=0.2)(fc2)
        fc2 = Dropout(rate=0.5)(fc2)
        fc3 = Dense(64, kernel_regularizer=l2(0.005), name='fc3')(fc2)
        fc3 = InstanceNormalization(axis=-1)(fc3)
        fc3 = LeakyReLU(alpha=0.2)(fc3)
        fc3 = Dropout(rate=0.5)(fc3)

        # global features: max-pool over the whole instance axis
        gf1 = MaxPooling1D(pool_size=self.local_feature_size[0],
                           name='gf1')(fc1)
        gf2 = MaxPooling1D(pool_size=self.local_feature_size[0],
                           name='gf2')(fc2)

        gf = Concatenate(name='concatenate')([gf1, gf2])
        # Expand_dims / ADD appear to be project-defined layers — presumably
        # broadcast and sum-over-instances helpers; verify in the project.
        gf = Expand_dims(dims=local_feature_size[0])(gf)
        feature_combine = Concatenate(axis=-1,
                                      name='concatenate_gl')([fc3, gf])
        weights = Dense(1, kernel_regularizer=l2(0.005),
                        name='weights')(feature_combine)
        # softmax over the instance axis -> per-instance attention weights
        weights = Softmax(axis=-2, name='weight')(weights)
        feature_combine = Multiply()([weights, data_input])
        mp = ADD(name='mip')(feature_combine)

        mp = LeakyReLU(alpha=0.2)(mp)
        mp = Dropout(rate=0.1)(mp)
        mp = Flatten()(mp)

        output = Dense(self.n_class,
                       kernel_regularizer=l2(0.005),
                       activation='softmax',
                       name='output')(mp)
        model = Model(inputs=[data_input], outputs=[output])
        adam = optimizers.Adam(lr=self.lr,
                               beta_1=0.9,
                               beta_2=0.999,
                               epsilon=1e-08,
                               decay=0.001)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=['accuracy'])
        self.model = model
Example #19
0
    def build_generator(self, name=None):
        """ResNet-style generator: 7x7 stem conv, two stride-2 down-sampling
        convs, nine residual blocks (1 + 8 in the loop), two up-sampling
        stages, and a final tanh conv added onto the input image (global
        residual).

        Args:
            name: optional model name.
        Returns:
            the uncompiled keras Model; also prints its summary.
        """
        def residual_block(layer_input, filters):
            """Residual block described in paper"""
            d = Conv2D(filters, kernel_size=3, strides=1,
                       padding='same')(layer_input)
            d = InstanceNormalization()(d)
            d = Activation('relu')(d)
            d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
            d = InstanceNormalization()(d)
            d = Add()([d, layer_input])
            return d

        # Low resolution image input
        # NOTE(review): named "low resolution" but shaped by self.hr_shape —
        # confirm which shape the caller feeds in.
        img_lr = Input(shape=self.hr_shape)

        # with tf.device('/gpu:0') :
        # Pre-residual block
        #c1 = Conv2D(64, kernel_size=9, strides=1, padding='same')(img_lr)
        c1 = Conv2D(64, kernel_size=7, strides=1, padding='same')(img_lr)
        c1 = InstanceNormalization()(c1)
        c1 = Activation('relu')(c1)

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            c1 = Conv2D(filters=64 * mult * 2,
                        kernel_size=(3, 3),
                        strides=2,
                        padding='same')(c1)
            c1 = InstanceNormalization()(c1)
            c1 = Activation('relu')(c1)

        # Propogate through residual blocks
        r = residual_block(c1, self.gf * (n_downsampling**2))
        for _ in range(8):
            r = residual_block(r, self.gf * (n_downsampling**2))

        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            r = UpSampling2D()(r)
            r = Conv2D(filters=int(64 * mult / 2),
                       kernel_size=(3, 3),
                       padding='same')(r)
            r = InstanceNormalization()(r)
            r = Activation('relu')(r)

            # Post-residual block
        c2 = Conv2D(self.channels, kernel_size=7, strides=1, padding='same')(r)
        c2 = Activation('tanh')(c2)
        # global skip: tanh output added to the input image
        c2 = Add()([c2, img_lr])
        model = Model(img_lr, [c2], name=name)

        model.summary()
        return model
def build_generator():
    """
    Create a CycleGAN-style generator for 128x128x3 inputs: a 7x7 stem conv,
    two stride-2 down-sampling convs, 6 residual blocks, two transposed-conv
    up-sampling blocks, and a final 7x7 conv with tanh output.

    NOTE(review): InstanceNormalization(axis=1) normalises along a spatial
    axis for these channels-last inputs; axis=-1 (channels) is the usual
    choice — confirm this is intentional.
    """
    input_shape = (128, 128, 3)
    residual_blocks = 6
    input_layer = Input(shape=input_shape)

    # First Convolution block
    x = Conv2D(filters=32, kernel_size=7, strides=1,
               padding="same")(input_layer)
    x = InstanceNormalization(axis=1)(x)
    x = Activation("relu")(x)

    # 2nd Convolution block
    x = Conv2D(filters=64, kernel_size=3, strides=2, padding="same")(x)
    x = InstanceNormalization(axis=1)(x)
    x = Activation("relu")(x)

    # 3rd Convolution block
    x = Conv2D(filters=128, kernel_size=3, strides=2, padding="same")(x)
    x = InstanceNormalization(axis=1)(x)
    x = Activation("relu")(x)

    # Residual blocks
    for _ in range(residual_blocks):
        x = residual_block(x)

    # Upsampling blocks

    # 1st Upsampling block
    x = Conv2DTranspose(filters=64,
                        kernel_size=3,
                        strides=2,
                        padding='same',
                        use_bias=False)(x)
    x = InstanceNormalization(axis=1)(x)
    x = Activation("relu")(x)

    # 2nd Upsampling block
    x = Conv2DTranspose(filters=32,
                        kernel_size=3,
                        strides=2,
                        padding='same',
                        use_bias=False)(x)
    x = InstanceNormalization(axis=1)(x)
    x = Activation("relu")(x)

    # Last Convolution layer: tanh keeps outputs in [-1, 1]
    x = Conv2D(filters=3, kernel_size=7, strides=1, padding="same")(x)
    output = Activation('tanh')(x)

    model = Model(inputs=[input_layer], outputs=[output])
    return model
Example #21
0
def define_discriminator(image_shape, encoder):
    """Conditional PatchGAN discriminator.

    A frozen `encoder` embeds a second input image into a label vector that
    is tiled spatially and concatenated onto the first conv block's feature
    map; the remaining C128-C512 blocks and a 1-filter conv produce a patch
    map of real/fake scores.  Compiled with mse (LSGAN) loss.

    NOTE(review): the K.tile to [1, 32, 32, 1] matches the feature map after
    a single stride-2 conv only when image_shape is 64x64 — confirm.
    """
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # the encoder only supplies the label; it must not train with D
    encoder.trainable = False
    # source image input
    in_image = Input(shape=image_shape)
    in_image_t = Input(shape=image_shape)
    # C64
    d = Conv2D(64, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(in_image)
    d = LeakyReLU(alpha=0.01)(d)

    # embed the conditioning image, add two singleton spatial dims, then
    # tile the vector across the 32x32 feature map
    label = encoder(in_image_t)
    label = Lambda(lambda x: K.expand_dims(x, axis=1))(label)
    label = Lambda(lambda x: K.expand_dims(x, axis=1))(label)
    label = Lambda(lambda x: K.tile(x, [1, 32, 32, 1]))(label)
    d = Concatenate(axis=3)([d, label])
    # C128
    d = Conv2D(128, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(d)
    d = InstanceNormalization(axis=-1)(d)
    d = LeakyReLU(alpha=0.01)(d)
    # C256
    d = Conv2D(256, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(d)
    d = InstanceNormalization(axis=-1)(d)
    d = LeakyReLU(alpha=0.01)(d)
    # C512
    d = Conv2D(512, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init)(d)
    d = InstanceNormalization(axis=-1)(d)
    d = LeakyReLU(alpha=0.01)(d)
    # second last output layer
    d = Conv2D(512, (4, 4), padding='same', kernel_initializer=init)(d)
    d = InstanceNormalization(axis=-1)(d)
    d = LeakyReLU(alpha=0.01)(d)
    # patch output
    #d = Flatten()(d) Dense(1, activation="sigmoid")(d)
    patch_out = Conv2D(1, (4, 4), padding='same', kernel_initializer=init)(d)
    # define model
    model = Model([in_image, in_image_t], patch_out)
    # compile model
    model.compile(loss='mse',
                  optimizer=Adam(lr=0.0002, beta_1=0.5),
                  loss_weights=[1])
    return model
Example #22
0
        def residual(layer_input, filters):
            """Reflection-padded residual block with affine-free instance
            normalisation; output = input + conv branch."""
            branch = ReflectionPadding2D(padding =(1,1))(layer_input)
            branch = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='valid', kernel_initializer=self.weight_init)(branch)
            branch = InstanceNormalization(axis=-1, center=False, scale=False)(branch)
            branch = Activation('relu')(branch)

            branch = ReflectionPadding2D(padding =(1,1))(branch)
            branch = Conv2D(filters, kernel_size=(3, 3), strides=1, padding='valid', kernel_initializer=self.weight_init)(branch)
            branch = InstanceNormalization(axis=-1, center=False, scale=False)(branch)

            return add([layer_input, branch])
Example #23
0
File: cyclegan.py  Project: beskyfil/mvi
def resnet_block(n_filters, input_layer):
	"""CycleGAN-style 'resnet' block: two 3x3 convs with instance norm and a
	relu between them, merged with the input by channel-wise concatenation
	(not the usual residual addition)."""
	weight_init = RandomNormal(stddev=0.02)
	g = Conv2D(n_filters, (3,3), padding='same', kernel_initializer=weight_init)(input_layer)
	g = InstanceNormalization(axis=-1)(g)
	g = Activation('relu')(g)
	g = Conv2D(n_filters, (3,3), padding='same', kernel_initializer=weight_init)(g)
	g = InstanceNormalization(axis=-1)(g)
	return Concatenate()([g, input_layer])
Example #24
0
def create_discriminator(dim=256, depht=32, name=""):
    """
    Return a new discriminator of the kind used throughout this project.

    Five Conv2D (4x4) / InstanceNormalization / LeakyReLU stages — the first
    four with stride 2 (halving the spatial size each time), the fifth with
    stride 1 — followed by a final 1-filter conv producing a
    dim/16 x dim/16 x 1 score map.  Compiled with mse (LSGAN) loss at half
    weight to temper discriminator updates.
    """
    D = keras.models.Sequential(name="d_{}".format(name))
    # (filters, stride) per stage; stage 0 also fixes the input shape
    stage_specs = [(depht, 2), (2 * depht, 2), (4 * depht, 2),
                   (8 * depht, 2), (8 * depht, 1)]
    for stage_idx, (n_filters, stride) in enumerate(stage_specs):
        extra = {"input_shape": (dim, dim, 3)} if stage_idx == 0 else {}
        D.add(
            keras.layers.Conv2D(n_filters, (4, 4),
                                strides=(stride, stride),
                                padding="same",
                                **extra))
        # instance norm keeps activations bounded and the gradient tame
        D.add(InstanceNormalization(axis=-1))
        D.add(keras.layers.LeakyReLU(alpha=0.2))

    # final conv: one raw score per patch
    D.add(keras.layers.Conv2D(1, (4, 4), strides=(1, 1), padding="same"))

    print("{} trainable before compile : {}".format(D.name, D.trainable))
    D.compile(loss="mse",
              optimizer=keras.optimizers.Adam(lr=0.0002, beta_1=0.5),
              loss_weights=[0.5],
              metrics=["accuracy"])
    return D
Example #25
0
 def define_discriminator(self):
     '''Build and compile the PatchGAN discriminator.

     Four stride-2 Conv2D blocks (64/128/256/512 filters, LeakyReLU,
     instance norm from the second block on), a stride-1 512 block, and a
     final 1-filter conv producing a patch map of real/fake scores.
     Compiled with mse (LSGAN) loss at half weight.
     '''
     # weight initialization
     init = keras.initializers.RandomNormal(stddev=0.02)
     # source image input
     input_image = keras.Input(shape=self.image_shape)
     # C64
     d_layer = keras.layers.Conv2D(64, (4, 4),
                                   strides=(2, 2),
                                   padding='same',
                                   kernel_initializer=init)(input_image)
     d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)
     # C128
     d_layer = keras.layers.Conv2D(128, (4, 4),
                                   strides=(2, 2),
                                   padding='same',
                                   kernel_initializer=init)(d_layer)
     d_layer = InstanceNormalization(axis=-1)(d_layer)
     d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)
     # C256
     d_layer = keras.layers.Conv2D(256, (4, 4),
                                   strides=(2, 2),
                                   padding='same',
                                   kernel_initializer=init)(d_layer)
     d_layer = InstanceNormalization(axis=-1)(d_layer)
     d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)
     # C512
     d_layer = keras.layers.Conv2D(512, (4, 4),
                                   strides=(2, 2),
                                   padding='same',
                                   kernel_initializer=init)(d_layer)
     d_layer = InstanceNormalization(axis=-1)(d_layer)
     d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)
     # second last output layer (stride 1: spatial size preserved)
     d_layer = keras.layers.Conv2D(512, (4, 4),
                                   padding='same',
                                   kernel_initializer=init)(d_layer)
     d_layer = InstanceNormalization(axis=-1)(d_layer)
     d_layer = keras.layers.LeakyReLU(alpha=0.2)(d_layer)
     # patch output (raw scores, no activation — LSGAN)
     patch_out = keras.layers.Conv2D(1, (4, 4),
                                     padding='same',
                                     kernel_initializer=init)(d_layer)
     # define model
     model = keras.Model(input_image, patch_out)
     # compile model
     model.compile(loss='mse',
                   optimizer=keras.optimizers.Adam(lr=0.0002, beta_1=0.5),
                   loss_weights=[0.5])
     return model
Example #26
0
def create_generator(dim=256, depht=32, n_resnet=9, name=""):
    """Create a typical generator used in what follows.

    The structure compresses the image through three conv stages, runs
    n_resnet resnet blocks in the middle, then expands back with transposed
    convs to a dim x dim x 3 tanh image.  (The original author notes this
    also tries an alternative notation for building networks.)
    """
    input_layer = keras.layers.Input(shape=(dim, dim, 3))

    #1] Conv; dim*dim*3 -> dim/2*dim/2*depht
    g = keras.layers.Conv2D(depht, (7, 7), strides=(2, 2),
                            padding="same")(input_layer)
    g = InstanceNormalization(axis=-1)(g)
    g = keras.layers.LeakyReLU(alpha=0.2)(g)

    #2] Conv; dim/2*dim/2*depht -> dim/4*dim/4*2*depht
    g = keras.layers.Conv2D(2 * depht, (3, 3), strides=(2, 2),
                            padding="same")(g)
    g = InstanceNormalization(axis=-1)(g)
    g = keras.layers.LeakyReLU(alpha=0.2)(g)

    #3] Conv; dim/4*dim/4*2*depht -> dim/8*dim/8*4*depht
    g = keras.layers.Conv2D(4 * depht, (3, 3), strides=(2, 2),
                            padding="same")(g)
    g = InstanceNormalization(axis=-1)(g)
    g = keras.layers.LeakyReLU(alpha=0.2)(g)

    # In the middle, add as many resnet blocks as requested
    for _ in range(n_resnet):
        g = create_resnet(n_filters=4 * depht, T=g)

    # Unroll back in the other direction
    #4] ConvT; dim/8*dim/8*4*depht -> dim/4*dim/4*2*depht
    g = keras.layers.Conv2DTranspose(2 * depht, (3, 3),
                                     strides=(2, 2),
                                     padding="same")(g)
    g = InstanceNormalization(axis=-1)(g)
    g = keras.layers.LeakyReLU(alpha=0.2)(g)

    #5] ConvT; dim/4*dim/4*2*depht -> dim/2*dim/2*depht
    # NOTE(review): this stage applies activation="relu" inside the ConvT
    # and then InstanceNormalization + LeakyReLU on top — unlike every other
    # stage; possibly unintentional, left unchanged.
    g = keras.layers.Conv2DTranspose(depht, (3, 3),
                                     strides=(2, 2),
                                     padding="same",
                                     activation="relu")(g)
    g = InstanceNormalization(axis=-1)(g)
    g = keras.layers.LeakyReLU(alpha=0.2)(g)

    #6] ConvT; dim/2*dim/2*depht -> dim*dim*3
    g = keras.layers.Conv2DTranspose(3, (7, 7), strides=(2, 2),
                                     padding="same")(g)
    g = keras.layers.Activation("tanh")(g)
    M = keras.Model(input_layer, g, name="gen_{}".format(name))
    return M
Example #27
0
def res_block(input_tensor, filters, w_l2=w_l2):
    """Residual block with post-add activation: two l2-regularised 3x3 convs
    (LeakyReLU + instance norm after the first), the skip added before a
    final LeakyReLU + instance norm pair."""
    branch = Conv2D(filters, kernel_size=3, kernel_regularizer=regularizers.l2(w_l2),
                    kernel_initializer=conv_init, use_bias=False, padding="same")(input_tensor)
    branch = LeakyReLU(alpha=0.2)(branch)
    branch = InstanceNormalization()(branch)
    branch = Conv2D(filters, kernel_size=3, kernel_regularizer=regularizers.l2(w_l2),
                    kernel_initializer=conv_init, use_bias=False, padding="same")(branch)

    merged = add([branch, input_tensor])
    merged = LeakyReLU(alpha=0.2)(merged)
    merged = InstanceNormalization()(merged)

    return merged
Example #28
0
    def Rk(self, layer_input, k):
        """Block Rk: two 3x3 convs with instance norm (relu after the
        first), merged with the input by channel-wise concatenation.
        Same padding is used instead of the reflection padding recommended
        in the paper, for simplicity."""
        out = Conv2D(filters=k, kernel_size=3, strides=1,
                     padding='same')(layer_input)
        out = InstanceNormalization(axis=-1)(out)
        out = Activation('relu')(out)

        out = Conv2D(filters=k, kernel_size=3, strides=1, padding='same')(out)
        out = InstanceNormalization(axis=-1)(out)

        # merge with the input channel-wise rather than adding
        return Concatenate()([out, layer_input])
Example #29
0
 def d_layer(layer_input, filters, f_size=4, normalization=True):
     """Discriminator downsampling layer: stride-2 conv + LeakyReLU, with
     optional instance normalisation after the activation."""
     out = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
     out = LeakyReLU(alpha=0.2)(out)
     return InstanceNormalization()(out) if normalization else out
 def conv2d(layer_input, filters, f_size=4):
     """Downsampling layer: stride-2 conv, LeakyReLU, instance norm."""
     out = Conv2D(filters, kernel_size=f_size, strides=2,
                  padding='same')(layer_input)
     out = LeakyReLU(alpha=0.2)(out)
     return InstanceNormalization()(out)