Esempio n. 1
0
def res_block(input,
              filters,
              kernel_size=(3, 3),
              strides=(1, 1),
              use_dropout=False):
    """
    Instantiate a Keras ResNet block using the functional API.

    Applies two (ReflectionPadding2D -> Conv2D -> BatchNormalization) stages,
    with a ReLU (and optional dropout) after the first stage, then adds the
    block input back onto the result as a residual connection.

    :param input: Input tensor. (NOTE: the parameter name shadows the builtin
        ``input``; kept for backward compatibility with keyword callers.)
    :param filters: Number of filters used by both convolutions.
    :param kernel_size: Kernel shape for both convolutions.
    :param strides: Stride shape for both convolutions. The final ``Add()``
        requires the output to keep the input's spatial size — with the
        (1, 1) reflection padding this holds for the default (1, 1) strides;
        other strides would break the residual sum. TODO confirm callers
        only use the default.
    :param use_dropout: If True, apply ``Dropout(0.5)`` after the first
        activation.
    :return: Output tensor of the residual block (a tensor, not a Model).
    """
    x = ReflectionPadding2D((1, 1))(input)
    x = Conv2D(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
    )(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    if use_dropout:
        x = Dropout(0.5)(x)
    x = ReflectionPadding2D((1, 1))(x)
    x = Conv2D(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
    )(x)
    x = BatchNormalization()(x)
    # Two convolution layers followed by a direct connection between input and output
    merged = Add()([input, x])
    return merged
Esempio n. 2
0
def generator_model():
    """Build generator architecture."""
    # ResNet-based generator with an input-to-output skip connection.
    image_input = Input(shape=image_shape)

    # Entry 7x7 convolution over a reflection-padded input.
    net = ReflectionPadding2D((3, 3))(image_input)
    net = Conv2D(filters=ngf, kernel_size=(7, 7), padding='valid')(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)

    # Downsampling stages: double the filter count at each step.
    n_downsampling = 2
    for step in range(n_downsampling):
        scale = 2 ** step
        net = Conv2D(filters=ngf * scale * 2,
                     kernel_size=(3, 3),
                     strides=2,
                     padding='same')(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)

    # Stack of ResNet blocks at the coarsest resolution.
    scale = 2 ** n_downsampling
    for _ in range(n_blocks_gen):
        net = res_block(net, ngf * scale, use_dropout=True)

    # Mirror upsampling stages: halve the filter count back down to 3 (RGB).
    for step in range(n_downsampling):
        scale = 2 ** (n_downsampling - step)
        net = Conv2DTranspose(filters=int(ngf * scale / 2),
                              kernel_size=(3, 3),
                              strides=2,
                              padding='same')(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)

    # Exit 7x7 convolution to output_nc channels, squashed into [-1, 1].
    net = ReflectionPadding2D((3, 3))(net)
    net = Conv2D(filters=output_nc, kernel_size=(7, 7), padding='valid')(net)
    net = Activation('tanh')(net)

    # Skip connection from input to output, then recenter into [-1, 1].
    merged = Add()([net, image_input])
    merged = Lambda(lambda z: z / 2)(merged)

    return Model(inputs=image_input, outputs=merged, name='Generator')
Esempio n. 3
0
def generator_model():
    """Build generator architecture."""
    # ResNet-based generator variant: 3 downsamplings, no batch norm on the
    # upsampling path, and no input-to-output skip connection.
    img_in = Input(shape=image_shape)

    # Entry 7x7 convolution over a reflection-padded input.
    h = ReflectionPadding2D((3, 3))(img_in)
    h = Conv2D(filters=ngf, kernel_size=(7, 7), padding='valid')(h)
    h = BatchNormalization()(h)
    h = Activation('relu')(h)

    # Downsampling stages: double the filter count at each step.
    n_downsampling = 3
    for level in range(n_downsampling):
        factor = 2 ** level
        h = Conv2D(filters=ngf * factor * 2,
                   kernel_size=(3, 3),
                   strides=2,
                   padding='same')(h)
        h = BatchNormalization()(h)
        h = Activation('relu')(h)

    # Stack of ResNet blocks at the coarsest resolution.
    factor = 2 ** n_downsampling
    for _ in range(n_blocks_gen):
        h = res_block(h, ngf * factor, use_dropout=True)

    # Mirror upsampling stages (batch norm deliberately omitted here).
    for level in range(n_downsampling):
        factor = 2 ** (n_downsampling - level)
        h = Conv2DTranspose(filters=int(ngf * factor / 2),
                            kernel_size=(3, 3),
                            strides=2,
                            padding='same')(h)
        h = Activation('relu')(h)

    # Exit 7x7 convolution to output_nc channels, squashed into [-1, 1].
    h = ReflectionPadding2D((3, 3))(h)
    h = Conv2D(filters=output_nc, kernel_size=(7, 7), padding='valid')(h)
    h = Activation('tanh')(h)

    model = Model(inputs=img_in, outputs=h, name='Generator')
    # Dump the architecture diagram alongside the script.
    plot_model(model, to_file='generator.png', show_shapes=True)
    return model
Esempio n. 4
0
    def generator_model():
        """Build the generator model (ResNet-based, with an input-to-output
        skip connection)."""
        src = Input(Config.input_shape_generator)

        # Entry 7x7 convolution over a reflection-padded input.
        feat = ReflectionPadding2D((3, 3))(src)
        print(feat.shape)
        feat = Conv2D(filters=Config.ngf, kernel_size=(7, 7), padding="valid")(feat)
        feat = BatchNormalization()(feat)
        feat = Activation("relu")(feat)

        # Downsampling stages: double the filter count at each step.
        n_downsampling = 2
        for depth in range(n_downsampling):
            mult = 2 ** depth
            feat = Conv2D(filters=Config.ngf * mult * 2,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same")(feat)
            feat = BatchNormalization()(feat)
            feat = Activation("relu")(feat)

        # Stack of ResNet blocks at the coarsest resolution.
        mult = 2 ** n_downsampling
        for _ in range(Config.n_blocks_gen):
            feat = res_block(feat, Config.ngf * mult, use_dropout=True)

        # Mirror upsampling stages: halve the filter count at each step.
        for depth in range(n_downsampling):
            mult = 2 ** (n_downsampling - depth)
            feat = Conv2DTranspose(filters=int(Config.ngf * mult / 2),
                                   kernel_size=(3, 3),
                                   strides=2,
                                   padding="same")(feat)
            feat = BatchNormalization()(feat)
            feat = Activation("relu")(feat)

        # Exit 7x7 convolution to output_nc channels, squashed into [-1, 1].
        feat = ReflectionPadding2D(padding=(3, 3))(feat)
        feat = Conv2D(filters=Config.output_nc,
                      kernel_size=(7, 7),
                      padding="valid")(feat)
        feat = Activation("tanh")(feat)

        # Skip connection from input to output, then recenter into [-1, 1].
        outputs = Add()([src, feat])
        outputs = Lambda(lambda z: z / 2)(outputs)
        print("generator : ", outputs.shape)
        return Model(inputs=src, outputs=outputs, name="Generator")
Esempio n. 5
0
def generator_model(args, inputs, istrain, reuse):
    """Build generator architecture.

    TF1-style generator: downsampling convolutions (plain strided conv, or
    conv + max-pooling when ``args.max_pooling`` is set), a stack of ResNet
    blocks, then a 2-channel "wrap" head that is flattened through a dense
    layer, reshaped/tiled back up, bicubically resized and fed to the
    ``WarpST_one`` warping layer together with the original input.

    :param args: Options object; this function reads ``args.max_pooling``
        and ``args.final_layer``.
    :param inputs: Input tensor — the comment below says shape
        [bn, 256, 256, 1]; TODO confirm at the caller.
    :param istrain: Training flag forwarded to every ``batch_norm`` call.
    :param reuse: Variable-scope reuse flag for ``tf.variable_scope``.
    :return: Tuple ``(outputs, wrap[:, :, 0, :])`` — the warped output and a
        slice of the (tiled) warp field.
    """
    # inputs: tensor with shape [bn, 256,256, 1]
    #    inputs = Input(shape=input_shape_generator)
    with tf.variable_scope('gen_', reuse=reuse):
        # Entry 7x7 convolution over a reflection-padded input.
        x = ReflectionPadding2D((3, 3))(inputs)
        x = Conv2D(filters=ngf, kernel_size=(7, 7), padding='valid')(x)
        x = batch_norm(x, "bn1", is_train=istrain)
        x = Activation('relu')(x)

        #        x = MaxPooling2D((2, 2), padding='same')(x)e')(x)
        #        x = Conv2D(filters=ngf, kernel_size=(7,7), padding='same')(x)
        #        x = batch_norm(x, "bn2", is_train=istrain)
        #        x = Activation('relu')(x)

        # Downsampling stages: double the filter count at each step.
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            #            x = ReflectionPadding2D((2, 2))(x)
            # NOTE(review): '== False' comparison kept as-is; 'not args.max_pooling'
            # would also treat None/0 as False, which may not be intended here.
            if args.max_pooling == False:
                # Strided conv downsampling. NOTE(review): padding='valid' here
                # (unlike the 'same' used by the sibling generators) shrinks the
                # spatial size by more than the stride alone — confirm intended.
                x = Conv2D(filters=ngf * mult * 2,
                           kernel_size=(3, 3),
                           strides=2,
                           padding='valid')(x)
            else:
                # Unstrided conv followed by 2x2 max-pooling instead.
                x = Conv2D(filters=ngf * mult * 2,
                           kernel_size=(3, 3),
                           strides=1,
                           padding='valid')(x)
                x = MaxPooling2D((2, 2), padding='valid')(x)
#            x = BatchNormalization()(x, training=istrain)
            x = batch_norm(x, "down_bn_" + str(i), is_train=istrain)
            tf.summary.histogram('before_active', x)
            x = Activation('relu')(x)
            tf.summary.histogram('after_activate', x)
        # Stack of ResNet blocks at the coarsest resolution.
        mult = 2**n_downsampling
        for i in range(n_blocks_gen):
            x = res_block(x, ngf * mult, use_dropout=True)


#        for i in range(n_downsampling):
#            mult = 2**(n_downsampling - i)
#            x = UpSampling2D()(x)
#            x = Conv2D(filters=int(ngf * mult / 2),kernel_size=(3,3),padding='same')(x)
##            x = Conv2DTranspose(filters=int(ngf * mult / 2), kernel_size=(3, 3), strides=2, padding='same')(x)
##            x = BatchNormalization()(x, training=istrain)
#            x = batch_norm(x, "up_bn_"+str(i), is_train=istrain)
#            x = LeakyReLU(alpha=0.3)(x)

        # 2-channel warp-field head.
        x = Conv2D(filters=2, kernel_size=(5, 5), padding='same')(x)
        x = batch_norm(x, "final", is_train=istrain)
        wrap = Activation('sigmoid')(x)
        # Rescale the sigmoid output from (0, 1) to (-4, 4).
        wrap = tf.multiply(tf.add(wrap, -0.5), 8)
        # dense layer
        dense = tf.layers.flatten(wrap)
        output_size = 128
        output_size = args.final_layer  # we use the args value here to decide the final layer number
        #        output_size1 = 16
        dense_out = tf.layers.dense(inputs=dense, units=output_size * 2)
        #        dense_out1 = tf.layers.dense(inputs=dense_out, units=output_size1*2)
        # Reshape the dense output into [bn, output_size, 2] (x/y components).
        x_mean = tf.reshape(dense_out, [-1, output_size, 2])
        #        x_mean = Conv2D(filters=2, kernel_size=(1,256), padding='valid')(wrap)

        #        x_layer = wrap[...,0]
        #        x_mean = tf.reduce_max(wrap, axis=2)
        # Tile the 1D displacement across a new axis, then resize to imsize.
        x_mean = tf.expand_dims(x_mean, 2)
        wrap = tf.tile(x_mean, multiples=[1, 1, output_size, 1])
        wrap = bicubic_interp_2d(wrap, imsize)
        # Warp the original input by the predicted field. NOTE(review): the
        # random name string presumably avoids TF name collisions — confirm.
        outputs = Lambda(WarpST_one,
                         arguments={
                             'inputs': inputs,
                             'name': str(random.random())
                         })(wrap)
        return outputs, wrap[:, :, 0, :]