Example 1
def Pelee(net, from_layer='data', use_batchnorm=False):
    PeleeNetBody(net, from_layer)
    add_extra_layers_pelee(net, use_batchnorm=use_batchnorm, prefix='ext1/fe')
    fpn_block(net)
    raw_source_layers = ['p1', 'p2', 'p3', 'p4', 'ext1/fe2_2', 'ext1/fe3_2']
    # add_res_prediction_layers
    last_base_layer = 'fpn_tb'
    for i, from_layer in enumerate(raw_source_layers):
        out_layer = '{}/ext/pm{}'.format(last_base_layer, i + 1)
        res_block(net, from_layer, 256, out_layer, stride=1, use_bn=True)

    return net
Example 2
def VGG_RUN(net, from_layer='data', use_batchnorm=False):

    VGGNetBody(net, from_layer=from_layer, fully_conv=True, reduced=True, dilated=True, dropout=False)

    add_extra_layers_default(net, use_batchnorm=False)

    # add_res_prediction_layers
    raw_source_layers = VGG_SSD.mbox_source_layers
    for i in range(len(raw_source_layers)):
        name = 'ext/pm{}'.format(i + 1)
        res_block(net, raw_source_layers[i], 256, name, stride=1, use_bn=True)

    return net
Example 3
def Pelee(net, from_layer='data', use_batchnorm=False):
    PeleeNetBody(net, from_layer)
    add_extra_layers_pelee(net, use_batchnorm=use_batchnorm, prefix='ext1/fe')

    raw_source_layers = ['stage3_tb', 'stage4_tb', 'ext1/fe1_2', 'ext1/fe2_2', 'ext1/fe3_2']

    # add_res_prediction_layers
    last_base_layer = 'stage4_tb'
    for i, from_layer in enumerate(raw_source_layers):
        out_layer = '{}/ext/pm{}'.format(last_base_layer, i+2)
        res_block(net, from_layer, 256, out_layer, stride=1, use_bn=True)


    return net
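
Examples 1-3 call into helpers from the surrounding Caffe/SSD codebase (PeleeNetBody, VGGNetBody, add_extra_layers_*, fpn_block, res_block) that are not reproduced on this page. To make the res_block(net, from_layer, 256, out_layer, stride=1, use_bn=True) calls above easier to follow, here is a minimal NetSpec-style sketch of what such a helper could look like; the bottleneck layout, layer naming, and BatchNorm/Scale pairing are assumptions, not the original implementation:

# Minimal sketch only: a bottleneck residual prediction module attached to
# `from_layer`. The branch layout (1x1 -> 3x3 -> 1x1 plus a 1x1 projection
# shortcut), the layer naming, and the BatchNorm/Scale pairing are assumptions.
from caffe import layers as L


def res_block(net, from_layer, num_output, out_layer, stride=1, use_bn=True):
    def conv(bottom, name, nout, ks, pad, stride, relu=True):
        net[name] = L.Convolution(bottom, num_output=nout, kernel_size=ks,
                                  pad=pad, stride=stride, bias_term=not use_bn)
        top = net[name]
        if use_bn:
            net[name + '_bn'] = L.BatchNorm(top, in_place=True)
            net[name + '_scale'] = L.Scale(net[name + '_bn'], bias_term=True,
                                           in_place=True)
            top = net[name + '_scale']
        if relu:
            net[name + '_relu'] = L.ReLU(top, in_place=True)
            top = net[name + '_relu']
        return top

    bottom = net[from_layer]
    # Main branch: 1x1 reduce -> 3x3 -> 1x1 expand (no ReLU before the sum).
    branch = conv(bottom, out_layer + '_b2a', num_output // 2, 1, 0, stride)
    branch = conv(branch, out_layer + '_b2b', num_output // 2, 3, 1, 1)
    branch = conv(branch, out_layer + '_b2c', num_output, 1, 0, 1, relu=False)
    # Shortcut branch: 1x1 projection so channel counts match for the sum.
    shortcut = conv(bottom, out_layer + '_b1', num_output, 1, 0, stride,
                    relu=False)
    net[out_layer] = L.Eltwise(branch, shortcut)
    net[out_layer + '_relu'] = L.ReLU(net[out_layer], in_place=True)
    return net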
Example 4
def generator_model():
    """Build generator architecture."""
    # Current version: ResNet blocks
    inputs = Input(shape=image_shape)

    x = ReflectionPadding2D((3, 3))(inputs)
    x = Conv2D(filters=ngf, kernel_size=(7, 7), padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Increase filter number
    n_downsampling = 2
    for i in range(n_downsampling):
        mult = 2**i
        x = Conv2D(filters=ngf * mult * 2,
                   kernel_size=(3, 3),
                   strides=2,
                   padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    # Apply 9 ResNet blocks
    mult = 2**n_downsampling
    for i in range(n_blocks_gen):
        x = res_block(x, ngf * mult, use_dropout=True)

    # Decrease filter number; the final convolution below maps to 3 (RGB) channels
    for i in range(n_downsampling):
        mult = 2**(n_downsampling - i)
        x = Conv2DTranspose(filters=int(ngf * mult / 2),
                            kernel_size=(3, 3),
                            strides=2,
                            padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    x = ReflectionPadding2D((3, 3))(x)
    x = Conv2D(filters=output_nc, kernel_size=(7, 7), padding='valid')(x)
    x = Activation('tanh')(x)

    # Add a direct skip connection from input to output and rescale the sum back to [-1, 1]
    outputs = Add()([x, inputs])
    outputs = Lambda(lambda z: z / 2)(outputs)

    model = Model(inputs=inputs, outputs=outputs, name='Generator')
    return model
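
Examples 4-6 build a DeblurGAN-style encoder/decoder generator and delegate the residual stage to a res_block(x, filters, use_dropout=...) helper that is not shown here. A minimal sketch of a compatible two-convolution residual block follows; the kernel size, dropout rate, and the use of 'same' padding (rather than a reflection-padded 'valid' convolution) are assumptions:

# Sketch only: a two-convolution residual block with an identity shortcut,
# matching the res_block(x, filters, use_dropout=...) calls above.
from keras.layers import Activation, Add, BatchNormalization, Conv2D, Dropout


def res_block(x, filters, kernel_size=(3, 3), use_dropout=False):
    shortcut = x
    y = Conv2D(filters=filters, kernel_size=kernel_size, padding='same')(x)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    if use_dropout:
        y = Dropout(0.5)(y)
    y = Conv2D(filters=filters, kernel_size=kernel_size, padding='same')(y)
    y = BatchNormalization()(y)
    # Identity shortcut: input and output tensors must have the same shape.
    return Add()([shortcut, y])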
Example 5
def generator_model():
    """Build generator architecture."""
    # Current version: ResNet blocks
    inputs = Input(shape=image_shape)

    x = ReflectionPadding2D((3, 3))(inputs)
    x = Conv2D(filters=ngf, kernel_size=(7, 7), padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    n_downsampling = 3
    for i in range(n_downsampling):
        mult = 2**i
        x = Conv2D(filters=ngf * mult * 2,
                   kernel_size=(3, 3),
                   strides=2,
                   padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    mult = 2**n_downsampling
    for i in range(n_blocks_gen):
        x = res_block(x, ngf * mult, use_dropout=True)

    for i in range(n_downsampling):
        mult = 2**(n_downsampling - i)
        x = Conv2DTranspose(filters=int(ngf * mult / 2),
                            kernel_size=(3, 3),
                            strides=2,
                            padding='same')(x)
        # x = BatchNormalization()(x)
        x = Activation('relu')(x)

    x = ReflectionPadding2D((3, 3))(x)
    x = Conv2D(filters=output_nc, kernel_size=(7, 7), padding='valid')(x)
    x = Activation('tanh')(x)

    outputs = x
    # outputs = Add()([x, inputs])
    # outputs = Lambda(lambda z: K.clip(z, -1, 1))(x)
    # outputs = Lambda(lambda z: z/2)(outputs)

    model = Model(inputs=inputs, outputs=outputs, name='Generator')
    plot_model(model, to_file='generator.png', show_shapes=True)
    return model
Example 6
    def generator_model():
        """生成器模型
        """
        inputs = Input(Config.input_shape_generator)
        x = ReflectionPadding2D((3, 3))(inputs)
        print(x.shape)
        x = Conv2D(filters=Config.ngf, kernel_size=(7, 7), padding="valid")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            x = Conv2D(filters=Config.ngf * mult * 2,
                       kernel_size=(3, 3),
                       strides=2,
                       padding="same")(x)
            x = BatchNormalization()(x)
            x = Activation("relu")(x)

        mult = 2**n_downsampling
        for i in range(Config.n_blocks_gen):
            x = res_block(x, Config.ngf * mult, use_dropout=True)

        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            x = Conv2DTranspose(filters=int(Config.ngf * mult / 2),
                                kernel_size=(3, 3),
                                strides=2,
                                padding="same")(x)
            x = BatchNormalization()(x)
            x = Activation("relu")(x)
        x = ReflectionPadding2D(padding=(3, 3))(x)
        x = Conv2D(filters=Config.output_nc,
                   kernel_size=(7, 7),
                   padding="valid")(x)
        x = Activation("tanh")(x)

        # Output
        outputs = Add()([inputs, x])
        outputs = Lambda(lambda z: z / 2)(outputs)
        print("generator : ", outputs.shape)
        model = Model(inputs=inputs, outputs=outputs, name="Generator")
        return model
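
ReflectionPadding2D, used throughout Examples 4-7, is not a stock Keras layer; these repositories supply their own. A minimal sketch of such a layer, implemented with tf.pad in REFLECT mode and assuming channels-last inputs and a TensorFlow backend, is:

# Sketch only: custom reflection-padding layer (channels-last assumed).
import tensorflow as tf
from keras.layers import Layer


class ReflectionPadding2D(Layer):
    """Pad height and width by reflecting border pixels (no zero padding)."""

    def __init__(self, padding=(1, 1), **kwargs):
        self.padding = tuple(padding)
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def compute_output_shape(self, input_shape):
        h_pad, w_pad = self.padding
        height = input_shape[1] + 2 * h_pad if input_shape[1] is not None else None
        width = input_shape[2] + 2 * w_pad if input_shape[2] is not None else None
        return (input_shape[0], height, width, input_shape[3])

    def call(self, x):
        h_pad, w_pad = self.padding
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]],
                      mode='REFLECT')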
Example 7
def generator_model(args, inputs, istrain, reuse):
    """Build generator architecture."""
    # inputs: tensor with shape [bn, 256,256, 1]
    #    inputs = Input(shape=input_shape_generator)
    with tf.variable_scope('gen_', reuse=reuse):
        x = ReflectionPadding2D((3, 3))(inputs)
        x = Conv2D(filters=ngf, kernel_size=(7, 7), padding='valid')(x)
        x = batch_norm(x, "bn1", is_train=istrain)
        x = Activation('relu')(x)

        #        x = MaxPooling2D((2, 2), padding='same')(x)
        #        x = Conv2D(filters=ngf, kernel_size=(7,7), padding='same')(x)
        #        x = batch_norm(x, "bn2", is_train=istrain)
        #        x = Activation('relu')(x)

        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            #            x = ReflectionPadding2D((2, 2))(x)
            if not args.max_pooling:
                x = Conv2D(filters=ngf * mult * 2,
                           kernel_size=(3, 3),
                           strides=2,
                           padding='valid')(x)
            else:
                x = Conv2D(filters=ngf * mult * 2,
                           kernel_size=(3, 3),
                           strides=1,
                           padding='valid')(x)
                x = MaxPooling2D((2, 2), padding='valid')(x)
#            x = BatchNormalization()(x, training=istrain)
            x = batch_norm(x, "down_bn_" + str(i), is_train=istrain)
            tf.summary.histogram('before_active', x)
            x = Activation('relu')(x)
            tf.summary.histogram('after_activate', x)
        mult = 2**n_downsampling
        for i in range(n_blocks_gen):
            x = res_block(x, ngf * mult, use_dropout=True)


#        for i in range(n_downsampling):
#            mult = 2**(n_downsampling - i)
#            x = UpSampling2D()(x)
#            x = Conv2D(filters=int(ngf * mult / 2),kernel_size=(3,3),padding='same')(x)
##            x = Conv2DTranspose(filters=int(ngf * mult / 2), kernel_size=(3, 3), strides=2, padding='same')(x)
##            x = BatchNormalization()(x, training=istrain)
#            x = batch_norm(x, "up_bn_"+str(i), is_train=istrain)
#            x = LeakyReLU(alpha=0.3)(x)

        x = Conv2D(filters=2, kernel_size=(5, 5), padding='same')(x)
        x = batch_norm(x, "final", is_train=istrain)
        wrap = Activation('sigmoid')(x)
        wrap = tf.multiply(tf.add(wrap, -0.5), 8)
        # dense layer
        dense = tf.layers.flatten(wrap)
        output_size = args.final_layer  # final layer size comes from the command-line args instead of a hard-coded 128
        #        output_size1 = 16
        dense_out = tf.layers.dense(inputs=dense, units=output_size * 2)
        #        dense_out1 = tf.layers.dense(inputs=dense_out, units=output_size1*2)
        x_mean = tf.reshape(dense_out, [-1, output_size, 2])
        #        x_mean = Conv2D(filters=2, kernel_size=(1,256), padding='valid')(wrap)

        #        x_layer = wrap[...,0]
        #        x_mean = tf.reduce_max(wrap, axis=2)
        x_mean = tf.expand_dims(x_mean, 2)
        wrap = tf.tile(x_mean, multiples=[1, 1, output_size, 1])
        wrap = bicubic_interp_2d(wrap, imsize)
        outputs = Lambda(WarpST_one,
                         arguments={
                             'inputs': inputs,
                             'name': str(random.random())
                         })(wrap)
        return outputs, wrap[:, :, 0, :]
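
Example 7 mixes Keras layers with raw TensorFlow 1.x ops and depends on several project-specific helpers (batch_norm, bicubic_interp_2d, WarpST_one) that are not shown. As an illustration of the assumed batch_norm(x, name, is_train) wrapper only, a minimal sketch over tf.layers.batch_normalization could look like this (the momentum and epsilon values are assumptions):

# Sketch only: thin TF 1.x wrapper so the training flag and variable scope
# can be passed explicitly, matching the calls in Example 7.
import tensorflow as tf


def batch_norm(x, name, is_train=True):
    with tf.variable_scope(name):
        return tf.layers.batch_normalization(x, momentum=0.9, epsilon=1e-5,
                                             training=is_train)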