def create_sr_model(self, ip):
        """Assemble the SR network: conv -> 8 dense blocks -> 2 deconvs -> reconstruction.

        Args:
            ip: Keras input tensor holding the low-resolution image.

        Returns:
            A Keras `Model` mapping `ip` to the reconstructed high-res image.
        """
        # Initial convolution layer (feature extraction).
        feat = self.Conv_layer(ip)
        # Eight dense blocks chained back to back.
        for _ in range(8):
            feat = self.dense_block(feat)
        # Two stride-2 transposed convolutions upscale the features 4x overall.
        up = Deconv2D(128,
                      kernel_size=self.kernel_size,
                      activation='relu',
                      strides=2,
                      padding='same')(feat)
        up = Deconv2D(128,
                      kernel_size=self.kernel_size,
                      activation='relu',
                      strides=2,
                      padding='same')(up)
        # Reconstruction layer maps features back to `self.channels` image planes.
        outputs = Conv2D(self.channels,
                         kernel_size=self.kernel_size,
                         padding='same')(up)
        return Model(inputs=ip, outputs=outputs)
    def create_sr_model(self, ip):
        """Assemble the SR network with dense skip connections.

        conv -> 8 dense blocks -> concat of every intermediate output ->
        1x1 bottleneck -> 2 stride-2 deconvs -> reconstruction conv.
        """
        # Initial convolution layer.
        feat = self.Conv_layer(ip)
        # Keep every stage output so all of them can feed the skip connection.
        stages = [feat]
        for _ in range(8):
            stages.append(self.dense_block(stages[-1]))
        # Skip connection: concatenate the conv output and all 8 dense-block outputs.
        sk = keras.layers.concatenate(stages)
        # 1x1 bottleneck compresses the concatenated channels down to 128.
        bottleneck = Conv2D(128, kernel_size=1, activation='relu',
                            padding='same')(sk)
        # Two stride-2 transposed convolutions upscale 4x overall.
        up = Deconv2D(128,
                      kernel_size=self.kernel_size,
                      activation='relu',
                      strides=2,
                      padding='same')(bottleneck)
        up = Deconv2D(128,
                      kernel_size=self.kernel_size,
                      activation='relu',
                      strides=2,
                      padding='same')(up)
        # Reconstruction layer restores the image channel count.
        outputs = Conv2D(self.channels,
                         kernel_size=self.kernel_size,
                         padding='same')(up)
        return Model(inputs=ip, outputs=outputs)
예제 #3
0
    def autoencoder(self):
        """Build a convolutional autoencoder: 4 stride-2 convs down, 4 up.

        Returns:
            A Keras `Model` mapping an image to its reconstruction in [-1, 1].
        """
        img = Input(shape=self.img_shape)

        # Encoder: halve the spatial dims at each step while widening channels.
        x = img
        for n_filters in (64, 128, 256, 512):
            x = Conv2D(n_filters, (5, 5), strides=2, padding='same',
                       activation='relu')(x)

        # Decoder: mirror the encoder with transposed convolutions.
        for n_filters in (512, 256, 128):
            x = Deconv2D(n_filters, (5, 5), strides=2, padding='same',
                         activation='relu')(x)
        # Final layer restores 3 channels; tanh bounds the output to [-1, 1].
        decoded = Deconv2D(3, (5, 5), strides=2, padding='same',
                           activation='tanh')(x)

        auto_encoder = Model(img, decoded)
        auto_encoder.summary()

        return auto_encoder
예제 #4
0
def testnet(learningRate=0.001):
    """Build and compile the 'testnet' 2x super-resolution model.

    Input is a 64x64 single-channel image. Two conv layers feed two deconv
    layers with additive skip connections; the result is upscaled with a
    subpixel layer. A parallel shallow branch from the input is upscaled the
    same way and subtracted before the final 1x1 conv.

    Args:
        learningRate: Adam learning rate.

    Returns:
        A compiled Keras Model (loss is the project-defined loss.rmse).
    """
    print('Creating model of architecture \'testnet\'')
    x_input = Input((64, 64, 1))

    # Feature extraction.
    conv1 = Conv2D(64, (5, 5),
                   padding='same',
                   use_bias=True,
                   activation='relu',
                   name='conv1')(x_input)
    conv2 = Conv2D(64, (3, 3),
                   padding='same',
                   use_bias=True,
                   activation='relu',
                   name='conv2')(conv1)

    # Deconv layers with additive skips back onto the conv outputs.
    deconv = Deconv2D(64, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='relu',
                      name='deconv')(conv2)
    add1 = Add()([conv2, deconv])

    deconv2 = Deconv2D(64, (3, 3),
                       padding='same',
                       use_bias=True,
                       activation='relu',
                       name='deconv2')(add1)
    add2 = Add()([deconv2, conv1])

    # Main branch: 32-channel conv, then subpixel (pixel-shuffle) 2x upscale.
    conv3 = Conv2D(32, (3, 3),
                   padding='same',
                   use_bias=True,
                   activation='relu',
                   name='conv3')(add2)
    subpix = SubpixelConv2D(conv3.shape, scale=2, name='subpix1')(conv3)

    # Shallow branch straight from the input, also upscaled 2x.
    conv1_2 = Conv2D(32, (3, 3),
                     padding='same',
                     use_bias=True,
                     activation='relu',
                     name='conv1_2')(x_input)
    subpix1_1 = SubpixelConv2D(conv1_2.shape, scale=2,
                               name='subpix1_1')(conv1_2)

    # NOTE(review): the two branches are subtracted (not added) here.
    add3 = Subtract()([subpix1_1, subpix])
    conv4 = Conv2D(1, (1, 1),
                   padding='same',
                   use_bias=True,
                   activation='relu',
                   name='conv4')(add3)

    y_output = conv4

    model = Model(x_input, y_output)
    adam = Adam(lr=learningRate)
    model.compile(optimizer=adam, loss=loss.rmse, metrics=['accuracy'])
    return model
예제 #5
0
    def generator(self):
        """Build (and cache) the DCGAN generator: 100-d noise -> image.

        Uses the legacy Keras 1 Deconvolution2D signature
        (Deconv2D(filters, k_h, k_w, border_mode=..., output_shape=...)).
        """
        # Return the cached model if it was already built.
        if self.G:
            return self.G
        self.G = Sequential()
        dropout = 0.4
        depth = 64 + 64 + 64 + 64
        dim = 7
        # In: 100
        # Out: dim x dim x depth
        self.G.add(Dense(dim * dim * depth, input_dim=100))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))
        # NOTE(review): this reshape is channels-first (depth, dim, dim),
        # matching the channels-first output_shape tuples below — confirm the
        # backend image ordering is configured accordingly.
        self.G.add(Reshape((depth, dim, dim)))
        self.G.add(Dropout(dropout))

        # In: dim x dim x depth
        # Out: 2*dim x 2*dim x depth/2
        self.G.add(UpSampling2D())
        self.G.add(
            Deconv2D(int(depth / 2),
                     5,
                     5,
                     border_mode='same',
                     output_shape=(None, int(depth / 2), 2 * dim, 2 * dim)))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))

        self.G.add(UpSampling2D())
        self.G.add(
            Deconv2D(int(depth / 4),
                     5,
                     5,
                     border_mode='same',
                     output_shape=(None, int(depth / 4), 4 * dim, 4 * dim)))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))

        # No upsampling here: spatial size stays at 4*dim.
        self.G.add(
            Deconv2D(int(depth / 8),
                     5,
                     5,
                     border_mode='same',
                     output_shape=(None, int(depth / 8), 4 * dim, 4 * dim)))
        self.G.add(BatchNormalization(momentum=0.9))
        self.G.add(Activation('relu'))

        # Out: 28 x 28 x 1 grayscale image [0.0,1.0] per pix
        self.G.add(
            Deconv2D(1,
                     5,
                     5,
                     border_mode='same',
                     output_shape=(None, 1, 4 * dim, 4 * dim)))
        self.G.add(Activation('sigmoid'))
        self.G.summary()
        return self.G
예제 #6
0
def deep_decoder2(input_shape):
    """Decoder mapping a (1, 1, C) latent tensor to a 32x32x3 image.

    Python 2 / Keras 1 style: Deconv2D(filters, k_h, k_w, output_shape=...,
    border_mode=..., subsample=...). Spatial path: 1x1 -> 4x4 -> 8x8 ->
    16x16 -> 32x32 -> 32x32; every deconv is followed by BatchNormalization,
    and the final BN output (linear activation) is the decoded image.
    """
    encoded = Input(shape=input_shape)
    print 'encoded shape:', encoded.get_shape().as_list()
    x = encoded
    # x = BatchNormalization(mode=2, axis=3)(encoded)

    # Dynamic batch size, needed for the Keras 1 output_shape argument.
    # batch_size, h, w, _ = tf.shape(x)
    batch_size = tf.shape(x)[0]
    # dim: (1, 1, 512)
    x = Deconv2D(512,
                 4,
                 4,
                 output_shape=[batch_size, 4, 4, 512],
                 activation='relu',
                 border_mode='same',
                 subsample=(4, 4))(encoded)
    x = BatchNormalization(mode=2, axis=3)(x)
    # (4, 4, 512)
    x = Deconv2D(256,
                 5,
                 5,
                 output_shape=[batch_size, 8, 8, 256],
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (8, 8, 236)
    # h *= 2; w *= 2
    x = Deconv2D(128,
                 5,
                 5,
                 output_shape=(batch_size, 16, 16, 128),
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (16, 16, 256)
    x = Deconv2D(64,
                 5,
                 5,
                 output_shape=(batch_size, 32, 32, 64),
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (32, 32, 64)
    # subsample (1, 1): final deconv keeps the 32x32 size, projects to RGB.
    x = Deconv2D(3,
                 5,
                 5,
                 output_shape=(batch_size, 32, 32, 3),
                 activation='linear',
                 border_mode='same',
                 subsample=(1, 1))(x)
    decoded = BatchNormalization(mode=2, axis=3)(x)
    return Model(encoded, decoded)
예제 #7
0
파일: vae.py 프로젝트: hengyuan-hu/dem
def deep_decoder1(input_shape):
    """Decoder mapping a latent tensor `z` to a 32x32x3 image.

    Structurally identical to deep_decoder2 (Python 2 / Keras 1 API):
    1x1 -> 4x4 -> 8x8 -> 16x16 -> 32x32 -> 32x32, BatchNormalization
    after every transposed convolution, linear final activation.
    """
    z = Input(shape=input_shape)
    print 'decoder input shape:', z._keras_shape

    # Dynamic batch size, needed for the Keras 1 output_shape argument.
    batch_size = tf.shape(z)[0]
    # h, w, _ = z._keras_shape[1:]
    # dim: (1, 1, 512)
    x = Deconv2D(512,
                 4,
                 4,
                 output_shape=[batch_size, 4, 4, 512],
                 activation='relu',
                 border_mode='same',
                 subsample=(4, 4))(z)
    x = BatchNormalization(mode=2, axis=3)(x)
    # (4, 4, 512)
    x = Deconv2D(256,
                 5,
                 5,
                 output_shape=[batch_size, 8, 8, 256],
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (8, 8, 236)
    # h *= 2; w *= 2
    x = Deconv2D(128,
                 5,
                 5,
                 output_shape=(batch_size, 16, 16, 128),
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (16, 16, 256)
    x = Deconv2D(64,
                 5,
                 5,
                 output_shape=(batch_size, 32, 32, 64),
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (32, 32, 64)
    # subsample (1, 1): final deconv keeps the 32x32 size, projects to RGB.
    x = Deconv2D(3,
                 5,
                 5,
                 output_shape=(batch_size, 32, 32, 3),
                 activation='linear',
                 border_mode='same',
                 subsample=(1, 1))(x)
    decoded = BatchNormalization(mode=2, axis=3)(x)
    return Model(z, decoded)
예제 #8
0
def CreatErrorMapModel(input_shape, lastLayerActivation='hard_sigmoid', PercentageOfTrianable=70, bnAtTheend=True, lossFunction="mae"):
    """Build an error-map regression model on top of a truncated ResNet50.

    A ResNet50 (no top) is cut at layer STOP_LAYER; a small conv stack and
    five stride-2 transposed convolutions then upscale the features back to
    image resolution. The first `PercentageOfTrianable` percent of layers
    are frozen before compiling.

    Fixes vs. the original: removed the unused `img_dim` local and the large
    commented-out debug blocks, normalized the tab-indented loop bodies to
    spaces (the tab/space mix raises TabError on Python 3), and corrected the
    "shapr" typo in the debug prints.

    Args:
        input_shape: input image shape, e.g. (256, 256, 3).
        lastLayerActivation: activation of the final transposed conv.
        PercentageOfTrianable: percentage (0-100) of layers to freeze.
        bnAtTheend: append BatchNormalization after the last deconv.
        lossFunction: loss name, or "customLoss" for the project loss.

    Returns:
        The compiled Keras Model.
    """
    STOP_LAYER = 149  # index of the ResNet50 layer at which the backbone is cut
    print(STOP_LAYER)

    sharedResnet = applications.resnet50.ResNet50(include_top=False, input_shape=input_shape)
    BaseModel = Model(sharedResnet.input, sharedResnet.layers[STOP_LAYER].output)
    x = BaseModel.output
    print("x output shape is ", x.shape)
    x = Conv2D(32, (3, 3), padding='same', activation='relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(16, (3, 3), padding='same', activation='relu')(x)
    x = BatchNormalization()(x)
    print("x output shape is ", x.shape)
    # Five stride-2 transposed convolutions: 2^5 = 32x spatial upscale.
    x = Deconv2D(8, (3, 3), strides=(2, 2), padding="same", activation='relu')(x)
    x = BatchNormalization()(x)
    x = Deconv2D(4, (3, 3), strides=(2, 2), padding="same", activation='relu')(x)
    x = BatchNormalization()(x)
    x = Deconv2D(3, (3, 3), strides=(2, 2), padding="same", activation='relu')(x)
    x = BatchNormalization()(x)
    x = Deconv2D(3, (3, 3), strides=(2, 2), padding="same", activation='relu')(x)
    x = BatchNormalization()(x)

    x = Deconv2D(3, (3, 3), strides=(2, 2), padding="same", activation=lastLayerActivation)(x)
    if bnAtTheend == True:
        x = BatchNormalization()(x)

    whole_model = Model(BaseModel.input, outputs=x)

    # NOTE(review): despite the parameter name, this FREEZES the first p
    # layers (trainable = False) rather than making them trainable.
    p = int((PercentageOfTrianable / 100) * len(whole_model.layers))
    print(len(whole_model.layers), p)
    for layer in whole_model.layers[:p]:
        layer.trainable = False

    for i, l in enumerate(whole_model.layers):
        print(i, "  ", l, " ", l.output_shape, "\n")

    print(lossFunction)
    if lossFunction == "customLoss":
        lossFunction = customLoss  # project-defined loss function
    whole_model.compile(loss=lossFunction, optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), metrics=['mae', 'acc'])
    return whole_model
예제 #9
0
    def __build_generator(self):
        """Build the conditional generator: (latent z, one-hot label) -> image in [-1, 1]."""
        # Inputs: latent vector and one-hot class label.
        latent_z = Input(shape=(self.latent_dim, ))
        label_y = Input(shape=(self.num_classes, ))

        # Merge noise and label, then view the result as a 1x1 feature map.
        x = Concatenate()([latent_z, label_y])
        x = Reshape(target_shape=(1, 1, -1))(x)

        # Full Conv 1: default 'valid' padding expands the 1x1 map to 4x4.
        x = Deconv2D(kernel_size=[4, 4], strides=(2, 2), filters=512)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Full Conv 2-4: each 'same'-padded stride-2 deconv doubles the
        # spatial size while the channel count shrinks (256 -> 128 -> 64).
        for n_filters in (256, 128, 64):
            x = Deconv2D(kernel_size=[4, 4],
                         strides=(2, 2),
                         filters=n_filters,
                         padding='same')(x)
            x = BatchNormalization()(x)
            x = ReLU()(x)

        # Full Conv 5: project to image channels; tanh bounds pixels to [-1, 1].
        x = Deconv2D(kernel_size=[4, 4],
                     strides=(2, 2),
                     filters=self.img_channels,
                     padding='same')(x)
        x = Activation(activation='tanh')(x)

        model = Model([latent_z, label_y], x)

        print('--- GENERATOR ---')
        model.summary()

        # Optionally restore pre-trained generator weights.
        if self.generator_weights is not None:
            self.__load_weights(model, self.generator_weights)

        return model
예제 #10
0
def generate_model():
    """Build and compile a 2x super-resolution model (64x64x1 input).

    Two conv layers feed two deconv layers with additive skip connections;
    a 4-channel conv then feeds a subpixel (pixel-shuffle) 2x upscale and a
    final 1-channel conv produces the output image.

    Returns:
        A compiled Keras Model (Adam, lr=0.001, loss is the project `rmse`).
    """
    x_input = Input((64, 64, 1))

    # Feature extraction.
    conv1 = Conv2D(64, (5, 5),
                   padding='same',
                   use_bias=True,
                   activation='relu',
                   name='conv1')(x_input)

    conv2 = Conv2D(64, (3, 3),
                   padding='same',
                   use_bias=True,
                   activation='relu',
                   name='conv2')(conv1)

    # Deconv layers with additive skips back onto the conv outputs.
    deconv = Deconv2D(64, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='relu',
                      name='deconv')(conv2)

    add1 = Add()([deconv, conv2])

    deconv2 = Deconv2D(64, (3, 3),
                       padding='same',
                       use_bias=True,
                       activation='relu',
                       name='deconv2')(add1)

    add2 = Add()([deconv2, conv1])

    # 4-channel conv feeds the subpixel layer (4 = 2^2 channels per pixel).
    conv3 = Conv2D(4, (3, 3),
                   padding='same',
                   use_bias=True,
                   activation='relu',
                   name='conv3')(add2)

    subpix = SubpixelConv2D(conv3.shape, scale=2)(conv3)

    conv4 = Conv2D(1, (3, 3),
                   padding='same',
                   use_bias=True,
                   activation='relu',
                   name='conv4')(subpix)

    y_output = conv4

    model = Model(x_input, y_output)
    adam = Adam(lr=0.001)
    model.compile(optimizer=adam, loss=rmse, metrics=['accuracy'])
    return model
예제 #11
0
def testnet4(learningRate=0.001):
    """Build and compile 'testnet4': conv/deconv skips with BN + PReLU.

    Fixes vs. the original:
      * every `BatchNormalization` output was assigned to `batch` and never
        used — each PReLU was wired to the raw conv/deconv output, leaving
        all BN layers disconnected from the graph. The BN layers are now
        actually in the forward path.
      * the first SubpixelConv2D received `relu4.shape` (64 channels)
        although its real input `relu5` has 32 channels; it now receives
        `relu5.shape` (matching how the other subpixel call and the sibling
        architectures pass the shape of the tensor being upscaled).

    Args:
        learningRate: Adam learning rate.

    Returns:
        A compiled Keras Model (loss is the project-defined loss.rmse).
    """
    print('Creating model of architecture \'testnet4\'')
    x_input = Input((64, 64, 1))

    conv1 = Conv2D(64, (5, 5), padding='same', use_bias=True)(x_input)
    batch1 = BatchNormalization()(conv1)
    relu1 = PReLU(alpha_initializer='zeros')(batch1)
    conv2 = Conv2D(64, (3, 3), padding='same', use_bias=True)(relu1)
    batch2 = BatchNormalization()(conv2)
    relu2 = PReLU(alpha_initializer='zeros')(batch2)
    conv2_1 = Conv2D(64, (3, 3), padding='same', use_bias=True)(relu2)
    batch2_1 = BatchNormalization()(conv2_1)
    relu2_1 = PReLU(alpha_initializer='zeros')(batch2_1)

    deconv = Deconv2D(64, (3, 3), padding='same', use_bias=True)(relu2_1)
    batch3 = BatchNormalization()(deconv)
    relu3 = PReLU(alpha_initializer='zeros')(batch3)
    add1 = Add()([relu2_1, relu3])

    deconv2 = Deconv2D(64, (3, 3), padding='same', use_bias=True)(add1)
    batch4 = BatchNormalization()(deconv2)
    relu4 = PReLU(alpha_initializer='zeros')(batch4)
    add2 = Add()([relu4, relu2])

    deconv2_1 = Deconv2D(64, (3, 3), padding='same', use_bias=True)(add2)
    batch4_1 = BatchNormalization()(deconv2_1)
    relu4_1 = PReLU(alpha_initializer='zeros')(batch4_1)
    add2_1 = Add()([relu4_1, relu1])

    # Main branch: 32-channel conv feeding the subpixel 2x upscale.
    conv3 = Conv2D(32, (3, 3), padding='same', use_bias=True)(add2_1)
    batch5 = BatchNormalization()(conv3)
    relu5 = PReLU(alpha_initializer='zeros')(batch5)
    subpix = SubpixelConv2D(relu5.shape, scale=2, name='subpix1')(relu5)

    # Shallow branch straight from the input, also upscaled 2x.
    conv1_2 = Conv2D(32, (3, 3), padding='same', use_bias=True)(x_input)
    batch6 = BatchNormalization()(conv1_2)
    relu6 = PReLU(alpha_initializer='zeros')(batch6)
    subpix1_1 = SubpixelConv2D(conv1_2.shape, scale=2, name='subpix2')(relu6)

    add3 = Add()([subpix1_1, subpix])
    conv4 = Conv2D(1, (3, 3), padding='same', use_bias=True,
                   activation='relu')(add3)

    y_output = conv4

    model = Model(x_input, y_output)
    adam = Adam(lr=learningRate)
    model.compile(optimizer=adam, loss=loss.rmse)
    return model
    def build_generator(self):
        """Encoder-decoder generator assembled from conv2d_block/deconv2d_block helpers.

        Returns:
            A Keras `Model` mapping an input image to a 3-channel image in [-1, 1].
        """
        img = Input(shape=(self.height, self.width, self.channels))

        # Encoder: widen the channel count stage by stage.
        x = img
        for n_filters in (16, 32, 64, 128, 256, 512, 512):
            x = conv2d_block(x, n_filters)

        # Decoder: mirror the encoder with deconv blocks.
        for n_filters in (512, 256, 128, 64, 32, 16):
            x = deconv2d_block(x, n_filters)

        # Final stride-2 deconv restores 3 channels; tanh bounds the output.
        out = Deconv2D(filters=3,
                       kernel_size=(4, 4),
                       strides=2,
                       activation='tanh',
                       padding='same')(x)

        model = Model(img, out)
        model.summary()
        return model
예제 #13
0
def decoder(list_encoder, list_nb_filters, verbose=0):
    """U-Net style decoder over an encoder feature pyramid.

    Walks back up the pyramid: at each level a stride-2 transposed conv
    doubles the spatial size, the result is concatenated with the matching
    encoder feature map (skip connection), and two `base_conv` blocks refine
    it.

    Fix vs. the original: the "-- input" debug print passed (i, shape) to a
    single-placeholder format string, so it printed the level index instead
    of the input shape.

    Args:
        list_encoder: encoder feature maps, ordered shallow -> deep.
        list_nb_filters: filter count per level, parallel to `list_encoder`.
        verbose: if > 0, print the tensor shape after every step.

    Returns:
        The final decoder feature map (at the resolution of list_encoder[0]).
    """
    if verbose > 0:
        print("\n-------- Decoder --------")

    l = len(list_encoder)
    temp_layers = [list_encoder[l - 1]]
    for i in range(l - 1):
        if verbose > 0:
            print("level : %i" % i)
            # Fixed: format the shape, not the loop index.
            print("-- input : {}".format(temp_layers[-1]._keras_shape))

        x = Deconv2D(list_nb_filters[l - i - 2],
                     kernel_size=(2, 2),
                     padding='same',
                     strides=(2, 2),
                     name="decoder_%i_deconv" % i)(temp_layers[-1])
        if verbose > 0:
            print("-- decoder_{}_deconv : {}".format(i, x._keras_shape))
        # Skip connection with the encoder level at the same resolution.
        x = Concatenate(name="decoder_%i_concat" %
                        i)([x, list_encoder[l - i - 2]])
        if verbose > 0:
            print("-- decoder_{}_concat : {}".format(i, x._keras_shape))
        x = base_conv(x, list_nb_filters[l - i - 2], s_id="decoder_%ia" % i)
        if verbose > 0:
            print("-- decoder_{}a : {}".format(i, x._keras_shape))
        x = base_conv(x, list_nb_filters[l - i - 2], s_id="decoder_%ib" % i)
        if verbose > 0:
            print("-- decoder_{}b : {}".format(i, x._keras_shape))
        temp_layers.append(x)
    return temp_layers[-1]
    def create_sr_model(self, ip):
        """FSRCNN-style SR network: extract -> shrink -> map -> expand -> deconv."""
        # Feature extraction: wide 5x5 conv into d channels.
        x = self.Conv_layers(self.d, 5, ip)
        x = PReLU(shared_axes=[1, 2])(x)
        # Shrinking: 1x1 conv down to s channels.
        x = self.Conv_layers(self.s, 1, x)
        x = PReLU(shared_axes=[1, 2])(x)
        # Non-linear mapping: m stacked 3x3 convs, one PReLU after the stack.
        for _ in range(self.m):
            x = self.Conv_layers(self.s, 3, x)
        x = PReLU(shared_axes=[1, 2])(x)
        # Expanding: 1x1 conv back up to d channels.
        x = self.Conv_layers(self.d, 1, x)
        x = PReLU(shared_axes=[1, 2])(x)
        # Deconvolution: a single 9x9 transposed conv performs the upscaling.
        outputs = Deconv2D(self.channels,
                           kernel_size=9,
                           strides=self.scale,
                           padding='same')(x)

        # Wrap the graph into a Model.
        return Model(inputs=ip, outputs=outputs)
예제 #15
0
def Generator(noise_dim, batch_size, f=512):
    """DCGAN-style generator for 32x32x3 images.

    Projects the noise to a 4x4xf map, then applies `nb_upconv` stride-2
    transposed convolutions (halving channels each time) and a final 3x3
    conv with tanh.

    Args:
        noise_dim: shape tuple of the noise input, e.g. (100,).
        batch_size: batch size baked into the deconv output_shape tuples.
        f: number of filters at the initial 4x4 stage.
    """
    img_dim = (32,32,3)
    s = img_dim[1]  # NOTE(review): overwritten inside the loop before any use
    output_channels = img_dim[-1]
    start_dim = 4
    nb_upconv = 3

    reshape_shape = (start_dim, start_dim, f)
    gen_input = Input(shape=noise_dim)
    # NOTE(review): input_dim normally expects an int but receives the shape
    # tuple here; it is redundant anyway since gen_input fixes the shape.
    x = Dense(f * start_dim * start_dim, input_dim=noise_dim, use_bias=False)(gen_input)
    x = Reshape(reshape_shape)(x)
    x = BatchNormalization(axis=-1)(x)
    x = Activation("relu")(x)

    # Each upconv doubles the spatial size and halves the filter count.
    for i in range(nb_upconv):
        nb_filters = int(f / (2 ** (i + 1)))
        s = start_dim * (2 ** (i + 1))
        o_shape = (batch_size, s, s, nb_filters)
        x = Deconv2D(nb_filters, (4, 4),
                     output_shape=o_shape, strides=(2, 2),
                     padding="same", use_bias=False,
                     kernel_initializer = glorot_normal())(x)
        x = BatchNormalization(axis=-1)(x)
        x = Activation("relu")(x)

    # Final projection to RGB; tanh bounds pixels to [-1, 1].
    x = Conv2D(output_channels, (3, 3), strides=(1, 1), padding="same", use_bias=False,
               kernel_initializer = glorot_normal())(x)
    x = Activation("tanh")(x)

    generator_model = Model(inputs=[gen_input], outputs=[x])
    return generator_model
예제 #16
0
def deconv(h_0, filters, kernel_size, strides):
        """Transposed conv + BatchNorm (forced training mode) + ReLU.

        Args:
            h_0: input tensor.
            filters: number of output channels.
            kernel_size: deconv kernel size.
            strides: deconv strides (controls the upscaling factor).
        """
        # Small-stddev truncated-normal init keeps early outputs near zero.
        init = keras.initializers.TruncatedNormal(mean=0.0, stddev=0.0001)
        h = Deconv2D(filters=filters,
                     kernel_size=kernel_size,
                     strides=strides,
                     padding='same',
                     kernel_initializer=init)(h_0)
        # training=True: batch statistics are used even at inference time.
        h = BatchNormalization()(h, training=True)
        return Activation('relu')(h)
예제 #17
0
    def decoder(x):
        """Decoder that mirrors VGG16 in reverse.

        Walks the VGG16 layer list from deepest to shallowest, replacing each
        MaxPooling2D with a 2x upsampling and each Conv2D with a Deconv2D
        built from the same layer config.
        """
        # with tf.device("cpu:0"):
        # # --------latent space (trainable) ------------
        x = Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same',
                   name='latent')(x)
        # Fresh, weightless VGG16 used only as an architecture template.
        encoder_fn = VGG16(include_top=False, weights=None)
        for i in range(len(encoder_fn.layers) - 1, 0,
                       -1):  # ignore the 1st (input) layer
            layer = encoder_fn.layers[i]
            layer.trainable = True
            if isinstance(layer, MaxPooling2D):
                x = UpSampling2D((2, 2))(x)
            else:  # in vgg, everything is Conv2D
                # Reuse the conv layer's config (filters, kernel, padding...)
                # for the transposed conv; rename it to avoid a name clash.
                config = layer.get_config()
                config['name'] = "d_" + layer.name
                x = Deconv2D.from_config(config)(x)

        # finally, bring it back to input shape
        x = Deconv2D(3, (3, 3),
                     activation='relu',
                     padding='same',
                     name='dblock1_conv3')(x)
        return x
예제 #18
0
 def up_2x(x, unit, mode='deconv'):
     """Double the spatial resolution of `x`.

     mode='deconv' uses a learnable stride-2 transposed conv with `unit`
     filters; mode='upsample' uses parameter-free 2x upsampling instead.
     """
     assert mode in ['deconv', 'upsample']
     if mode == 'upsample':
         return UpSampling2D(size=(2, 2))(x)
     return Deconv2D(unit, kernel_size=4, strides=2, padding='same')(x)
예제 #19
0
    def vae_model(self):
        """Build the convolutional VAE.

        Returns:
            A pair (vae, encoder): `vae` ends in the custom VAE_loss layer
            and is the model to train; `encoder` maps an image to z_mean.
        """
        # encoder
        x = Input(shape=img_size)  # NOTE(review): img_size is module-level
        hidden = Conv2D(filters=1,
                        kernel_size=(3, 3),
                        padding='same',
                        activation='relu')(x)
        # Two stride-2 convs downsample the image 4x overall.
        hidden = Conv2D(filters=4,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu')(hidden)
        hidden = Conv2D(filters=16,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation='relu')(hidden)
        hidden = Flatten()(hidden)
        hidden = Dense(self.intermediate_dim, activation='relu')(hidden)
        # Latent Gaussian parameters.
        z_mean = Dense(self.latent_dim)(hidden)
        z_sigma = Dense(self.latent_dim)(hidden)

        # decoder
        # reparameterization trick
        z = Lambda(self.sampling,
                   output_shape=(self.latent_dim, ))([z_mean, z_sigma])
        dense_1 = Dense(self.intermediate_dim, activation='relu')(z)
        # 7x7x16 matches the encoder feature-map size for 28x28 inputs —
        # TODO confirm img_size is (28, 28, 1).
        dense_2 = Dense(7 * 7 * 16, activation='relu')(dense_1)
        reshape = Reshape((7, 7, 16))(dense_2)
        deconv_1 = Deconv2D(filters=16,
                            kernel_size=(3, 3),
                            padding='same',
                            activation='relu')(reshape)
        upsamp_1 = UpSampling2D((2, 2))(deconv_1)
        deconv_2 = Deconv2D(filters=4,
                            kernel_size=(3, 3),
                            padding='same',
                            activation='relu')(upsamp_1)
        upsamp_2 = UpSampling2D((2, 2))(deconv_2)
        x_decoded_mean = Conv2D(filters=1,
                                kernel_size=(3, 3),
                                padding='same',
                                activation='sigmoid')(upsamp_2)
        # loss function layer
        # VAE_loss folds reconstruction and KL terms into the graph output.
        y = VAE_loss()([x, x_decoded_mean, z_sigma, z_mean])

        return Model(x, y), Model(x, z_mean)
예제 #20
0
def dsrcnn(learningRate=0.001):
    """Build and compile the DSRCNN 2x super-resolution model (64x64x1 input).

    Args:
        learningRate: Adam learning rate.

    Returns:
        A compiled Keras Model (loss is the project-defined loss.rmse).
    """
    print('Creating model of architecture \'DSRCNN\'')
    inp = Input((64, 64, 1))

    # Two conv layers extract features.
    c1 = Conv2D(64, (5, 5), padding='same', use_bias=True,
                activation='relu', name='conv1')(inp)
    c2 = Conv2D(64, (3, 3), padding='same', use_bias=True,
                activation='relu', name='conv2')(c1)

    # Deconv layers with additive skips back onto the conv outputs.
    d1 = Deconv2D(64, (3, 3), padding='same', use_bias=True,
                  activation='relu', name='deconv')(c2)
    skip1 = Add()([d1, c2])

    d2 = Deconv2D(64, (3, 3), padding='same', use_bias=True,
                  activation='relu', name='deconv2')(skip1)
    skip2 = Add()([d2, c1])

    # 4-channel conv feeds the subpixel (pixel-shuffle) 2x upscale.
    c3 = Conv2D(4, (3, 3), padding='same', use_bias=True,
                activation='relu', name='conv3')(skip2)
    upscaled = SubpixelConv2D(c3.shape, scale=2)(c3)
    out = Conv2D(1, (3, 3), padding='same', use_bias=True,
                 activation='relu', name='conv4')(upscaled)

    model = Model(inp, out)
    model.compile(optimizer=Adam(lr=learningRate), loss=loss.rmse,
                  metrics=['accuracy'])
    return model
예제 #21
0
def fcn_vgg_8s(input_shape=(224, 224, 3), classes=21):
    """FCN-8s semantic segmentation model on a VGG-16 style backbone.

    Score maps from the 1/8, 1/16 and 1/32 resolution stages are brought to
    a common 1/8 resolution, summed, upsampled 8x back to the input size,
    and softmaxed per pixel.
    """
    inputs = Input(shape=input_shape)

    # VGG-style encoder: three conv stacks, each followed by 2x2 max-pooling.
    net = inputs
    for n_filters in (64, 128, 256):
        net = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(net)
        net = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(net)
        net = MaxPooling2D((2, 2), strides=(2, 2))(net)

    # Score map at 1/8 resolution.
    p3 = Conv2D(classes, (1, 1), activation='relu')(net)

    net = Conv2D(512, (3, 3), activation='relu', padding='same')(net)
    net = Conv2D(512, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), strides=(2, 2))(net)

    # Score map at 1/16 resolution, upsampled 2x and cropped to 1/8.
    p4 = Conv2D(classes, (1, 1), activation='relu')(net)
    p4 = Deconv2D(classes, (4, 4), strides=(2, 2), padding='valid')(p4)
    p4 = Cropping2D(cropping=((1, 1), (1, 1)))(p4)

    net = Conv2D(512, (3, 3), activation='relu', padding='same')(net)
    net = Conv2D(512, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), strides=(2, 2))(net)

    # Score map at 1/32 resolution, upsampled 4x and cropped to 1/8.
    p5 = Conv2D(classes, (1, 1), activation='relu')(net)
    p5 = Deconv2D(classes, (8, 8), strides=(4, 4), padding='valid')(p5)
    p5 = Cropping2D(cropping=((2, 2), (2, 2)))(p5)

    # Fuse the three streams, then upsample 8x back to the input resolution.
    fused = add([p3, p4, p5])
    net = Deconv2D(classes, (16, 16), strides=(8, 8), padding='valid')(fused)
    net = Cropping2D(cropping=((4, 4), (4, 4)))(net)

    # Per-pixel softmax over the class dimension.
    net = Reshape((input_shape[0] * input_shape[1], classes))(net)
    net = Activation("softmax")(net)
    net = Reshape((input_shape[0], input_shape[1], classes))(net)

    return Model(inputs, net)
예제 #22
0
파일: net.py 프로젝트: plplpld/varroa
def upconv_relu(filters):
    """Layer factory: a stride-2 transposed conv (3x3, 'same') with relu.

    NOTE(review): the original docstring claimed "batch norm, conv, relu",
    but no batch normalization is applied here — only the deconv with its
    relu activation.
    """

    return Deconv2D(filters=filters,
                    kernel_size=(3, 3),
                    strides=(2, 2),
                    padding="same",
                    activation=relu)
예제 #23
0
def testnet2(learningRate=0.001):
    """Build and compile 'testnet2': like 'testnet' but with PReLU activations.

    64x64x1 input, conv/deconv pairs with additive skip connections, a
    subpixel 2x upscale on both the deep and the shallow branch, and an
    additive merge before the final conv.

    Args:
        learningRate: Adam learning rate.

    Returns:
        A compiled Keras Model (loss is the project-defined loss.rmse).
    """
    print('Creating model of architecture \'testnet2\'')
    x_input = Input((64, 64, 1))

    conv1 = Conv2D(64, (5, 5), padding='same', use_bias=True,
                   name='conv1')(x_input)
    relu1 = PReLU(alpha_initializer='zeros', name='relu1')(conv1)
    conv2 = Conv2D(64, (3, 3), padding='same', use_bias=True,
                   name='conv2')(relu1)
    relu2 = PReLU(alpha_initializer='zeros', name='relu2')(conv2)

    deconv = Deconv2D(64, (3, 3), padding='same', use_bias=True,
                      name='deconv')(relu2)
    relu3 = PReLU(alpha_initializer='zeros', name='relu3')(deconv)
    add1 = Add()([relu2, relu3])

    deconv2 = Deconv2D(64, (3, 3),
                       padding='same',
                       use_bias=True,
                       name='deconv2')(add1)
    relu4 = PReLU(alpha_initializer='zeros', name='relu4')(deconv2)
    add2 = Add()([relu4, relu1])

    conv3 = Conv2D(32, (3, 3), padding='same', use_bias=True,
                   name='conv3')(add2)
    relu5 = PReLU(alpha_initializer='zeros', name='relu5')(conv3)
    # NOTE(review): relu4.shape (64 ch) is passed although this layer is
    # applied to relu5 (32 ch) — likely meant relu5.shape; confirm against
    # SubpixelConv2D's use of its shape argument before changing.
    subpix = SubpixelConv2D(relu4.shape, scale=2, name='subpix1')(relu5)

    conv1_2 = Conv2D(32, (3, 3), padding='same', use_bias=True,
                     name='conv1_2')(x_input)
    relu6 = PReLU(alpha_initializer='zeros', name='relu6')(conv1_2)
    subpix1_1 = SubpixelConv2D(conv1_2.shape, scale=2, name='subpix1_1')(relu6)

    add3 = Add()([subpix1_1, subpix])
    conv4 = Conv2D(1, (3, 3),
                   padding='same',
                   use_bias=True,
                   activation='relu',
                   name='conv4')(add3)

    y_output = conv4

    model = Model(x_input, y_output)
    adam = Adam(lr=learningRate)
    model.compile(optimizer=adam, loss=loss.rmse, metrics=['accuracy'])
    return model
예제 #24
0
def TEST(lr):
    """Build and compile an experimental 2x super-resolution model.

    An inception-style multi-kernel stage feeds a conv/deconv stack with
    residual additions, finishing with a subpixel (depth-to-space)
    upscale.

    Args:
        lr: learning rate for the Adam optimizer.

    Returns:
        A compiled keras ``Model`` taking (64, 64, 1) inputs.
    """
    x_input = Input((64, 64, 1))

    # Initial feature extraction.
    conv1 = Conv2D(64, (5, 5),
                   padding='same',
                   use_bias=True,
                   activation='relu')(x_input)

    # Inception-style branches: 1x1 / 3x3 / 5x5 kernels over the same
    # features, fused by element-wise addition.
    l1_conv1 = Conv2D(64, (1, 1),
                      padding='same',
                      use_bias=True,
                      activation='relu')(conv1)
    l1_conv2 = Conv2D(64, (3, 3),
                      padding='same',
                      use_bias=True,
                      activation='relu')(conv1)
    l1_conv3 = Conv2D(64, (5, 5),
                      padding='same',
                      use_bias=True,
                      activation='relu')(conv1)

    add1 = Add()([l1_conv1, l1_conv2, l1_conv3])

    conv2 = Conv2D(64, (5, 5),
                   padding='same',
                   use_bias=True,
                   activation='relu')(add1)
    conv3 = Conv2D(64, (5, 5),
                   padding='same',
                   use_bias=True,
                   activation='relu')(conv2)
    deconv1 = Deconv2D(64, (3, 3), padding='same', use_bias=True)(conv3)

    # Residual additions pairing each deconv with the matching conv.
    add2 = Add()([conv3, deconv1])
    deconv2 = Deconv2D(64, (3, 3), padding='same', use_bias=True)(add2)

    add3 = Add()([conv2, deconv2])
    # Renamed from a second 'conv3' assignment in the original, which
    # shadowed the conv3 tensor used in add2 above.
    conv4 = Conv2D(4, (3, 3), padding='same', use_bias=True,
                   activation='relu')(add3)
    # 4 channels / scale 2: depth-to-space yields a single-channel 2x map.
    spc1 = SubpixelConv2D(conv4.shape, scale=2)(conv4)

    model = Model(x_input, spc1)

    model.compile(loss=loss.rmse, optimizer=Adam(lr=lr), metrics=['accuracy'])

    return model
예제 #25
0
def getOCRModel():
    """Build and compile a CNN + BiLSTM OCR model.

    Encodes a (32, 512, 1) grayscale strip through four Conv-BN-LeakyReLU
    /max-pool stages, collapses it to a 17-step sequence, and decodes
    with a bidirectional LSTM into 17 per-step softmax outputs.

    Returns:
        A compiled keras ``Model`` (adam, categorical crossentropy).
    """

    def conv_bn_lrelu(x, filters, alpha):
        # Repeated encoder unit: 3x3 conv -> batch norm -> leaky ReLU.
        c = Conv2D(filters, kernel_size=3, padding='same')(x)
        return LeakyReLU(alpha=alpha)(BatchNormalization()(c))

    inp = Input((32, 32 * 16, 1))

    c1 = conv_bn_lrelu(inp, 64, 0.1)
    c1 = conv_bn_lrelu(c1, 64, 0.2)
    m1 = MaxPooling2D((2, 2))(c1)  # 16x(16*16)

    c2 = conv_bn_lrelu(m1, 96, 0.1)
    c2 = conv_bn_lrelu(c2, 96, 0.2)
    m2 = MaxPooling2D((2, 2))(c2)  # 8x(8*16)

    c3 = conv_bn_lrelu(m2, 128, 0.1)
    c3 = conv_bn_lrelu(c3, 128, 0.2)
    m3 = MaxPooling2D((2, 2))(c3)  # 4x(4*16)

    c4 = conv_bn_lrelu(m3, 256, 0.1)
    c4 = conv_bn_lrelu(c4, 256, 0.2)
    m4 = MaxPooling2D((2, 2))(c4)  # 2x(2*16)

    c5 = conv_bn_lrelu(m4, 256, 0.1)
    # (1, 3) transposed conv with 'valid' padding widens the feature map
    # by two columns (2x32 -> 2x34) so pooling lands on 17 steps.
    d = Deconv2D(256, kernel_size=(1, 3), padding='valid')(c5)
    dec = LeakyReLU(alpha=0.1)(BatchNormalization()(d))
    m5 = MaxPooling2D((2, 2))(dec)  #1x(1*17)

    # Collapse spatial dims to a (timesteps=17, features=256) sequence.
    resh = Reshape((17, 256))(m5)

    last = Conv1D(256, kernel_size=3, padding='same')(resh)
    lastConv = LeakyReLU(alpha=0.1)(BatchNormalization()(last))
    drop = Dropout(0.5)(lastConv)
    lstm = Bidirectional(LSTM(128, return_sequences=True))(drop)
    drop2 = Dropout(0.25)(lstm)
    out = TimeDistributed(Dense(17, activation='softmax'))(drop2)

    # Fixed: Model(input=..., output=...) keywords were removed in modern
    # Keras; inputs/outputs also matches the other builders in this file.
    model = Model(inputs=inp, outputs=out)
    model.compile('adam', 'categorical_crossentropy')
    model.summary()

    return model
예제 #26
0
def deconv2d_block(layers, filters, kernel_size=(4, 4), strides=2, momentum=0.8, alpha=0.2):
    """Upsampling block: transposed conv -> batch norm -> LeakyReLU.

    Args:
        layers: input tensor to upsample.
        filters: number of output filters for the transposed convolution.
        kernel_size: transposed-convolution kernel size.
        strides: stride (2 doubles the spatial resolution).
        momentum: batch-normalization momentum.
        alpha: LeakyReLU negative slope.

    Returns:
        The activated output tensor.
    """
    # Avoid shadowing the builtin `input` (the original bound it locally).
    x = Deconv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same')(layers)
    x = BatchNormalization(momentum=momentum)(x)
    return LeakyReLU(alpha=alpha)(x)
예제 #27
0
def subblock(xinput, num):
    """Residual conv/deconv sub-block ending in a 2x subpixel upscale.

    Args:
        xinput: input tensor.
        num: integer suffix used to name the subpixel layer uniquely.

    Returns:
        The 2x-upscaled output tensor of the block.
    """
    # Feature extraction with learnable PReLU activations.
    conv1 = Conv2D(64, (5, 5), padding='same', use_bias=True)(xinput)
    relu1 = PReLU(alpha_initializer='zeros')(conv1)
    conv2 = Conv2D(64, (3, 3), padding='same', use_bias=True)(relu1)
    relu2 = PReLU(alpha_initializer='zeros')(conv2)

    # Deconv stages with residual (Add) connections to earlier activations.
    deconv = Deconv2D(64, (3, 3), padding='same', use_bias=True)(relu2)
    relu3 = PReLU(alpha_initializer='zeros')(deconv)
    add1 = Add()([relu2, relu3])

    deconv2 = Deconv2D(64, (3, 3), padding='same', use_bias=True)(add1)
    relu4 = PReLU(alpha_initializer='zeros')(deconv2)
    add2 = Add()([relu4, relu1])

    conv3 = Conv2D(32, (3, 3), padding='same', use_bias=True)(add2)
    relu5 = PReLU(alpha_initializer='zeros')(conv3)
    # NOTE(review): SubpixelConv2D is configured with relu4.shape (64
    # channels) but applied to relu5 (32 channels) — confirm whether the
    # shape argument should be relu5.shape / conv3.shape instead.
    subpix = SubpixelConv2D(relu4.shape, scale=2,
                            name='subpix' + str(num))(relu5)
    return subpix
예제 #28
0
def residual_block_upscaling(input_tensor, filters, strides=(2, 2)):
    """Pre-activation bottleneck residual block that upsamples its input.

    The main path is BN-ReLU-Deconv(1x1, strided) -> BN-ReLU-Conv(3x3)
    -> BN-ReLU-Conv(1x1); a strided 1x1 transposed-conv projection of
    the input serves as the shortcut.

    Args:
        input_tensor: block input.
        filters: 3-tuple of filter counts (bottleneck-in, mid, out).
        strides: upsampling stride for the first deconv and the shortcut.

    Returns:
        Sum of the main path and the projection shortcut.
    """
    f_in, f_mid, f_out = filters

    def pre_act(t):
        # Pre-activation: batch norm followed by ReLU.
        return Activation('relu')(BatchNormalization()(t))

    y = Deconv2D(f_in, (1, 1), strides=strides)(pre_act(input_tensor))
    y = Conv2D(f_mid, (3, 3), padding='same')(pre_act(y))
    y = Conv2D(f_out, (1, 1))(pre_act(y))

    # Project and upsample the input so shapes match for the addition.
    shortcut = Deconv2D(f_out, (1, 1), strides=strides)(input_tensor)

    return add([y, shortcut])
예제 #29
0
def upsampling_conv_block(net1, net2, filters, kernel=3):
    """U-Net-style decoder block: upsample, merge with skip, refine.

    Args:
        net1: decoder tensor to upsample 2x via transposed convolution.
        net2: encoder skip tensor concatenated along channels.
        filters: filter count used throughout the block.
        kernel: convolution kernel size.

    Returns:
        The refined output tensor.
    """
    upsampled = Deconv2D(filters,
                         kernel,
                         strides=2,
                         padding='same',
                         activation='relu')(net1)
    # Channel-wise merge of the upsampled path with the skip connection.
    merged = Lambda(lambda t: K.concatenate(t, -1))([upsampled, net2])

    # Two refinement stages, each conv + batch norm.
    out = merged
    for _ in range(2):
        out = Conv2D(filters, kernel, padding='same', activation='relu')(out)
        out = BatchNormalization()(out)
    return out
예제 #30
0
def unet(inputs=None, print_summary=False):
    """Build a 6-level conv/deconv encoder-decoder (U-Net) model.

    Each encoder stage halves the resolution with a strided 5x5 conv
    (BN + LeakyReLU); each decoder stage doubles it with a strided
    transposed conv, concatenating the matching encoder features.

    Args:
        inputs: optional Input tensor. Defaults to a fresh
            Input((WINDOW_SIZE // 2, PATCH_SIZE, 1)) created per call —
            the original evaluated Input(...) once at import time
            (mutable-default pitfall) and used float division, which is
            not a valid shape dimension.
        print_summary: if True, print the model summary.

    Returns:
        A keras ``Model`` (not compiled).
    """
    if inputs is None:
        # // keeps the shape integral; / yields a float in Python 3.
        inputs = Input((WINDOW_SIZE // 2, PATCH_SIZE, 1))

    # Encoder: strided convs, channels 16 -> 512.
    conv1 = Conv2D(16, 5, strides=2, padding='same')(inputs)
    conv1 = BatchNormalization()(conv1)
    conv1 = LeakyReLU(alpha=0.2)(conv1)

    conv2 = Conv2D(32, 5, strides=2, padding='same')(conv1)
    conv2 = BatchNormalization()(conv2)
    conv2 = LeakyReLU(alpha=0.2)(conv2)

    conv3 = Conv2D(64, 5, strides=2, padding='same')(conv2)
    conv3 = BatchNormalization()(conv3)
    conv3 = LeakyReLU(alpha=0.2)(conv3)

    conv4 = Conv2D(128, 5, strides=2, padding='same')(conv3)
    conv4 = BatchNormalization()(conv4)
    conv4 = LeakyReLU(alpha=0.2)(conv4)

    conv5 = Conv2D(256, 5, strides=2, padding='same')(conv4)
    conv5 = BatchNormalization()(conv5)
    conv5 = LeakyReLU(alpha=0.2)(conv5)

    conv6 = Conv2D(512, 5, strides=2, padding='same')(conv5)
    conv6 = BatchNormalization()(conv6)
    conv6 = LeakyReLU(alpha=0.2)(conv6)

    # Decoder: transposed convs with encoder skip concatenations.
    # Dropout(0.5) on the three deepest stages only.
    deconv7 = Deconv2D(256, 5, strides=2, padding='same')(conv6)
    deconv7 = BatchNormalization()(deconv7)
    deconv7 = Dropout(0.5)(deconv7)
    deconv7 = Activation('relu')(deconv7)

    deconv8 = Concatenate(axis=3)([deconv7, conv5])
    deconv8 = Deconv2D(128, 5, strides=2, padding='same')(deconv8)
    deconv8 = BatchNormalization()(deconv8)
    deconv8 = Dropout(0.5)(deconv8)
    deconv8 = Activation('relu')(deconv8)

    deconv9 = Concatenate(axis=3)([deconv8, conv4])
    deconv9 = Deconv2D(64, 5, strides=2, padding='same')(deconv9)
    deconv9 = BatchNormalization()(deconv9)
    deconv9 = Dropout(0.5)(deconv9)
    deconv9 = Activation('relu')(deconv9)

    deconv10 = Concatenate(axis=3)([deconv9, conv3])
    deconv10 = Deconv2D(32, 5, strides=2, padding='same')(deconv10)
    deconv10 = BatchNormalization()(deconv10)
    deconv10 = Activation('relu')(deconv10)

    deconv11 = Concatenate(axis=3)([deconv10, conv2])
    deconv11 = Deconv2D(16, 5, strides=2, padding='same')(deconv11)
    deconv11 = BatchNormalization()(deconv11)
    deconv11 = Activation('relu')(deconv11)

    deconv12 = Concatenate(axis=3)([deconv11, conv1])
    deconv12 = Deconv2D(1, 5, strides=2, padding='same')(deconv12)
    deconv12 = Activation('relu')(deconv12)

    model = Model(inputs=inputs, outputs=deconv12)
    if print_summary:
        model.summary()
    return model