Exemple #1
0
    def build_model(self):
        """Build and compile the SE-augmented encoder/decoder segmentation
        network and store it on ``self.model``.

        The encoder stacks three conv blocks (32/64/128 filters), each
        followed by a squeeze-and-excitation block; the decoder mirrors it
        with skip connections and ends in a per-pixel softmax over
        ``self.num_seg_class + 1`` classes.
        """
        inputs = Input((self.patch_height, self.patch_width, 1))
        conv1 = self.encoding_block(32, strides=(3, 3), padding='same')(inputs)
        conv1 = self.se_block(ratio=2)(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = self.encoding_block(64, strides=(3, 3), padding='same')(pool1)
        conv2 = self.se_block(ratio=2)(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        conv3 = self.encoding_block(128, strides=(3, 3), padding='same')(pool2)
        conv3 = self.se_block(ratio=2)(conv3)

        up = UpSampling2D(size=(2, 2))(conv3)
        # BUG FIX: ``filters`` was an undefined name here (NameError at
        # runtime); mirror the encoder by using 64 filters at this level
        # (matching conv2) and 32 below (matching conv1).
        conv4 = decoding_block(64, strides=(3, 3), padding='same')(up,
                                                                   conv2)
        conv4 = self.se_block(ratio=2)(conv4)
        up1 = UpSampling2D(size=(2, 2))(conv4)
        conv5 = decoding_block(32, strides=(3, 3), padding='same')(up1,
                                                                   conv1)
        conv5 = self.se_block(ratio=2)(conv5)
        # NOTE(review): ``decoding_block`` is called as a free function while
        # ``encoding_block``/``se_block`` are methods — confirm it is a
        # module-level helper; otherwise it needs a ``self.`` prefix.

        conv6 = Conv2D(self.num_seg_class + 1, (1, 1), padding='same')(conv5)
        conv6 = LeakyReLU(alpha=0.3)(conv6)
        # Flatten spatial dims so softmax runs per pixel over the classes.
        conv6 = core.Reshape((self.patch_height * self.patch_width,
                              self.num_seg_class + 1))(conv6)

        act = Activation('softmax')(conv6)

        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])
        plot_model(model,
                   to_file=os.path.join(self.config.checkpoint, "model.png"),
                   show_shapes=True)
        self.model = model
Exemple #2
0
def generator(input_dim=INPUT_DIM_START, units=START_NN_NEURONCOUNT, activation='relu'):
    """DCGAN generator: dense stem, reshape to a small feature map, then
    two upsample+conv stages ending in a tanh-activated image.

    Args:
        input_dim: Dimensionality of the input noise vector.
        units: Width of the first dense layer.
        activation: Activation for the hidden layers.
    """
    net = Sequential()
    net.add(Dense(input_dim=input_dim, units=units))
    net.add(BatchNormalization())
    net.add(Activation(activation))
    # Width of the dense layer that feeds the first (W/4, H/4) feature map.
    hidden_units = (CHANNEL_OF_IMAGE * CHANNEL_COEFFICIENT * 2
                    * (WIDTH_OF_IMAGE // 4) * (HEIGHT_OF_IMAGE // 4))
    net.add(Dense(hidden_units))
    net.add(BatchNormalization())
    net.add(Activation(activation))
    net.add(Reshape((WIDTH_OF_IMAGE // 4, HEIGHT_OF_IMAGE // 4,
                     CHANNEL_OF_IMAGE * CHANNEL_COEFFICIENT * 2),
                    input_shape=(hidden_units,)))
    # At this point we hold a small tensor (e.g. 7x7x128); upsampling
    # grows its spatial size.
    net.add(UpSampling2D((UPSAMPLINGRANGE, UPSAMPLINGRANGE)))
    # Convolution over the enlarged map; this changes only the depth
    # (e.g. 14x14x128 -> 14x14x64).
    net.add(Conv2D(CHANNEL_COEFFICIENT * CHANNEL_OF_IMAGE,
                   (CONVOLUTIONRANGE, CONVOLUTIONRANGE), padding='same'))
    net.add(BatchNormalization())
    net.add(Activation(activation))
    # Second upsampling stage (e.g. -> 28x28x64).
    net.add(UpSampling2D((UPSAMPLINGRANGE, UPSAMPLINGRANGE)))
    # Collapse depth to the image channel count (e.g. -> 28x28x1).
    net.add(Conv2D(CHANNEL_OF_IMAGE,
                   (CONVOLUTIONRANGE, CONVOLUTIONRANGE), padding='same'))
    # tanh squashes output pixel values into a bounded range.
    net.add(Activation('tanh'))
    print(net.summary())
    return net
Exemple #3
0
    def __init__(self):
        """Assemble the colorization network (CNN encoder + embedding
        fusion + upsampling decoder) and store the compiled model on
        ``self.model``."""
        embed_input = Input(shape=(1000,))
        encoder_input = Input(shape=(256, 256, 1,))

        # Encoder: three stride-2 stages take 256x256x1 down to 32x32x256.
        x = Conv2D(64, (3, 3), activation='relu', padding='same', strides=2)(encoder_input)
        x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
        x = Conv2D(128, (3, 3), activation='relu', padding='same', strides=2)(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', strides=2)(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)

        # Fusion: tile the 1000-dim embedding over the 32x32 grid and
        # concatenate it with the encoder features, then mix with a 1x1 conv.
        fused = RepeatVector(32 * 32)(embed_input)
        fused = Reshape(([32, 32, 1000]))(fused)
        fused = concatenate([x, fused], axis=3)
        fused = Conv2D(256, (1, 1), activation='relu', padding='same')(fused)

        # Decoder: conv + 2x upsampling stages back toward full resolution.
        y = Conv2D(128, (3, 3), activation='relu', padding='same')(fused)
        y = UpSampling2D((2, 2))(y)
        y = Conv2D(64, (3, 3), activation='relu', padding='same')(y)
        y = UpSampling2D((2, 2))(y)
        y = Conv2D(32, (3, 3), activation='relu', padding='same')(y)
        y = Conv2D(16, (3, 3), activation='relu', padding='same')(y)
        y = Conv2D(2, (3, 3), activation='tanh', padding='same')(y)
        y = UpSampling2D((2, 2))(y)

        model = Model(inputs=[encoder_input, embed_input], outputs=y)
        model.compile(optimizer="adagrad", loss='mse')
        self.model = model
Exemple #4
0
def darknet4det4(input_shape, output_channels):
    """Darknet backbone with four YOLO-style detection heads.

    Args:
        input_shape: (height, width) of the RGB input.
        output_channels: Channel count of each detection output.

    Returns:
        A ``Model`` whose outputs are ordered by descending feature size:
        [y3, y2, y1, y4].
    """
    inputs = Input(shape=(input_shape[0], input_shape[1], 3))
    backbone = Model(inputs, darknet_body(inputs))

    # Extra downsampled branch below the backbone output.
    down = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3), strides=(1, 1)),
        DarknetConv2D_BN_Leaky(512, (3, 3), strides=(1, 1)),
        DarknetConv2D_BN_Leaky(512, (3, 3), strides=(2, 2), padding='SAME'),
    )(backbone.output)
    _, y4 = make_last_layers(down, 512, output_channels, output_layer_name="output4")

    # Head at the backbone's native resolution.
    feat, y1 = make_last_layers(backbone.output, 512, output_channels, output_layer_name='output1')

    # First upsample, merged with an intermediate backbone feature map.
    feat = compose(
        DarknetConv2D_BN_Leaky(256, (1, 1)),
        UpSampling2D(2))(feat)
    feat = Concatenate()([feat, backbone.layers[152].output])
    feat, y2 = make_last_layers(feat, 256, output_channels, output_layer_name='output2')

    # Second upsample, merged with an earlier feature map.
    feat = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(feat)
    feat = Concatenate()([feat, backbone.layers[92].output])
    feat, y3 = make_last_layers(feat, 128, output_channels, output_layer_name='output3')

    return Model(inputs, [y3, y2, y1, y4])
def construct_autoencoder():
    """Build a convolutional autoencoder for (20, 20, 2) inputs.

    Returns:
        (autoencoder, encoder, decoder): the end-to-end model plus its
        two halves as separate ``Model`` objects sharing the same layers.
    """
    input_img = Input(shape=(20, 20, 2))

    # Encoder: three conv + 'same'-padded 2x2 pooling stages,
    # 20 -> 10 -> 5 -> 3 spatially while depth goes 16 -> 8 -> 4.
    code = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    code = MaxPooling2D((2, 2), padding='same')(code)
    code = Conv2D(8, (3, 3), activation='relu', padding='same')(code)
    code = MaxPooling2D((2, 2), padding='same')(code)
    code = Conv2D(4, (3, 3), activation='relu', padding='same')(code)
    code = MaxPooling2D((2, 2), padding='same')(code)

    encoder = Model(input_img, code)

    # At this point the representation is (3, 3, 4), i.e. 36-dimensional.
    # (The original "(4, 4, 8) i.e. 128-dimensional" comment was wrong —
    # the decoder input shape below confirms (3, 3, 4).)

    decoder_input = Input(shape=(3, 3, 4))

    x = Conv2D(4, (3, 3), activation='relu', padding='same')(decoder_input)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    # No padding here on purpose: 12x12 -> 10x10, so the final upsample
    # restores the original 20x20 size.
    x = Conv2D(16, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    reconstruction = Conv2D(2, (3, 3), activation='sigmoid', padding='same')(x)

    decoder = Model(decoder_input, reconstruction)

    # Chain the two halves into the end-to-end autoencoder.
    autoencoder_input = Input(shape=(20, 20, 2))
    output = decoder(encoder(autoencoder_input))
    autoencoder = Model(autoencoder_input, output)
    opt = Adam()
    autoencoder.compile(optimizer=opt, loss='binary_crossentropy')

    return autoencoder, encoder, decoder
Exemple #6
0
def convolutional_autoencoder(spatial_size, channel_first=True):
    """Build a convolutional autoencoder for 3-channel square images.

    Args:
        spatial_size: Height/width of the (square) input image.
        channel_first: If True, inputs are (3, H, W); otherwise (H, W, 3).

    Returns:
        An uncompiled ``Sequential`` model mapping an image to a
        same-layout sigmoid reconstruction.
    """
    # BUG FIX: the original only changed ``input_shape`` for channel-first
    # inputs but left every layer in the (default) channels-last data
    # format, so the channel-first branch pooled/upsampled the wrong axes.
    # Propagate the data format to all spatial layers instead.
    data_format = 'channels_first' if channel_first else 'channels_last'
    if channel_first:
        input_shape = (3, spatial_size, spatial_size)
    else:
        input_shape = (spatial_size, spatial_size, 3)

    model = Sequential()

    # encoder
    model.add(
        Conv2D(128, (3, 3),
               activation='relu',
               input_shape=input_shape,
               padding='same',
               data_format=data_format))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2, padding='same',
                           data_format=data_format))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same',
                     data_format=data_format))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2, padding='same',
                           data_format=data_format))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same',
                     data_format=data_format))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2, padding='same',
                           data_format=data_format))

    # decoder
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same',
                     data_format=data_format))
    model.add(UpSampling2D(2, data_format=data_format))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same',
                     data_format=data_format))
    model.add(UpSampling2D(2, data_format=data_format))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same',
                     data_format=data_format))
    model.add(UpSampling2D(2, data_format=data_format))
    model.add(Conv2D(3, (3, 3), activation='sigmoid', padding='same',
                     data_format=data_format))

    return model
Exemple #7
0
def generator(input_dim=100,
              units=1024,
              activation='relu'
              ):
    """DCGAN generator: maps a noise vector to a 28x28x1 tanh image.

    Args:
        input_dim: Dimensionality of the input noise vector.
        units: Width of the first dense layer.
        activation: Activation used in the hidden layers.
    """
    net = Sequential()
    net.add(Dense(input_dim=input_dim, units=units))
    net.add(BatchNormalization())
    net.add(Activation(activation))
    net.add(Dense(128 * 7 * 7))
    net.add(BatchNormalization())
    net.add(Activation(activation))
    net.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7, )))
    # Upsampling doubles width/height; depth is unchanged (7x7 -> 14x14).
    net.add(UpSampling2D((2, 2)))
    # 5x5 convolution over the 14x14x128 map, reducing depth to 64.
    net.add(Conv2D(64, (5, 5), padding='same'))
    net.add(BatchNormalization())
    net.add(Activation(activation))
    net.add(UpSampling2D((2, 2)))
    # Collapse depth back down to a single channel.
    net.add(Conv2D(1, (5, 5), padding='same'))
    # tanh pulls output values into [-1, 1].
    net.add(Activation('tanh'))
    print(net.summary())
    # The generator is what teaches the network to synthesize images.
    return net
Exemple #8
0
def mobilenetv2_yolo_body(inputs, num_anchors, num_classes, alpha=1.0):
    """YOLOv3 head on a MobileNetV2 backbone.

    Reference feature-map sizes for a 416x416x3 input:
      conv_pw_13_relu: 13 x 13 x 1024
      conv_pw_11_relu: 26 x 26 x 512
      conv_pw_5_relu:  52 x 52 x 256

    Returns:
        A ``Model`` with three detection outputs [y1, y2, y3], coarsest first.
    """
    # Channels per detection output: boxes * (coords + objectness + classes).
    out_channels = num_anchors * (num_classes + 5)
    backbone = MobileNetV2(input_tensor=inputs, include_top=False, weights='imagenet')

    # Coarsest head on the backbone output.
    feat, y1 = make_last_layers_mobilenet(backbone.output, 17, 512, out_channels)
    feat = Conv2D(256, kernel_size=1, padding='same', use_bias=False, name='block_20_conv')(feat)
    feat = BatchNormalization(momentum=0.9, name='block_20_BN')(feat)
    feat = ReLU(6., name='block_20_relu6')(feat)
    feat = UpSampling2D(2)(feat)
    feat = Concatenate()([feat, MobilenetConv2D(backbone.get_layer('block_12_project_BN').output, (1, 1), alpha, 384)])

    # Mid-resolution head after merging with block 12 features.
    feat, y2 = make_last_layers_mobilenet(feat, 21, 256, out_channels)
    feat = Conv2D(128, kernel_size=1, padding='same', use_bias=False, name='block_24_conv')(feat)
    feat = BatchNormalization(momentum=0.9, name='block_24_BN')(feat)
    feat = ReLU(6., name='block_24_relu6')(feat)
    feat = UpSampling2D(2)(feat)
    feat = Concatenate()([feat, MobilenetConv2D(backbone.get_layer('block_5_project_BN').output, (1, 1), alpha, 128)])

    # Finest head after merging with block 5 features.
    feat, y3 = make_last_layers_mobilenet(feat, 25, 128, out_channels)

    return Model(inputs, [y1, y2, y3])
Exemple #9
0
def UNet(input_size: Tuple[int, int, int] = (128, 128, 1)) -> tf.keras.Model:
    """A standard UNet implementation in TensorFlow.

    Args:
        input_size: The size of the input tensor (height, width, channels).

    Raises:
        ValueError: Length of `input_size` is not 3.
        ValueError: `input_size`[0] or `input_size`[1] is not a multiple of 16.

    Returns:
        A TensorFlow UNet model.
    """
    _check_input_size(input_size)
    conv_config = {'activation': 'relu', 'padding': 'same', 'kernel_initializer': 'he_normal'}
    up_config = {'size': (2, 2), 'interpolation': 'bilinear'}

    def double_conv(tensor, filters):
        # Two consecutive 3x3 convolutions sharing the common config.
        tensor = Conv2D(filters, 3, **conv_config)(tensor)
        return Conv2D(filters, 3, **conv_config)(tensor)

    inputs = Input(input_size)

    # Contracting path: 64 -> 128 -> 256 -> 512 filters with 2x2 pooling.
    e1 = double_conv(inputs, 64)
    p1 = MaxPooling2D(pool_size=(2, 2))(e1)
    e2 = double_conv(p1, 128)
    p2 = MaxPooling2D(pool_size=(2, 2))(e2)
    e3 = double_conv(p2, 256)
    p3 = MaxPooling2D(pool_size=(2, 2))(e3)
    e4 = Dropout(0.5)(double_conv(p3, 512))
    p4 = MaxPooling2D(pool_size=(2, 2))(e4)

    # Bottleneck.
    bottom = Dropout(0.5)(double_conv(p4, 1024))

    def up_merge(tensor, skip, filters):
        # Bilinear upsample + 3x3 conv, concat with the encoder skip,
        # then two more 3x3 convs.
        tensor = Conv2D(filters, 3, **conv_config)(UpSampling2D(**up_config)(tensor))
        tensor = concatenate([skip, tensor], axis=-1)
        return double_conv(tensor, filters)

    # Expansive path mirroring the encoder.
    d1 = up_merge(bottom, e4, 512)
    d2 = up_merge(d1, e3, 256)
    d3 = up_merge(d2, e2, 128)
    d4 = up_merge(d3, e1, 64)

    # Per-pixel sigmoid head.
    outputs = Conv2D(1, 1, activation='sigmoid')(d4)
    return Model(inputs=inputs, outputs=outputs)
    def generator(self):
        """Lazily build the DCGAN generator and cache it on ``self.G``.

        Maps a 100-dim noise vector through a dense stem and three
        upsample + transposed-conv stages to a 64x64x1 sigmoid image.
        """
        if self.G:
            return self.G
        net = Sequential()
        drop_rate = 0.2
        depth = 256  # = 64 + 64 + 64 + 64 in the original formulation
        dim = 8

        # Dense stem reshaped into an 8x8x256 seed feature map.
        net.add(Dense(dim * dim * depth, input_dim=100))
        net.add(BatchNormalization(momentum=0.9))
        net.add(Activation('relu'))
        net.add(Reshape((dim, dim, depth)))
        net.add(Dropout(drop_rate))

        # Three upsampling stages: 8 -> 16 -> 32 -> 64, halving depth each time.
        for filters in (depth // 2, depth // 4, depth // 8):
            net.add(UpSampling2D())
            net.add(Conv2DTranspose(filters, 5, padding='same'))
            net.add(BatchNormalization(momentum=0.9))
            net.add(Activation('relu'))

        # Out: 64 x 64 x 1 grayscale image [0.0,1.0] per pix
        net.add(Conv2DTranspose(1, 5, padding='same'))
        net.add(Activation('sigmoid'))
        net.summary()
        self.G = net
        return self.G
Exemple #11
0
def SegNet(classes=1, dropout=0.4):
    """SegNet-style encoder/decoder for single-channel image segmentation.

    Args:
        classes: Number of output channels of the 1x1 sigmoid head.
            Defaults to 1, matching the original behavior.
        dropout: Dropout rate applied at the bottleneck and before the head.

    Returns:
        An uncompiled ``Model`` mapping the cropped input image to a
        per-pixel prediction map.
    """
    input_img = Input((croppedImageSize, croppedImageSize, 1), name='img')
    # Encoder
    x = conv2d_block(input_img, 64, kernel_size=3)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = conv2d_block(x, 128, kernel_size=3)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = conv2d_block(x, 256, kernel_size=3)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = conv2d_block(x, 512, kernel_size=3)
    x = Dropout(dropout)(x)
    # Decoder
    x = conv2d_block(x, 512, kernel_size=3)

    x = UpSampling2D(size=(2, 2))(x)
    x = conv2d_block(x, 256, kernel_size=3)

    x = UpSampling2D(size=(2, 2))(x)
    x = conv2d_block(x, 128, kernel_size=3)

    x = UpSampling2D(size=(2, 2))(x)
    x = conv2d_block(x, 64, kernel_size=3)
    x = Dropout(dropout)(x)
    # BUG FIX: ``classes`` was accepted but ignored (the head hard-coded a
    # single channel). Use it here; the default keeps the old behavior.
    # NOTE(review): for classes > 1 a softmax head may be preferable —
    # sigmoid is kept for backward compatibility.
    x = Conv2D(classes, (1, 1), activation='sigmoid')(x)
    model = Model(inputs=[input_img], outputs=[x])
    return model
Exemple #12
0
def create_generator():
    """Build and compile the DCGAN generator (100-dim noise -> 28x28x1 image)."""
    net = Sequential()

    # Dense stem reshaped into a 7x7x256 seed map (12544 = 7 * 7 * 256).
    net.add(Dense(12544, input_dim=100))
    net.add(BatchNormalization(momentum=0.9))
    net.add(Activation('relu'))
    net.add(Reshape((7, 7, 256)))
    net.add(Dropout(0.4))

    # Two upsampling stages (7 -> 14 -> 28), then a stride-1 refinement.
    net.add(UpSampling2D())
    net.add(Conv2DTranspose(128, 5, padding='same'))
    net.add(BatchNormalization(momentum=0.9))
    net.add(Activation('relu'))
    net.add(UpSampling2D())
    net.add(Conv2DTranspose(64, 5, padding='same'))
    net.add(BatchNormalization(momentum=0.9))
    net.add(Activation('relu'))
    net.add(Conv2DTranspose(32, 5, padding='same'))
    net.add(BatchNormalization(momentum=0.9))
    net.add(Activation('relu'))

    # Single-channel sigmoid image head.
    net.add(Conv2DTranspose(1, 5, padding='same'))
    net.add(Activation('sigmoid'))

    net.compile(optimizer=RMSprop(lr=0.0004, clipvalue=1.0, decay=3e-8),
                loss='binary_crossentropy',
                metrics=['accuracy'])

    net.summary()

    return net
Exemple #13
0
def sameshape_CNN(input_shape, use_b_norm=True):
    """Small encoder/decoder CNN with skip connections, compiled with MSE.

    Args:
        input_shape: Shape of the input tensor.
        use_b_norm: If True, insert batch normalization after each conv
            stage and before the final skip merge.

    Returns:
        A compiled ``Model`` whose output is the flattened 1-channel map.
    """
    inputs = Input(shape=input_shape)

    def bn_relu(tensor):
        # Optional batch norm followed by ReLU.
        if use_b_norm:
            tensor = BatchNormalization()(tensor)
        return Activation('relu')(tensor)

    # Encoder: 5x5 conv + 3x3 pool, then 3x3 conv + 2x2 pool.
    enc1 = bn_relu(Conv2D(32, kernel_size=(5, 5), input_shape=input_shape)(inputs))
    enc1_pool = MaxPooling2D((3, 3), strides=(3, 3))(enc1)

    enc2 = bn_relu(Conv2D(64, kernel_size=(3, 3), padding="same")(enc1_pool))
    enc2_pool = MaxPooling2D((2, 2), strides=(2, 2))(enc2)

    # Decoder: mirror the pooling with upsampling and merge skips.
    dec2 = UpSampling2D((2, 2))(enc2_pool)
    dec2 = concatenate([enc2, dec2], axis=3)

    dec1 = UpSampling2D((3, 3))(dec2)
    if use_b_norm:
        dec1 = BatchNormalization()(dec1)
    dec1 = concatenate([enc1, dec1], axis=3)

    # 1x1 conv head, flattened to a vector output.
    output = Flatten()(Conv2D(1, (1, 1), activation='relu')(dec1))
    model = Model(inputs=inputs, outputs=output)
    # Possible losses: https://www.tensorflow.org/api_docs/python/tf/losses
    model.compile(loss="mean_squared_error", optimizer='adam')
    return model
    def __init__(self, num_classes=2, l2_value=0.0):
        """Create the layers of the U-Net.

        Args:
            num_classes: Output channels of the final 1x1 convolution.
            l2_value: L2 regularization factor passed to every conv block.
        """
        super(Unet, self).__init__()

        print("Creating Unet model.")

        # Contracting path: conv blocks (presumably double-conv; see
        # unet_conv2d) interleaved with 2x2 max pooling.
        self.conv1 = unet_conv2d(64, kernel_regularizer=l2(l2_value))
        self.pool1 = MaxPooling2D(pool_size=(2, 2))

        self.conv2 = unet_conv2d(128, kernel_regularizer=l2(l2_value))
        self.pool2 = MaxPooling2D(pool_size=(2, 2))

        self.conv3 = unet_conv2d(256, kernel_regularizer=l2(l2_value))
        self.pool3 = MaxPooling2D(pool_size=(2, 2))

        self.conv4 = unet_conv2d(512, kernel_regularizer=l2(l2_value))
        self.pool4 = MaxPooling2D(pool_size=(2, 2))
        
        # Bottleneck.
        self.center = unet_conv2d(1024, kernel_regularizer=l2(l2_value))

        # Expansive path: 2x2 upsampling followed by conv blocks with
        # filter counts mirroring the encoder.
        self.up_conv5 = UpSampling2D(size=(2, 2))
        self.conv6 = unet_conv2d(512, kernel_regularizer=l2(l2_value))

        self.up_conv6 = UpSampling2D(size=(2, 2))
        self.conv7 = unet_conv2d(256, kernel_regularizer=l2(l2_value))

        self.up_conv7 = UpSampling2D(size=(2, 2))
        self.conv8 = unet_conv2d(128, kernel_regularizer=l2(l2_value))

        self.up_conv8 = UpSampling2D(size=(2, 2))
        self.conv9 = unet_conv2d(64, kernel_regularizer=l2(l2_value))

        # Final per-pixel classification layer (no activation here).
        self.conv10 = Conv2D(num_classes, (1, 1))
Exemple #15
0
 def _build_decoder(self, code_layer):
     """
     Builds a 4-layer-convolutional decoder.

     Maps the latent code through a dense stem reshaped to 4x4x128, then
     four upsample + conv stages (64/32/16/3 filters), ending in a
     sigmoid RGB output.
     """
     x = Dense(2048, activation='relu')(code_layer)
     x = Reshape((4, 4, 128))(x)
     # Three intermediate stages: upsample by POOLING_SIZE, then conv
     # with progressively fewer filters.
     for filters in (64, 32, 16):
         x = UpSampling2D(POOLING_SIZE)(x)
         x = Conv2D(filters,
                    CONVOLUTION_KERNEL_SIZE,
                    activation='relu',
                    padding='same')(x)
     # Final stage: one more upsample and a 3-channel sigmoid head.
     x = UpSampling2D(POOLING_SIZE)(x)
     return Conv2D(3,
                   CONVOLUTION_KERNEL_SIZE,
                   activation='sigmoid',
                   padding='same')(x)
def generator_model():
    """GAN generator: 100-dim noise -> 28x28x1 tanh image."""
    net = Sequential()

    net.add(Dense(1024, input_dim=100))
    net.add(Activation('tanh'))

    net.add(Dense(128 * 7 * 7))
    # Batch normalization: re-normalizes the previous layer's activations
    # towards zero mean and unit standard deviation.
    net.add(BatchNormalization())
    net.add(Activation('tanh'))

    net.add(Reshape((7, 7, 128)))

    # 2-D upsampling: repeats each row and column twice.
    net.add(UpSampling2D(size=(2, 2)))

    net.add(Conv2D(64, (5, 5), padding='same'))
    net.add(Activation('tanh'))

    net.add(UpSampling2D(size=(2, 2)))

    # A single filter here fixes the output image depth at 1.
    net.add(Conv2D(1, (5, 5), padding='same'))
    net.add(Activation('tanh'))
    return net
Exemple #17
0
def get_upsampled_signal(x):
    """Decode feature map ``x`` through four transposed-conv + LeakyReLU
    stages, with 2x upsampling after each of the first three.

    Args:
        x: Input feature tensor.

    Returns:
        The decoded tensor (8x the input's spatial size, 16 channels).
    """
    y = Conv2DTranspose(filters=64,
                        kernel_size=(3, 3),
                        strides=1,
                        padding='same',
                        name='decode_convtrans_1')(x)
    y = LeakyReLU(name='decode_relu_1')(y)
    y = UpSampling2D(size=(2, 2), name='decode_upsample_1')(y)
    # BUG FIX: this stage previously consumed ``x`` again, silently
    # discarding the entire first conv/relu/upsample stage; it must
    # chain on ``y``.
    y = Conv2DTranspose(filters=64,
                        kernel_size=(3, 3),
                        strides=1,
                        padding='same',
                        name='decode_convtrans_2')(y)
    y = LeakyReLU(name='decode_relu_2')(y)
    y = UpSampling2D(size=(2, 2), name='decode_upsample_2')(y)
    y = Conv2DTranspose(filters=32,
                        kernel_size=(3, 3),
                        strides=1,
                        padding='same',
                        name='decode_convtrans_3')(y)
    y = LeakyReLU(name='decode_relu_3')(y)
    y = UpSampling2D(size=(2, 2), name='decode_upsample_3')(y)
    y = Conv2DTranspose(filters=16,
                        kernel_size=(2, 2),
                        strides=1,
                        padding='same',
                        name='decode_convtrans_4')(y)
    y = LeakyReLU(name='decode_relu_4')(y)
    return y
Exemple #18
0
def UNet(input_size=(128, 128, 3)):
    """Creates a U-Net model.
    This U-Net model is composed of 5 "contracting blocks" and 5 "expansive blocks".

    Args:
        input_size (tuple, optional): Shape of input image. Defaults to (128, 128, 3).

    Returns:
        'Model' object: U-Net model.
    """

    conv_config = {'activation': 'relu', 'padding': 'same', 'kernel_initializer': 'he_normal'}

    def twin_conv(tensor, filters):
        # Two stacked 3x3 convolutions with the shared configuration.
        tensor = Conv2D(filters, 3, **conv_config)(tensor)
        return Conv2D(filters, 3, **conv_config)(tensor)

    inputs = Input(input_size)

    # Contracting path: 64 -> 128 -> 256 -> 512 filters with 2x2 pooling.
    c1 = twin_conv(inputs, 64)
    p1 = MaxPooling2D(pool_size=(2, 2))(c1)
    c2 = twin_conv(p1, 128)
    p2 = MaxPooling2D(pool_size=(2, 2))(c2)
    c3 = twin_conv(p2, 256)
    p3 = MaxPooling2D(pool_size=(2, 2))(c3)
    c4 = Dropout(0.5)(twin_conv(p3, 512))
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)

    # Bottleneck.
    c5 = Dropout(0.5)(twin_conv(p4, 1024))

    def expand(tensor, skip, filters):
        # 2x2 upsample + 2x2 conv, merge with the encoder skip, then
        # two 3x3 convs.
        tensor = Conv2D(filters, 2, **conv_config)(UpSampling2D(size=(2, 2))(tensor))
        tensor = concatenate([skip, tensor], axis=3)
        return twin_conv(tensor, filters)

    # Expansive path mirroring the encoder.
    c6 = expand(c5, c4, 512)
    c7 = expand(c6, c3, 256)
    c8 = expand(c7, c2, 128)
    c9 = expand(c8, c1, 64)

    # Two-channel refinement followed by the single-channel sigmoid head.
    c9 = Conv2D(2, 3, **conv_config)(c9)
    outputs = Conv2D(1, 1, activation='sigmoid')(c9)
    return Model(inputs=inputs, outputs=outputs)
Exemple #19
0
def train_net(steps, epochs):
    """Train a colorization CNN on images in ./color_images/Train/.

    The network maps the L (lightness) channel of a Lab image to its a/b
    chrominance channels (scaled by 1/128), then evaluates on a held-out
    5% split and saves the model to ./result/network.h5.

    Args:
        steps: Steps per epoch for the generator-based training loop.
        epochs: Number of training epochs.
    """
    # Load all training images into memory.
    X = []
    for filename in os.listdir('./color_images/Train/'):
        X.append(img_to_array(load_img('./color_images/Train/' + filename)))
    X = np.array(X, dtype=float)

    # 95/5 train/test split; scale RGB into [0, 1].
    split = int(0.95 * len(X))
    Xtrain = X[:split]
    Xtrain = 1.0 / 255 * Xtrain

    # Fully-convolutional encoder/decoder, so any input size is accepted.
    model = Sequential()
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(Conv2D(8, (3, 3), input_shape=(None, None, 1), activation='relu', padding='same', strides=2))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', strides=2))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    model.compile(optimizer='rmsprop', loss='mse')

    # Image augmentation for training batches.
    datagen = ImageDataGenerator(
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=20,
        horizontal_flip=True)
    batch_size = 50

    def image_a_b_gen(batch_size):
        # Yield (L-channel, ab-channels / 128) pairs from augmented batches.
        for batch in datagen.flow(Xtrain, batch_size=batch_size):
            lab_batch = rgb2lab(batch)
            X_batch = lab_batch[:, :, :, 0]
            Y_batch = lab_batch[:, :, :, 1:] / 128
            yield (X_batch.reshape(X_batch.shape + (1,)), Y_batch)

    # NOTE(review): this TensorBoard callback is created but never passed
    # to fit_generator, so it has no effect; kept for parity.
    TensorBoard(log_dir='/output')
    model.fit_generator(image_a_b_gen(batch_size), steps_per_epoch=steps, epochs=epochs)

    # Evaluate on the held-out split. FIX: convert to Lab once instead of
    # running rgb2lab over the same pixels twice (identical result).
    lab_test = rgb2lab(1.0 / 255 * X[split:])
    Xtest = lab_test[:, :, :, 0]
    Xtest = Xtest.reshape(Xtest.shape + (1,))
    Ytest = lab_test[:, :, :, 1:] / 128
    print(model.evaluate(Xtest, Ytest, batch_size=batch_size))
    model.save('./result/network.h5')
    del model
Exemple #20
0
    def __init__(self, num_classes=2, l2_value=0.0):
        """Create the layers of the attention-augmented U-Net.

        Args:
            num_classes: Output channels of the final 1x1 convolution.
            l2_value: L2 regularization factor passed to every conv block.
        """
        super(AttentionalUnet, self).__init__()

        print("Creating AttentionalUnet model.")

        # Contracting path: conv blocks (presumably double-conv; see
        # unet_conv2d) interleaved with 2x2 max pooling.
        self.conv1 = unet_conv2d(64, kernel_regularizer=l2(l2_value))
        self.pool1 = MaxPooling2D(pool_size=(2, 2))

        self.conv2 = unet_conv2d(128, kernel_regularizer=l2(l2_value))
        self.pool2 = MaxPooling2D(pool_size=(2, 2))

        self.conv3 = unet_conv2d(256, kernel_regularizer=l2(l2_value))
        self.pool3 = MaxPooling2D(pool_size=(2, 2))

        self.conv4 = unet_conv2d(512, kernel_regularizer=l2(l2_value))
        self.pool4 = MaxPooling2D(pool_size=(2, 2))

        # Bottleneck.
        self.center = unet_conv2d(1024, kernel_regularizer=l2(l2_value))

        # Gating signal feeding the attention blocks.
        self.gating = gating_signal(128)

        # AttentionBlock1 layers

        self.att1 = SubAttentionBlock(256)

        # AttentionBlock end

        # Expansive path: 2x2 upsampling + conv blocks, each stage
        # preceded by its attention block on the skip connection.
        self.up_conv5 = UpSampling2D(size=(2, 2))
        self.up_conv6 = unet_conv2d(512, kernel_regularizer=l2(l2_value))

        # AttentionBlock2 layers

        self.att2 = SubAttentionBlock(128)
        # AttentionBlock end

        self.up_conv7 = UpSampling2D(size=(2, 2))
        self.up_conv8 = unet_conv2d(256, kernel_regularizer=l2(l2_value))

        # AttentionBlock3 layers

        self.att3 = SubAttentionBlock(64)
        # AttentionBlock end

        self.up_conv9 = UpSampling2D(size=(2, 2))
        self.up_conv10 = unet_conv2d(128, kernel_regularizer=l2(l2_value))

        # AttentionBlock4 layers

        self.att4 = SubAttentionBlock(32)
        # AttentionBlock end

        self.up_conv11 = UpSampling2D(size=(2, 2))
        self.up_conv12 = unet_conv2d(64, kernel_regularizer=l2(l2_value))

        # Final per-pixel classification layer (no activation here).
        self.up_conv13 = Conv2D(num_classes, (1, 1))
Exemple #21
0
def UNet64_sigmoid_tanh(input_shape):
    """Same as UNet64_output_expansed, but partly with sigmoid and tanh
    instead of relu.

    (Translated from German. NOTE(review): despite the name, the visible
    activations are tanh in the first three encoder blocks and relu
    elsewhere — no sigmoid appears in this body.)

    Builds a 4-level encoder with tanh/relu conv blocks, a skip-connected
    upsampling decoder, and returns the compiled model whose output is the
    flattened 64x64 single-channel map (MSE loss, nadam optimizer).
    """
    inputs = Input(shape=input_shape)

    conv01 = Conv2D(10, kernel_size=(3, 3),
                    padding="same")(inputs)  # 10 x 64x64
    conv01 = Activation('tanh')(conv01)
    conv01_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv01)  # 10 x 32x32
    print("0)", conv01_pool.shape, "10 x 32x32")

    conv02 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv01_pool)  # 20 x 32x32
    conv02 = Activation('tanh')(conv02)
    conv02_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv02)  # 20 x 16x16
    print("1)", conv02_pool.shape, "20 x 16x16")

    conv03 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv02_pool)  # 20 x 16x16
    conv03 = Activation('tanh')(conv03)
    conv03_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv03)  # 20 x 8x8
    print("2)", conv03_pool.shape, "20 x 8x8")

    conv04 = Conv2D(20, kernel_size=(3, 3),
                    padding="same")(conv03_pool)  # 20 x 8x8
    conv04 = Activation('relu')(conv04)
    conv04_pool = MaxPooling2D((2, 2), strides=(2, 2))(conv04)  # 20 x 4x4
    print("3)", conv04_pool.shape, "20 x 4x4")

    ### UPSAMPLING:
    up04 = UpSampling2D((2, 2))(conv04_pool)  # 20 x 8x8
    up04 = concatenate([conv04, up04], axis=3)  # 20+20 x 8x8
    print("4)", up04.shape, "40 x 8x8")

    up03 = UpSampling2D((2, 2))(up04)  # 40 x 16x16
    up03 = concatenate([conv03, up03], axis=3)  # 20+40 x 16x16
    print("5)", up03.shape, "60 x 16x16")

    up02 = UpSampling2D((2, 2))(up03)  # 60 x 32x32
    up02 = concatenate([conv02, up02], axis=3)  # 20+60 x 32x32
    print("6)", up02.shape, "80 x 32x32")

    up01 = UpSampling2D((2, 2))(up02)  # 80 x 64x64
    up01 = concatenate([conv01, up01], axis=3)  # 15+80 x 64x64
    print("7)", up01.shape, "95 x 64x64")

    output = Conv2D(1, (3, 3), activation='relu',
                    padding="same")(up01)  # 1 x 64x64
    # output = Activation('tanh')(output)
    print("8)", output.shape, "1 x 64x64")
    output = Flatten()(output)
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss="mean_squared_error", optimizer='nadam')
    # ToDo: try Nesterov Adam optimizer (nadam)
    # http://proceedings.mlr.press/v28/sutskever13.pdf
    return model
Exemple #22
0
def Colorize():
    """Build a colorization network: a grayscale (L-channel) encoder fused
    with a 1000-d embedding vector, decoded to two chrominance channels.

    Returns:
        Keras Model taking [256x256x1 image, 1000-d embedding] and
        producing a 256x256x2 tanh-activated output.
    """
    embed_input = Input(shape=(1000, ))

    def conv(filters, x, activation='relu', strides=1):
        # Shared shorthand for the 'same'-padded 3x3 Conv2D blocks below.
        return Conv2D(filters, (3, 3), activation=activation,
                      padding='same', strides=strides)(x)

    # Encoder: 256x256x1 -> 32x32x256 via three stride-2 stages.
    encoder_input = Input(shape=(256, 256, 1))
    feat = conv(64, encoder_input, strides=2)
    feat = conv(128, feat)
    feat = conv(128, feat, strides=2)
    feat = conv(256, feat)
    feat = conv(256, feat, strides=2)
    feat = conv(512, feat)
    feat = conv(512, feat)
    feat = conv(256, feat)

    # Fusion: tile the embedding across the 32x32 grid, concatenate with
    # the encoder features, and mix with a 1x1 conv.
    fused = RepeatVector(32 * 32)(embed_input)
    fused = Reshape(([32, 32, 1000]))(fused)
    fused = concatenate([feat, fused], axis=3)
    fused = Conv2D(256, (1, 1), activation='relu', padding='same')(fused)

    # Decoder: upsample back to 256x256 and emit 2 tanh channels.
    out = conv(128, fused)
    out = UpSampling2D((2, 2))(out)
    out = conv(64, out)
    out = UpSampling2D((2, 2))(out)
    out = conv(32, out)
    out = conv(16, out)
    out = conv(2, out, activation='tanh')
    out = UpSampling2D((2, 2))(out)
    return Model(inputs=[encoder_input, embed_input], outputs=out)
Exemple #23
0
def CMNet(input_size=(256, 256, 3)):
    """Build and compile CMNet: a U-Net-style encoder/decoder with a CACP
    bottleneck and three deeply-supervised side outputs.

    Args:
        input_size: input image shape as (height, width, channels).

    Returns:
        Compiled Keras model mapping an image to a single-channel sigmoid
        mask ("result"). The custom loss also supervises the three
        intermediate predictions via the lossLay1-3 tensors captured in
        the Weighted_Diceloss closure.
    """
    inputs = Input(input_size)

    # Encoder: three conv-conv-pool stages (64 -> 128 -> 256 filters),
    # each halving the spatial resolution.
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    # Bottleneck: two 512-filter convs, dropout, CACP module at 1024
    # filters, dropout, then two more 512-filter convs.
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    drop4 = Dropout(0.2)(conv4)
    conv5 = CACP(drop4, 1024)
    drop5 = Dropout(0.2)(conv5)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(drop5)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)

    # Side output 1 (1/8 resolution): lossLay1 upsamples it 8x for deep
    # supervision; pred1 (2x) is concatenated into the next decoder stage.
    pred1 = Conv2D(1, 1, activation='sigmoid', name="pred1")(conv6)
    lossLay1 = UpSampling2D(size=(8, 8), name="lossLay1", interpolation='bilinear')(pred1)
    pred1 = UpSampling2D(interpolation='bilinear')(pred1)

    # Decoder stage 1: upsample, fuse with encoder skip conv3 and pred1.
    up7 = Conv2D(256, 1, activation='relu', strides=1, padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2), interpolation='bilinear')(conv6))
    merge7 = concatenate([conv3, up7, pred1])
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)

    # Side output 2 (1/4 resolution).
    pred2 = Conv2D(1, 1, activation='sigmoid', name="pred2")(conv7)
    lossLay2 = UpSampling2D(size=(4, 4), name="lossLay2", interpolation='bilinear')(pred2)
    pred2 = UpSampling2D(interpolation='bilinear')(pred2)

    # Decoder stage 2: fuse with encoder skip conv2 and pred2.
    up8 = Conv2D(128, 1, activation='relu', strides=1, padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2), interpolation='bilinear')(conv7))
    merge8 = concatenate([conv2, up8, pred2])
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

    # Side output 3 (1/2 resolution).
    pred3 = Conv2D(1, 1, activation='sigmoid', name="pred3")(conv8)
    lossLay3 = UpSampling2D(size=(2, 2), name="lossLay3", interpolation='bilinear')(pred3)
    pred3 = UpSampling2D(interpolation='bilinear')(pred3)

    # Decoder stage 3: fuse with encoder skip conv1 and pred3.
    up9 = Conv2D(64, 1, activation='relu', strides=1, padding='same', kernel_initializer='he_normal')(
        UpSampling2D(size=(2, 2), interpolation='bilinear')(conv8))
    merge9 = concatenate([conv1, up9, pred3])
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)

    result = Conv2D(1, 1, activation='sigmoid', name="result")(conv9)  # prediction result 4
    # The model exposes only `result`; the side outputs reach the loss
    # through this closure instead of extra model outputs.
    layers = [lossLay1, lossLay2, lossLay3]
    model = Model(inputs=inputs, outputs=result)
    myloss = Weighted_Diceloss(layers)
    # NOTE(review): `lr` is the legacy kwarg name; recent Keras releases
    # use `learning_rate` — confirm against the pinned Keras version.
    model.compile(optimizer=Adam(lr=1e-4), loss=myloss, metrics=['accuracy'])
    model.summary()
    return model
Exemple #24
0
def UNet64_output_expansed(input_shape):
    """Same as UNet64, but the output layer uses a 3x3 kernel."""
    inputs = Input(shape=input_shape)

    # Contracting path: four conv+relu+pool stages; keep each pre-pool
    # activation as a skip connection for the expanding path.
    stage_specs = [
        (10, "10 x 32x32"),
        (20, "20 x 16x16"),
        (20, "20 x 8x8"),
        (20, "20 x 4x4"),
    ]
    skips = []
    x = inputs
    for idx, (n_filters, note) in enumerate(stage_specs):
        x = Conv2D(n_filters, kernel_size=(3, 3), padding="same")(x)
        x = Activation('relu')(x)
        skips.append(x)
        x = MaxPooling2D((2, 2), strides=(2, 2))(x)
        print(f"{idx})", x.shape, note)

    ### UPSAMPLING:
    # Expanding path: upsample and concatenate with the matching skip,
    # deepest skip first.
    up_notes = ["40 x 8x8", "60 x 16x16", "80 x 32x32", "90 x 64x64"]
    for idx, (skip, note) in enumerate(zip(reversed(skips), up_notes), start=4):
        x = UpSampling2D((2, 2))(x)
        x = concatenate([skip, x], axis=3)
        print(f"{idx})", x.shape, note)

    output = Conv2D(1, (3, 3), activation='relu',
                    padding="same")(x)  # 1 x 64x64
    print("8)", output.shape, "1 x 64x64")
    output = Flatten()(output)
    model = Model(inputs=inputs, outputs=output)
    model.compile(loss="mean_squared_error", optimizer='adam')
    return model
Exemple #25
0
def upsample(inp,
             factor,
             nchannels,
             config,
             bn=None,
             activation=None,
             upsampling='bilinear',
             residual=False):
    """Upsample `inp` by `factor` using the selected strategy.

    Args:
        inp: input tensor.
        factor: spatial upsampling factor (also used as the conv kernel size).
        nchannels: number of output channels.
        config: extra kwargs forwarded to the Conv2D call (may contain
            'activation'); temporarily modified but always restored.
        bn: None for no batch norm, 'before' to apply BN before the
            activation, any other truthy value to apply BN after.
        activation: callable fallback activation used in 'before' mode
            when config carries no activation name.
        upsampling: 'bilinear' | 'nearest' | 'conv' | 'subpixel'.
        residual: if True, add a Conv2DTranspose shortcut to the output.

    Raises:
        NotImplementedError: for an unsupported `upsampling` mode.
    """
    # BUGFIX: in 'before' mode the activation must be stripped from the
    # conv *before* the layer is constructed (the original nulled it
    # afterwards, so the activation ran twice: in the conv and after BN).
    act = None
    if bn == 'before':
        act = config.get('activation')
        config['activation'] = None

    try:
        if residual:
            r1 = UpSampling2D(size=(factor, factor),
                              interpolation=upsampling)(inp)
            up = Conv2D(nchannels, factor, **config)(r1)
            r2 = Conv2DTranspose(nchannels,
                                 factor,
                                 strides=(factor, factor),
                                 padding='same')(inp)
        elif upsampling in ['bilinear', 'nearest']:
            up = UpSampling2D(size=(factor, factor),
                              interpolation=upsampling)(inp)
            up = Conv2D(nchannels, factor, **config)(up)
        elif upsampling == 'conv':
            up = Conv2DTranspose(nchannels,
                                 factor,
                                 strides=(factor, factor),
                                 padding='same')(inp)
        elif upsampling == 'subpixel':
            up = SubPixelConv2D(upsample_factor=factor,
                                nchannels=nchannels)(inp)
        else:
            # Raise an instance with context, not the bare class.
            raise NotImplementedError(
                "unsupported upsampling mode: {!r}".format(upsampling))
    finally:
        # Restore the caller's config dict even if layer construction fails.
        if bn == 'before':
            config['activation'] = act

    if bn:
        up = BatchNormalization()(up)

        if bn == 'before':
            if act:
                up = Activation(act)(up)
            else:
                up = activation(up)

    if residual:
        up = up + r2

    return up
Exemple #26
0
    def build_model(self):
        """Build and compile the frame-prediction model: a conv tower over
        the screen image and a dense tower over the action vector, fused
        and decoded back to a screen-sized tensor.

        Returns:
            Compiled Keras model with inputs [action, image].
        """
        state_size = (self.config.screen_width, self.config.screen_height,
                      self.config.screen_dim)
        action_size = self.config.action_size

        action_input = Input(shape=(action_size, ), name="action")
        image_input = Input(shape=state_size, name="image")

        # Convolutional tower embedding the screen into a 64-d vector.
        vision_net = Sequential()
        vision_net.add(
            Conv2D(256, (4, 4),
                   strides=(2, 2),
                   activation="relu",
                   input_shape=state_size))
        vision_net.add(Conv2D(128, (3, 3), strides=(2, 2), activation="relu"))
        vision_net.add(Conv2D(64, (1, 1), strides=(2, 2), activation="relu"))
        vision_net.add(Flatten())
        vision_net.add(Dense(64, activation="relu"))
        vision_net.add(Dense(64, activation="relu"))

        # Dense tower embedding the action vector.
        action_net = Sequential()
        action_net.add(
            Dense(64, activation="relu", input_shape=(action_size, )))
        action_net.add(Dense(64, activation="relu"))

        # Fuse both embeddings, project to a 21x21x128 feature map, then
        # decode with two upsampling stages back to screen depth.
        h = concatenate([vision_net(image_input), action_net(action_input)])
        h = Dense(128 * 21 * 21, activation="relu")(h)
        h = Reshape((21, 21, 128))(h)

        h = UpSampling2D()(h)
        h = Conv2D(128, (3, 3), padding="same", activation="relu")(h)

        h = UpSampling2D()(h)
        h = Conv2D(64, (3, 3), padding="same", activation="relu")(h)

        h = Conv2D(self.config.screen_dim, (3, 3),
                   padding='same',
                   activation="relu")(h)

        model = Model(inputs=[action_input, image_input], outputs=[h])
        model.compile(optimizer=Adam(0.00001),
                      loss=['mse'],
                      metrics=['accuracy'])

        return model
Exemple #27
0
    def init_model(self):
        """Build a small upsampling CNN from a 3x3xCHAT_HISTORY_LENGTH
        input to an 11x11x4 output and store it on the instance."""
        inp = Input(shape=(3, 3, CHAT_HISTORY_LENGTH))
        h = Conv2D(128, 3, padding="same", activation="relu")(inp)  # 3x3x128
        h = UpSampling2D()(h)  # -> 6x6
        h = Conv2D(32, 3, padding="same", activation="relu")(h)
        h = UpSampling2D()(h)  # -> 12x12
        # Trim one row and one column: 12x12 -> 11x11.
        h = Cropping2D(cropping=((0, 1), (0, 1)))(h)
        h = Conv2D(8, 3, padding="same", activation="relu")(h)
        out = Conv2D(4, 3, padding="same", activation="relu")(h)

        self.model = tf.keras.models.Model(inputs=inp, outputs=out)
Exemple #28
0
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 model CNN body in Keras.

    Args:
        inputs: input image tensor.
        num_anchors: anchors per detection scale.
        num_classes: number of object classes.

    Returns:
        Keras Model producing the three multi-scale outputs [y1, y2, y3].
    """
    darknet = Model(inputs, darknet_body(inputs))
    # Per-scale prediction depth: boxes(4) + objectness(1) + classes.
    pred_depth = num_anchors * (num_classes + 5)

    # Scale 1: head directly off the darknet output.
    # (make_last_layers' third return value is unused here.)
    x, y1, _ = make_last_layers(darknet.output, 512, pred_depth, 1)

    # Scale 2: upsample and merge with the layer-152 skip connection.
    x = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[152].output])
    x, y2, _ = make_last_layers(x, 256, pred_depth, 2)

    # Scale 3: upsample again and merge with the layer-92 skip connection.
    x = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[92].output])
    x, y3, _ = make_last_layers(x, 128, pred_depth, 3)

    return Model(inputs, [y1, y2, y3])
Exemple #29
0
def FCN16(nClasses, input_height, input_width):
    """Build an FCN-16s segmentation head on top of a pretrained FCN-32 body.

    Args:
        nClasses: number of output classes for the final per-pixel softmax.
        input_height, input_width: nominal input size.

    Returns:
        Keras Model mapping the FCN-32 input to (pixels, nClasses) softmax.

    NOTE(review): the backbone is hard-coded as FCN32(11, 320, 320) with
    weights from "model.h5"; input_height/input_width are NOT used to size
    the backbone — confirm whether they should be forwarded.
    """
    # model = vgg16.VGG16(include_top=False,weights='imagenet',input_tensor=img_input)
    # VGG without its fully-connected head yields a 7x7x512 feature map
    # (5 blocks: 64, 128, 256, 512, 512 filters, 3x3 kernels).
    model = FCN32(11, 320, 320)
    model.load_weights("model.h5")

    # FCN-16s fusion: 2x-upsample fc7 and add it to the block4 features.
    skip1 = Conv2DTranspose(512,
                            kernel_size=(3, 3),
                            strides=(2, 2),
                            padding='same',
                            kernel_initializer="he_normal",
                            name="upsampling6")(model.get_layer("fc7").output)
    summed = add(inputs=[skip1, model.get_layer("block4_pool").output])
    # Bilinearly upsample the fused map 16x back toward input resolution.
    up7 = UpSampling2D(size=(16, 16),
                       interpolation='bilinear',
                       name='upsamping_7')(summed)
    o = Conv2D(nClasses,
               kernel_size=(3, 3),
               activation='relu',
               padding='same',
               name='conv_7')(up7)

    # Flatten spatial dims to (pixels, classes) and softmax per pixel.
    o = Reshape((-1, nClasses))(o)
    o = Activation("softmax")(o)
    fcn16 = Model(model.input, o)
    return fcn16
Exemple #30
0
def tiny_yolo_body(inputs, num_anchors, num_classes):
    '''Create Tiny YOLO_v3 model CNN body in keras.'''
    # Per-scale prediction depth: boxes(4) + objectness(1) + classes.
    pred_depth = num_anchors * (num_classes + 5)

    # Backbone, first half: four conv/pool stages then a 256-filter conv;
    # the result also serves as the skip route for the second head.
    route = inputs
    for filters in (16, 32, 64, 128):
        route = DarknetConv2D_BN_Leaky(filters, (3, 3))(route)
        route = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                             padding='same')(route)
    route = DarknetConv2D_BN_Leaky(256, (3, 3))(route)

    # Backbone, second half: the stride-1 pool keeps spatial resolution.
    deep = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                        padding='same')(route)
    deep = DarknetConv2D_BN_Leaky(512, (3, 3))(deep)
    deep = MaxPooling2D(pool_size=(2, 2), strides=(1, 1),
                        padding='same')(deep)
    deep = DarknetConv2D_BN_Leaky(1024, (3, 3))(deep)
    deep = DarknetConv2D_BN_Leaky(256, (1, 1))(deep)

    # Coarse-scale detection head.
    y1 = DarknetConv2D_BN_Leaky(512, (3, 3))(deep)
    y1 = DarknetConv2D(pred_depth, (1, 1))(y1)

    # Fine-scale head: reduce, upsample, merge with the skip route.
    up = DarknetConv2D_BN_Leaky(128, (1, 1))(deep)
    up = UpSampling2D(2)(up)
    y2 = Concatenate()([up, route])
    y2 = DarknetConv2D_BN_Leaky(256, (3, 3))(y2)
    y2 = DarknetConv2D(pred_depth, (1, 1))(y2)

    return Model(inputs, [y1, y2])