예제 #1
0
    def init_model(self, out_activation, l1, l2, **kwargs):
        """
        Construct the 2D UNet graph for ``self.img_shape`` input images.

        OBS: Depending on image dim cropping may be necessary between
        layers, and the output may be smaller than the input;
        ``self.label_crop`` (set by ``crop_nodes_to_match``) records how
        many pixels must be cropped from the target labels to compare
        correctly.

        Parameters
        ----------
        out_activation : str
            Activation applied by the final 1x1 classification conv.
        l1 : float or None
            L1 regularization factor applied to layer activity
            (``activity_regularizer``); falsy disables it.
        l2 : float or None
            L2 regularization factor applied to convolution weights
            (``kernel_regularizer``); falsy disables it.

        Returns
        -------
        ([inputs], [out])
            Lists holding the single input and output tensors.
        """
        inputs = Input(shape=self.img_shape)

        # Regularizers are only instantiated when the factor is non-zero.
        kernel_reg = regularizers.l2(l2) if l2 else None
        activity_reg = regularizers.l1(l1) if l1 else None

        def double_conv(tensor, filters):
            """Two 3x3 same-padded ReLU convs followed by batch norm."""
            for _ in range(2):
                tensor = Conv2D(int(filters * self.cf), 3,
                                activation='relu', padding='same',
                                activity_regularizer=activity_reg,
                                kernel_regularizer=kernel_reg)(tensor)
            return BatchNormalization()(tensor)

        def up_conv(tensor, filters):
            """2x up-sampling, then a 2x2 ReLU conv and batch norm."""
            tensor = UpSampling2D(size=(2, 2))(tensor)
            tensor = Conv2D(int(filters * self.cf), 2,
                            activation='relu', padding='same',
                            activity_regularizer=activity_reg,
                            kernel_regularizer=kernel_reg)(tensor)
            return BatchNormalization()(tensor)

        # Contracting path (shapes for a 256x256 input):
        # 256 -> 128 -> 64 -> 32 -> 16 spatial, doubling filters each stage.
        bn1 = double_conv(inputs, 64)
        pool1 = MaxPooling2D(pool_size=(2, 2))(bn1)
        bn2 = double_conv(pool1, 128)
        pool2 = MaxPooling2D(pool_size=(2, 2))(bn2)
        bn3 = double_conv(pool2, 256)
        pool3 = MaxPooling2D(pool_size=(2, 2))(bn3)
        bn4 = double_conv(pool3, 512)
        pool4 = MaxPooling2D(pool_size=(2, 2))(bn4)

        # Bottom of the U: [16, 16, 512] -> [16, 16, 1024]
        current = double_conv(pool4, 1024)

        # Expanding path: each stage up-samples, concatenates the
        # (possibly cropped) skip connection, then applies a double conv.
        for skip, filters in ((bn4, 512), (bn3, 256), (bn2, 128), (bn1, 64)):
            up = up_conv(current, filters)
            cropped_skip = self.crop_nodes_to_match(skip, up)
            merged = Concatenate(axis=-1)([cropped_skip, up])
            current = double_conv(merged, filters)

        # Output modeling layer: 1x1 conv onto n_classes channels.
        out = Conv2D(self.n_classes, 1, activation=out_activation)(current)

        return [inputs], [out]
예제 #2
0
def train_net():
    '''
    Build and train the human-parsing segmentation network.

    NOTE: Python 2 source -- uses the ``print`` statement and the
    Keras 1 API (``Convolution2D``, ``border_mode``, ``nb_epoch``,
    ``Model(input=..., output=...)``).

    :return: None.  Side effects: trains ``model``, checkpoints the best
        weights seen so far to ``<model_description>.hdf5`` via
        ``ModelCheckpoint``, and saves the final weights to the same file.
    '''

    # Base name of the weight/checkpoint file written below.
    model_description = 'parsing_model_weights'

    size_batch = 8  #images[0].shape[-1]
    print size_batch

    # The dataset is consumed in `dataset_Nof_steps` portions per epoch
    # (see the mini_batch() calls inside the training loop).
    dataset_Nof_steps = 10
    # Effectively "train forever"; the checkpoint callback keeps the best
    # weights, and the final save_weights below is unreachable in practice.
    epoches_number = 1000000
    overwrite_weights = True
    testing_amount = 0.01
    # -----------------
    # Net META parameters:
    # main_Nkernels_down = 32
    # main_Nkernels_up = 64
    # funnel_down_layers_n = 5
    # funnel_up_layers_n = 3
    # fc_layers_n = 2
    Nmain_fc_neurons = 2**12  # NOTE(review): unused below -- kept for reference
    act = 'relu'
    pre_up_x_y = 16  # NOTE(review): unused below -- kept for reference
    # -----------------
    # Small probe batch, apparently fetched only to discover the
    # input image / target mask tensor shapes.
    (X_train, Y_train), (X_test, Y_test) = mini_batch((0.0, 0.001),
                                                      0.1)  #testing_amount)
    max_shape_images = X_train[0].shape
    max_shape_masks = Y_train[0].shape
    print max_shape_images, max_shape_masks

    ######################################################
    # funnel down net:
    # W_regularizer=l1l2(l1=0.0001, l2=0.0001), b_regularizer=None, activity_regularizer=activity_l1l2(l1=0.0001, l2=0.0001)
    input_img = Input(shape=max_shape_images)
    conv = BatchNormalization()(input_img)

    # Encoder: four conv -> 2x2 max-pool stages (64, 128, 256, 512 kernels),
    # halving the spatial resolution at each stage.
    conv = Convolution2D(64, 5, 5, border_mode='same')(conv)
    # conv = BatchNormalization()(conv)
    conv = Activation(act)(conv)
    conv = MaxPooling2D((2, 2), strides=(2, 2))(conv)
    # conv = Dropout(0.0)(conv)

    conv = Convolution2D(128, 3, 3, border_mode='same')(conv)
    # conv = BatchNormalization()(conv)
    conv = Activation(act)(conv)
    conv = MaxPooling2D((2, 2), strides=(2, 2))(conv)
    # conv = Dropout(0.05)(conv)

    conv = Convolution2D(256, 3, 3, border_mode='same')(conv)
    # conv = BatchNormalization()(conv)
    conv = Activation(act)(conv)
    conv = MaxPooling2D((2, 2), strides=(2, 2))(conv)
    # conv = Dropout(0.1)(conv)

    conv = Convolution2D(512, 3, 3, border_mode='same')(conv)
    # conv = BatchNormalization()(conv)
    conv = Activation(act)(conv)
    conv = MaxPooling2D((2, 2), strides=(2, 2))(conv)
    # conv = Dropout(0.15)(conv)

    # conv = Convolution2D(128, 1, 1, border_mode='same')(conv)
    # # conv = BatchNormalization()(conv)
    # conv = Activation(act)(conv)
    # # conv = MaxPooling2D((2, 2), strides=(2, 2))(conv)

    # conv = Convolution2D(1024, 1, 1, border_mode='same')(conv)
    # # conv = BatchNormalization()(conv)
    # conv = Activation(act)(conv)
    # conv = MaxPooling2D((2, 2), strides=(2, 2))(conv)

    # Bottleneck: flatten into two dense layers, then reshape back to a
    # feature map.  NOTE(review): Python 2 integer division -- 128 / 2 == 64,
    # so the Reshape target is (4, 64, 64) and 4 * 64 * 64 == 16384 matches
    # the Dense layer above.  The (channels, rows, cols) ordering assumes a
    # Theano-style image_dim_ordering -- confirm against the Keras config.
    fc = Flatten()(conv)
    fc = Dense(2 * 4096, activation=act)(fc)
    # fc = Dense(4096, activation=act)(fc)
    # fc = Dense(4096, activation=act)(fc)
    # fc = Dense(2*4096, activation=act)(fc)
    fc = Dense(16384, activation=act)(fc)
    conv = Reshape((4, 128 / 2, 128 / 2))(fc)

    conv = Convolution2D(256, 9, 9, border_mode='same')(conv)
    # conv = BatchNormalization()(conv)
    conv = Activation(act)(conv)

    # Decoder: single 2x up-sampling followed by progressively smaller
    # kernels, ending in a 23-channel per-pixel class map.
    conv = UpSampling2D((2, 2))(conv)
    conv = Convolution2D(256, 3, 3, border_mode='same')(conv)
    # conv = BatchNormalization()(conv)
    conv = Activation(act)(conv)

    conv = Convolution2D(256, 2, 2, border_mode='same')(conv)
    # # conv = BatchNormalization()(conv)
    conv = Activation(act)(conv)

    conv = Convolution2D(64, 1, 1, border_mode='same')(conv)
    # conv = BatchNormalization()(conv)
    conv = Activation(act)(conv)

    conv = Convolution2D(23, 1, 1, border_mode='same')(conv)
    #    conv = Reshape((max_shape_masks[0], max_shape_masks[1]*max_shape_masks[2]))(conv)
    # Flatten the spatial dims and move classes last so the softmax is
    # applied independently per pixel over the 23 classes.
    conv = Reshape((23, 128 * 128))(conv)
    conv = Permute((2, 1))(conv)
    conv = Activation('softmax')(conv)

    model = Model(input=input_img, output=conv)
    model.summary()

    optimizer_method = 'adam'  #SGD(lr=1e-1, decay=1e-6, momentum=0.9, nesterov=True)#Adagrad()#Adadelta()#RMSprop()#Adam()#Adadelta()#
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer_method,
                  metrics=['accuracy'])
    # binary_crossentropy
    # categorical_crossentropy

    ############################################################################################
    # if previus file exist:
    # if os.path.isfile(model_description + '.hdf5'):
    #     print 'loading weights file: ' + os.path.join(model_description + '.hdf5')
    #     model.load_weights(model_description + '.hdf5')
    ############################################################################################

    # NOTE(review): this EarlyStopping instance is created but never assigned
    # or passed to fit()'s callbacks, so as written it has no effect.
    EarlyStopping(monitor='val_loss', patience=0,
                  verbose=1)  #monitor='val_acc'
    checkpointer = ModelCheckpoint(model_description + '.hdf5',
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=True)

    # # this will do preprocessing and realtime data augmentation
    # datagen = ImageDataGenerator(
    #     featurewise_center=False,  # set input mean to 0 over the dataset
    #     samplewise_center=False,  # set each sample mean to 0
    #     featurewise_std_normalization=False,  # divide inputs by std of the dataset
    #     samplewise_std_normalization=False,  # divide each input by its std
    #     zca_whitening=False,  # apply ZCA whitening
    #     rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
    #     width_shift_range=0,  # randomly shift images horizontally (fraction of total width)
    #     height_shift_range=0,  # randomly shift images vertically (fraction of total height)
    #     horizontal_flip=False,  # randomly flip images
    #     vertical_flip=False)  # randomly flip images
    # # compute quantities required for featurewise normalization
    # # (std, mean, and principal components if ZCA whitening is applied)
    for epoch_No in range(epoches_number):
        for step in range(dataset_Nof_steps):
            # Select the [p0, p1) fraction of the dataset for this step,
            # sweeping the whole dataset once per outer epoch.
            p0 = 1. * step / dataset_Nof_steps
            p1 = p0 + 1. / dataset_Nof_steps
            portion = (p0, p1)
            (X_train,
             Y_train), (X_validation,
                        Y_validation) = mini_batch(portion, testing_amount)
            print 'step No: ', step + 1, '/', dataset_Nof_steps, '...  @ epoch No: ', epoch_No + 1
            # One pass (nb_epoch=1) over the current portion; the
            # checkpointer persists weights whenever val_loss improves.
            model.fit(X_train,
                      Y_train,
                      batch_size=size_batch,
                      nb_epoch=1,
                      verbose=1,
                      callbacks=[checkpointer],
                      validation_split=0.0,
                      validation_data=(X_validation, Y_validation),
                      shuffle=True,
                      class_weight=None,
                      sample_weight=None)
            #
            # datagen.fit(X_train)
            # # fit the model on the batches generated by datagen.flow()
            # model.fit_generator(datagen.flow(X_train, Y_train, shuffle=True, batch_size=size_batch),
            #                     nb_epoch=1, verbose=1, validation_data=(X_validation, Y_validation),
            #                     callbacks=[checkpointer], class_weight=None, max_q_size=10, samples_per_epoch=len(X_validation))
            # model.train_on_batch(X_train, Y_train)
            # model.test_on_batch(X_test, Y_test)

    # NOTE(review): unreachable in practice -- the loop above runs
    # epoches_number (1e6) epochs before this save executes.
    model.save_weights(model_description + '.hdf5', overwrite_weights)
예제 #3
0
    BatchNormalization(),
    Activation('relu'),
    Convolution2D(512, kernel, kernel, border_mode='same'),
    BatchNormalization(),
    Activation('relu'),
    MaxPooling2D(),
]

# Assemble the encoder half of the autoencoder from the layer list above.
autoencoder = models.Sequential()
# Keep a handle to the encoder layers on the model object itself.
autoencoder.encoding_layers = encoding_layers
for enc_layer in autoencoder.encoding_layers:
    autoencoder.add(enc_layer)

decoding_layers = [
    UpSampling2D(),
    Convolution2D(512, kernel, kernel, border_mode='same'),
    BatchNormalization(),
    Activation('relu'),
    Convolution2D(512, kernel, kernel, border_mode='same'),
    BatchNormalization(),
    Activation('relu'),
    Convolution2D(512, kernel, kernel, border_mode='same'),
    BatchNormalization(),
    Activation('relu'),
    UpSampling2D(),
    Convolution2D(512, kernel, kernel, border_mode='same'),
    BatchNormalization(),
    Activation('relu'),
    Convolution2D(512, kernel, kernel, border_mode='same'),
    BatchNormalization(),
예제 #4
0
def build_generator():
    """
    Build the conditional DCGAN generator for the configured dataset.

    The generator maps a latent noise vector, conditioned on a class label
    through an embedding, to a generated image.  The two supported
    datasets share the same architecture; only the starting spatial size
    (7x7 for MNIST, 8x8 for CIFAR-10) and the hidden activation
    (LeakyReLU for MNIST, plain ReLU for CIFAR-10) differ, so the
    construction is factored into the two helpers below instead of the
    original duplicated branches.

    Returns:
        A Keras ``Model`` taking ``[noise, label]`` inputs and producing
        an image tensor, or ``None`` when ``datasets`` names an unknown
        dataset (preserving the original implicit fall-through).
    """

    def _build_stack(spatial, use_leaky):
        """Return the Sequential up-sampling stack starting at spatial x spatial."""
        model = Sequential()
        model.add(Dense(128 * spatial * spatial, activation="relu",
                        input_dim=latent_dim))
        model.add(Reshape((spatial, spatial, 128)))
        model.add(BatchNormalization(momentum=0.8))
        # Two 2x up-sampling stages: spatial -> 2*spatial -> 4*spatial,
        # with 512 then 256 filters, matching the original image size.
        for filters in (512, 256):
            model.add(UpSampling2D())
            model.add(Conv2D(filters, kernel_size=3, padding="same"))
            # MNIST used LeakyReLU; CIFAR-10 used a plain ReLU activation.
            if use_leaky:
                model.add(LeakyReLU(alpha=0.2))
            else:
                model.add(Activation("relu"))
            model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))
        model.summary()
        return model

    def _condition_on_label(model):
        """Wrap the stack in a model conditioned on the class-label embedding."""
        noise = Input(shape=(latent_dim, ))
        label = Input(shape=(1, ), dtype='int32')
        # Element-wise product of noise and label embedding keeps the
        # generator input dimensionality equal to latent_dim.
        label_embedding = Flatten()(Embedding(num_classes, 100)(label))
        model_input = multiply([noise, label_embedding])
        img = model(model_input)
        return Model([noise, label], img)

    if datasets == 'mnist':
        return _condition_on_label(_build_stack(7, use_leaky=True))

    if datasets == 'cifar10':
        return _condition_on_label(_build_stack(8, use_leaky=False))

    # NOTE(review): any other dataset name falls through to an implicit
    # None, exactly as the original two-branch version did.
예제 #5
0
def sam_vgg(data):
    trainable = True  #FalseTrue
    # conv_1
    conv_1_out = Conv2D(64, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block1_conv1',
                        trainable=trainable)(data)
    conv_1_out = Conv2D(64, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block1_conv2',
                        trainable=trainable)(conv_1_out)
    conv_1_out = MaxPooling2D((2, 2), strides=(2, 2),
                              name='block1_pool')(conv_1_out)

    # conv_2
    conv_2_out = Conv2D(128, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block2_conv1',
                        trainable=trainable)(conv_1_out)
    conv_2_out = Conv2D(128, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block2_conv2',
                        trainable=trainable)(conv_2_out)
    conv_2_out = MaxPooling2D((2, 2), strides=(2, 2),
                              name='block2_pool')(conv_2_out)

    # conv_3
    conv_3_out = Conv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block3_conv1',
                        trainable=trainable)(conv_2_out)
    conv_3_out = Conv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block3_conv2',
                        trainable=trainable)(conv_3_out)
    conv_3_out = Conv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block3_conv3',
                        trainable=trainable)(conv_3_out)
    conv_3_out = MaxPooling2D((2, 2),
                              strides=(2, 2),
                              name='block3_pool',
                              padding='same')(conv_3_out)

    # conv_4
    conv_4_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block4_conv1',
                        trainable=trainable)(conv_3_out)
    conv_4_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block4_conv2',
                        trainable=trainable)(conv_4_out)
    conv_4_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='block4_conv3',
                        trainable=trainable)(conv_4_out)
    conv_4_out = MaxPooling2D((2, 2),
                              strides=(1, 1),
                              name='block4_pool',
                              padding='same')(conv_4_out)

    # conv_5
    conv_5_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        dilation_rate=2,
                        name='block5_conv1',
                        trainable=trainable)(conv_4_out)
    conv_5_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        dilation_rate=2,
                        name='block5_conv2',
                        trainable=trainable)(conv_5_out)
    conv_5_out = Conv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        dilation_rate=2,
                        name='block5_conv3',
                        trainable=trainable)(conv_5_out)

    # conv_5_out = Flatten()(conv_5_out)
    # conv_5_out = RepeatVector(nb_timestep)(conv_5_out)
    # conv_5_out = Reshape((nb_timestep, 28, 28, 512))(conv_5_out)
    #
    #
    # # part land output
    # part_land_outs = (ConvLSTM2D(filters=512, kernel_size=(3, 3),
    #                              padding='same', return_sequences=False, stateful=False))(conv_5_out)
    # part_land_outs = BatchNormalization()(part_land_outs)
    # part_land_outs = Activation('sigmoid')(part_land_outs)
    part_land_outs = Conv2D(8, (3, 3),
                            padding='same',
                            activation='sigmoid',
                            name='part_land',
                            trainable=trainable)(conv_5_out)

    # part body land output
    part_body_land_outs = Conv2D(2, (3, 3),
                                 padding='same',
                                 activation='sigmoid',
                                 name='part_body_land',
                                 trainable=trainable)(conv_5_out)
    # part_body_land_outs = (ConvLSTM2D(filters=2, kernel_size=(3, 3),
    #                              padding='same', activation='sigmoid', return_sequences=False, stateful=False,
    #                              name='part_body_land'))(conv_5_out)

    # full body land output
    full_body_land_outs = Conv2D(1, (3, 3),
                                 padding='same',
                                 activation='sigmoid',
                                 name='full_body_land',
                                 trainable=trainable)(conv_5_out)

    # full_body_land_outs = (ConvLSTM2D(filters=1, kernel_size=(3, 3),
    #                                   padding='same', activation='sigmoid', return_sequences=False, stateful=False,
    #                                   name='full_body_land'))(conv_5_out)

    # outs = Flatten()(conv_5_out)
    # outs = RepeatVector(nb_timestep)(outs)
    # outs = Reshape((nb_timestep, 28, 28, 512))(outs)
    # attenLSTM_outs = AttentiveConvLSTM(nb_filters_in=512, nb_filters_out=512, nb_filters_att=512,
    #                          nb_cols=3, nb_rows=3)(outs)
    # attenLSTM_outs = Lambda(Kreshape, arguments={'shape': [-1, 28, 28, 512]}, output_shape=[28, 28, 512])(attenLSTM_outs)

    beta_part_body_conv = Conv2D(16, (3, 3),
                                 padding='same',
                                 activation='relu',
                                 trainable=trainable)(part_land_outs)
    beta_part_body_conv = MaxPooling2D((2, 2),
                                       strides=(2, 2))(beta_part_body_conv)
    beta_part_body_conv = Conv2D(16, (5, 5),
                                 padding='same',
                                 activation='relu',
                                 trainable=trainable)(beta_part_body_conv)
    beta_part_body_conv = Conv2D(16, (5, 5),
                                 padding='same',
                                 activation='relu',
                                 trainable=trainable)(beta_part_body_conv)
    beta_part_body_land_outs = Conv2D(2, (3, 3),
                                      padding='same',
                                      activation='sigmoid',
                                      trainable=trainable)(beta_part_body_conv)
    beta_part_body_land_outs = UpSampling2D(
        size=(2, 2), name='beta_part_body_land')(beta_part_body_land_outs)

    beta_full_body_conv = Conv2D(8, (3, 3),
                                 padding='same',
                                 activation='relu',
                                 trainable=trainable)(part_body_land_outs)
    beta_full_body_conv = MaxPooling2D((2, 2),
                                       strides=(2, 2))(beta_full_body_conv)
    beta_full_body_conv = Conv2D(8, (5, 5),
                                 padding='same',
                                 activation='relu',
                                 trainable=trainable)(beta_full_body_conv)
    beta_full_body_conv = Conv2D(8, (5, 5),
                                 padding='same',
                                 activation='relu',
                                 trainable=trainable)(beta_full_body_conv)
    beta_full_body_land_outs = Conv2D(1, (3, 3),
                                      padding='same',
                                      activation='sigmoid',
                                      trainable=trainable)(beta_full_body_conv)
    beta_full_body_land_outs = UpSampling2D(
        size=(2, 2), name='beta_full_body_land')(beta_full_body_land_outs)

    # gamma
    gamma_part_body_conv = Conv2D(16, (3, 3),
                                  padding='same',
                                  activation='relu',
                                  trainable=trainable)(full_body_land_outs)
    gamma_part_body_conv = MaxPooling2D((2, 2),
                                        strides=(2, 2))(gamma_part_body_conv)
    gamma_part_body_conv = Conv2D(16, (5, 5),
                                  padding='same',
                                  activation='relu',
                                  trainable=trainable)(gamma_part_body_conv)
    gamma_part_body_conv = Conv2D(16, (5, 5),
                                  padding='same',
                                  activation='relu',
                                  trainable=trainable)(gamma_part_body_conv)
    gamma_part_body_land_outs = Conv2D(
        2, (3, 3), padding='same', activation='sigmoid',
        trainable=trainable)(gamma_part_body_conv)
    gamma_part_body_land_outs = UpSampling2D(
        size=(2, 2), name='gamma_part_body_land')(gamma_part_body_land_outs)

    # gamma
    gamma_part_conv = Conv2D(64, (3, 3),
                             padding='same',
                             activation='relu',
                             trainable=trainable)(part_body_land_outs)
    gamma_part_conv = MaxPooling2D((2, 2), strides=(2, 2))(gamma_part_conv)
    gamma_part_conv = Conv2D(64, (5, 5),
                             padding='same',
                             activation='relu',
                             trainable=trainable)(gamma_part_conv)
    gamma_part_conv = Conv2D(64, (5, 5),
                             padding='same',
                             activation='relu',
                             trainable=trainable)(gamma_part_conv)
    gamma_part_land_outs = Conv2D(8, (3, 3),
                                  padding='same',
                                  activation='sigmoid',
                                  trainable=trainable)(gamma_part_conv)
    gamma_part_land_outs = UpSampling2D(
        size=(2, 2), name='gamma_part_land')(gamma_part_land_outs)

    part_land_outs1 = Lambda(lambda x: K.expand_dims(x[:, :, :, 0]),
                             output_shape=(28, 28, 1))(part_land_outs)
    part_land_outs2 = Lambda(lambda x: K.expand_dims(x[:, :, :, 1]),
                             output_shape=(28, 28, 1))(part_land_outs)
    part_land_outs3 = Lambda(lambda x: K.expand_dims(x[:, :, :, 2]),
                             output_shape=(28, 28, 1))(part_land_outs)
    part_land_outs4 = Lambda(lambda x: K.expand_dims(x[:, :, :, 3]),
                             output_shape=(28, 28, 1))(part_land_outs)
    part_land_outs5 = Lambda(lambda x: K.expand_dims(x[:, :, :, 4]),
                             output_shape=(28, 28, 1))(part_land_outs)
    part_land_outs6 = Lambda(lambda x: K.expand_dims(x[:, :, :, 5]),
                             output_shape=(28, 28, 1))(part_land_outs)
    part_land_outs7 = Lambda(lambda x: K.expand_dims(x[:, :, :, 6]),
                             output_shape=(28, 28, 1))(part_land_outs)
    part_land_outs8 = Lambda(lambda x: K.expand_dims(x[:, :, :, 7]),
                             output_shape=(28, 28, 1))(part_land_outs)

    gamma_part_land_outs1 = Lambda(lambda x: K.expand_dims(x[:, :, :, 0]),
                                   output_shape=(28, 28,
                                                 1))(gamma_part_land_outs)
    gamma_part_land_outs2 = Lambda(lambda x: K.expand_dims(x[:, :, :, 1]),
                                   output_shape=(28, 28,
                                                 1))(gamma_part_land_outs)
    gamma_part_land_outs3 = Lambda(lambda x: K.expand_dims(x[:, :, :, 2]),
                                   output_shape=(28, 28,
                                                 1))(gamma_part_land_outs)
    gamma_part_land_outs4 = Lambda(lambda x: K.expand_dims(x[:, :, :, 3]),
                                   output_shape=(28, 28,
                                                 1))(gamma_part_land_outs)
    gamma_part_land_outs5 = Lambda(lambda x: K.expand_dims(x[:, :, :, 4]),
                                   output_shape=(28, 28,
                                                 1))(gamma_part_land_outs)
    gamma_part_land_outs6 = Lambda(lambda x: K.expand_dims(x[:, :, :, 5]),
                                   output_shape=(28, 28,
                                                 1))(gamma_part_land_outs)
    gamma_part_land_outs7 = Lambda(lambda x: K.expand_dims(x[:, :, :, 6]),
                                   output_shape=(28, 28,
                                                 1))(gamma_part_land_outs)
    gamma_part_land_outs8 = Lambda(lambda x: K.expand_dims(x[:, :, :, 7]),
                                   output_shape=(28, 28,
                                                 1))(gamma_part_land_outs)

    com_part_land_outs1 = Concatenate()(
        [part_land_outs1, gamma_part_land_outs1])
    com_part_land_outs1 = Conv2D(1, (1, 1),
                                 kernel_initializer=RandomUniform(minval=0,
                                                                  maxval=1,
                                                                  seed=None),
                                 trainable=True)(com_part_land_outs1)
    com_part_land_outs2 = Concatenate()(
        [part_land_outs2, gamma_part_land_outs2])
    com_part_land_outs2 = Conv2D(1, (1, 1),
                                 kernel_initializer=RandomUniform(minval=0,
                                                                  maxval=1,
                                                                  seed=None),
                                 trainable=True)(com_part_land_outs2)
    com_part_land_outs3 = Concatenate()(
        [part_land_outs3, gamma_part_land_outs3])
    com_part_land_outs3 = Conv2D(1, (1, 1),
                                 kernel_initializer=RandomUniform(minval=0,
                                                                  maxval=1,
                                                                  seed=None),
                                 trainable=True)(com_part_land_outs3)
    com_part_land_outs4 = Concatenate()(
        [part_land_outs4, gamma_part_land_outs4])
    com_part_land_outs4 = Conv2D(1, (1, 1),
                                 kernel_initializer=RandomUniform(minval=0,
                                                                  maxval=1,
                                                                  seed=None),
                                 trainable=True)(com_part_land_outs4)
    com_part_land_outs5 = Concatenate()(
        [part_land_outs5, gamma_part_land_outs5])
    com_part_land_outs5 = Conv2D(1, (1, 1),
                                 kernel_initializer=RandomUniform(minval=0,
                                                                  maxval=1,
                                                                  seed=None),
                                 trainable=True)(com_part_land_outs5)
    com_part_land_outs6 = Concatenate()(
        [part_land_outs6, gamma_part_land_outs6])
    com_part_land_outs6 = Conv2D(1, (1, 1),
                                 kernel_initializer=RandomUniform(minval=0,
                                                                  maxval=1,
                                                                  seed=None),
                                 trainable=True)(com_part_land_outs6)
    com_part_land_outs7 = Concatenate()(
        [part_land_outs7, gamma_part_land_outs7])
    com_part_land_outs7 = Conv2D(1, (1, 1),
                                 kernel_initializer=RandomUniform(minval=0,
                                                                  maxval=1,
                                                                  seed=None),
                                 trainable=True)(com_part_land_outs7)
    com_part_land_outs8 = Concatenate()(
        [part_land_outs8, gamma_part_land_outs8])
    com_part_land_outs8 = Conv2D(1, (1, 1),
                                 kernel_initializer=RandomUniform(minval=0,
                                                                  maxval=1,
                                                                  seed=None),
                                 trainable=True)(com_part_land_outs8)

    com_part_land_outs = Concatenate(name='com_part_land')([
        com_part_land_outs1, com_part_land_outs2, com_part_land_outs3,
        com_part_land_outs4, com_part_land_outs5, com_part_land_outs6,
        com_part_land_outs7, com_part_land_outs8
    ])

    part_body_land_outs1 = Lambda(lambda x: K.expand_dims(x[:, :, :, 0]),
                                  output_shape=(28, 28,
                                                1))(part_body_land_outs)
    part_body_land_outs2 = Lambda(lambda x: K.expand_dims(x[:, :, :, 1]),
                                  output_shape=(28, 28,
                                                1))(part_body_land_outs)
    beta_part_body_land_outs1 = Lambda(
        lambda x: K.expand_dims(x[:, :, :, 0]),
        output_shape=(28, 28, 1))(beta_part_body_land_outs)
    beta_part_body_land_outs2 = Lambda(
        lambda x: K.expand_dims(x[:, :, :, 1]),
        output_shape=(28, 28, 1))(beta_part_body_land_outs)
    gamma_part_body_land_outs1 = Lambda(
        lambda x: K.expand_dims(x[:, :, :, 0]),
        output_shape=(28, 28, 1))(gamma_part_body_land_outs)
    gamma_part_body_land_outs2 = Lambda(
        lambda x: K.expand_dims(x[:, :, :, 1]),
        output_shape=(28, 28, 1))(gamma_part_body_land_outs)

    com_part_body_land_outs1 = Concatenate()([
        part_body_land_outs1, beta_part_body_land_outs1,
        gamma_part_body_land_outs1
    ])
    com_part_body_land_outs1 = Conv2D(1, (1, 1),
                                      kernel_initializer=RandomUniform(
                                          minval=0, maxval=1, seed=None),
                                      trainable=True)(com_part_body_land_outs1)
    com_part_body_land_outs2 = Concatenate()([
        part_body_land_outs2, beta_part_body_land_outs2,
        gamma_part_body_land_outs2
    ])
    com_part_body_land_outs2 = Conv2D(1, (1, 1),
                                      kernel_initializer=RandomUniform(
                                          minval=0, maxval=1, seed=None),
                                      trainable=True)(com_part_body_land_outs2)

    com_part_body_land_outs = Concatenate(name='com_part_body_land')(
        [com_part_body_land_outs1, com_part_body_land_outs2])

    com_full_body_land_outs = Concatenate()(
        [full_body_land_outs, beta_full_body_land_outs])
    com_full_body_land_outs = Conv2D(1, (1, 1),
                                     kernel_initializer=RandomUniform(
                                         minval=0, maxval=1, seed=None),
                                     name='com_full_body_land',
                                     trainable=True)(com_full_body_land_outs)

    return [
        part_land_outs, part_body_land_outs, full_body_land_outs,
        beta_part_body_land_outs, beta_full_body_land_outs,
        gamma_part_body_land_outs, gamma_part_land_outs, com_part_land_outs,
        com_part_body_land_outs, com_full_body_land_outs
    ]  #
예제 #6
0
파일: Decusr.py 프로젝트: htemiz/DECUSR
               padding='same')(main_input)
L_FEB = Conv2D(16, (3, 3),
               kernel_initializer='glorot_uniform',
               activation='relu',
               padding='same')(L_FEB)
L_FEB = Conv2D(16, (3, 3),
               kernel_initializer='glorot_uniform',
               activation='relu',
               padding='same')(L_FEB)
L_FEB = Conv2D(16, (1, 1),
               kernel_initializer='glorot_uniform',
               activation='relu',
               padding='same')(L_FEB)

# Feature upscaling layer (Lfup)
L_FUP = UpSampling2D(self.scale, name='upsampler_locally_connected')(L_FEB)

# Direct upscaling layer (Ldup)
L_DUP = UpSampling2D(self.scale)(main_input)

# REPEATING BLOCKS
RB1 = concatenate([L_FUP, L_DUP])
RB1 = Conv2D(16, (3, 3),
             kernel_initializer='glorot_uniform',
             activation='relu',
             padding='same')(RB1)
RB1 = Conv2D(16, (3, 3),
             kernel_initializer='glorot_uniform',
             activation='relu',
             padding='same')(RB1)
RB1 = Conv2D(16, (1, 1),
예제 #7
0
    def autoencoder_model(self):
        """Build a convolutional autoencoder over stacked frames.

        Returns a tuple ``(autoencoder, state_encoder, decoder)``:
        the full autoencoder (compiled with Adam/MSE), the encoder that
        maps a (4, H, W) frame stack to a 200-d latent vector, and a
        standalone decoder that shares the autoencoder's decoding layers.
        """
        elu_alpha = 1.0
        frames_in = Input(shape=(4, self.frame_height, self.frame_width))

        # --- state encoder: three strided convs, flatten, 200-d latent ---
        h = Convolution2D(16, 3, 3, subsample=(2, 2),
                          border_mode='same')(frames_in)
        h = ELU(elu_alpha)(h)
        h = Convolution2D(32, 3, 3, subsample=(2, 2), border_mode='same')(h)
        h = ELU(elu_alpha)(h)
        h = Convolution2D(64, 3, 3, subsample=(2, 2), border_mode='same')(h)
        h = ELU(elu_alpha)(h)
        h = Flatten()(h)
        latent = Dense(200)(h)
        latent = ELU(elu_alpha)(latent)
        state_encoder = Model(input=frames_in, output=latent)

        # Separate latent input so the decoder can also run standalone.
        latent_in = Input(shape=(200, ))

        # --- decoder: one shared layer stack applied to BOTH tensors, so
        # the autoencoder path and the standalone decoder share weights ---
        decoder_stack = [
            Dense(input_dim=200, output_dim=64 * 9 * 10),
            Reshape((64, 9, 10)),
            ELU(elu_alpha),
            Convolution2D(32, 3, 3, border_mode='same'),
            UpSampling2D((2, 2)),
            ELU(elu_alpha),
            Convolution2D(16, 3, 3, border_mode='same'),
            UpSampling2D((2, 2)),
            ELU(elu_alpha),
            Convolution2D(4, 3, 3, border_mode='same'),
            UpSampling2D((2, 2)),
            ELU(elu_alpha),
            Convolution2D(4, 3, 3, activation='sigmoid', border_mode='same'),
        ]
        recon = latent        # decode from the encoder's latent
        standalone = latent_in  # decode from the explicit latent input
        for layer in decoder_stack:
            recon = layer(recon)
            standalone = layer(standalone)

        autoencoder = Model(frames_in, recon)
        autoencoder.compile(optimizer=Adam(lr=5e-4), loss='mse')
        decoder = Model(latent_in, standalone)
        return autoencoder, state_encoder, decoder
예제 #8
0
# input noise
# Dimension of the random noise vector fed to the generator.
randomDim = 100

# Optimizer
# Adam with a small learning rate and beta_1=0.5.
adam = Adam(lr=0.0002, beta_1=0.5)

##GAN model
#generator
# Maps a 100-d noise vector to a 28x28x1 image: Dense -> reshape to
# 7x7x128, then two upsample+conv stages (7 -> 14 -> 28), tanh output.
generator = Sequential()
generator.add(
    Dense(128 * 7 * 7,
          input_dim=randomDim,
          kernel_initializer=initializers.RandomNormal(stddev=0.02)))
generator.add(LeakyReLU(0.2))  # slope 0.2 for inputs below 0
generator.add(Reshape((7, 7, 128)))
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
generator.add(LeakyReLU(0.2))
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Conv2D(1, kernel_size=(5, 5), padding='same', activation='tanh'))
generator.summary()

#discriminator
# First discriminator layer: strided 5x5 conv downsampling the 28x28x1
# input; the remaining layers are added below this excerpt.
discriminator = Sequential()
discriminator.add(
    Conv2D(64,
           kernel_size=(5, 5),
           strides=(2, 2),
           padding='same',
           input_shape=(28, 28, 1),
           kernel_initializer=initializers.RandomNormal(stddev=0.02)))
예제 #9
0
def build_model(dim, learn_rate, lmbda, drop, FL, init, n_filters):
    """Function that builds the (UNET) convolutional neural network.

    Three down-sampling stages, a bottleneck, and three up-sampling stages
    with skip connections, ending in a per-pixel sigmoid map.

    Parameters
    ----------
    dim : int
        Dimension of input images (assumes square).
    learn_rate : float
        Learning rate.
    lmbda : float
        Convolution2D regularization parameter.
    drop : float
        Dropout fraction.
    FL : int
        Filter length.
    init : string
        Weight initialization type.
    n_filters : int
        Number of filters in each layer.

    Returns
    -------
    model : keras model object
        Constructed Keras model.
    """
    print('Making UNET model...')
    img_input = Input(batch_shape=(None, dim, dim, 1))

    def _conv(tensor, filters):
        # One 'same'-padded FLxFL relu convolution with l2 weight
        # regularization — the repeated building block of this UNET.
        return Convolution2D(filters, FL, FL, activation='relu', init=init,
                             W_regularizer=l2(lmbda),
                             border_mode='same')(tensor)

    # Contracting path: two convs then 2x2 max-pool at each scale.
    a1 = _conv(_conv(img_input, n_filters), n_filters)
    a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)

    a2 = _conv(_conv(a1P, n_filters * 2), n_filters * 2)
    a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)

    a3 = _conv(_conv(a2P, n_filters * 4), n_filters * 4)
    a3P = MaxPooling2D((2, 2), strides=(2, 2))(a3)

    # Bottleneck.
    u = _conv(_conv(a3P, n_filters * 4), n_filters * 4)

    # Expanding path: upsample, concat the skip connection, dropout,
    # then two convs at each scale.
    u = UpSampling2D((2, 2))(u)
    u = merge((a3, u), mode='concat', concat_axis=3)
    u = Dropout(drop)(u)
    u = _conv(_conv(u, n_filters * 2), n_filters * 2)

    u = UpSampling2D((2, 2))(u)
    u = merge((a2, u), mode='concat', concat_axis=3)
    u = Dropout(drop)(u)
    u = _conv(_conv(u, n_filters), n_filters)

    u = UpSampling2D((2, 2))(u)
    u = merge((a1, u), mode='concat', concat_axis=3)
    u = Dropout(drop)(u)
    u = _conv(_conv(u, n_filters), n_filters)

    # Final output: 1x1 sigmoid conv -> per-pixel probability map.
    final_activation = 'sigmoid'
    u = Convolution2D(1,
                      1,
                      1,
                      activation=final_activation,
                      init=init,
                      W_regularizer=l2(lmbda),
                      border_mode='same')(u)
    u = Reshape((dim, dim))(u)

    # `k2` is a module-level flag selecting the Keras 2 vs Keras 1
    # Model constructor API.
    if k2:
        model = Model(inputs=img_input, outputs=u)
    else:
        model = Model(input=img_input, output=u)

    optimizer = Adam(lr=learn_rate)
    model.compile(loss='binary_crossentropy', optimizer=optimizer)
    # summary() prints the architecture itself and returns None, so
    # print(model.summary()) would emit a spurious trailing "None".
    model.summary()

    return model
예제 #10
0
# convolution + pooling 5
# Fifth encoder stage: 128-filter conv, pool, relu.
cae.add(Convolution2D(128, filterSize, filterSize, border_mode='same'))
cae.add(MaxPooling2D(pool_size=(poolSize, poolSize)))
cae.add(Activation('relu'))

# dense network
# Bottleneck: flatten, compress to 1024, expand back to 128*4*4 and
# reshape into 128 feature maps of 4x4 for the decoder.
cae.add(Flatten())
cae.add(Dense(1024))
cae.add(Activation('relu'))
cae.add(Dense(128 * 4 * 4))
cae.add(Activation('relu'))
# NOTE(review): `dims=` is the Keras 0.x Reshape keyword; later Keras
# versions use `target_shape=` — confirm against the pinned Keras version.
cae.add(Reshape(dims=(128, 4, 4)))
# NOTE(review): this relu immediately follows another relu and is a no-op
# (relu is idempotent); presumably left over from an edit.
cae.add(Activation('relu'))

# unpooling + deconvolution 1
cae.add(UpSampling2D(size=(poolSize, poolSize)))
cae.add(Convolution2D(64, filterSize, filterSize, border_mode='same'))
cae.add(Activation('relu'))

# unpooling + deconvolution 2
cae.add(UpSampling2D(size=(poolSize, poolSize)))
cae.add(Convolution2D(32, filterSize, filterSize, border_mode='same'))
cae.add(Activation('relu'))

# unpooling + deconvolution 3
cae.add(UpSampling2D(size=(poolSize, poolSize)))
cae.add(Convolution2D(16, filterSize, filterSize, border_mode='same'))
cae.add(Activation('relu'))

# unpooling + deconvolution 4
# (the conv for this stage is added below this excerpt)
cae.add(UpSampling2D(size=(poolSize, poolSize)))
예제 #11
0
def build_model(dim, learn_rate, lmbda, drop, FL, init, n_filters):
    """Function that builds the (UNET) convolutional neural network.

    Parameters
    ----------
    dim : int
        Dimension of input images (assumes square).
    learn_rate : float
        Learning rate.
    lmbda : float
        Convolution2D regularization parameter.
    drop : float
        Dropout fraction.
    FL : int
        Filter length.
    init : string
        Weight initialization type.
    n_filters : int
        Number of filters in each layer.

    Returns
    -------
        
    model : keras model object
        Constructed Keras model.
    """
    print('Making UNET model...')
    img_input = Input(batch_shape=(None, dim, dim, 1))

    #    a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
    #                       W_regularizer=l2(lmbda), border_mode='same')(img_input)
    #    a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
    #                       W_regularizer=l2(lmbda), border_mode='same')(a1)
    #    a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
    #
    #    a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
    #                       W_regularizer=l2(lmbda), border_mode='same')(a1P)
    #    a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
    #                       W_regularizer=l2(lmbda), border_mode='same')(a2)
    #    a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)
    #
    #    a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
    #                       W_regularizer=l2(lmbda), border_mode='same')(a2P)
    #    a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
    #                       W_regularizer=l2(lmbda), border_mode='same')(a3)
    #    a3P = MaxPooling2D((2, 2), strides=(2, 2),)(a3)
    #
    #    u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
    #                      W_regularizer=l2(lmbda), border_mode='same')(a3P)
    #    u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
    #                      W_regularizer=l2(lmbda), border_mode='same')(u)
    #
    #    u = UpSampling2D((2, 2))(u)
    #    u = merge((a3, u), mode='concat', concat_axis=3)
    #    u = Dropout(drop)(u)
    #    u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
    #                      W_regularizer=l2(lmbda), border_mode='same')(u)
    #    u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
    #                      W_regularizer=l2(lmbda), border_mode='same')(u)
    #
    #    u = UpSampling2D((2, 2))(u)
    #    u = merge((a2, u), mode='concat', concat_axis=3)
    #    u = Dropout(drop)(u)
    #    u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
    #                      W_regularizer=l2(lmbda), border_mode='same')(u)
    #    u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
    #                      W_regularizer=l2(lmbda), border_mode='same')(u)
    #
    #    u = UpSampling2D((2, 2))(u)
    #    u = merge((a1, u), mode='concat', concat_axis=3)
    #    u = Dropout(drop)(u)
    #    u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
    #                      W_regularizer=l2(lmbda), border_mode='same')(u)
    #    u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
    #                      W_regularizer=l2(lmbda), border_mode='same')(u)
    #
    #    # Final output
    #    final_activation = 'sigmoid'
    #    u = Convolution2D(1, 1, 1, activation=final_activation, init=init,
    #                      W_regularizer=l2(lmbda), border_mode='same')(u)
    #    u = Reshape((dim, dim))(u)
    #    if k2:
    #        model = Model(inputs=img_input, outputs=u)
    #    else:
    #        model = Model(input=img_input, output=u)
    #
    #    optimizer = Adam(lr=learn_rate)
    #    model.compile(loss='binary_crossentropy', optimizer=optimizer)

    #    22222222----------------------------------------------------------------------------------------2222222222222222222222222222222
    # 1024
    #    down0 = Convolution2D(64,3,3,activation = 'relu',init=init,W_regularizer=l2(lmbda),border_mode='same')(img_input)
    ##    down0 = Convolution2D(64,3,3,activation = 'relu',init=init,W_regularizer=l2(lmbda),border_mode='same')(img_input)
    ##    plus_0 = Convolution2D(32,2,2,border_mode='same')(down0)
    ##a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
    #    down0 = MaxPooling2D((2, 2), strides=(2, 2))(down0)
    #    down0 = Activation('relu')(down0)
    #
    #    # 512
    #
    #
    #    down1 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(down0)
    #    down1 = Convolution2D(32,2,2,border_mode='same')(down1)
    #    plus_1 = merge((down1, down0), mode='concat', concat_axis=3)
    #    down1= MaxPooling2D((2, 2), strides=(2, 2))(plus_1)
    #    down1 = Activation('relu')(down1)
    #
    #    # 256
    #
    #    down2 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(down1)
    #    down2 = Convolution2D(32,2,2,border_mode='same')(down2)
    #    plus_2 = merge((down2, down1), mode='concat', concat_axis=3)
    #    down2= MaxPooling2D((2, 2), strides=(2, 2))(plus_2)
    #    down2 = Activation('relu')(down2)
    #
    #    # 128
    #
    #    down3 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(down2)
    #    down3 = Convolution2D(32,2,2,border_mode='same')(down3)
    #    plus_3 = merge((down3, down2), mode='concat', concat_axis=3)
    #    down3= MaxPooling2D((2, 2), strides=(2, 2))(plus_3)
    #    down3 = Activation('relu')(down3)
    #
    #    # 64
    #
    #
    #    down4 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(down3)
    #    down4 = Convolution2D(32,2,2,border_mode='same')(down4)
    #    plus_4 = merge((down4, down3), mode='concat', concat_axis=3)
    #    down4= MaxPooling2D((2, 2), strides=(2, 2))(plus_4)
    #    down4 = Activation('relu')(down4)
    #
    #    # 32
    #
    #
    #    down5 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(down4)
    #    down5 = Convolution2D(32,2,2,border_mode='same')(down5)
    #    plus_5 = merge((down5, down4), mode='concat', concat_axis=3)
    #    down5= MaxPooling2D((2, 2), strides=(2, 2))(plus_5)
    #    down5 = Activation('relu')(down5)
    #
    #    # 16
    #
    #
    #    down6 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(down5)
    #    down6 = Convolution2D(32,2,2,border_mode='same')(down6)
    #    plus_6 = merge((down6, down5), mode='concat', concat_axis=3)
    #    down6= MaxPooling2D((2, 2), strides=(2, 2))(plus_6)
    #    down6 = Activation('relu')(down6)
    #
    #    # 8
    ## u = UpSampling2D((2, 2))(u)
    ##    u = merge((a3, u), mode='concat', concat_axis=3)
    ##    u = Dropout(drop)(u)
    ##    u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
    ##                      W_regularizer=l2(lmbda), border_mode='same')(u)
    ##    u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
    ##                      W_regularizer=l2(lmbda), border_mode='same')(u)
    #    up7 = UpSampling2D((2, 2))(down6)
    #
    #    # 16
    #
    #
    #    up6 = merge((up7,plus_6),mode='concat',concat_axis=3)
    #    up6 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(up6)
    #    up6 = Convolution2D(32,3,3,border_mode='same')(up6)
    #    up6 = merge((up6,up7),mode='concat',concat_axis=3)
    #    up6 = Activation('relu')(up6)
    #    up6 = UpSampling2D((2, 2))(up6)
    #
    #    # 32
    #
    #
    #    up5 = merge((up6,plus_5),mode='concat',concat_axis=3)
    #    up5 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(up5)
    #    up5 = Convolution2D(32,3,3,border_mode='same')(up5)
    #    up5 = merge((up5,up6),mode='concat',concat_axis=3)
    #    up5 = Activation('relu')(up5)
    #    up5 = UpSampling2D((2, 2))(up5)
    #
    #    # 64
    #
    #
    #
    #    up4 = merge((up5,plus_4),mode='concat',concat_axis=3)
    #    up4 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(up4)
    #    up4 = Convolution2D(32,3,3,border_mode='same')(up4)
    #    up4 = merge((up4,up5),mode='concat',concat_axis=3)
    #    up4 = Activation('relu')(up4)
    #    up4 = UpSampling2D((2, 2))(up4)
    #
    #    # 128
    #
    #    up3 = merge((up4,plus_3),mode='concat',concat_axis=3)
    #    up3 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(up3)
    #    up3 = Convolution2D(32,3,3,border_mode='same')(up3)
    #    up3 = merge((up3,up4),mode='concat',concat_axis=3)
    #    up3 = Activation('relu')(up3)
    #    up3 = UpSampling2D((2, 2))(up3)
    #    # 256
    #
    #
    #    up2 = merge((up3,plus_2),mode='concat',concat_axis=3)
    #    up2 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(up2)
    #    up2 = Convolution2D(32,3,3,border_mode='same')(up2)
    #    up2 = merge((up2,up3),mode='concat',concat_axis=3)
    #    up2 = Activation('relu')(up2)
    #    up2 = UpSampling2D((2, 2))(up2)
    #
    #    # 512
    #
    #
    #    up1 = merge((up2,plus_1),mode='concat',concat_axis=3)
    #    up1 = Convolution2D(64,3,3,activation = 'relu',border_mode='same')(up1)
    #    up1 = Convolution2D(32,3,3,border_mode='same')(up1)
    #    up1 = merge((up1,up2),mode='concat',concat_axis=3)
    #    up1 = Activation('relu')(up1)
    #    up1 = UpSampling2D((2, 2))(up1)
    #
    #    # 1024
    #
    ##    classify = Conv2D(1, (1, 1), activation='sigmoid')(up1)
    #    classify = Convolution2D(1,1,1,activation = 'sigmoid')(up1)
    #    down0 = Conv2D(64, (3, 3), padding='same')(img_input)
    #    down0 = Activation('relu')(down0)
    #    down0 = Conv2D(64, (3, 3), padding='same')(img_input)
    #    down0 = Activation('relu')(down0)
    #    plus_0 = Conv2D(32, (2, 2), padding='same')(down0)
    #    down0 = MaxPooling2D((2, 2), strides=(2, 2))(down0)
    #    down0 = Activation('relu')(down0)
    #
    #    # 512
    #
    #    down1 = Conv2D(64, (3, 3), padding='same')(down0)
    #    down1 = Activation('relu')(down1)
    #    down1 = Conv2D(32, (3, 3), padding='same')(down1)
    #    plus_1 = concatenate([down1, down0], axis=3)
    #    down1= MaxPooling2D((2, 2), strides=(2, 2))(plus_1)
    #    down1 = Activation('relu')(down1)
    #
    #    # 256
    #
    #    down2 = Conv2D(64, (3, 3), padding='same')(down1)
    #    down2 = Activation('relu')(down2)
    #    down2 = Conv2D(32, (2, 2), padding='same')(down2)
    #    plus_2 = concatenate([down2, down1], axis=3)
    #    down2 = MaxPooling2D((2, 2), strides=(2, 2))(plus_2)
    #    down2 = Activation('relu')(down2)
    #
    #    # 128
    #
    #    down3 = Conv2D(64, (3, 3), padding='same')(down2)
    #    down3 = Activation('relu')(down3)
    #    down3 = Conv2D(32, (2, 2), padding='same')(down3)
    #    plus_3 = concatenate([down3, down2], axis=3)
    #    down3 = MaxPooling2D((2, 2), strides=(2, 2))(plus_3)
    #    down3 = Activation('relu')(down3)
    #
    #    # 64
    #
    #    down4 = Conv2D(64, (3, 3), padding='same')(down3)
    #    down4 = Activation('relu')(down4)
    #    down4 = Conv2D(32, (2, 2), padding='same')(down4)
    #    plus_4 = concatenate([down4, down3], axis=3)
    #    down4 = MaxPooling2D((2, 2), strides=(2, 2))(plus_4)
    #    down4 = Activation('relu')(down4)
    #
    #    # 32
    #
    #    down5 = Conv2D(64, (3, 3), padding='same')(down4)
    #    down5 = Activation('relu')(down5)
    #    down5 = Conv2D(32, (2, 2), padding='same')(down5)
    #    plus_5 = concatenate([down5, down4], axis=3)
    #    down5 = MaxPooling2D((2, 2), strides=(2, 2))(plus_5)
    #    down5 = Activation('relu')(down5)
    #
    #    # 16
    #
    #    down6 = Conv2D(64, (3, 3), padding='same')(down5)
    #    down6 = Activation('relu')(down6)
    #    down6 = Conv2D(32, (2, 2), padding='same')(down6)
    #    plus_6 = concatenate([down6, down5], axis=3)
    #    down6 = MaxPooling2D((2, 2), strides=(2, 2))(plus_6)
    #    down6 = Activation('relu')(down6)
    #
    #    # 8
    #
    #    up7 = UpSampling2D((2, 2))(down6)
    #
    #    # 16
    #
    #    up6 = concatenate([up7, plus_6], axis=3)
    #    up6 = Conv2D(64, (3, 3), padding='same')(up6)
    #    up6 = Activation('relu')(up6)
    #    up6 = Conv2D(32, (3, 3), padding='same')(up6)
    #    up6 = concatenate([up6, up7], axis=3)
    #    up6 = Activation('relu')(up6)
    #    up6 = UpSampling2D((2, 2))(up6)
    #
    #    # 32
    #
    #    up5 = concatenate([up6, plus_5], axis=3)
    #    up5 = Conv2D(64, (3, 3), padding='same')(up5)
    #    up5 = Activation('relu')(up5)
    #    up5 = Conv2D(32, (3, 3), padding='same')(up5)
    #    up5 = concatenate([up5, up6], axis=3)
    #    up5 = Activation('relu')(up5)
    #    up5 = UpSampling2D((2, 2))(up5)
    #
    #    # 64
    #
    #    up4 = concatenate([up5, plus_4], axis=3)
    #    up4 = Conv2D(64, (3, 3), padding='same')(up4)
    #    up4 = Activation('relu')(up4)
    #    up4 = Conv2D(32, (3, 3), padding='same')(up4)
    #    up4 = concatenate([up4, up5], axis=3)
    #    up4 = Activation('relu')(up4)
    #    up4 = UpSampling2D((2, 2))(up4)
    #
    #    # 128
    #
    #    up3 = concatenate([up4, plus_3], axis=3)
    #    up3 = Conv2D(64, (3, 3), padding='same')(up3)
    #    up3 = Activation('relu')(up3)
    #    up3 = Conv2D(32, (3, 3), padding='same')(up3)
    #    up3 = concatenate([up3, up4], axis=3)
    #    up3 = Activation('relu')(up3)
    #    up3 = UpSampling2D((2, 2))(up3)
    #
    #    # 256
    #
    #    up2 = concatenate([up3, plus_2], axis=3)
    #    up2 = Conv2D(64, (3, 3), padding='same')(up2)
    #    up2 = Activation('relu')(up2)
    #    up2 = Conv2D(32, (3, 3), padding='same')(up2)
    #    up2 = concatenate([up2, up3], axis=3)
    #    up2 = Activation('relu')(up2)
    #    up2 = UpSampling2D((2, 2))(up2)
    #
    #    # 512
    #
    #    up1 = concatenate([up2, plus_1], axis=3)
    #    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    #    up1 = Activation('relu')(up1)
    #    up1 = Conv2D(32, (3, 3), padding='same')(up1)
    #    up1 = concatenate([up1, up2], axis=3)
    #    up1 = Activation('relu')(up1)
    #    up1 = UpSampling2D((2, 2))(up1)
    #
    #
    #    classify = Conv2D(1, (1, 1), activation='sigmoid')(up1)
    #    classify = Reshape((dim, dim))(classify)
    #
    #
    #    model = Model(inputs=img_input, outputs=classify)
    #
    #    optimizer = Adam(lr=learn_rate)
    #    model.compile(loss='binary_crossentropy', optimizer=optimizer)

    droprate = 0.25
    n_filters = 32
    upconv = False
    growth_factor = 2
    #inputs = BatchNormalization()(inputs)
    conv1 = Conv2D(n_filters, (3, 3), activation='relu',
                   padding='same')(img_input)
    conv1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    pool1 = Dropout(droprate)(pool1)

    n_filters *= growth_factor
    pool1 = BatchNormalization()(pool1)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    pool2 = Dropout(droprate)(pool2)

    n_filters *= growth_factor
    pool2 = BatchNormalization()(pool2)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    pool3 = Dropout(droprate)(pool3)

    n_filters *= growth_factor
    pool3 = BatchNormalization()(pool3)
    conv4_0 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(pool3)
    conv4_0 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_0)
    pool4_0 = MaxPooling2D(pool_size=(2, 2))(conv4_0)
    pool4_0 = Dropout(droprate)(pool4_0)

    n_filters *= growth_factor
    pool4_0 = BatchNormalization()(pool4_0)
    conv4_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(pool4_0)
    conv4_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_1)
    pool4_1 = MaxPooling2D(pool_size=(2, 2))(conv4_1)
    pool4_1 = Dropout(droprate)(pool4_1)

    n_filters *= growth_factor
    pool4_1 = BatchNormalization()(pool4_1)
    conv4_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(pool4_1)
    conv4_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_2)
    pool4_2 = MaxPooling2D(pool_size=(2, 2))(conv4_2)
    pool4_2 = Dropout(droprate)(pool4_2)

    n_filters *= growth_factor
    pool4_2 = BatchNormalization()(pool4_2)
    conv5 = Conv2D(n_filters, (3, 3), activation='relu',
                   padding='same')(pool4_2)
    conv5 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv5)
    conv5 = Dropout(droprate)(conv5)

    n_filters //= growth_factor
    if upconv:
        up6 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv5), conv4_2
        ])
    else:
        up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4_2])
    up6 = BatchNormalization()(up6)
    conv6 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv6)
    conv6 = Dropout(droprate)(conv6)

    n_filters //= growth_factor
    if upconv:
        up6_1 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv6), conv4_1
        ])
    else:
        up6_1 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv4_1])
    up6_1 = BatchNormalization()(up6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(up6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_1)
    conv6_1 = Dropout(droprate)(conv6_1)

    n_filters //= growth_factor
    if upconv:
        up6_2 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv6_1), conv4_0
        ])
    else:
        up6_2 = concatenate([UpSampling2D(size=(2, 2))(conv6_1), conv4_0])
    up6_2 = BatchNormalization()(up6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(up6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_2)
    conv6_2 = Dropout(droprate)(conv6_2)

    n_filters //= growth_factor
    if upconv:
        up7 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv6_2), conv3
        ])
    else:
        up7 = concatenate([UpSampling2D(size=(2, 2))(conv6_2), conv3])
    up7 = BatchNormalization()(up7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv7)
    conv7 = Dropout(droprate)(conv7)

    n_filters //= growth_factor
    if upconv:
        up8 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv7), conv2
        ])
    else:
        up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    up8 = BatchNormalization()(up8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv8)
    conv8 = Dropout(droprate)(conv8)

    n_filters //= growth_factor
    if upconv:
        up9 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv8), conv1
        ])
    else:
        up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    up9 = BatchNormalization()(up9)
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv9)

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
    conv10 = Reshape((256, 256))(conv10)

    model = Model(inputs=img_input, outputs=conv10)

    model.compile(optimizer=Adam(), loss='binary_crossentropy')

    print(model.summary())

    return model
예제 #12
0
# Report dataset sizes. NOTE(review): X_train / X_test are defined earlier in
# the script -- presumably arrays of 96x96 RGB images; TODO confirm.
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# Input: one 96x96 RGB image.
x = Input(shape=(96, 96, 3))

# Encoder: three conv + 2x2 max-pool stages, shrinking 96x96 -> 12x12
# while the channel count goes 16 -> 8 -> 8.
conv1_1 = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
pool1 = MaxPooling2D((2, 2), padding='same')(conv1_1)
conv1_2 = Conv2D(8, (3, 3), activation='relu', padding='same')(pool1)
pool2 = MaxPooling2D((2, 2), padding='same')(conv1_2)
conv1_3 = Conv2D(8, (3, 3), activation='relu', padding='same')(pool2)
h = MaxPooling2D((2, 2), padding='same')(conv1_3)  # latent representation

# Decoder: mirror of the encoder; upsample back to 96x96 and finish with a
# sigmoid conv producing a 3-channel reconstruction in [0, 1].
conv2_1 = Conv2D(8, (3, 3), activation='relu', padding='same')(h)
up1 = UpSampling2D((2, 2))(conv2_1)
conv2_2 = Conv2D(8, (3, 3), activation='relu', padding='same')(up1)
up2 = UpSampling2D((2, 2))(conv2_2)
conv2_3 = Conv2D(16, (3, 3), activation='relu', padding='same')(up2)
up3 = UpSampling2D((2, 2))(conv2_3)
r = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(up3)

autoencoder = Model(inputs=x, outputs=r)
# autoencoder.load_weights("../model/banklogo_1000_0.51.h5")  # model/banklogo_1000_0.51.h5   Guo Nan "bad" model
autoencoder.load_weights("../model/banklogo_hualing_bad.h5")  # Hualing "bad" model

# autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

autoencoder.summary()

# Inference only: reconstruct the test set with the pre-trained weights.
decoded_imgs = autoencoder.predict(X_test)
예제 #13
0
 def creat_generator(self):
     """Build the U-Net style generator.

     Seven strided-conv encoder steps (64 -> 128 -> 256 -> 512 x4
     channels) followed by seven upsampling decoder steps with skip
     connections to the matching encoder outputs, ending in a tanh conv
     with ``self.nC`` output channels.
     """

     def encode(tensor, n_filters, batch_norm=True):
         # One downsampling step: stride-2 conv + LeakyReLU (+ optional BN).
         tensor = Conv2D(filters=n_filters, kernel_size=4, strides=2,
                         padding='same')(tensor)
         tensor = LeakyReLU(alpha=0.2)(tensor)
         if batch_norm:
             tensor = BatchNormalization(momentum=0.8)(tensor)
         return tensor

     def decode(tensor, skip, n_filters):
         # One upsampling step: 2x upsample + conv/ReLU + BN, then
         # concatenate the matching encoder feature map.
         tensor = UpSampling2D(size=2)(tensor)
         tensor = Conv2D(filters=n_filters, kernel_size=4, strides=1,
                         padding='same', activation='relu')(tensor)
         tensor = BatchNormalization(momentum=0.8)(tensor)
         return Concatenate()([tensor, skip])

     # Encoder (layer 0 is the raw input; layer 1 has no batch norm).
     d0 = Input(shape=self.image_shape)
     d1 = encode(d0, 64, batch_norm=False)
     d2 = encode(d1, 128)
     d3 = encode(d2, 256)
     d4 = encode(d3, 512)
     d5 = encode(d4, 512)
     d6 = encode(d5, 512)
     d7 = encode(d6, 512)

     # Decoder with skip connections back down to layer 1.
     u6 = decode(d7, d6, 512)
     u5 = decode(u6, d5, 512)
     u4 = decode(u5, d4, 512)
     u3 = decode(u4, d3, 256)
     u2 = decode(u3, d2, 128)
     u1 = decode(u2, d1, 64)

     # Final upsample + tanh conv maps back to self.nC output channels.
     u0 = UpSampling2D(size=2)(u1)
     u0 = Conv2D(self.nC,
                 kernel_size=4,
                 strides=1,
                 padding='same',
                 activation='tanh')(u0)
     return Model(d0, u0)
예제 #14
0
    def model_generator_bathy(
        self,
        units=512,
        dropout=0.5,
        reg=lambda: regularizers.l1_l2(l1=1e-7, l2=1e-7)):
        """Build the bathymetry generator (decoder) and wrap it in a Model.

        Maps a latent vector of length ``self.latent_dim`` to a
        single-channel 21x21 bathymetry patch: Dense projection to a 4x4
        feature map, three Conv2D/PReLU stages interleaved with 2x
        upsampling up to 32x32, a final one-channel conv, then Cropping2D
        down to 21x21.

        Parameters
        ----------
        units : int
            Base channel count; reduced (//2, //4, //8) at successive
            stages.
        dropout : float
            Currently unused -- the SpatialDropout2D layers below are
            commented out.
        reg : callable
            Zero-argument factory returning a fresh ``l1_l2`` kernel
            regularizer; called once per layer so regularizer objects are
            not shared between layers.

        Returns
        -------
        keras Model mapping a latent input ``z`` to a bathymetry patch.
        """
        decoder = Sequential(name="decoder")
        h = 5  # spatial size (h x h) of every conv kernel

        # Latent vector -> flat 4*4*units feature vector.
        decoder.add(
            Dense(units * 4 * 4,
                  input_dim=self.latent_dim,
                  kernel_regularizer=reg()))
        # check channel order on below
        # (Reshape assumes channels-last ordering -- TODO confirm backend.)
        decoder.add(Reshape((4, 4, units)))
        # decoder.add(SpatialDropout2D(dropout))
        #decoder.add(LeakyReLU(0.2))
        decoder.add(PReLU())
        decoder.add(
            Conv2D(units // 2, (h, h),
                   activation='linear',
                   padding='same',
                   kernel_regularizer=reg()))
        # decoder.add(SpatialDropout2D(dropout))
        #decoder.add(LeakyReLU(0.2))
        decoder.add(PReLU())
        decoder.add(UpSampling2D(size=(2, 2)))  # 4x4 -> 8x8
        decoder.add(
            Conv2D(units // 4, (h, h),
                   activation='linear',
                   padding='same',
                   kernel_regularizer=reg()))
        # decoder.add(SpatialDropout2D(dropout))
        #decoder.add(LeakyReLU(0.2))
        decoder.add(PReLU())
        decoder.add(UpSampling2D(size=(2, 2)))  # 8x8 -> 16x16
        decoder.add(
            Conv2D(units // 8, (h, h),
                   activation='linear',
                   padding='same',
                   kernel_regularizer=reg()))
        # decoder.add(SpatialDropout2D(dropout))
        #decoder.add(LeakyReLU(0.2))
        decoder.add(PReLU())
        decoder.add(UpSampling2D(size=(2, 2)))  # 32 x 32
        # Single-channel output map (linear activation, no squashing).
        decoder.add(
            Conv2D(1, (h, h),
                   activation='linear',
                   padding='same',
                   kernel_regularizer=reg()))

        ##added relu above
        ##decoder.add(Activation('linear'))

        # hack to bring back to size of bathymetry 21x21
        decoder.add(Cropping2D(cropping=((6, 5), (6, 5))))

        #decoder.summary()
        # above assumes a particular output dimension, instead try below
        #decoder.add(Dense(np.prod(self.img_shape), activation='sigmoid'))
        #decoder.add(Reshape(self.img_shape))

        decoder.summary()

        # Wrap the Sequential decoder as a functional Model on a latent input.
        z = Input(shape=(self.latent_dim, ))
        bpatch = decoder(z)

        return Model(z, bpatch)
예제 #15
0
def get_main_net(input_shape=(512, 512, 1), weights_path=None):
    """Build the main fingerprint-analysis network (FingerNet-style).

    Pipeline: VGG-like feature extraction (three pooled stages, overall
    stride 8) -> three dilated "ASPP" branches that each predict
    orientation (90-bin) and segmentation maps, fused by summation ->
    Gabor filter-bank enhancement of the raw input image steered by the
    predicted orientation -> minutiae heads predicting orientation
    (180-bin), x/y offset (8-bin each) and score maps.

    NOTE(review): depends on module-level helpers (`conv_bn_prelu`,
    `gabor_bank`, `img_normalization`, `merge_sum`, `merge_mul`,
    `merge_concat`, `ori_highest_peak`, `select_max`, `reduce_sum`,
    `atan2`) and on a global `args` (reads `args.mode`).

    Args:
        input_shape: shape of the grayscale input image, (H, W, 1).
        weights_path: optional weights file, loaded by layer name.

    Returns:
        A keras Model; in 'deploy' mode the enhanced (real) image is an
        additional output.
    """
    img_input = Input(input_shape)
    bn_img = Lambda(img_normalization, name='img_norm')(img_input)
    # feature extraction VGG
    conv = conv_bn_prelu(bn_img, (64, 3, 3), '1_1')
    conv = conv_bn_prelu(conv, (64, 3, 3), '1_2')
    conv = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv)

    conv = conv_bn_prelu(conv, (128, 3, 3), '2_1')
    conv = conv_bn_prelu(conv, (128, 3, 3), '2_2')
    conv = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv)

    conv = conv_bn_prelu(conv, (256, 3, 3), '3_1')
    conv = conv_bn_prelu(conv, (256, 3, 3), '3_2')
    conv = conv_bn_prelu(conv, (256, 3, 3), '3_3')
    conv = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv)

    # multi-scale ASPP
    # Each scale uses a different dilation rate; each predicts a 90-bin
    # orientation map (ori_*) and a 1-channel segmentation map (seg_*).
    scale_1 = conv_bn_prelu(conv, (256, 3, 3), '4_1', dilation_rate=(1, 1))
    ori_1 = conv_bn_prelu(scale_1, (128, 1, 1), 'ori_1_1')
    ori_1 = Conv2D(90, (1, 1), padding='same', name='ori_1_2')(ori_1)
    seg_1 = conv_bn_prelu(scale_1, (128, 1, 1), 'seg_1_1')
    seg_1 = Conv2D(1, (1, 1), padding='same', name='seg_1_2')(seg_1)

    scale_2 = conv_bn_prelu(conv, (256, 3, 3), '4_2', dilation_rate=(4, 4))
    ori_2 = conv_bn_prelu(scale_2, (128, 1, 1), 'ori_2_1')
    ori_2 = Conv2D(90, (1, 1), padding='same', name='ori_2_2')(ori_2)
    seg_2 = conv_bn_prelu(scale_2, (128, 1, 1), 'seg_2_1')
    seg_2 = Conv2D(1, (1, 1), padding='same', name='seg_2_2')(seg_2)

    scale_3 = conv_bn_prelu(conv, (256, 3, 3), '4_3', dilation_rate=(8, 8))
    ori_3 = conv_bn_prelu(scale_3, (128, 1, 1), 'ori_3_1')
    ori_3 = Conv2D(90, (1, 1), padding='same', name='ori_3_2')(ori_3)
    seg_3 = conv_bn_prelu(scale_3, (128, 1, 1), 'seg_3_1')
    seg_3 = Conv2D(1, (1, 1), padding='same', name='seg_3_2')(seg_3)

    # sum fusion for ori
    # Two sigmoid heads share the same summed logits (separate names so
    # each can get its own loss/weighting downstream).
    ori_out = Lambda(merge_sum)([ori_1, ori_2, ori_3])
    ori_out_1 = Activation('sigmoid', name='ori_out_1')(ori_out)
    ori_out_2 = Activation('sigmoid', name='ori_out_2')(ori_out)

    # sum fusion for segmentation
    seg_out = Lambda(merge_sum)([seg_1, seg_2, seg_3])
    seg_out = Activation('sigmoid', name='seg_out')(seg_out)
    # ----------------------------------------------------------------------------
    # enhance part
    # Fixed (non-learned initial) Gabor filter banks convolve the raw image;
    # the orientation prediction selects which filter response to keep.
    filters_cos, filters_sin = gabor_bank(stride=2, Lambda=8)
    filter_img_real = Conv2D(
        filters_cos.shape[3], (filters_cos.shape[0], filters_cos.shape[1]),
        weights=[filters_cos, np.zeros([filters_cos.shape[3]])],
        padding='same',
        name='enh_img_real_1')(img_input)
    filter_img_imag = Conv2D(
        filters_sin.shape[3], (filters_sin.shape[0], filters_sin.shape[1]),
        weights=[filters_sin, np.zeros([filters_sin.shape[3]])],
        padding='same',
        name='enh_img_imag_1')(img_input)
    ori_peak = Lambda(ori_highest_peak)(ori_out_1)
    ori_peak = Lambda(select_max)(ori_peak)  # select max ori and set it to 1
    # Upsample the stride-8 predictions back to full image resolution.
    upsample_ori = UpSampling2D(size=(8, 8))(ori_peak)
    seg_round = Activation('softsign')(seg_out)
    upsample_seg = UpSampling2D(size=(8, 8))(seg_round)
    mul_mask_real = Lambda(merge_mul)([filter_img_real, upsample_ori])
    enh_img_real = Lambda(reduce_sum, name='enh_img_real_2')(mul_mask_real)
    mul_mask_imag = Lambda(merge_mul)([filter_img_imag, upsample_ori])
    enh_img_imag = Lambda(reduce_sum, name='enh_img_imag_2')(mul_mask_imag)
    # Phase image from the imaginary/real Gabor responses.
    enh_img = Lambda(atan2, name='phase_img')([enh_img_imag, enh_img_real])
    enh_seg_img = Lambda(merge_concat,
                         name='phase_seg_img')([enh_img, upsample_seg])
    # ----------------------------------------------------------------------------
    # mnt part
    # Minutiae branch on the enhanced image (three pooled stages, stride 8).
    mnt_conv = conv_bn_prelu(enh_seg_img, (64, 9, 9), 'mnt_1_1')
    mnt_conv = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(mnt_conv)

    mnt_conv = conv_bn_prelu(mnt_conv, (128, 5, 5), 'mnt_2_1')
    mnt_conv = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(mnt_conv)

    mnt_conv = conv_bn_prelu(mnt_conv, (256, 3, 3), 'mnt_3_1')
    mnt_conv = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(mnt_conv)

    # Minutia orientation head conditions on the predicted ridge orientation.
    mnt_o_1 = Lambda(merge_concat)([mnt_conv, ori_out_1])
    mnt_o_2 = conv_bn_prelu(mnt_o_1, (256, 1, 1), 'mnt_o_1_1')
    mnt_o_3 = Conv2D(180, (1, 1), padding='same', name='mnt_o_1_2')(mnt_o_2)
    mnt_o_out = Activation('sigmoid', name='mnt_o_out')(mnt_o_3)

    # Sub-cell x offset head (8 bins).
    mnt_w_1 = conv_bn_prelu(mnt_conv, (256, 1, 1), 'mnt_w_1_1')
    mnt_w_2 = Conv2D(8, (1, 1), padding='same', name='mnt_w_1_2')(mnt_w_1)
    mnt_w_out = Activation('sigmoid', name='mnt_w_out')(mnt_w_2)

    # Sub-cell y offset head (8 bins).
    mnt_h_1 = conv_bn_prelu(mnt_conv, (256, 1, 1), 'mnt_h_1_1')
    mnt_h_2 = Conv2D(8, (1, 1), padding='same', name='mnt_h_1_2')(mnt_h_1)
    mnt_h_out = Activation('sigmoid', name='mnt_h_out')(mnt_h_2)

    # Minutia confidence/score head.
    mnt_s_1 = conv_bn_prelu(mnt_conv, (256, 1, 1), 'mnt_s_1_1')
    mnt_s_2 = Conv2D(1, (1, 1), padding='same', name='mnt_s_1_2')(mnt_s_1)
    mnt_s_out = Activation('sigmoid', name='mnt_s_out')(mnt_s_2)

    # Deploy mode additionally exposes the enhanced real-part image.
    if args.mode == 'deploy':
        model = Model(inputs=[
            img_input,
        ],
                      outputs=[
                          enh_img_real, ori_out_1, ori_out_2, seg_out,
                          mnt_o_out, mnt_w_out, mnt_h_out, mnt_s_out
                      ])
    else:
        model = Model(inputs=[
            img_input,
        ],
                      outputs=[
                          ori_out_1, ori_out_2, seg_out, mnt_o_out, mnt_w_out,
                          mnt_h_out, mnt_s_out
                      ])
    if weights_path:
        model.load_weights(weights_path, by_name=True)
    return model
# NOTE(review): continuation of a Sequential `model` whose construction
# begins earlier in the file; `nb_classes` is also defined elsewhere.
model.add(MaxPooling2D(pool_size=(2, 2)))

# Deepen to 256 then 512 channels, halving spatial resolution each stage.
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))

#Decoder Part
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))

# NOTE(review): `dim_ordering` is the Keras 1.x spelling of what later
# became `data_format`; presumably this file targets an old Keras
# release -- confirm before upgrading.
model.add(UpSampling2D(size=(2, 2), dim_ordering='default'))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(BatchNormalization())

model.add(UpSampling2D(size=(2, 2), dim_ordering='default'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(BatchNormalization())

model.add(UpSampling2D(size=(2, 2), dim_ordering='default'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(BatchNormalization())

# Per-pixel class probabilities over nb_classes.
model.add(Conv2D(nb_classes, (1, 1), activation='softmax'))


def SegNet(input_shape=(224, 224, 3), classes=2):
예제 #17
0
 def deconv2d(layer_input):
     """Double the spatial resolution of ``layer_input``.

     2x nearest-neighbour upsample, 3x3 conv to 256 channels, then a
     separate ReLU activation layer.
     """
     upsampled = UpSampling2D(size=2)(layer_input)
     convolved = Conv2D(256, kernel_size=3, strides=1,
                        padding='same')(upsampled)
     return Activation('relu')(convolved)
예제 #18
0
def generate_model(weight_decay=0.0005):
    '''
    Define the siamese person re-identification model structure.
    ---------------------------------------------------------------------------
    INPUT:
        weight_decay: l2 regularization factor applied to every conv and
            dense kernel in the network

    OUTPUT:
        model: the Keras model after being defined -- two 160x60x3 image
            inputs, softmax output over two classes (same / different
            person)

        # References
        - [An Improved Deep Learning Architecture for Person Re-Identification]
    ---------------------------------------------------------------------------
    '''
    def upsample_neighbor_function(input_x):
        # For every spatial position of input_x, extract its 5x5
        # zero-padded neighborhood and tile the neighborhoods into one
        # (5*H, 5*W) map. Combined with the 5x5 block-upsampled other
        # branch, this yields the cross-input neighborhood differences
        # from the referenced paper.
        input_x_pad = K.spatial_2d_padding(input_x, padding=((2, 2), (2, 2)))
        x_length = K.int_shape(input_x)[1]
        y_length = K.int_shape(input_x)[2]
        output_x_list = []
        output_y_list = []
        for i_x in range(2, x_length + 2):
            for i_y in range(2, y_length + 2):
                output_y_list.append(input_x_pad[:, i_x - 2:i_x + 3,
                                                 i_y - 2:i_y + 3, :])
            output_x_list.append(K.concatenate(output_y_list, axis=2))
            output_y_list = []
        return K.concatenate(output_x_list, axis=1)

    # A single stateless pooling layer instance, shared by both branches.
    max_pooling = MaxPooling2D()

    # Two input images to compare.
    x1_input = Input(shape=(160, 60, 3))
    x2_input = Input(shape=(160, 60, 3))

    # Tied (shared-weight) convolution stages for both inputs.
    share_conv_1 = Conv2D(20,
                          5,
                          kernel_regularizer=l2(weight_decay),
                          activation="relu")
    x1 = share_conv_1(x1_input)
    x2 = share_conv_1(x2_input)
    x1 = max_pooling(x1)
    x2 = max_pooling(x2)

    share_conv_2 = Conv2D(25,
                          5,
                          kernel_regularizer=l2(weight_decay),
                          activation="relu")
    x1 = share_conv_2(x1)
    x2 = share_conv_2(x2)
    x1 = max_pooling(x1)
    x2 = max_pooling(x2)

    # Cross-input neighborhood differences:
    # upsample each feature map 5x (block replication) and subtract the
    # tiled 5x5 neighborhoods of the *other* input.
    upsample_same = UpSampling2D(size=(5, 5))
    x1_up = upsample_same(x1)
    x2_up = upsample_same(x2)
    upsample_neighbor = Lambda(upsample_neighbor_function)
    x1_nn = upsample_neighbor(x1)
    x2_nn = upsample_neighbor(x2)
    negative = Lambda(lambda x: -x)
    x1_nn = negative(x1_nn)
    x2_nn = negative(x2_nn)
    x1 = Add()([x1_up, x2_nn])
    x2 = Add()([x2_up, x1_nn])

    # Patch summary: stride-5 convs collapse each 5x5 difference block.
    # These two branches are intentionally NOT weight-tied (the two
    # difference maps are not symmetric).
    conv_3_1 = Conv2D(25,
                      5,
                      strides=(5, 5),
                      kernel_regularizer=l2(weight_decay),
                      activation="relu")
    conv_3_2 = Conv2D(25,
                      5,
                      strides=(5, 5),
                      kernel_regularizer=l2(weight_decay),
                      activation="relu")
    x1 = conv_3_1(x1)
    x2 = conv_3_2(x2)

    conv_4_1 = Conv2D(25,
                      3,
                      kernel_regularizer=l2(weight_decay),
                      activation="relu")
    conv_4_2 = Conv2D(25,
                      3,
                      kernel_regularizer=l2(weight_decay),
                      activation="relu")
    x1 = conv_4_1(x1)
    x2 = conv_4_2(x2)
    x1 = max_pooling(x1)
    x2 = max_pooling(x2)

    # Fuse both branches and classify same/different.
    y = Concatenate()([x1, x2])
    y = Flatten()(y)
    y = Dense(500, kernel_regularizer=l2(weight_decay), activation='relu')(y)
    y = Dense(2, kernel_regularizer=l2(weight_decay), activation='softmax')(y)

    model = Model(inputs=[x1_input, x2_input], outputs=[y])
    model.summary()

    return model
예제 #19
0
def DenseUNet(nb_dense_block=4,
              growth_rate=48,
              nb_filter=96,
              reduction=0.0,
              dropout_rate=0.0,
              weight_decay=1e-4,
              weights_path=None,
              args=None):
    '''Instantiate a DenseNet-161 encoder with a U-Net style upsampling
    decoder.
        # Arguments
            nb_dense_block: number of dense blocks to add to end
            growth_rate: number of filters to add per layer within a dense
                block (channels of feature maps concatenated to the others)
            nb_filter: initial number of filters (note: overridden to 96
                below to match DenseNet-161)
            reduction: reduction factor of transition blocks.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            weights_path: path to pre-trained weights (NOTE(review):
                accepted but never loaded in this function)
            args: namespace providing `b` (batch size) and `input_size`,
                used for the TensorFlow-backend input shape
        # Returns
            A Keras model instance.

    NOTE(review): relies on module-level helpers `dense_block` and
    `transition_block` and on the custom `Scale` layer defined elsewhere
    in this project. `K.image_dim_ordering()` is the Keras 1.x API name
    (later `image_data_format`) -- confirm the targeted Keras version.
    '''
    eps = 1.1e-5  # BatchNorm epsilon matching the original Caffe weights

    # compute compression factor
    compression = 1.0 - reduction

    # Handle Dimension Ordering for different backends
    global concat_axis
    if K.image_dim_ordering() == 'tf':
        concat_axis = 3
        img_input = Input(batch_shape=(args.b, args.input_size,
                                       args.input_size, 3),
                          name='data')
    else:
        concat_axis = 1
        img_input = Input(shape=(3, 224, 224), name='data')

    # From architecture for ImageNet (Table 1 in the paper)
    nb_filter = 96
    nb_layers = [6, 12, 36, 24]  # For DenseNet-161
    # box collects intermediate feature maps (NOTE(review): populated but
    # not consumed by the decoder below -- presumably kept for skip
    # connections in a variant; verify before removing).
    box = []
    # Initial convolution
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Conv2D(nb_filter, (7, 7), strides=(2, 2), name='conv1',
               use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    box.append(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # Add dense blocks
    # Each dense block is followed by a transition block that compresses
    # channels by `compression` and halves spatial resolution.
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2
        x, nb_filter = dense_block(x,
                                   stage,
                                   nb_layers[block_idx],
                                   nb_filter,
                                   growth_rate,
                                   dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)
        box.append(x)
        # Add transition_block
        x = transition_block(x,
                             stage,
                             nb_filter,
                             compression=compression,
                             dropout_rate=dropout_rate,
                             weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # Final dense block has no trailing transition.
    final_stage = stage + 1
    x, nb_filter = dense_block(x,
                               final_stage,
                               nb_layers[-1],
                               nb_filter,
                               growth_rate,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    x = BatchNormalization(epsilon=eps,
                           axis=concat_axis,
                           name='conv' + str(final_stage) + '_blk_bn')(x)
    x = Scale(axis=concat_axis,
              name='conv' + str(final_stage) + '_blk_scale')(x)
    x = Activation('relu', name='relu' + str(final_stage) + '_blk')(x)
    box.append(x)

    # Decoder: five 2x upsample + conv/BN/ReLU stages back to input size.
    up0 = UpSampling2D(size=(2, 2))(x)
    conv_up0 = Conv2D(768, (3, 3),
                      padding="same",
                      kernel_initializer="normal",
                      name="conv_up0")(up0)
    bn_up0 = BatchNormalization(name="bn_up0")(conv_up0)
    ac_up0 = Activation('relu', name='ac_up0')(bn_up0)

    up1 = UpSampling2D(size=(2, 2))(ac_up0)
    conv_up1 = Conv2D(384, (3, 3),
                      padding="same",
                      kernel_initializer="normal",
                      name="conv_up1")(up1)
    bn_up1 = BatchNormalization(name="bn_up1")(conv_up1)
    ac_up1 = Activation('relu', name='ac_up1')(bn_up1)

    up2 = UpSampling2D(size=(2, 2))(ac_up1)
    conv_up2 = Conv2D(96, (3, 3),
                      padding="same",
                      kernel_initializer="normal",
                      name="conv_up2")(up2)
    bn_up2 = BatchNormalization(name="bn_up2")(conv_up2)
    ac_up2 = Activation('relu', name='ac_up2')(bn_up2)

    up3 = UpSampling2D(size=(2, 2))(ac_up2)
    conv_up3 = Conv2D(96, (3, 3),
                      padding="same",
                      kernel_initializer="normal",
                      name="conv_up3")(up3)
    bn_up3 = BatchNormalization(name="bn_up3")(conv_up3)
    ac_up3 = Activation('relu', name='ac_up3')(bn_up3)

    up4 = UpSampling2D(size=(2, 2))(ac_up3)
    conv_up4 = Conv2D(64, (3, 3),
                      padding="same",
                      kernel_initializer="normal",
                      name="conv_up4")(up4)
    conv_up4 = Dropout(rate=0.3)(conv_up4)
    bn_up4 = BatchNormalization(name="bn_up4")(conv_up4)
    ac_up4 = Activation('relu', name='ac_up4')(bn_up4)

    # Final 1x1 conv producing 3 output channels (no activation here).
    x = Conv2D(3, (1, 1),
               padding="same",
               kernel_initializer="normal",
               name="dense167classifer")(ac_up4)

    model = Model(img_input, x, name='denseu161')

    return model
예제 #20
0
def uNet3(seq_len=3):
    """Build a U-Net regression model over (N, N, seq_len) inputs.

    Uses the module-level constant ``N`` for the spatial size. Written
    against the Keras 1 API (``border_mode=``, ``merge``,
    ``Model(input=..., output=...)``) to match the rest of this file.

    Fixes vs. the previous revision:
    * the banner printed "uNet2" while this function builds uNet3;
    * Dropout layers drop2..drop9 were created but never connected
      (each second conv consumed the pre-dropout tensor), so dropout
      was silently inactive everywhere except the first encoder block.

    Returns
    -------
    An uncompiled Keras Model mapping (N, N, seq_len) -> (N, N); the
    final tanh activation suits regression targets in [-1, 1].
    """
    print("Model = uNet3")

    inputs = Input((N, N, seq_len))

    # Contracting path: conv -> dropout -> conv -> pool -> batchnorm.
    conv1 = Conv2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    drop1 = Dropout(0.5)(conv1)
    conv1 = Conv2D(32, 3, 3, activation='relu', border_mode='same')(drop1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    batch1 = BatchNormalization()(pool1)

    conv2 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(batch1)
    drop2 = Dropout(0.5)(conv2)
    conv2 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(drop2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    batch2 = BatchNormalization()(pool2)

    conv3 = Conv2D(128, 3, 3, activation='relu', border_mode='same')(batch2)
    drop3 = Dropout(0.5)(conv3)
    conv3 = Conv2D(128, 3, 3, activation='relu', border_mode='same')(drop3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    batch3 = BatchNormalization()(pool3)

    conv4 = Conv2D(256, 3, 3, activation='relu', border_mode='same')(batch3)
    drop4 = Dropout(0.5)(conv4)
    conv4 = Conv2D(256, 3, 3, activation='relu', border_mode='same')(drop4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    batch4 = BatchNormalization()(pool4)

    # Bottleneck (no pooling).
    conv5 = Conv2D(512, 3, 3, activation='relu', border_mode='same')(batch4)
    drop5 = Dropout(0.5)(conv5)
    conv5 = Conv2D(512, 3, 3, activation='relu', border_mode='same')(drop5)
    batch5 = BatchNormalization()(conv5)

    # Expanding path: upsample, concat with matching encoder tensor.
    up6 = merge([UpSampling2D(size=(2, 2))(batch5), conv4], mode='concat', concat_axis=3)
    conv6 = Conv2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    drop6 = Dropout(0.5)(conv6)
    conv6 = Conv2D(256, 3, 3, activation='relu', border_mode='same')(drop6)
    batch6 = BatchNormalization()(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(batch6), conv3], mode='concat', concat_axis=3)
    conv7 = Conv2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    drop7 = Dropout(0.5)(conv7)
    conv7 = Conv2D(128, 3, 3, activation='relu', border_mode='same')(drop7)
    batch7 = BatchNormalization()(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(batch7), conv2], mode='concat', concat_axis=3)
    conv8 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    drop8 = Dropout(0.5)(conv8)
    conv8 = Conv2D(64, 3, 3, activation='relu', border_mode='same')(drop8)
    batch8 = BatchNormalization()(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(batch8), conv1], mode='concat', concat_axis=3)
    conv9 = Conv2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    drop9 = Dropout(0.5)(conv9)
    conv9 = Conv2D(32, 3, 3, activation='relu', border_mode='same')(drop9)
    batch9 = BatchNormalization()(conv9)

    # For regression see: https://github.com/ncullen93/Unet-ants/blob/master/code/models/create_unet_model.py
    conv10 = Conv2D(1, 1, activation='tanh')(batch9)

    # Collapse the trailing channel dimension: (N, N, 1) -> (N, N).
    flat = Flatten()(conv10)
    reshape = Reshape((N, N))(flat)

    model = Model(input=inputs, output=reshape)

    print((model.summary()))

    return model
예제 #21
0
    def generator_model(self):
        """Build the frame-prediction generator networks.

        Returns a 3-tuple ``(autoencoder, state_encoder, decoder)``:

        * ``autoencoder`` — maps a (4, H, W) frame stack plus a 3-vector
          action to one (sigmoid) output frame; compiled with Adam and
          binary cross-entropy.
        * ``state_encoder`` — the image branch only, emitting the
          32-dim encoded state.
        * ``decoder`` — a standalone (encoded_state, action) -> frame
          model, also compiled.

        Written against the Keras 1 API (``Convolution2D(n, 3, 3)``,
        ``subsample=``, ``border_mode=``, ``BatchNormalization(mode=2)``,
        ``merge``).

        NOTE(review): input is channels-first (4, H, W) — assumes a
        Theano-style image dim ordering; confirm backend configuration.
        """
        input_img = Input(shape=(4, self.frame_height, self.frame_width))

        # state encoder: three stride-2 conv+BN stages, then a 32-dim
        # dense bottleneck.
        x = Convolution2D(16,
                          3,
                          3,
                          subsample=(2, 2),
                          activation='relu',
                          border_mode='same')(input_img)
        x = BatchNormalization(mode=2)(x)
        x = Convolution2D(32,
                          3,
                          3,
                          subsample=(2, 2),
                          activation='relu',
                          border_mode='same')(x)
        x = BatchNormalization(mode=2)(x)
        x = Convolution2D(64,
                          3,
                          3,
                          subsample=(2, 2),
                          activation='relu',
                          border_mode='same')(x)
        x = BatchNormalization(mode=2)(x)
        x = Flatten()(x)
        encoded_state = Dense(32, activation='relu')(x)
        #encoded_state = Lambda(lambda a: K.greater(a, K.zeros_like(a)), output_shape=(32,))(encoded_state)
        state_encoder = Model(input=input_img, output=encoded_state)

        # action encoder: 3 -> 8 -> 8 dense stack.
        action = Input(shape=(3, ))
        x = Dense(input_dim=3, output_dim=8, activation='relu')(action)
        encoded_action = Dense(8, activation='relu')(x)

        # 32-dim state + 8-dim action -> 40-dim joint code.
        encoded = merge([encoded_state, encoded_action], mode='concat')
        #encoded = Lambda(lambda a: K.cast(a, 'float32'), output_shape=(40,))(encoded)

        # decoder path: project to a (64, 9, 10) map, then three
        # conv + 2x-upsample + BN stages up to the output frame.
        x = Dense(input_dim=40, output_dim=64 * 9 * 10)(encoded)
        x = Reshape((64, 9, 10))(x)
        x = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = BatchNormalization(mode=2)(x)
        x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = BatchNormalization(mode=2)(x)
        x = Convolution2D(4, 3, 3, activation='relu', border_mode='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = BatchNormalization(mode=2)(x)
        decoded = Convolution2D(1,
                                3,
                                3,
                                activation='sigmoid',
                                border_mode='same')(x)

        autoencoder = Model([input_img, action], decoded)
        autoencoder.compile(optimizer=Adam(lr=5e-4),
                            loss='binary_crossentropy')
        #autoencoder.summary()

        #######################################################
        # Standalone decoder taking a precomputed encoded state.
        # NOTE(review): these are freshly constructed layers, so this
        # decoder does NOT share weights with the autoencoder's decoder
        # path above — confirm whether weight sharing was intended.
        encoded_state = Input(shape=(32, ))
        action = Input(shape=(3, ))
        x = Dense(input_dim=3, output_dim=8, activation='relu')(action)
        encoded_action = Dense(8, activation='relu')(x)
        encoded = merge([encoded_state, encoded_action], mode='concat')
        encoded = Lambda(lambda a: K.cast(a, 'float32'),
                         output_shape=(40, ))(encoded)
        x = Dense(input_dim=40, output_dim=64 * 9 * 10)(encoded)
        x = Reshape((64, 9, 10))(x)
        x = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = BatchNormalization(mode=2)(x)
        x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = BatchNormalization(mode=2)(x)
        x = Convolution2D(4, 3, 3, activation='relu', border_mode='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = BatchNormalization(mode=2)(x)
        decoded = Convolution2D(1,
                                3,
                                3,
                                activation='sigmoid',
                                border_mode='same')(x)
        decoder = Model([encoded_state, action], decoded)
        decoder.compile(optimizer=Adam(lr=5e-4), loss='binary_crossentropy')

        return autoencoder, state_encoder, decoder
예제 #22
0
                        input_shape=input_shape))
# Keras 1 script fragment (BatchNormalization(mode=...), init=, nb_epoch=).
# The names `recog`, `img2`, `shape`, `thre`, `part`, `sgd` are defined
# earlier in the script, outside this excerpt.
recog.add(BatchNormalization(mode=1))
# Probe the activations of layer `part` for sample img2[0] at test time
# (learning_phase=0), then binarize: entries above 1% of the minimum are
# set to 1. The result `c` is captured as a constant in the Lambda below.
# NOTE(review): if activations can be negative, .01*np.min(c) is a small
# negative threshold, so nearly everything becomes 1 — confirm intent.
c=get_0_layer_output=K.function([recog.layers[0].input, 
                                 K.learning_phase()],[recog.layers[part].output]);c=get_0_layer_output([img2[0].reshape(1,shape,shape,1), 0])[0][0];c[c>.01*np.min(c)]=1
recog.add(BatchNormalization(mode=2))
recog.add(Activation('sigmoid'))
# Add the probed (binarized) map, scaled by `thre`, as a fixed bias.
recog.add(Lambda(lambda x: x+thre*c,output_shape=(26,26,20)))
recog.add(BatchNormalization(mode=2))
recog.add(MaxPooling2D(pool_size=(3,3)))
recog.add(BatchNormalization(mode=2))
recog.add(Activation('relu'))
# Decoder half: conv / upsample stages back to a single-channel map.
recog.add(Convolution2D(20, 3, 3,
                            init='glorot_uniform'))
recog.add(BatchNormalization(mode=2))
recog.add(Activation('relu'))
recog.add(UpSampling2D(size=(3, 3)))
recog.add(Convolution2D(20, 3, 3,init='glorot_uniform'))
recog.add(BatchNormalization(mode=2))
recog.add(Activation('relu'))
recog.add(UpSampling2D(size=(2, 2)))
recog.add(Convolution2D(1, 5, 5,init='glorot_uniform'))
recog.add(BatchNormalization(mode=2))
recog.add(Activation('relu'))

recog.compile(loss='mean_squared_error', optimizer=sgd,metrics = ['mae'])
recog.summary()

# Reconstruction fit on a single sample (input == target).
recog.fit(img2[1].reshape(1,shape,shape,1), img2[1].reshape(1,shape,shape,1),
                nb_epoch=100,
                batch_size=30,verbose=1)
예제 #23
0
def CBR(channels, layer_input):
    """Upsample ``layer_input`` 2x, then apply a same-padded 3x3 conv.

    The BatchNormalization/Activation steps the name alludes to were
    disabled in the original implementation and are intentionally
    omitted here as well.
    """
    upsampled = UpSampling2D(size=2)(layer_input)
    return Conv2D(
        channels, 3, strides=1, padding="same",
        kernel_initializer=conv_init)(upsampled)
예제 #24
0
    def model_generator(self,
                        units=512,
                        dropout=0.5,
                        reg=lambda: regularizers.l1_l2(l1=1e-7, l2=1e-7)):
        """Build the generator: latent vector -> 64x64x3 sigmoid image.

        A dense projection to a 4x4 map with ``units`` channels is
        followed by four PReLU / 2x-upsample / 5x5-conv stages that
        halve the channel count each time, ending in a 3-channel
        sigmoid output. Every layer is L1/L2-regularized via ``reg``.

        NOTE(review): the ``dropout`` argument is accepted but unused —
        the SpatialDropout2D calls were disabled upstream.
        """
        h = 5  # spatial kernel size shared by every conv below
        decoder = Sequential(name="decoder")

        # Latent vector -> flat 4*4*units -> (4, 4, units) feature map.
        decoder.add(
            Dense(units * 4 * 4,
                  use_bias=True,
                  input_dim=self.latent_dim,
                  kernel_regularizer=reg()))
        decoder.add(Reshape((4, 4, units)))
        decoder.add(PReLU())
        decoder.add(
            Conv2D(units // 2, (h, h),
                   activation='linear',
                   use_bias=True,
                   padding='same',
                   kernel_regularizer=reg()))

        # Three upsampling stages: 4x4 -> 8x8 -> 16x16 -> 32x32,
        # with channels units//4, units//8, units//16.
        for divisor in (4, 8, 16):
            decoder.add(PReLU())
            decoder.add(UpSampling2D(size=(2, 2)))
            decoder.add(
                Conv2D(units // divisor, (h, h),
                       activation='linear',
                       use_bias=True,
                       padding='same',
                       kernel_regularizer=reg()))

        # Final 2x upsample to 64x64 and projection to 3 sigmoid channels.
        decoder.add(PReLU())
        decoder.add(UpSampling2D(size=(2, 2)))
        decoder.add(
            Conv2D(3, (h, h),
                   activation='sigmoid',
                   use_bias=True,
                   padding='same',
                   kernel_regularizer=reg()))

        decoder.summary()

        # Wrap the Sequential decoder as a functional Model on z.
        z = Input(shape=(self.latent_dim, ))
        img = decoder(z)
        return Model(z, img)
예제 #25
0
def sam_resnet(data):
    """ResNet-50 trunk with an attentive ConvLSTM multi-task head.

    Builds the full graph from the input tensor ``data`` and returns the
    list of output tensors
    ``[attri_outs, cate_outs, type_outs, land_outs, up3_land_outs,
    up2_land_outs, up1_land_outs]`` — attribute/category/type
    classification heads plus landmark maps at four resolutions.

    Relies on module-level names defined elsewhere in this file:
    ``conv_block``, ``identity_block``, ``AttentiveConvLSTM``,
    ``Kreshape``, ``nb_timestep``, ``cate_num``, ``type_num``.

    NOTE(review): the Reshape calls below hard-code 14/28/56/112-px
    feature maps, which implies a 224x224 input — confirm upstream.
    NOTE(review): ``ZeroPadding2D(..., batch_size=1)`` pins batch size 1.
    """

    # dcn = dcn_resnet(input_tensor=data, trainable=True)
    bn_axis = 3
    trainable = True  #
    # conv_1: 7x7/2 stem conv, BN, ReLU, then 3x3/2 max pooling.
    conv_1_out = ZeroPadding2D((3, 3), batch_size=1)(data)
    conv_1_out = Conv2D(64, (7, 7),
                        strides=(2, 2),
                        name='conv1',
                        trainable=trainable)(conv_1_out)
    conv_1_out = BatchNormalization(axis=bn_axis,
                                    name='bn_conv1',
                                    trainable=trainable)(conv_1_out)
    conv_1_out_b = Activation('relu')(conv_1_out)
    conv_1_out = MaxPooling2D((3, 3), strides=(2, 2),
                              padding='same')(conv_1_out_b)

    # conv_2: ResNet stage 2 (one conv block + two identity blocks).
    conv_2_out = conv_block(conv_1_out,
                            3, [64, 64, 256],
                            stage=2,
                            block='a',
                            strides=(1, 1),
                            trainable=trainable)
    conv_2_out = identity_block(conv_2_out,
                                3, [64, 64, 256],
                                stage=2,
                                block='b',
                                trainable=trainable)
    conv_2_out = identity_block(conv_2_out,
                                3, [64, 64, 256],
                                stage=2,
                                block='c',
                                trainable=trainable)

    # conv_3: ResNet stage 3 (stride-2 conv block + three identity blocks).
    conv_3_out = conv_block(conv_2_out,
                            3, [128, 128, 512],
                            stage=3,
                            block='a',
                            strides=(2, 2),
                            trainable=trainable)
    conv_3_out = identity_block(conv_3_out,
                                3, [128, 128, 512],
                                stage=3,
                                block='b',
                                trainable=trainable)
    conv_3_out = identity_block(conv_3_out,
                                3, [128, 128, 512],
                                stage=3,
                                block='c',
                                trainable=trainable)
    conv_3_out = identity_block(conv_3_out,
                                3, [128, 128, 512],
                                stage=3,
                                block='d',
                                trainable=trainable)

    # conv_4: ResNet stage 4 (conv block + five identity blocks).
    conv_4_out = conv_block(conv_3_out,
                            3, [256, 256, 1024],
                            stage=4,
                            block='a',
                            trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='b',
                                trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='c',
                                trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='d',
                                trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='e',
                                trainable=trainable)
    conv_4_out = identity_block(conv_4_out,
                                3, [256, 256, 1024],
                                stage=4,
                                block='f',
                                trainable=trainable)

    # conv_5: ResNet stage 5; stride (1, 1) keeps the spatial resolution.
    conv_5_out = conv_block(conv_4_out,
                            3, [512, 512, 2048],
                            stage=5,
                            block='a',
                            strides=(1, 1),
                            trainable=trainable)  #
    conv_5_out = identity_block(conv_5_out,
                                3, [512, 512, 2048],
                                stage=5,
                                block='b',
                                trainable=trainable)
    conv_5_out = identity_block(conv_5_out,
                                3, [512, 512, 2048],
                                stage=5,
                                block='c',
                                trainable=trainable)
    #
    # processing Resnet output: compress to 512 channels, then repeat the
    # 14x14x512 map nb_timestep times as the ConvLSTM input sequence.
    resnet_outs = Conv2D(512, (3, 3),
                         padding='same',
                         activation='relu',
                         name='resnet_out',
                         trainable=trainable)(conv_5_out)
    resnet_outs = Flatten()(resnet_outs)
    resnet_outs = RepeatVector(nb_timestep)(resnet_outs)
    resnet_outs = Reshape((nb_timestep, 14, 14, 512))(resnet_outs)

    # Attentive Convolutional LSTM over the repeated feature sequence;
    # the Lambda flattens the time dimension back to a 14x14x512 map.
    convLSTM_outs = AttentiveConvLSTM(nb_filters_in=512,
                                      nb_filters_out=512,
                                      nb_filters_att=512,
                                      nb_cols=3,
                                      nb_rows=3,
                                      name='attenconvsltm')(
                                          resnet_outs)  #, trainable=True
    convLSTM_outs = Lambda(Kreshape,
                           arguments={'shape': [-1, 14, 14, 512]},
                           output_shape=[14, 14, 512])(convLSTM_outs)
    # final land output: 8-channel sigmoid landmark map at 14x14.
    land_outs = Conv2D(8, (1, 1),
                       padding='same',
                       activation='sigmoid',
                       name='land_con5',
                       trainable=True)(convLSTM_outs)
    # upsamping land output for 3rd block: fuse with stage-3 features,
    # re-sequence over time, and refine with a ConvLSTM at 28x28.
    conv_3_out = Conv2D(64, (3, 3),
                        padding='same',
                        activation='relu',
                        name='conv_3_out',
                        trainable=True)(conv_3_out)
    up3_land_outs = UpSampling2D(size=(2, 2))(land_outs)
    up3_land_outs = Concatenate()([conv_3_out, up3_land_outs])
    up3_land_outs = Flatten()(up3_land_outs)
    up3_land_outs = RepeatVector(nb_timestep)(up3_land_outs)
    up3_land_outs = Reshape((nb_timestep, 28, 28, 72))(up3_land_outs)
    up3_land_outs = (ConvLSTM2D(filters=8,
                                kernel_size=(3, 3),
                                padding='same',
                                activation='sigmoid',
                                return_sequences=False,
                                stateful=False,
                                name='land_con3'))(up3_land_outs)
    # # upsamping land output for 2nd block: same scheme at 56x56.
    conv_2_out = Conv2D(64, (3, 3),
                        padding='same',
                        activation='relu',
                        name='conv_2_out',
                        trainable=True)(conv_2_out)
    up2_land_outs = UpSampling2D(size=(2, 2))(up3_land_outs)
    up2_land_outs = Concatenate()([conv_2_out, up2_land_outs])
    up2_land_outs = Flatten()(up2_land_outs)
    up2_land_outs = RepeatVector(nb_timestep)(up2_land_outs)
    up2_land_outs = Reshape((nb_timestep, 56, 56, 72))(up2_land_outs)
    up2_land_outs = (ConvLSTM2D(filters=8,
                                kernel_size=(3, 3),
                                padding='same',
                                activation='sigmoid',
                                return_sequences=False,
                                stateful=False,
                                name='land_con2'))(up2_land_outs)

    # # # upsamping land output for 1st block: fuse with the pre-pool
    # stem activation (conv_1_out_b) at 112x112.
    up1_land_outs = UpSampling2D(size=(2, 2))(up2_land_outs)
    up1_land_outs = Concatenate()([conv_1_out_b, up1_land_outs])
    up1_land_outs = Flatten()(up1_land_outs)
    up1_land_outs = RepeatVector(nb_timestep)(up1_land_outs)
    up1_land_outs = Reshape((nb_timestep, 112, 112, 72))(up1_land_outs)
    up1_land_outs = (ConvLSTM2D(filters=8,
                                kernel_size=(3, 3),
                                padding='same',
                                activation='sigmoid',
                                return_sequences=False,
                                stateful=False,
                                name='land_con1'))(up1_land_outs)

    # outs = Lambda(Kpool, arguments={'pool_size': (14, 14)}, output_shape=[1, 1, 512])(outs)
    # # outs = K.pool2d(outs, (7,7), pool_mode='avg')

    # Global average pool the ConvLSTM features for the dense heads.
    outs = AveragePooling2D((14, 14), name='avg_pool')(convLSTM_outs)
    outs = Flatten()(outs)
    # Attribute head: 1000 independent sigmoid units.
    attri_outs = Dense(1000,
                       kernel_initializer='normal',
                       activation='sigmoid',
                       name='attri',
                       trainable=trainable)(outs)
    # Category head: softmax over cate_num classes.
    cate_outs = Dense(cate_num,
                      kernel_initializer='normal',
                      activation='softmax',
                      name='cate',
                      trainable=trainable)(outs)
    # Type head: softmax over type_num classes.
    type_outs = Dense(type_num,
                      kernel_initializer='normal',
                      activation='softmax',
                      name='type',
                      trainable=trainable)(outs)

    # land_outs = Dense(196, kernel_initializer='normal', activation='sigmoid', name='land_all', trainable=True)(outs)
    # land_outs = Reshape((14, 14, 1))(land_outs)
    return [
        attri_outs, cate_outs, type_outs, land_outs, up3_land_outs,
        up2_land_outs, up1_land_outs
    ]  #
예제 #26
0
파일: unet.py 프로젝트: xahidbuffon/SUIM
def UNet0(input_size=(256, 256, 3), no_of_class=3):
    """Classic U-Net with a sigmoid ``no_of_class``-channel output.

    Encoder stages of 64/128/256/512 filters (dropout on the 512 stage),
    a 1024-filter bottleneck with dropout, and a mirrored decoder with
    skip concatenations. Compiled with Adam(1e-4) + binary_crossentropy.
    """

    def _conv(filters, tensor, size=3):
        # ReLU conv with he_normal init, used everywhere in this net;
        # size=2 only for the post-upsample channel-halving convs.
        return Conv2D(filters, size, activation='relu', padding='same',
                      kernel_initializer='he_normal')(tensor)

    def _up_cat(filters, below, skip):
        # 2x upsample, 2x2 conv to halve channels, then concat with skip.
        upsampled = _conv(filters, UpSampling2D(size=(2, 2))(below), size=2)
        return concatenate([skip, upsampled], axis=3)

    inputs = Input(input_size)

    # Contracting path.
    c1 = _conv(64, _conv(64, inputs))
    p1 = MaxPooling2D(pool_size=(2, 2))(c1)
    c2 = _conv(128, _conv(128, p1))
    p2 = MaxPooling2D(pool_size=(2, 2))(c2)
    c3 = _conv(256, _conv(256, p2))
    p3 = MaxPooling2D(pool_size=(2, 2))(c3)
    d4 = Dropout(0.5)(_conv(512, _conv(512, p3)))
    p4 = MaxPooling2D(pool_size=(2, 2))(d4)

    # Bottleneck.
    d5 = Dropout(0.5)(_conv(1024, _conv(1024, p4)))

    # Expanding path with skip connections.
    c6 = _conv(512, _conv(512, _up_cat(512, d5, d4)))
    c7 = _conv(256, _conv(256, _up_cat(256, c6, c3)))
    c8 = _conv(128, _conv(128, _up_cat(128, c7, c2)))
    c9 = _conv(64, _conv(64, _up_cat(64, c8, c1)))
    c9 = _conv(3, c9)
    outputs = Conv2D(no_of_class, 3, padding='same',
                     activation='sigmoid')(c9)

    model = Model(input=inputs, output=outputs)
    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    return model
예제 #27
0
def generator_unet_upsampling(x_dim,
                              y_dim,
                              model_name="generator_unet_upsampling"):
    """Build an upsampling U-Net generator (pix2pix style).

    Parameters
    ----------
    x_dim : tuple
        Input tensor shape (without the batch axis).
    y_dim : tuple
        Output tensor shape; its channel axis sets the final conv width.
    model_name : str
        Unused except as an external identifier (kept for interface
        compatibility).

    Returns
    -------
    keras Model mapping the U-Net input to a tanh output.

    Uses the module-level helpers ``conv_block_unet`` and
    ``up_conv_block_unet``.
    """
    filters_num = 16

    # BUG FIX: K.image_dim_ordering() returns 'th'/'tf', never
    # 'channels_first', so the original comparison was always False and
    # channels-first configs silently took the channels-last branch.
    # K.image_data_format() actually returns 'channels_first'/'channels_last'.
    if K.image_data_format() == "channels_first":
        bn_axis = 1
        y_channels = y_dim[0]
        min_s = min(x_dim[1:])
    else:
        bn_axis = -1
        y_channels = y_dim[-1]
        min_s = min(x_dim[:-1])

    unet_input = Input(shape=x_dim, name="unet_input")

    # Number of stride-2 encoder convs needed to shrink min_s toward 1;
    # filter widths grow 16, 32, ... capped at 16*8.
    conv_num = int(np.floor(np.log(min_s) / np.log(2)))
    list_filters_num = [filters_num * min(8, (2**i)) for i in range(conv_num)]

    # Encoder
    first_conv = Conv2D(list_filters_num[0], (3, 3),
                        strides=(2, 2),
                        name='unet_conv2D_1',
                        padding='same')(unet_input)
    list_encoder = [first_conv]
    for i, f in enumerate(list_filters_num[1:]):
        name = 'unet_conv2D_' + str(i + 2)
        conv = conv_block_unet(list_encoder[-1], f, name, bn_axis)
        list_encoder.append(conv)

    # prepare decoder filters (mirror of the encoder, minus the two
    # innermost widths, smallest width appended if needed)
    list_filters_num = list_filters_num[:-2][::-1]
    if len(list_filters_num) < conv_num - 1:
        list_filters_num.append(filters_num)

    # Decoder
    first_up_conv = up_conv_block_unet(list_encoder[-1],
                                       list_encoder[-2],
                                       list_filters_num[0],
                                       "unet_upconv2D_1",
                                       bn_axis,
                                       dropout=True)
    list_decoder = [first_up_conv]
    for i, f in enumerate(list_filters_num[1:]):
        name = "unet_upconv2D_" + str(i + 2)
        # Dropout only on the first three up-blocks (i = 0, 1 here,
        # plus the explicit dropout=True block above).
        up_conv = up_conv_block_unet(list_decoder[-1],
                                     list_encoder[-(i + 3)],
                                     f,
                                     name,
                                     bn_axis,
                                     dropout=i < 2)
        list_decoder.append(up_conv)

    # Final upsample to full resolution and project to y_channels.
    x = Activation('relu')(list_decoder[-1])
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(y_channels, (3, 3), name="last_conv", padding='same')(x)
    x = Activation('tanh')(x)

    # FIX: the original mixed legacy `input=` with Keras 2 `outputs=`.
    generator_unet = Model(inputs=[unet_input], outputs=[x])
    return generator_unet
예제 #28
0
파일: segnet.py 프로젝트: prokhn/InnoRoboML
    def build(model_name,
              img_h,
              img_w,
              img_layers,
              n_labels,
              kernel=3,
              save_path='models/{}.json') -> models.Sequential:
        """Assemble a SegNet-style encoder/decoder Sequential model.

        The VGG-like encoder downsamples through five pooling stages;
        the decoder mirrors it with five upsampling stages and ends in a
        1x1 conv over ``n_labels`` classes followed by a per-pixel
        softmax. The architecture JSON is written to
        ``save_path.format(model_name)`` and the (uncompiled) model is
        returned. The layer lists are also attached to the model as
        ``encoding_layers`` / ``decoding_layers``, as before.
        """

        def conv_bn_relu(filters, **conv_kwargs):
            # One Conv2D(kernel x kernel, same padding) -> BatchNorm -> ReLU unit.
            return [
                Conv2D(filters, (kernel, kernel), padding='same',
                       **conv_kwargs),
                BatchNormalization(),
                Activation('relu'),
            ]

        # Encoder: conv stacks of growing width, each closed by max pooling.
        encoding_layers = conv_bn_relu(
            64, input_shape=(img_w, img_h, img_layers))
        for stage in ([64], [128, 128], [256, 256, 256],
                      [512, 512, 512], [512, 512, 512]):
            for filters in stage:
                encoding_layers += conv_bn_relu(filters)
            encoding_layers.append(MaxPooling2D())

        # Decoder: mirror of the encoder, each stage opened by upsampling.
        decoding_layers = []
        for stage in ([512, 512, 512], [512, 512, 256],
                      [256, 256, 128], [128, 64], [64]):
            decoding_layers.append(UpSampling2D())
            for filters in stage:
                decoding_layers += conv_bn_relu(filters)
        decoding_layers += [
            Conv2D(n_labels, (1, 1), padding='valid'),
            BatchNormalization(),
        ]

        autoencoder = models.Sequential()
        autoencoder.encoding_layers = encoding_layers
        for layer in autoencoder.encoding_layers:
            autoencoder.add(layer)

        autoencoder.decoding_layers = decoding_layers
        for layer in autoencoder.decoding_layers:
            autoencoder.add(layer)

        # Per-pixel softmax over n_labels classes.
        autoencoder.add(Reshape((n_labels, img_h * img_w)))
        autoencoder.add(Permute((2, 1)))
        autoencoder.add(Activation('softmax'))

        # Persist the architecture (pretty-printed JSON) for later reload.
        with open(save_path.format(model_name), 'w') as outfile:
            outfile.write(
                json.dumps(json.loads(autoencoder.to_json()), indent=2))

        return autoencoder
예제 #29
0
def train(epochs, batchSize=128):
    """Train a DCGAN on Fashion-MNIST.

    Alternates discriminator and generator updates for `epochs` epochs,
    logging progress and saving model checkpoints every 5 epochs (and
    after the first). The generator architecture is also serialized to
    JSON before training starts.

    Args:
        epochs: Number of passes over the training set.
        batchSize: Number of real images per discriminator batch (the
            discriminator actually sees 2 * batchSize samples: real + fake).
    """
    # Theano-style (channels-first) image ordering: tensors are (C, H, W).
    K.set_image_dim_ordering('th')

    # Deterministic output.
    # Tired of seeing the same results every time? Remove the line below.
    np.random.seed(1000)

    # The results are a little better when the dimensionality of the random vector is only 10.
    # The dimensionality has been left at 100 for consistency with other GAN implementations.
    randomDim = 100

    # Load MNIST data and scale pixel values into [-1, 1] to match the
    # generator's tanh output range.
    (X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
    X_train = (X_train.astype(np.float32) - 127.5)/127.5
    # Insert a channel axis: (N, 28, 28) -> (N, 1, 28, 28) for channels-first.
    X_train = X_train[:, np.newaxis, :, :]

    # Optimizer (lr/beta_1 values are the standard DCGAN settings).
    adam = Adam(lr=0.0002, beta_1=0.5)

    # Generator: 100-d noise -> (128, 7, 7) -> upsample twice -> (1, 28, 28).
    generator = Sequential()
    generator.add(Dense(128*7*7, input_dim=randomDim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    generator.add(LeakyReLU(0.2))
    generator.add(Reshape((128, 7, 7)))
    generator.add(UpSampling2D(size=(2, 2)))
    generator.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
    generator.add(LeakyReLU(0.2))
    generator.add(UpSampling2D(size=(2, 2)))
    generator.add(Conv2D(1, kernel_size=(5, 5), padding='same', activation='tanh'))
    generator.compile(loss='binary_crossentropy', optimizer=adam)

    # Serialize the generator architecture to JSON before training.
    model_json = generator.to_json()
    with open("models_colin/dc_generator_arch.json", "w") as json_file:
        json_file.write(model_json)

    # Discriminator: (1, 28, 28) image -> probability the image is real.
    discriminator = Sequential()
    discriminator.add(Conv2D(64, kernel_size=(5, 5), strides=(2, 2), padding='same', input_shape=(1, 28, 28), kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Conv2D(128, kernel_size=(5, 5), strides=(2, 2), padding='same'))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Flatten())
    discriminator.add(Dense(1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=adam)

    # Combined network: freeze the discriminator so that gan.train_on_batch
    # only updates the generator's weights.
    discriminator.trainable = False
    ganInput = Input(shape=(randomDim,))
    x = generator(ganInput)
    ganOutput = discriminator(x)
    gan = Model(inputs=ganInput, outputs=ganOutput)
    gan.compile(loss='binary_crossentropy', optimizer=adam)

    dLosses = []
    gLosses = []
    logging.info('set up models')

    # Integer division: partial trailing batches are dropped. (The original
    # float division forced an int() cast at the loop and a %d mismatch.)
    batchCount = X_train.shape[0] // batchSize

    logging.info('epochs %d batch size %d batches per epoch %d' % (epochs, batchSize, batchCount))

    for e in range(1, epochs+1):
        logging.info('epoch %d' % e)
        for _ in range(batchCount):
            # Get a random set of input noise and images
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            imageBatch = X_train[np.random.randint(0, X_train.shape[0], size=batchSize)]

            # Generate fake MNIST images
            generatedImages = generator.predict(noise)
            X = np.concatenate([imageBatch, generatedImages])

            # Labels for generated (0) and real data; one-sided label
            # smoothing uses 0.9 instead of 1.0 for the real images.
            yDis = np.zeros(2*batchSize)
            yDis[:batchSize] = 0.9

            # Train discriminator
            discriminator.trainable = True
            dloss = discriminator.train_on_batch(X, yDis)

            # Train generator: fresh noise, labels of 1 so the generator is
            # rewarded for fooling the (frozen) discriminator.
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            yGen = np.ones(batchSize)
            discriminator.trainable = False
            gloss = gan.train_on_batch(noise, yGen)

        # Store loss of most recent batch from this epoch
        dLosses.append(dloss)
        gLosses.append(gloss)

        if e == 1 or e % 5 == 0:
            #plotGeneratedImages(e)
            saveModels(e, generator, discriminator)
예제 #30
0
# Number of feature channels produced by the generator's dense projection.
nch = 200

# CNN generator: produces an image
# from a 100-dimensional latent vector.

g_input = Input(shape=[100])  # 100-dimensional latent input vector
# 100 dims --> 39200 (nch = 200 * 14 * 14); weight count is (100 + 1) * 39200 (input_dimension + bias) * output_dimension
H = Dense(nch * 14 * 14, kernel_initializer='glorot_normal')(g_input)  # Glorot normal weight initialization
H = BatchNormalization()(H)
H = Activation('relu')(H)

# 39200 --> 200 * 14 * 14
H = Reshape([nch, 14, 14])(H)  # reshape flat vector to (200, 14, 14)

# Upsample: 200 * 14 * 14 --> 200 * 28 * 28
H = UpSampling2D(size=(2, 2))(H)

# 200 * 28 * 28 --> 100 * 28 * 28
H = Convolution2D(100, (3, 3), padding="same", kernel_initializer='glorot_normal')(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)

# 100 * 28 * 28 --> 50 * 28 * 28
H = Convolution2D(50, (3, 3), padding="same", kernel_initializer='glorot_normal')(H)
H = BatchNormalization()(H)
H = Activation('relu')(H)

# 50 * 28 * 28 --> 1 * 28 * 28; sigmoid squashes pixels into [0, 1]
H = Convolution2D(1, (1, 1), padding="same", kernel_initializer='glorot_normal')(H)
g_V = Activation('sigmoid')(H)