Example #1
def keras_model(img_width=256, img_height=256):
    '''
    Modified from https://keunwoochoi.wordpress.com/2017/10/11/u-net-on-keras-2-0/
    '''
    n_ch_exps = [
        4, 5, 6, 7, 8, 9
    ]  # the n-th level's channel exponent, i.e. 2**n: 16, 32, 64, 128, 256, 512
    k_size = (3, 3)  # size of filter kernel
    k_init = 'he_normal'  # kernel initializer

    if K.image_data_format() == 'channels_first':
        ch_axis = 1
        input_shape = (3, img_width, img_height)
    elif K.image_data_format() == 'channels_last':
        ch_axis = 3
        input_shape = (img_width, img_height, 3)

    inp = Input(shape=input_shape)
    encodeds = []

    # encoder
    enc = inp
    for l_idx, n_ch in enumerate(n_ch_exps):
        enc = Conv2D(filters=2**n_ch,
                     kernel_size=k_size,
                     activation='relu',
                     padding='same',
                     kernel_initializer=k_init)(enc)
        enc = Dropout(0.1 * l_idx)(enc)  # rate grows with depth: 0.0, 0.1, ..., 0.5
        enc = Conv2D(filters=2**n_ch,
                     kernel_size=k_size,
                     activation='relu',
                     padding='same',
                     kernel_initializer=k_init)(enc)
        encodeds.append(enc)
        # print(l_idx, enc)
        # skip max pooling on the last (deepest) encoding step
        if n_ch < n_ch_exps[-1]:
            enc = MaxPooling2D(pool_size=(2, 2))(enc)

    # decoder
    dec = enc
    decoder_n_chs = n_ch_exps[::-1][1:]  # reversed exponents, skipping the deepest level
    for l_idx, n_ch in enumerate(decoder_n_chs):
        l_idx_rev = len(n_ch_exps) - l_idx - 2  # index of the encoder output to skip-connect
        dec = Conv2DTranspose(filters=2**n_ch,
                              kernel_size=k_size,
                              strides=(2, 2),
                              activation='relu',
                              padding='same',
                              kernel_initializer=k_init)(dec)
        dec = concatenate([dec, encodeds[l_idx_rev]], axis=ch_axis)
        dec = Conv2D(filters=2**n_ch,
                     kernel_size=k_size,
                     activation='relu',
                     padding='same',
                     kernel_initializer=k_init)(dec)
        dec = Dropout(0.1 * l_idx)(dec)
        dec = Conv2D(filters=2**n_ch,
                     kernel_size=k_size,
                     activation='relu',
                     padding='same',
                     kernel_initializer=k_init)(dec)

    outp = Conv2DTranspose(filters=1,
                           kernel_size=k_size,
                           activation='sigmoid',
                           padding='same',
                           kernel_initializer='glorot_normal')(dec)

    model = Model(inputs=[inp], outputs=[outp])

    return model
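A minimal driver sketch for Example #1. The Keras imports below and the choice of optimizer and loss are assumptions for illustration; the original snippet only defines the model.

# Hedged usage sketch; binary_crossentropy is an assumed pairing for
# the single-channel sigmoid mask output.
from keras import backend as K
from keras.layers import (Input, Conv2D, Conv2DTranspose, Dropout,
                          MaxPooling2D, concatenate)
from keras.models import Model

unet = keras_model(img_width=256, img_height=256)
unet.compile(optimizer='adam', loss='binary_crossentropy')
unet.summary()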
Example #2
def CCAE_256(input_shape):
    model = Sequential()
    pad3 = 'same'
    model.add(
        Conv2D(64,
               5,
               strides=2,
               padding='same',
               activation='relu',
               name='conv1',
               input_shape=input_shape))
    model.add(MaxPooling2D((2, 2), padding="same"))
    model.add(BatchNormalization())
    model.add(
        Conv2D(128,
               5,
               strides=2,
               padding='same',
               activation='relu',
               name='conv2'))
    model.add(MaxPooling2D((2, 2), padding="same"))
    model.add(BatchNormalization())
    model.add(
        Conv2D(256,
               3,
               strides=2,
               padding=pad3,
               activation='relu',
               name='conv3'))

    model.add(Flatten())
    model.add(BatchNormalization())
    model.add(Dense(units=128, name='embedding'))
    model.add(Dense(units=128 * 16, name='dense'))
    model.add(BatchNormalization())
    model.add(Reshape((4, 4, 128)))

    #model.summary()
    model.add(
        Conv2DTranspose(64,
                        3,
                        strides=2,
                        padding=pad3,
                        activation='relu',
                        name='deconv3'))
    model.add(UpSampling2D((2, 2)))
    model.add(
        Conv2DTranspose(32,
                        5,
                        strides=2,
                        padding='same',
                        activation='relu',
                        name='deconv2'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2DTranspose(16, 5, strides=2, padding='same',
                              name='deconv1'))
    model.add(
        Conv2DTranspose(input_shape[2],
                        5,
                        strides=2,
                        padding='same',
                        name='deconv_out'))
    #model.summary()
    return model
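A usage sketch for Example #2, assuming 256x256 RGB inputs; the MSE reconstruction loss and the fit call are illustrative assumptions.

autoencoder = CCAE_256(input_shape=(256, 256, 3))
autoencoder.compile(optimizer='adam', loss='mse')  # assumed loss
# autoencoder.fit(x_train, x_train, epochs=10, batch_size=32)  # hypothetical data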
Example #3
    def build_generator(self):
        model = Sequential()

        model.add(
            Dense(64 * self.img_rows * self.img_cols,
                  activation="relu",
                  input_dim=self.latent_dim))
        model.add(Reshape((self.img_rows, self.img_cols, 64)))
        #model.add(UpSampling2D())
        model.add(Conv2DTranspose(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        #model.add(Activation("relu"))
        model.add(LeakyReLU())
        model.add(Dropout(rate=0.3))
        #model.add(UpSampling2D())
        model.add(Conv2DTranspose(32, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        #model.add(Activation("relu"))
        model.add(LeakyReLU())
        model.add(Dropout(rate=0.3))
        model.add(
            Conv2DTranspose(self.num_channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        print("Starting Generator layers")
        for layer in model.layers:
            print(layer.input_shape, layer.output_shape)
        print("Ending Generator layers")

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        label = Input(shape=(1, ), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes,
                                              self.latent_dim)(label))

        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img)
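Sampling from the conditional generator above could look like the sketch below; the latent size 100, the 10 classes, and the `gan` instance are assumptions, not values from the original.

import numpy as np

noise = np.random.normal(0, 1, (16, 100))            # assumed latent_dim=100
labels = np.random.randint(0, 10, (16, 1))           # assumed num_classes=10
# generator = gan.build_generator()                  # hypothetical instance
# imgs = generator.predict([noise, labels])          # tanh outputs in [-1, 1]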
Example #4
    def bottleneck_decoder(self,
                           tensor,
                           nfilters,
                           upsampling=False,
                           normal=False,
                           name=''):
        """

        :param tensor: input tensor
        :param nfilters: number of filters
        :param upsampling: Enables Transposed convolution
        :param normal: Enables 3x3 convolution on feature map
        :param name: The name for the weight variable.
        :return: decoder output
        """
        y = tensor
        skip = tensor
        if upsampling:
            skip = Conv2D(filters=nfilters,
                          kernel_size=(1, 1),
                          kernel_initializer='he_normal',
                          strides=(1, 1),
                          padding='same',
                          use_bias=False,
                          name=f'1x1_conv_skip_{name}')(skip)
            skip = UpSampling2D(size=(2, 2),
                                name=f'upsample_skip_{name}')(skip)

        y = Conv2D(filters=nfilters // 4,
                   kernel_size=(1, 1),
                   kernel_initializer='he_normal',
                   strides=(1, 1),
                   padding='same',
                   use_bias=False,
                   name=f'1x1_conv_{name}')(y)
        y = BatchNormalization(momentum=0.1, name=f'bn_1x1_{name}')(y)
        y = PReLU(shared_axes=[1, 2], name=f'prelu_1x1_{name}')(y)

        if upsampling:
            # upsample with learned weights through convolution with a fractional stride
            y = Conv2DTranspose(filters=nfilters // 4,
                                kernel_size=(3, 3),
                                kernel_initializer='he_normal',
                                strides=(2, 2),
                                padding='same',
                                name=f'3x3_deconv_{name}')(y)
        elif normal:
            y = Conv2D(filters=nfilters // 4,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       kernel_initializer='he_normal',
                       padding='same',
                       name=f'3x3_conv_{name}')(y)
        y = BatchNormalization(momentum=0.1, name=f'bn_main_{name}')(y)
        y = PReLU(shared_axes=[1, 2], name=f'prelu_{name}')(y)

        y = Conv2D(filters=nfilters,
                   kernel_size=(1, 1),
                   kernel_initializer='he_normal',
                   use_bias=False,
                   name=f'final_1x1_{name}')(y)
        y = BatchNormalization(momentum=0.1, name=f'bn_final_{name}')(y)

        y = Add(name=f'add_{name}')([y, skip])
        y = ReLU(name=f'relu_out_{name}')(y)

        return y
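A sketch of how these residual decoder bottlenecks might be chained inside the same class; the filter widths and stage names below are assumptions for illustration.

# x = self.bottleneck_decoder(x, 64, upsampling=True, name='4.0')  # 2x upsample
# x = self.bottleneck_decoder(x, 64, normal=True, name='4.1')      # refine
# x = self.bottleneck_decoder(x, 16, upsampling=True, name='5.0')  # 2x upsample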
Example #5
    def build(self):
        """
        Build the model for training
        """

        print('. . . . .Building VGG. . . . .')

        inputs = Input(shape=(self.im_height, self.im_width, 3))

        # Block 1
        block1_conv1 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv1')(inputs)
        block1_conv2 = Conv2D(64, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block1_conv2')(block1_conv1)
        block1_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block1_pool')(block1_conv2)

        # Block 2
        block2_conv1 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv1')(block1_pool)
        block2_conv2 = Conv2D(128, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block2_conv2')(block2_conv1)
        block2_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block2_pool')(block2_conv2)

        # Block 3
        block3_conv1 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv1')(block2_pool)
        block3_conv2 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv2')(block3_conv1)
        block3_conv3 = Conv2D(256, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block3_conv3')(block3_conv2)
        block3_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block3_pool')(block3_conv3)

        # Block 4
        block4_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv1')(block3_pool)
        block4_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv2')(block4_conv1)
        block4_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block4_conv3')(block4_conv2)
        block4_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block4_pool')(block4_conv3)

        # Block 5
        block5_conv1 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv1')(block4_pool)
        block5_conv2 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv2')(block5_conv1)
        block5_conv3 = Conv2D(512, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block5_conv3')(block5_conv2)
        block5_pool = MaxPooling2D((2, 2), strides=(2, 2),
                                   name='block5_pool')(block5_conv3)

        pool5_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block5_pool)
        upsample_1 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(pool5_conv1x1)

        pool4_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block4_pool)
        add_1 = Add()([upsample_1, pool4_conv1x1])

        upsample_2 = Conv2DTranspose(2,
                                     kernel_size=(4, 4),
                                     strides=(2, 2),
                                     padding="same")(add_1)
        pool3_conv1x1 = Conv2D(2, (1, 1), activation='relu',
                               padding='same')(block3_pool)
        add_2 = Add()([upsample_2, pool3_conv1x1])

        upsample_3 = Conv2DTranspose(2,
                                     kernel_size=(16, 16),
                                     strides=(8, 8),
                                     padding="same")(add_2)
        # Dense on a 4D tensor acts on the channel axis, i.e. a 1x1 convolution
        output = Dense(2, activation='softmax')(upsample_3)

        model = Model(inputs, output, name='multinet_seg')

        print('. . . . .Build Completed. . . . .')
        return model
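A compile sketch for the segmentation model above; the optimizer and the categorical crossentropy loss (matching the two-way softmax output) are assumed, not part of the original.

# net = self.build()
# net.compile(optimizer='adam', loss='categorical_crossentropy',
#             metrics=['accuracy'])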
Example #6
def build_vae(input_shape: tuple,
              latent_dim: int,
              beta: float = 1.0,
              enable_mse: bool = False,
              enable_graph: bool = False,
              verbose: int = 0):
    intermediate_dim = 512
    original_dim = np.prod(input_shape)
    
    if verbose > 0:
        print("input_shape:", input_shape)

    if len(input_shape) == 1:
        # VAE model = encoder + decoder
        # build encoder model
        inputs = Input(shape=input_shape, name='encoder_input')
        x = Dense(intermediate_dim, activation='relu')(inputs)
        z_mean = Dense(latent_dim, name='z_mean')(x)
        z_log_var = Dense(latent_dim, name='z_log_var')(x)

        # use reparameterization trick to push the sampling out as input
        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

        # instantiate encoder model
        encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
        encoder.summary()
        if enable_graph:
            plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)

        # build decoder model
        latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
        x = Dense(intermediate_dim, activation='relu')(latent_inputs)
        outputs = Dense(original_dim, activation='sigmoid')(x)

        # instantiate decoder model
        decoder = Model(latent_inputs, outputs, name='decoder')
        decoder.summary()
        if enable_graph:
            plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)
    else:
        kernel_size = 5
        inputs = Input(shape=input_shape, name='encoder_input')
        x = Conv2D(filters=8, kernel_size=kernel_size, activation="relu", strides=2, padding="same")(inputs)
        x = Conv2D(filters=16, kernel_size=kernel_size, activation="relu", strides=2, padding="same")(x)
        shape = K.int_shape(x)[1:]

        x = Flatten()(x)
        x = Dense(intermediate_dim, activation='relu')(x)
        z_mean = Dense(latent_dim, name='z_mean')(x)
        z_log_var = Dense(latent_dim, name='z_log_var')(x)

        # use reparameterization trick to push the sampling out as input
        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

        # instantiate encoder model
        encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
        encoder.summary()
        if enable_graph:
            plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)

        # build decoder model
        latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
        x = Dense(np.prod(shape), activation='relu')(latent_inputs)
        x = Reshape(shape)(x)
        x = Conv2DTranspose(filters=8, kernel_size=kernel_size, activation="relu", strides=2, padding="same")(x)
        outputs = Conv2DTranspose(filters=1, kernel_size=kernel_size, activation="sigmoid", strides=2, padding="same")(x)  # assumes single-channel inputs; use input_shape[-1] for multi-channel data

        # instantiate decoder model
        decoder = Model(latent_inputs, outputs, name='decoder')
        decoder.summary()
        if enable_graph:
            plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)

    # instantiate VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae_mlp')

    return vae, encoder, decoder, vae_loss(enable_mse, beta, original_dim, z_mean, z_log_var)
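Wiring Example #6 together might look like the following; it assumes the external `sampling` and `vae_loss` helpers the snippet references, and that `vae_loss` returns a Keras-compatible loss function.

vae, encoder, decoder, loss_fn = build_vae(input_shape=(28, 28, 1),
                                           latent_dim=2,
                                           beta=1.0)
vae.compile(optimizer='adam', loss=loss_fn)  # assumed compile step
# vae.fit(x_train, x_train, epochs=30, batch_size=128)  # hypothetical data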
Example #7
def Nest_Net(img_input, keep_prob=0.5, num_class=1, deep_supervision=True):
    nb_filter = [32, 64, 128, 256, 512]

    # Handle Dimension Ordering for different backends
    global bn_axis
    global keep_rate
    keep_rate = keep_prob
    bn_axis = 3  # channels-last (TensorFlow) ordering; use 1 for channels-first backends

    conv1_1 = standard_unit(img_input, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)

    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1])
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)

    up1_2 = Conv2DTranspose(nb_filter[0], (2, 2),
                            strides=(2, 2),
                            name='up12',
                            padding='same')(conv2_1)
    conv1_2 = concatenate([up1_2, conv1_1], name='merge12', axis=bn_axis)
    conv1_2 = standard_unit(conv1_2, stage='12', nb_filter=nb_filter[0])

    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2])
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)

    up2_2 = Conv2DTranspose(nb_filter[1], (2, 2),
                            strides=(2, 2),
                            name='up22',
                            padding='same')(conv3_1)
    conv2_2 = concatenate([up2_2, conv2_1], name='merge22', axis=bn_axis)
    conv2_2 = standard_unit(conv2_2, stage='22', nb_filter=nb_filter[1])

    up1_3 = Conv2DTranspose(nb_filter[0], (2, 2),
                            strides=(2, 2),
                            name='up13',
                            padding='same')(conv2_2)
    conv1_3 = concatenate([up1_3, conv1_1, conv1_2],
                          name='merge13',
                          axis=bn_axis)
    conv1_3 = standard_unit(conv1_3, stage='13', nb_filter=nb_filter[0])

    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3])
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)

    up3_2 = Conv2DTranspose(nb_filter[2], (2, 2),
                            strides=(2, 2),
                            name='up32',
                            padding='same')(conv4_1)
    conv3_2 = concatenate([up3_2, conv3_1], name='merge32', axis=bn_axis)
    conv3_2 = standard_unit(conv3_2, stage='32', nb_filter=nb_filter[2])

    up2_3 = Conv2DTranspose(nb_filter[1], (2, 2),
                            strides=(2, 2),
                            name='up23',
                            padding='same')(conv3_2)
    conv2_3 = concatenate([up2_3, conv2_1, conv2_2],
                          name='merge23',
                          axis=bn_axis)
    conv2_3 = standard_unit(conv2_3, stage='23', nb_filter=nb_filter[1])

    up1_4 = Conv2DTranspose(nb_filter[0], (2, 2),
                            strides=(2, 2),
                            name='up14',
                            padding='same')(conv2_3)
    conv1_4 = concatenate([up1_4, conv1_1, conv1_2, conv1_3],
                          name='merge14',
                          axis=bn_axis)
    conv1_4 = standard_unit(conv1_4, stage='14', nb_filter=nb_filter[0])

    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])

    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2),
                            strides=(2, 2),
                            name='up42',
                            padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3])

    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2),
                            strides=(2, 2),
                            name='up33',
                            padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1, conv3_2],
                          name='merge33',
                          axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2])

    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2),
                            strides=(2, 2),
                            name='up24',
                            padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1, conv2_2, conv2_3],
                          name='merge24',
                          axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1])

    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2),
                            strides=(2, 2),
                            name='up15',
                            padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1, conv1_2, conv1_3, conv1_4],
                          name='merge15',
                          axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0])

    nestnet_output_1 = Conv2D(num_class, (1, 1),
                              name='output_1',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_2)
    nestnet_output_2 = Conv2D(num_class, (1, 1),
                              name='output_2',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_3)
    nestnet_output_3 = Conv2D(num_class, (1, 1),
                              name='output_3',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_4)
    nestnet_output_4 = Conv2D(num_class, (1, 1),
                              name='output_4',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_5)

    if deep_supervision:
        output = [
            nestnet_output_1, nestnet_output_2, nestnet_output_3,
            nestnet_output_4
        ]
    else:
        output = [nestnet_output_4]

    return output
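Nest_Net returns a list of output tensors rather than a compiled model, so the caller wraps them; the 256x256 RGB input below is an assumption, and `standard_unit` must come from the same module.

img_input = Input(shape=(256, 256, 3), name='main_input')  # assumed size
outputs = Nest_Net(img_input, keep_prob=0.5, num_class=1,
                   deep_supervision=True)
model = Model(inputs=img_input, outputs=outputs)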
Example #8
def build_model(image_size,
                n_classes):
    ''' NOTE(GP) original description. Unused argument descriptions have been removed
    Build a Keras model with SSD architecture, see references.

    The model consists of convolutional feature layers and a number of convolutional
    predictor layers that take their input from different feature layers.
    The model is fully convolutional.

    The implementation found here is a smaller version of the original architecture
    used in the paper (where the base network consists of a modified VGG-16 extended
    by a few convolutional feature layers), but of course it could easily be changed to
    an arbitrarily large SSD architecture by following the general design pattern used here.
    This implementation has 7 convolutional layers and 4 convolutional predictor
    layers that take their input from layers 4, 5, 6, and 7, respectively.

    In the original implementation this function took many more arguments: everything
    beyond `image_size` and `n_classes` existed only so that the anchor box layers
    could produce the correct anchor boxes. When training, those parameters had to
    match the ones used to set up `SSDBoxEncoder`; when loading trained weights,
    they had to match the ones used to produce those weights.

    Those arguments are explained in more detail in the documentation of the
    `SSDBoxEncoder` class.

    Note: Requires Keras v2.0 or later. Training currently works only with the
    TensorFlow backend (v1.0 or later).

    Arguments:
        image_size (tuple): The input image size in the format `(height, width, channels)`.
        n_classes (int): The number of categories for classification including
            the background class (i.e. the number of positive classes + 1 for
            the background class).

    Returns:
        model: The Keras SSD model.

    References:
        https://arxiv.org/abs/1512.02325v5
    '''

    # Input image format
    img_height, img_width, img_channels = image_size[0], image_size[1], image_size[2]

    # Design the actual network
    x = Input(shape=(img_height, img_width, img_channels))
    normed = Lambda(lambda z: z/127.5 - 1., # Convert input feature range to [-1,1]
                    output_shape=(img_height, img_width, img_channels),
                    name='lambda1')(x)
    # 400 * 400 * 3
    conv1 = Conv2D(16, (5, 5), name='conv1', strides=(1, 1), padding="same")(normed) # 400 * 400
    conv1 = BatchNormalization(axis=3, momentum=0.99, name='bn1')(conv1) # Tensorflow uses filter format [filter_height, filter_width, in_channels, out_channels], hence axis = 3
    conv1 = ELU(name='elu1')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), name='pool1')(conv1)
#    200 * 200
    conv2 = Conv2D(32, (3, 3), name='conv2', strides=(1, 1), padding="same")(pool1) # 200 * 200
    conv2 = BatchNormalization(axis=3, momentum=0.99, name='bn2')(conv2)
    conv2 = ELU(name='elu2')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), name='pool2')(conv2)
#    100 * 100
    conv3 = Conv2D(64, (3, 3), name='conv3', strides=(1, 1), padding="same")(pool2) # 100 * 100
    conv3 = BatchNormalization(axis=3, momentum=0.99, name='bn3')(conv3)
    conv3 = ELU(name='elu3')(conv3)
#     LAYER REMOVED FOR TIME BEING
#     conv3b = Conv2D(32, (3, 3), name='conv3b', strides=(1, 1), padding="same")(conv3) # 100 * 100
#     conv3b = BatchNormalization(axis=3, momentum=0.99, name='bn3b')(conv3b)
#     conv3b = ELU(name='elu3b')(conv3b)
    pool3 = MaxPooling2D(pool_size=(2, 2), name='pool3')(conv3)
#     LAYER REMOVED FOR TIME BEING
#     conv4 = Conv2D(64, (3, 3), name='conv4', strides=(1, 1), padding="same")(pool3) # 50*50
#     conv4 = BatchNormalization(axis=3, momentum=0.99, name='bn4')(conv4)
#     conv4 = ELU(name='elu4')(conv4)
#     pool4 = MaxPooling2D(pool_size=(2, 2), name='pool4')(conv4)
#    50 * 50
    conv5 = Conv2D(128, (3, 3), name='conv5', strides=(1, 1), padding="same")(pool3) #50*50
    conv5 = BatchNormalization(axis=3, momentum=0.99, name='bn5')(conv5)
    conv5 = ELU(name='elu5')(conv5)
    pool5 = MaxPooling2D(pool_size=(2, 2), name='pool5')(conv5)
# 25 * 25
    conv6 = Conv2D(128, (3, 3), name='conv6', strides=(1, 1), padding="valid")(pool5) # 23 * 23 (valid padding shrinks 25 -> 23)
    conv6 = BatchNormalization(axis=3, momentum=0.99, name='bn6')(conv6)
    conv6 = ELU(name='elu6')(conv6)
    pool6 = MaxPooling2D(pool_size=(2, 2), name='pool6')(conv6)
#    11 * 11
    conv6b = Conv2D(128, (3, 3), name='conv6b', strides=(1, 1), padding="same")(pool6) # 11 * 11
    conv6b = BatchNormalization(axis=3, momentum=0.99, name='bn6b')(conv6b)
    conv6b = ELU(name='elu6b')(conv6b)
    pool6b = MaxPooling2D(pool_size=(2, 2), name='pool6b')(conv6b)
#    5 * 5
#    NOTE(GP) These layers upsample the feature map back to 51 * 51
    deconv1 = Conv2DTranspose(128, (3,3),name='deconv1', strides=(1, 1), padding='same')(pool6b)
    deconv1 = BatchNormalization(axis=3, momentum=0.99, name='bndc1')(deconv1)
    deconv1 = ELU(name='eludc1')(deconv1)    
    uppool1 = UpSampling2D(3)(deconv1) 
   
    deconv2 = Conv2DTranspose(64, (3,3),name='deconv2', strides=(1, 1), padding='valid')(uppool1)
    deconv2 = BatchNormalization(axis=3, momentum=0.99, name='bndc2')(deconv2)
    deconv2 = ELU(name='eludc2')(deconv2)  
    uppool2 = UpSampling2D(3)(deconv2) 
#     51 * 51

#    NOTE(GP) This final layer produces the softmax predictions of each class in a 51 * 51 area
    out = Conv2DTranspose(21, (3,3),name='out', strides=(1, 1), activation='softmax', padding='same')(uppool2)
    
    predictions = out

    model = Model(inputs=x, outputs=predictions)

    return model
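A hedged driver for Example #8; the 400x400 input matches the size comments in the snippet, while the loss choice is an assumption (note the final layer hard-codes 21 classes regardless of `n_classes`).

model = build_model(image_size=(400, 400, 3), n_classes=21)
model.compile(optimizer='adam', loss='categorical_crossentropy')  # assumed
model.summary()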
Example #9
def get_mnist_model(args):
    '''
    Return: G, D, GAN
    '''
    # ----- Generator -----
    init_kernel = tf.random_normal_initializer(mean=0.0, stddev=0.02)
    Z_in = Input(shape=(200, ))
    x = Dense(1024, activation='relu', kernel_initializer=init_kernel)(Z_in)
    x = BatchNormalization()(x)

    x = Dense(7 * 7 * 128, activation='relu',
              kernel_initializer=init_kernel)(x)
    x = BatchNormalization()(x)

    x = Reshape((7, 7, 128))(x)
    x = Conv2DTranspose(64, (4, 4),
                        strides=(2, 2),
                        padding='same',
                        activation='relu',
                        kernel_initializer=init_kernel)(x)
    x = BatchNormalization()(x)

    G_out = Conv2DTranspose(1, (4, 4),
                            strides=(2, 2),
                            padding='same',
                            activation='tanh',
                            kernel_initializer=init_kernel)(x)
    G = Model(Z_in, G_out)
    # ----- Discriminator -----
    D_in = Input(shape=(28, 28, 1))
    x = Conv2D(64, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init_kernel)(D_in)
    x = LeakyReLU(0.1)(x)

    x = Conv2D(64, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=init_kernel)(x)
    x = LeakyReLU(0.1)(x)

    x = Flatten()(x)
    x = Dense(1024, kernel_initializer=init_kernel)(x)
    x = LeakyReLU(0.1)(x)

    D_out = Dense(1, activation='sigmoid', kernel_initializer=init_kernel)(x)
    D = Model(D_in, D_out)
    dopt = Adam(lr=args.d_lr, beta_1=0.5, beta_2=0.999)
    gamma = K.variable([1])
    D.compile(loss=D_loss, optimizer=dopt)
    # ----- GAN -----
    set_trainability(D, False)
    GAN_in = Input(shape=(200, ))
    G_out = G(GAN_in)
    GAN_out = D(G_out)
    GAN = Model(GAN_in, GAN_out)
    gopt = Adam(lr=args.g_lr, beta_1=0.5, beta_2=0.999)
    GAN.compile(loss=com_conv(G_out, args.beta, 2), optimizer=gopt)

    return G, D, GAN
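The helpers `args`, `D_loss`, `com_conv`, and `set_trainability` come from the surrounding project, so only a heavily hedged sketch of driving the returned models is possible; the batch size and label conventions below are illustrative.

import numpy as np

# G, D, GAN = get_mnist_model(args)
# z = np.random.normal(0, 1, (64, 200))                # latent batch
# d_loss = D.train_on_batch(x_batch, y_batch)          # per the external D_loss
# g_loss = GAN.train_on_batch(z, np.ones((64, 1)))     # push D towards "real"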
Example #10
def runProgram():
    k = 4095
    img_rows, img_cols = k, 1  #not one hot
    # the data, shuffled and split between train and test sets

    (x_train, y_train) = load_data("train", "jpeg")
    (x_valid, y_valid) = load_data("valid", "jpeg")
    (x_test, y_test) = load_data("test", "jpeg")
    print('Before reshape:')
    print('x_train shape:', x_train.shape)
    print('x_valid shape:', x_valid.shape)
    print('x_test shape:', x_test.shape)
    x_train = np.reshape(x_train, (len(x_train), 4095, 1, 1))
    x_valid = np.reshape(x_valid, (len(x_valid), 4095, 1, 1))
    x_test = np.reshape(x_test, (len(x_test), 4095, 1, 1))
    y_train = np.reshape(y_train, (len(y_train), 4095, 1, 1))
    y_valid = np.reshape(y_valid, (len(y_valid), 4095, 1, 1))
    y_test = np.reshape(y_test, (len(y_test), 4095, 1, 1))
    print('After reshape:')
    print('x_train shape:', x_train.shape)
    print('x_valid shape:', x_valid.shape)
    print('x_test shape:', x_test.shape)

    input_shape = (img_rows, img_cols, 1)

    # convert class vectors to binary class matrices

    print('Shuffling in unison ')
    shuffle_in_unison(x_train, y_train)
    shuffle_in_unison(x_valid, y_valid)
    shuffle_in_unison(x_test, y_test)
    #print('y_train shape:', y_train.shape)
    #print(y_train.shape[0], 'train labels')
    #print(y_test.shape[0], 'test labels')

    batch_size = 50
    epochs = 20

    input_img = Input(shape=input_shape)  # adapt this if using `channels_first` image data format

    x = Conv2D(100, (24, 1), activation='relu')(input_img)
    #x = Dropout(0.5)(x)
    x = Conv2D(200, (16, 1), activation='relu')(x)
    #x = Dropout(0.5)(x)
    encoded = Conv2D(40, (8, 1), activation='relu')(x)

    x = Conv2DTranspose(200, (8, 1), activation='relu')(encoded)
    #x = Dropout(0.5)(x)
    x = Conv2DTranspose(100, (16, 1), activation='relu')(x)
    #x = Dropout(0.5)(x)
    decoded = Conv2DTranspose(1, (24, 1), activation='sigmoid')(x)

    model = Model(input_img, decoded)
    model.compile(loss=negGrowthRateLoss,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    filepath = 'auto_weights_jpeg_10_0p01.h5'
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    csv_logger = CSVLogger('auto_losses_jpeg_10_0p01.csv')
    plot_model(model, to_file='auto_model_jpeg_10_0p01.png', show_shapes=True)
    #Train for the epochs set above; uncomment to resume from saved weights:
    #model.load_weights('auto_weights_jpeg_11_0p01.h5')
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_valid, y_valid),
              callbacks=[checkpoint, csv_logger])

    #optimization algorithm, metrics, and loss functions

    #train

    #model.fit(x_train, y_train,batch_size=batch_size,epochs=epochs,verbose=1)

    model.save('auto_model_jpeg_10_0p01.h5')
    model.load_weights(filepath)
    predictions = model.predict(x_test, verbose=0)
    print(predictions.shape)
    model.summary()
    p_p_y = np.array([[0.0, 0.0], [0.0, 0.0]])
    ct = np.array([0.0, 0.0])
    p_p = np.array([0.0, 0.0])
    p_y = np.array([0.0, 0.0])
    num_test_examples = len(x_test)
    for j in range(0, num_test_examples):
        #filename = open("results/res_test_exmpl_"+str(j)+".csv",'w')
        for i in range(0, 4095):

            #filename.write(str(predictions[j][i])+","+str(y_test[j][i])+","+str(x_test[j][i])+"\n")
            #if(y_test[j][i][0][0] != x_test[j][i][0][0]):
            #print(str(y_test[j][i][0][0])+" "+str(x_test[j][i][0][0])+"  "+str(predictions[j][i][0][0]))
            if (y_test[j][i] == 0):
                p_p_y[1][0] = p_p_y[1][0] + predictions[j][i][0][0]
                #p[1][y_test[j][i]] = p[1][y_test[j][i]]+(1.0-predictions[j][i])
                ct[0] = ct[0] + 1.0
            else:
                p_p_y[1][1] = p_p_y[1][1] + predictions[j][i][0][0]
                #p[0][y_test[j][i]] = p[0][y_test[j][i]]+(1.0-predictions[j][i])
                ct[1] = ct[1] + 1.0
            p_p[1] = p_p[1] + predictions[j][i][0][0]

        #filename.close()

    p_p_y[1][0] = p_p_y[1][0] / ct[0]
    p_p_y[1][1] = p_p_y[1][1] / ct[1]

    p_p_y[0][0] = 1.0 - p_p_y[1][0]
    p_p_y[0][1] = 1.0 - p_p_y[1][1]

    p_p[1] = p_p[1] / (ct[0] + ct[1])
    p_p[0] = 1 - p_p[1]

    p_y[0] = ct[0] / (ct[0] + ct[1])
    p_y[1] = ct[1] / (ct[0] + ct[1])

    mut_inf = 0.0

    # turn the conditionals P(prediction | label) into the joint P(prediction, label)
    for i in range(0, 2):
        for j in range(0, 2):
            p_p_y[i][j] = p_p_y[i][j] * p_y[j]

    #ct = ct/sum(ct)
    #ct_pred = ct_pred/sum(ct_pred)

    file = open("auto_results_jpeg_10_0p01.txt", 'w+')
    file.write("Joint:\n")
    file.write(str(p_p_y))
    #print(sum(p_p_y))
    file.write("\n Marginal: Predictions:\n")
    file.write(str(p_p))
    #print(sum(p_p))
    file.write("\n Marginal: Labels:\n")
    file.write(str(p_y))
    '''
    for i in range(0,2):
        for j in range(0,2):
            p[i][j] = p[i][j]*ct[j]
    '''

    for i in range(0, 2):
        for j in range(0, 2):
            if (p_p_y[i][j] != 0):
                mut_inf = mut_inf + (p_p_y[i][j] * log(
                    (p_p_y[i][j]) / (p_p[i] * p_y[j])) / log(2.0))

    file.write("\nMutual information:\n")
    file.write(str(mut_inf))
Example #11
a, b, c = mngl_arr_test.shape
mngl_arr_test = np.reshape(mngl_arr_test, [a, b, c, 1])

input_shape = (100, 100, 1)
filter0, th_layer0 = 8, 32   # kernel size and filter count, layer 0
filter1, th_layer1 = 8, 1    # kernel size and filter count, layer 1


model = Sequential()


model.add(Conv2D(th_layer0, kernel_size=(filter0, filter0), strides=(1, 1), activation='relu',
                 padding='same', input_shape=input_shape))


model.add(Conv2DTranspose(th_layer1, kernel_size=(filter1, filter1), strides=(1, 1), activation='relu',
                          padding='same'))


#model.compile(loss=losses.mean_squared_error, optimizer=optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-6))

model.compile(loss=losses.mean_squared_error, optimizer=optimizers.Adam())

logger = tlg.training_logger()
cPoint = ModelCheckpoint(filepath='./Archive/Toy2.hdf5', verbose=1, save_best_only=True)
eStop = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=8, verbose=1, restore_best_weights=True)

rPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, verbose=1, min_lr=0.00001)

# validation_split and the callbacks list are assumed here to complete the call
model.fit(mngl_arr_train, orig_arr_train,
          epochs=5,
          batch_size=128,
          validation_split=0.2,
          callbacks=[cPoint, eStop, rPlat])
Example #12
    def _gen_model(self):
        """
        Gen the encoder
        - save image_shape before dense layers to avoid np.reshape screwing us over again.
        Gen the decoder
        - Do re parameterization thing first
        Combine to make VAE
        """

        def sampling(args):
            """Re-parameterization trick: sample z while still letting the network learn mean and variance.

            args is the pair of tensors [self.z_mean, self.z_log_var].
            """
            z_mean, z_log_var = args
            epsilon = K.random_normal(shape=(K.shape(z_mean)[0], K.int_shape(z_mean)[1]))  # shape: (num_samples, dim)
            return z_mean + K.exp(z_log_var / 2) * epsilon

        # #-------#  ENCODER  #-------#
        self.input_layer = Input(shape=(28, 28, self.channels), name='input')
        x = Conv2D(self.channels * 48, (3, 3), activation='relu', padding='same', strides=2)(self.input_layer)
        x = Conv2D(self.channels * 72, (3, 3), activation='relu', padding='same', strides=2)(x)
        intermediary_shape = K.int_shape(x)
        encoding = Flatten()(x)
        encoding = Dense(128, activation='relu')(encoding)
        encoding = Dense(128, activation='relu')(encoding)

        self.z_mean = Dense(self.encoding_dim)(encoding)  # No Activation
        self.z_log_var = Dense(self.encoding_dim)(encoding)  # No Activation

        # do re-parameterization to get our latent vector
        sampled_encoding = Lambda(sampling)([self.z_mean, self.z_log_var])
        self.encoding_layer = sampled_encoding

        # Save the encoded signal
        self.encoder = Model(self.input_layer, [self.z_mean, self.z_log_var, self.encoding_layer])
        # self.encoder.summary()

        # #-------#  DECODER  #-------#
        decoder_input = Input(shape=(self.encoding_dim,))

        x = Dense(128, activation='relu')(decoder_input)
        x = Dense(128, activation='relu')(x)
        x = Dense(intermediary_shape[1] * intermediary_shape[2] * intermediary_shape[3], activation='relu')(x)
        x = Reshape(target_shape=(intermediary_shape[1], intermediary_shape[2], intermediary_shape[3]))(x)
        x = Conv2DTranspose(self.channels * 48, (3, 3), activation='relu', padding='same', strides=2)(x)
        decoder_output = Conv2DTranspose(self.channels, (3, 3), activation='sigmoid', strides=2, padding='same')(x)

        self.decoder = Model(decoder_input, decoder_output)
        self.decoder.summary()

        self.output_layer = self.decoder(self.encoder(self.input_layer)[-1])

        self.auto_encoder = Model(self.input_layer, self.output_layer)
        self.auto_encoder.summary()

        def kl_reconstruction_loss(y, predicted):  # VAE ELBO from lecture
            reconstruction_loss = 28 * 28 * self.channels * binary_crossentropy(K.flatten(y), K.flatten(predicted))

            kl_loss = - 0.5 * K.sum(1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var), axis=-1)
            vae_loss = K.mean(reconstruction_loss + kl_loss)
            return vae_loss

        self.auto_encoder.compile(optimizer='rmsprop', loss=kl_reconstruction_loss)
Example #13
def vae_model(fname, save=True, save_name=None, verbose=True):

    imgs = load(fname)
    imgs = np.array(imgs)
    x_train, x_test = split_into_test_train(imgs)

    img_rows, img_cols, img_chns = 100, 100, 3
    if K.image_data_format() == 'channels_first':
        original_img_size = (img_chns, img_rows, img_cols)
    else:
        original_img_size = (img_rows, img_cols, img_chns)

    epochs = 50
    batch_size = 50
    # number of convolutional filters to use
    filters = 64
    # convolution kernel size
    num_conv = 3

    latent_dim = 2
    intermediate_dim = 128
    epsilon_std = 1.0
    activation = 'relu'
    # input image dimensions
    input_shape = (100, 100, 3)  #Define shape without including the batch size

    #First we create the encoder network which maps inputs to our latent distribution parameters:
    x = Input(shape=input_shape)
    conv_1 = Conv2D(img_chns,
                    kernel_size=(2, 2),
                    padding='same',
                    activation=activation)(x)  #Inputs & outputs a 4D tensor
    if verbose:
        print(conv_1.shape)
    conv_2 = Conv2D(filters,
                    kernel_size=(2, 2),
                    padding='same',
                    activation=activation,
                    strides=(2, 2))(conv_1)
    if verbose:
        print(conv_2.shape)
    conv_3 = Conv2D(filters,
                    kernel_size=num_conv,
                    padding='same',
                    activation=activation,
                    strides=1)(conv_2)
    if verbose:
        print(conv_3.shape)
    conv_4 = Conv2D(filters,
                    kernel_size=num_conv,
                    padding='same',
                    activation=activation,
                    strides=2)(conv_3)
    if verbose:
        print(conv_4.shape)
    flat = Flatten()(conv_4)  #For generating the latent vector
    if verbose:
        print(flat.shape)
    hidden = Dense(intermediate_dim, activation=activation)(flat)
    if verbose:
        print(hidden.shape)

    z_mean = Dense(latent_dim)(hidden)
    z_log_sigma = Dense(latent_dim)(
        hidden
    )  #Learn the log of the standard deviation rather than sigma itself; it is more convenient and numerically stabler

    #Use the latent distribution parameters to sample new & similar points from the latent space
    def sampling(args):
        z_mean, z_log_sigma = args
        epsilon = K.random_normal(
            shape=(K.shape(z_mean)[0], latent_dim),
            mean=0.,
            stddev=epsilon_std
        )  #Normally distributed values in a tensor to be used as noise
        return z_mean + K.exp(z_log_sigma) * epsilon

    #Shifting the random sample by the mean and scaling it by the variance

    #Make the sampling the input
    z = Lambda(sampling, output_shape=(latent_dim, ))(
        [z_mean, z_log_sigma])  #Function, output shape multiplied by args
    #Lambda wraps an arbitrary expression as a layer, so here we are wrapping the sampling (latent space) as our input layer

    #Map the latent points to reconstructed inputs
    decoder_hid = Dense(intermediate_dim, activation=activation)

    decoder_upsample = Dense(filters * int(img_rows / 4) * int(img_cols / 4),
                             activation=activation)

    if K.image_data_format() == 'channels_first':
        output_shape = (batch_size, filters, int(img_rows / 4),
                        int(img_cols / 4))
    else:
        output_shape = (batch_size, int(img_rows / 4), int(img_cols / 4),
                        filters)

    decoder_reshape = Reshape(output_shape[1:])  #Reshapes the output
    decoder_deconv_1 = Conv2DTranspose(
        filters,
        kernel_size=num_conv,
        padding='same',
        strides=1,
        activation=activation)  #Transposed layers for deconvolution
    decoder_deconv_2 = Conv2DTranspose(filters,
                                       kernel_size=num_conv,
                                       padding='same',
                                       strides=2,
                                       activation=activation)
    if K.image_data_format() == 'channels_first':
        output_shape = (batch_size, filters, int(img_rows + 1),
                        int(img_cols + 1))
    else:
        output_shape = (batch_size, int(img_rows + 1), int(img_cols + 1),
                        filters)
    decoder_deconv_3_upsamp = Conv2DTranspose(filters,
                                              kernel_size=(3, 3),
                                              strides=(2, 2),
                                              padding='valid',
                                              activation=activation)
    decoder_mean_squash = Conv2D(img_chns,
                                 kernel_size=2,
                                 padding='valid',
                                 activation='sigmoid')

    hid_decoded = decoder_hid(z)
    up_decoded = decoder_upsample(hid_decoded)
    reshape_decoded = decoder_reshape(up_decoded)
    deconv_1_decoded = decoder_deconv_1(reshape_decoded)
    deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
    x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
    x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)

    #Instantiate 3 models

    #End-to-end autoencoder for mapping inputs to reconstructions
    vae = Model(x, x_decoded_mean_squash)

    kl = kl_loss(z_mean, z_log_sigma)
    vae.add_loss(kl)

    #Encoder mapping from inputs to latent space
    encoder = Model(x, z_mean)

    #Generator which takes points from the latent space to output the reconstructed samples
    decoder_input = Input(shape=(latent_dim, ))
    _hid_decoded = decoder_hid(decoder_input)
    _up_decoded = decoder_upsample(_hid_decoded)
    _reshape_decoded = decoder_reshape(_up_decoded)
    _deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
    _deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
    _x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
    _x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
    #Push z through decoder
    generator = Model(decoder_input, _x_decoded_mean_squash)

    #Time to train!

    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
    print(x_test.shape)

    shape = x_train.shape[1:]

    #Train using the end-to-end model with a custom loss & K-L divergence regularization
    """def vae_loss(x, x_decoded_mean):
		xent_loss = metrics.binary_crossentropy(x, x_decoded_mean_squash) #Reconstruction loss
		#Binary crossentropy because the decoding term is a Bernoulli multi layered perceptron - is it worth also trying Gaussian + MSE??
		kl_loss = - 0.5 * K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma), axis=-1) #Variational loss
		return xent_loss + kl_loss #Combine the losses
	"""
    #Can only get these losses to work when we define them outside of the VAE model
    vae.compile(optimizer='adam', loss=reconstruction_loss)
    vae.summary()

    vae.fit(x_train,
            x_train,
            shuffle=True,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_test, x_test))

    predictions = vae.predict(x_test, batch_size=batch_size)

    if save:
        save_array((x_test, predictions), save_name + '_imgs_preds')
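An assumed invocation of Example #13; the file name is hypothetical, and `load`, `split_into_test_train`, `kl_loss`, `reconstruction_loss`, and `save_array` are the external helpers the snippet already relies on.

# Hypothetical call: trains the VAE end-to-end and saves the test
# reconstructions under 'run1_imgs_preds'.
vae_model('images.pkl', save=True, save_name='run1', verbose=False)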
Example #14
def infect_seg(input_shape,
               num_filters=[32, 64, 64, 128],
               padding='same',
               dropout=0.2):
    """Generate CN-Net model to train on CT scan images
    Arbitrary number of input channels and output classes are supported.
    Sizes are noted with 100x100 input images.

    Arguments:
      input_shape  - (input image height (pixels),
                      input image width  (pixels),
                      input image channels (1 for grayscale, 3 for RGB));
                     the batch dimension is excluded
      num_filters - number of filters (exactly 4 should be passed)
      padding - 'same' or 'valid'
      dropout - fraction of units to dropout, 0 to keep all units

    Output:
      CN-Net model expecting input shape (height, width, channels) and producing
      one output with shape (height, width, 1).
    """
    assert len(num_filters) == 4

    x_input = Input(input_shape)

    x, x1 = conv_block_1(x_input,
                         num_filters[0],
                         padding=padding,
                         maxpool2Dsize=(2, 2),
                         dropout=dropout,
                         kernel_initializer="he_normal")  #x: 50x50

    x, x2 = conv_block_1(x,
                         num_filters[1],
                         padding=padding,
                         maxpool2Dsize=(2, 2),
                         dropout=dropout,
                         kernel_initializer="he_normal")  #x: 25x25
    x, _ = conv_block_1(x,
                        num_filters[2],
                        padding=padding,
                        maxpool2Dsize=(1, 1),
                        dropout=dropout,
                        kernel_initializer="he_normal")  #x: 25x25

    x, _ = conv_block_1(x,
                        num_filters[3],
                        padding=padding,
                        maxpool2Dsize=(1, 1),
                        dropout=dropout,
                        kernel_initializer="he_normal")  #x: 25x25

    x = conv_block_2(x,
                     num_filters[3],
                     padding=padding,
                     kernel_initializer="he_normal")  #x: 25x25

    x = Conv2DTranspose(num_filters[2], (2, 2),
                        strides=(2, 2),
                        padding=padding)(x)  #x: 50x50
    x = conv_block_2(x,
                     num_filters[2],
                     padding=padding,
                     kernel_initializer="he_normal")  #x: 50x50

    x = Conv2DTranspose(num_filters[1], (2, 2), padding='same')(x)  #x: 50x50 (stride 1; resolution unchanged)
    x = concatenate([x, x2])  #x: 50x50
    x = conv_block_2(x,
                     num_filters[1],
                     padding=padding,
                     kernel_initializer="he_normal")  #x: 50x50

    x = Conv2DTranspose(num_filters[0], (2, 2),
                        strides=(2, 2),
                        padding=padding)(x)  #x: 100x100
    x = concatenate([x, x1], axis=3)  #x: 100x100
    x = conv_block_2(x,
                     num_filters[0],
                     padding=padding,
                     kernel_initializer="he_normal")  #x: 100x100

    infect_seg = Conv2D(1, (1, 1), activation='sigmoid',
                        name='infect_output')(x)  # identifying infections

    iseg = Model(inputs=x_input, outputs=infect_seg, name='cts_model')

    return iseg
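A compile sketch for the CN-Net above, assuming the `conv_block_1`/`conv_block_2` helpers are importable from the same module; binary crossentropy matches the single-channel sigmoid output but is an assumed choice.

seg = infect_seg(input_shape=(100, 100, 1))
seg.compile(optimizer='adam', loss='binary_crossentropy')  # assumed loss
seg.summary()
# seg.fit(ct_slices, infection_masks, epochs=20, batch_size=16)  # hypothetical data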
Example #15
    def __init__(self,
                 image_size,
                 channels,
                 conv_layers,
                 feature_maps,
                 filter_shapes,
                 strides,
                 dense_layers,
                 dense_neurons,
                 dense_dropouts,
                 latent_dim,
                 activation='relu',
                 eps_mean=0.0,
                 eps_std=1.0):

        self.history = LossHistory()

        # check that arguments are proper length;
        if len(filter_shapes) != conv_layers:
            raise Exception(
                "number of convolutional layers must equal length of filter_shapes list"
            )
        if len(strides) != conv_layers:
            raise Exception(
                "number of convolutional layers must equal length of strides list"
            )
        if len(feature_maps) != conv_layers:
            raise Exception(
                "number of convolutional layers must equal length of feature_maps list"
            )
        if len(dense_neurons) != dense_layers:
            raise Exception(
                "number of dense layers must equal length of dense_neurons list"
            )
        if len(dense_dropouts) != dense_layers:
            raise Exception(
                "number of dense layers must equal length of dense_dropouts list"
            )

        # even shaped filters may cause problems in theano backend;
        even_filters = [
            f for pair in filter_shapes for f in pair if f % 2 == 0
        ]
        if (K.common.image_dim_ordering() in ('th', 'channels_first')
                and len(even_filters) > 0):
            warnings.warn(
                'Even shaped filters may cause problems in Theano backend')

        self.eps_mean = eps_mean
        self.eps_std = eps_std
        self.image_size = image_size

        # define input layer;
        if K.common.image_dim_ordering(
        ) == 'th' or K.common.image_dim_ordering() == 'channels_first':
            self.input = Input(shape=(channels, image_size[0], image_size[1]))
        else:
            self.input = Input(shape=(image_size[0], image_size[1], channels))

        # define convolutional encoding layers;
        self.encode_conv = []
        layer = Convolution2D(feature_maps[0],
                              filter_shapes[0],
                              padding='same',
                              activation=activation,
                              strides=strides[0])(self.input)
        self.encode_conv.append(layer)
        for i in range(1, conv_layers):
            layer = Convolution2D(feature_maps[i],
                                  filter_shapes[i],
                                  padding='same',
                                  activation=activation,
                                  strides=strides[i])(self.encode_conv[i - 1])
            self.encode_conv.append(layer)

        # define dense encoding layers;
        self.flat = Flatten()(self.encode_conv[-1])
        self.encode_dense = []
        layer = Dense(dense_neurons[0], activation=activation)(Dropout(
            dense_dropouts[0])(self.flat))
        self.encode_dense.append(layer)
        for i in range(1, dense_layers):
            layer = Dense(dense_neurons[i], activation=activation)(Dropout(
                dense_dropouts[i])(self.encode_dense[i - 1]))
            self.encode_dense.append(layer)

        # define embedding layer;
        self.z_mean = Dense(latent_dim)(self.encode_dense[-1])
        self.z_log_var = Dense(latent_dim)(self.encode_dense[-1])
        self.z = Lambda(self._sampling, output_shape=(latent_dim, ))(
            [self.z_mean, self.z_log_var])

        # save all decoding layers for generation model;
        self.all_decoding = []

        # define dense decoding layers;
        self.decode_dense = []
        layer = Dense(dense_neurons[-1], activation=activation)
        self.all_decoding.append(layer)
        self.decode_dense.append(layer(self.z))
        for i in range(1, dense_layers):
            layer = Dense(dense_neurons[-i - 1], activation=activation)
            self.all_decoding.append(layer)
            self.decode_dense.append(layer(self.decode_dense[i - 1]))

        # dummy model to get image size after encoding convolutions;
        self.decode_conv = []
        if K.common.image_dim_ordering(
        ) == 'th' or K.common.image_dim_ordering() == 'channels_first':
            dummy_input = np.ones((1, channels, image_size[0], image_size[1]))
        else:
            dummy_input = np.ones((1, image_size[0], image_size[1], channels))
        dummy = Model(self.input, self.encode_conv[-1])
        conv_size = dummy.predict(dummy_input).shape
        layer = Dense(conv_size[1] * conv_size[2] * conv_size[3],
                      activation=activation)
        self.all_decoding.append(layer)
        self.decode_dense.append(layer(self.decode_dense[-1]))
        reshape = Reshape(conv_size[1:])
        self.all_decoding.append(reshape)
        self.decode_conv.append(reshape(self.decode_dense[-1]))

        # define deconvolutional decoding layers;
        for i in range(1, conv_layers):
            if K.common.image_dim_ordering(
            ) == 'th' or K.common.image_dim_ordering() == 'channels_first':
                dummy_input = np.ones(
                    (1, channels, image_size[0], image_size[1]))
            else:
                dummy_input = np.ones(
                    (1, image_size[0], image_size[1], channels))
            dummy = Model(self.input, self.encode_conv[-i - 1])
            conv_size = list(dummy.predict(dummy_input).shape)

            if K.common.image_dim_ordering(
            ) == 'th' or K.common.image_dim_ordering() == 'channels_first':
                conv_size[1] = feature_maps[-i]
            else:
                conv_size[3] = feature_maps[-i]

            layer = Conv2DTranspose(feature_maps[-i - 1],
                                    filter_shapes[-i],
                                    padding='same',
                                    activation=activation,
                                    strides=strides[-i])
            self.all_decoding.append(layer)
            self.decode_conv.append(layer(self.decode_conv[i - 1]))

        layer = Conv2DTranspose(channels,
                                filter_shapes[0],
                                padding='same',
                                activation='sigmoid',
                                strides=strides[0])
        self.all_decoding.append(layer)
        self.output = layer(self.decode_conv[-1])

        # build model;
        self.model = Model(self.input, self.output)
        self.optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        self.model.compile(optimizer=self.optimizer, loss=self._vae_loss)
        #         print "model summary:"
        #         self.model.summary()

        # model for embeddings;
        self.embedder = Model(self.input, self.z_mean)

        # model for generation;
        self.decoder_input = Input(shape=(latent_dim, ))
        self.generation = []
        self.generation.append(self.all_decoding[0](self.decoder_input))
        for i in range(1, len(self.all_decoding)):
            self.generation.append(self.all_decoding[i](self.generation[i -
                                                                        1]))
        self.generator = Model(self.decoder_input, self.generation[-1])
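The constructor wires self._sampling and self._vae_loss into the graph, but neither method appears in this excerpt; a minimal sketch of both, assuming the usual reparameterization trick and a standard VAE loss built from the eps_mean/eps_std and z_mean/z_log_var attributes stored above:

    def _sampling(self, args):
        # Reparameterization trick: z = mean + exp(0.5 * log_var) * eps
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=K.shape(z_mean),
                                  mean=self.eps_mean,
                                  stddev=self.eps_std)
        return z_mean + K.exp(0.5 * z_log_var) * epsilon

    def _vae_loss(self, y_true, y_pred):
        # Per-sample reconstruction error plus KL divergence to a unit Gaussian
        recon = K.mean(K.binary_crossentropy(y_true, y_pred), axis=[1, 2, 3])
        kl = -0.5 * K.sum(1 + self.z_log_var - K.square(self.z_mean) -
                          K.exp(self.z_log_var), axis=-1)
        return recon + kl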
Example #16
def get_cifar10_model(args):
    '''
    Return: G, D, GAN models
    '''
    '''
    Build Generator
    '''
    G_in = Input(shape=(256, ))
    x = Dense(256 * 2 * 2, kernel_initializer='glorot_normal')(G_in)
    x = Reshape((2, 2, 256))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2DTranspose(128, (5, 5),
                        padding='same',
                        kernel_initializer='glorot_normal',
                        strides=2)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2DTranspose(64, (5, 5),
                        padding='same',
                        kernel_initializer='glorot_normal',
                        strides=2)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2DTranspose(32, (5, 5),
                        padding='same',
                        kernel_initializer='glorot_normal',
                        strides=2)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2DTranspose(3, (5, 5),
                        padding='same',
                        kernel_initializer='glorot_normal',
                        strides=2)(x)
    G_out = Activation('tanh')(x)
    G = Model(G_in, G_out)
    '''
    Build Discriminator
    '''
    D_in = Input(shape=(32, 32, 3))
    x = Conv2D(32, (5, 5),
               strides=2,
               kernel_initializer='glorot_normal',
               kernel_regularizer=l2(args.d_l2),
               padding='same')(D_in)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(64, (5, 5),
               strides=2,
               kernel_initializer='glorot_normal',
               kernel_regularizer=l2(args.d_l2),
               padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(128, (5, 5),
               strides=2,
               kernel_initializer='glorot_normal',
               kernel_regularizer=l2(args.d_l2),
               padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(256, (5, 5),
               strides=2,
               kernel_initializer='glorot_normal',
               kernel_regularizer=l2(args.d_l2),
               padding='same')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Flatten()(x)
    x = Dropout(0.2)(x)
    D_out = Dense(1, kernel_initializer='glorot_normal',
                  activation='sigmoid')(x)
    D = Model(D_in, D_out)
    dopt = Adam(lr=args.d_lr, beta_1=0.5, beta_2=0.999, decay=1e-5)
    D.compile(loss=D_loss, optimizer=dopt)
    '''
    Building GAN
    '''
    set_trainability(D, False)
    GAN_in = Input(shape=(256, ))
    G_out = G(GAN_in)
    GAN_out = D(G_out)
    GAN = Model(GAN_in, GAN_out)
    gopt = Adam(lr=args.g_lr, beta_1=0.5, beta_2=0.999)
    GAN.compile(loss=com_conv(G_out, args.beta, 2), optimizer=gopt)

    return G, D, GAN
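set_trainability, D_loss and com_conv are helpers defined elsewhere in this project (the loss factories are project-specific and not reconstructed here); a minimal sketch of the freezing helper, assuming the conventional implementation:

def set_trainability(model, trainable=False):
    # Toggle trainability before compiling the combined GAN so that
    # generator updates do not also move the discriminator weights.
    model.trainable = trainable
    for layer in model.layers:
        layer.trainable = trainable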
Example #17
def unet_model(n_classes=5,
               im_sz=160,
               n_channels=8,
               n_filters_start=32,
               growth_factor=2):
    droprate = 0.25

    #Block1
    n_filters = n_filters_start
    inputs = Input((im_sz, im_sz, n_channels), name='input')
    #inputs = BatchNormalization()(inputs)
    conv1 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv1_1',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(inputs)
    actv1 = LeakyReLU(name='actv1_1')(conv1)
    conv1 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv1_2',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(actv1)
    actv1 = LeakyReLU(name='actv1_2')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), name='maxpool1')(actv1)
    #pool1 = Dropout(droprate)(pool1)

    #Block2
    n_filters *= growth_factor
    pool1 = BatchNormalization(name='bn1')(pool1)
    conv2 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv2_1',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(pool1)
    actv2 = LeakyReLU(name='actv2_1')(conv2)
    conv2 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv2_2',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(actv2)
    actv2 = LeakyReLU(name='actv2_2')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), name='maxpool2')(actv2)
    pool2 = Dropout(droprate, name='dropout2')(pool2)

    #Block3
    n_filters *= growth_factor
    pool2 = BatchNormalization(name='bn2')(pool2)
    conv3 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv3_1',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(pool2)
    actv3 = LeakyReLU(name='actv3_1')(conv3)
    conv3 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv3_2',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(actv3)
    actv3 = LeakyReLU(name='actv3_2')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), name='maxpool3')(actv3)
    pool3 = Dropout(droprate, name='dropout3')(pool3)

    #Block4
    n_filters *= growth_factor
    pool3 = BatchNormalization(name='bn3')(pool3)
    conv4_0 = Conv2D(n_filters, (3, 3),
                     padding='same',
                     name='conv4_1',
                     kernel_initializer='he_uniform',
                     bias_initializer='he_uniform')(pool3)
    actv4_0 = LeakyReLU(name='actv4_1')(conv4_0)
    conv4_0 = Conv2D(n_filters, (3, 3),
                     padding='same',
                     name='conv4_0_2',
                     kernel_initializer='he_uniform',
                     bias_initializer='he_uniform')(actv4_0)
    actv4_0 = LeakyReLU(name='actv4_2')(conv4_0)
    pool4_1 = MaxPooling2D(pool_size=(2, 2), name='maxpool4')(actv4_0)
    pool4_1 = Dropout(droprate, name='dropout4')(pool4_1)

    #Block5
    n_filters *= growth_factor
    pool4_1 = BatchNormalization(name='bn4')(pool4_1)
    conv4_1 = Conv2D(n_filters, (3, 3),
                     padding='same',
                     name='conv5_1',
                     kernel_initializer='he_uniform',
                     bias_initializer='he_uniform')(pool4_1)
    actv4_1 = LeakyReLU(name='actv5_1')(conv4_1)
    conv4_1 = Conv2D(n_filters, (3, 3),
                     padding='same',
                     name='conv5_2',
                     kernel_initializer='he_uniform',
                     bias_initializer='he_uniform')(actv4_1)
    actv4_1 = LeakyReLU(name='actv5_2')(conv4_1)
    pool4_2 = MaxPooling2D(pool_size=(2, 2), name='maxpool5')(actv4_1)
    pool4_2 = Dropout(droprate, name='dropout5')(pool4_2)

    #Block6
    n_filters *= growth_factor
    conv5 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv6_1',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(pool4_2)
    actv5 = LeakyReLU(name='actv6_1')(conv5)
    conv5 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv6_2',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(actv5)
    actv5 = LeakyReLU(name='actv6_2')(conv5)

    #Block7
    n_filters //= growth_factor
    up6_1 = concatenate([
        Conv2DTranspose(
            n_filters,
            (2, 2), strides=(2, 2), padding='same', name='up7')(actv5), actv4_1
    ],
                        name='concat7')
    up6_1 = BatchNormalization(name='bn7')(up6_1)
    conv6_1 = Conv2D(n_filters, (3, 3),
                     padding='same',
                     name='conv7_1',
                     kernel_initializer='he_uniform',
                     bias_initializer='he_uniform')(up6_1)
    actv6_1 = LeakyReLU(name='actv7_1')(conv6_1)
    conv6_1 = Conv2D(n_filters, (3, 3),
                     padding='same',
                     name='conv7_2',
                     kernel_initializer='he_uniform',
                     bias_initializer='he_uniform')(actv6_1)
    actv6_1 = LeakyReLU(name='actv7_2')(conv6_1)
    conv6_1 = Dropout(droprate, name='dropout7')(actv6_1)

    #Block8
    n_filters //= growth_factor
    up6_2 = concatenate([
        Conv2DTranspose(
            n_filters, (2, 2), strides=(2, 2), padding='same',
            name='up8')(conv6_1), actv4_0
    ],
                        name='concat8')
    up6_2 = BatchNormalization(name='bn8')(up6_2)
    conv6_2 = Conv2D(n_filters, (3, 3),
                     padding='same',
                     name='conv8_1',
                     kernel_initializer='he_uniform',
                     bias_initializer='he_uniform')(up6_2)
    actv6_2 = LeakyReLU(name='actv8_1')(conv6_2)
    conv6_2 = Conv2D(n_filters, (3, 3),
                     padding='same',
                     name='conv8_2',
                     kernel_initializer='he_uniform',
                     bias_initializer='he_uniform')(actv6_2)
    actv6_2 = LeakyReLU(name='actv8_2')(conv6_2)
    conv6_2 = Dropout(droprate, name='dropout8')(actv6_2)

    #Block9
    n_filters //= growth_factor
    up7 = concatenate([
        Conv2DTranspose(
            n_filters,
            (2, 2), strides=(2, 2), padding='same', name='up9')(conv6_2), actv3
    ],
                      name='concat9')
    up7 = BatchNormalization(name='bn9')(up7)
    conv7 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv9_1',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(up7)
    actv7 = LeakyReLU(name='actv9_1')(conv7)
    conv7 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv9_2',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(actv7)
    actv7 = LeakyReLU(name='actv9_2')(conv7)
    conv7 = Dropout(droprate, name='dropout9')(actv7)

    #Block10
    n_filters //= growth_factor
    up8 = concatenate([
        Conv2DTranspose(
            n_filters,
            (2, 2), strides=(2, 2), padding='same', name='up10')(conv7), actv2
    ],
                      name='concat10')
    up8 = BatchNormalization(name='bn10')(up8)
    conv8 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv10_1',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(up8)
    actv8 = LeakyReLU(name='actv10_1')(conv8)
    conv8 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv10_2',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(actv8)
    actv8 = LeakyReLU(name='actv10_2')(conv8)
    conv8 = Dropout(droprate, name='dropout10')(actv8)

    #Block11
    n_filters //= growth_factor
    up9 = concatenate([
        Conv2DTranspose(
            n_filters,
            (2, 2), strides=(2, 2), padding='same', name='up11')(conv8), actv1
    ],
                      name='concat11')
    conv9 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv11_1',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(up9)
    actv9 = LeakyReLU(name='actv11_1')(conv9)
    conv9 = Conv2D(n_filters, (3, 3),
                   padding='same',
                   name='conv11_2',
                   kernel_initializer='he_uniform',
                   bias_initializer='he_uniform')(actv9)
    actv9 = LeakyReLU(name='actv11_2')(conv9)

    conv10 = Conv2D(n_classes, (1, 1), activation='softmax',
                    name='output1')(actv9)

    model = Model(inputs=inputs, outputs=conv10)

    def dice_coef(y_true, y_pred, smooth=1e-7):
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        intersection = K.sum(y_true_f * y_pred_f)
        return (2. * intersection + smooth) / (K.sum(y_true_f) +
                                               K.sum(y_pred_f) + smooth)

    """This simply calculates the dice score for each individual label, and then sums them together, and includes the background."""

    def dice_coef_multilabel(y_true, y_pred):
        dice = n_classes
        for index in range(n_classes):
            dice -= dice_coef(y_true[:, :, :, index], y_pred[:, :, :, index])
        return dice / n_classes

    model.compile(optimizer=Adam(lr=10e-5), loss=dice_coef_multilabel)
    return model
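A usage sketch for unet_model (the argument values shown are the defaults and purely illustrative):

model = unet_model(n_classes=5, im_sz=160, n_channels=8)
model.summary()  # ends in a (160, 160, 5) softmax map named 'output1'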
Example #18
def unet(input_shape=(240, 240, 2), bn=True, do=0, ki="he_normal", lr=0.001):
    '''
    bn: if use batchnorm layer
    do: dropout prob
    ki: kernel initializer (glorot_uniform, he_normal, ...)
    lr: learning rate of Adam
    '''
    concat_axis = -1  #the last axis (channel axis)

    inputs = Input(input_shape)  # channels is 2: <t1, flair>

    conv1 = Conv2D(64, (5, 5),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(inputs)
    conv1 = BatchNormalization()(conv1) if bn else conv1
    conv1 = Dropout(do)(conv1) if do else conv1
    conv1 = Conv2D(64, (5, 5),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv1)
    conv1 = BatchNormalization()(conv1) if bn else conv1

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool1)
    conv2 = BatchNormalization()(conv2) if bn else conv2
    conv2 = Dropout(do)(conv2) if do else conv2
    conv2 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv2)
    conv2 = BatchNormalization()(conv2) if bn else conv2

    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool2)
    conv3 = BatchNormalization()(conv3) if bn else conv3
    conv3 = Dropout(do)(conv3) if do else conv3
    conv3 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv3)
    conv3 = BatchNormalization()(conv3) if bn else conv3

    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(pool3)
    conv4 = BatchNormalization()(conv4) if bn else conv4
    conv4 = Dropout(do)(conv4) if do else conv4
    conv4 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv4)
    conv4 = BatchNormalization()(conv4) if bn else conv4

    #######

    conv4 = Conv2D(512, (3, 3),
                   dilation_rate=2,
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv4)
    cat6 = conv4

    #    pool4 = MaxPooling2D(pool_size=(2,2))(conv4)
    #
    #    conv5 = Conv2D(512, (3,3), padding="same", activation="relu", kernel_initializer=ki)(pool4)
    #    conv5 = BatchNormalization()(conv5) if bn else conv5
    #    conv5 = Dropout(do)(conv5) if do else conv5
    #    conv5 = Conv2D(512, (3,3), padding="same", activation="relu", kernel_initializer=ki)(conv5)
    #    conv5 = BatchNormalization()(conv5) if bn else conv5
    #    upconv5 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same', kernel_initializer=ki)(conv5)
    #
    #    ch, cw = get_crop_shape(conv4, upconv5)
    #    crop_conv4 = Cropping2D(cropping=(ch,cw))(conv4)
    #    cat6 = concatenate([upconv5, crop_conv4], axis=concat_axis)

    conv6 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(cat6)
    conv6 = BatchNormalization()(conv6) if bn else conv6
    conv6 = Dropout(do)(conv6) if do else conv6
    conv6 = Conv2D(256, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv6)
    conv6 = BatchNormalization()(conv6) if bn else conv6
    upconv6 = Conv2DTranspose(128, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv6)

    ch, cw = get_crop_shape(conv3, upconv6)
    crop_conv3 = Cropping2D(cropping=(ch, cw))(conv3)
    up7 = concatenate([upconv6, crop_conv3], axis=concat_axis)

    conv7 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(up7)
    conv7 = BatchNormalization()(conv7) if bn else conv7
    conv7 = Dropout(do)(conv7) if do else conv7
    conv7 = Conv2D(128, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv7)
    conv7 = BatchNormalization()(conv7) if bn else conv7
    upconv7 = Conv2DTranspose(96, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv7)

    ch, cw = get_crop_shape(conv2, upconv7)
    crop_conv2 = Cropping2D(cropping=(ch, cw))(conv2)
    up8 = concatenate([upconv7, crop_conv2], axis=concat_axis)

    conv8 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(up8)
    conv8 = BatchNormalization()(conv8) if bn else conv8
    conv8 = Dropout(do)(conv8) if do else conv8
    conv8 = Conv2D(96, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv8)
    conv8 = BatchNormalization()(conv8) if bn else conv8
    upconv8 = Conv2DTranspose(64, (2, 2),
                              strides=(2, 2),
                              padding='same',
                              kernel_initializer=ki)(conv8)

    ch, cw = get_crop_shape(conv1, upconv8)
    crop_conv1 = Cropping2D(cropping=(ch, cw))(conv1)
    up9 = concatenate([upconv8, crop_conv1], axis=concat_axis)

    conv9 = Conv2D(64, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(up9)
    conv9 = BatchNormalization()(conv9) if bn else conv9
    conv9 = Conv2D(64, (3, 3),
                   padding="same",
                   activation="relu",
                   kernel_initializer=ki)(conv9)
    conv9 = BatchNormalization()(conv9) if bn else conv9
    ch, cw = get_pad_shape(conv9, conv1)
    pad_conv9 = ZeroPadding2D(padding=(ch, cw))(conv9)
    conv9 = Conv2D(1, (1, 1),
                   padding="same",
                   activation="sigmoid",
                   kernel_initializer=ki)(pad_conv9)  #change to sigmoid

    model = Model(inputs=inputs, outputs=conv9)

    #    optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)   #default
    optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    #    optimizer = Adagrad(lr=0.001, epsilon=1e-08, decay=0.0)
    #    model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=['accuracy'])

    #    optimizer = SGD(lr=0.1, momentum=0.8, decay=lr/30, nesterov=False)

    #    model.compile(optimizer=optimizer, loss=dice_coef_loss, metrics=[dice_coef, 'accuracy'])

    model.compile(optimizer=optimizer,
                  loss=dice_coef_loss,
                  metrics=[dice_coef, precision_xue, recall_xue])

    return model
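dice_coef, dice_coef_loss and the two *_xue metrics passed to compile come from elsewhere in this project; a minimal sketch of the Dice pair, assuming the standard smoothed formulation (precision_xue and recall_xue are project-specific and not reconstructed here):

def dice_coef(y_true, y_pred, smooth=1e-7):
    # Soft Dice coefficient over the flattened masks
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) +
                                           K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return 1. - dice_coef(y_true, y_pred)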
Example #19
    def architecture(self):
        config = get_config()
        config = config["train"]["optimizers"]

        channels, height, width = 3, 500, 500

        # Input
        input_shape = (height, width, 3)
        img_input = Input(shape=self.input_shape)
        #img_input = Cropping2D((3,3))(img_input)

        # Add plenty of zero padding
        x = ZeroPadding2D(padding=(218, 218))(img_input)

        # VGG-16 convolution block 1
        x = Conv2D(64, (3, 3), activation='relu', padding='valid', name='conv1_1')(x)
        x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)

        # VGG-16 convolution block 2
        x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(x)
        x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2', padding='same')(x)

        # VGG-16 convolution block output3
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3', padding='same')(x)
        pool3 = x

        # VGG-16 convolution block 4
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4', padding='same')(x)
        pool4 = x

        # VGG-16 convolution block 5
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5', padding='same')(x)

        # Fully-connected layers converted to convolution layers
        x = Conv2D(128, (7, 7), activation='relu', padding='valid', name='fc6')(x)
        x = Dropout(0.5)(x)
        x = Conv2D(128, (1, 1), activation='relu', padding='valid', name='fc7')(x)
        x = Dropout(0.5)(x)
        x = Conv2D(21, (1, 1), padding='valid', name='score-fr')(x)

        # Deconvolution
        score2 = Conv2DTranspose(1, (4, 4), strides=2, name='score2')(x)

        # Skip connections from pool4
        score_pool4 = Conv2D(1, (1, 1), name='score-pool4')(pool4)
        score_pool4c = Cropping2D((5, 5))(score_pool4)
        score_fused = Add()([score2, score_pool4c])
        score4 = Conv2DTranspose(1, (4, 4), strides=2, name='score4', use_bias=False)(score_fused)

        # Skip connections from pool3
        score_pool3 = Conv2D(1, (1, 1), name='score-pool3')(pool3)
        score_pool3c = Cropping2D((9, 9))(score_pool3)
        score_pool3c = ZeroPadding2D(padding=((1, 0), (1, 0)))(score_pool3c)

        # Fuse things together
        score_final = Add()([score4, score_pool3c])

        # Final up-sampling and cropping
        upsample = Conv2DTranspose(1, (4, 4), strides=4, name='upsample', use_bias=False)(score_final)
        upscore = Cropping2D(((56, 56), (56, 56)))(upsample)
        upscore = Cropping2D(((4, 4), (4, 4)))(upscore)

        output = CrfRnnLayer(image_dims=(64, 64),
                             num_classes=1,
                             theta_alpha=config["crf_theta_alpha"],  # 3
                             theta_beta=config["crf_theta_beta"],  # 3
                             theta_gamma=config["crf_theta_gamma"],  # 3
                             num_iterations=config["crf_num_iterations"],
                             name='crfrnn')([upscore, img_input])

        classi = Add()([upscore, output])
        k = Flatten()(classi)

        k = Dense(128, activation='relu')(k)
        k = Dropout(.5)(k)
        k = Dense(256, activation='relu')(k)
        predictions = Dense(1, activation='sigmoid')(k)

        # Build the model
        model = Model(img_input, predictions, name='CRFVGG')

        return model
Example #20
def fn(tensor):
    x = Conv2DTranspose(nb_filters, 3, padding="same",
                        strides=(2, 2))(tensor)
    return x
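nb_filters is a free variable in fn, so the snippet only runs when it is bound in an enclosing scope; a hypothetical factory that closes over it:

def make_upsample_fn(nb_filters):
    # Returns a closure that doubles spatial resolution with a learned filter
    def fn(tensor):
        return Conv2DTranspose(nb_filters, 3, padding="same",
                               strides=(2, 2))(tensor)
    return fn

up2x = make_upsample_fn(64)  # e.g. y = up2x(feature_map)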
Example #21
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
#plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True)

# build decoder model
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)

for i in range(3):
    x = Conv2DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        activation='relu',
                        strides=2,
                        padding='same')(x)
    filters //= 2

outputs = Conv2DTranspose(filters=1,
                          kernel_size=kernel_size,
                          activation='sigmoid',
                          padding='same',
                          name='decoder_output')(x)

# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
#plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)
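The Lambda at the top of this snippet calls a sampling function defined earlier in the script (not shown here); a minimal sketch, assuming the standard reparameterization trick:

def sampling(args):
    # z = mean + exp(0.5 * log_var) * eps, with eps ~ N(0, I)
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon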
Example #22
    def get_unet_preenc(
        self,
        k=10,
        lr=1e-4,
        f_out=2,
    ):
        """
        
        :param k:
        :param lr:
        :param f_out:
        :return:
        """

        from keras.layers import Conv2D, UpSampling2D, Concatenate, Cropping2D, Conv2DTranspose, BatchNormalization
        from methods.examples import compile_segm

        model_encoder = self.get_encoder(k)

        b_double = False
        padding = 'valid'

        encoder_outputs = model_encoder.output

        l = encoder_outputs

        if self.depth == 2:
            list_w_crop = [12, 4]
        elif self.depth == 1:
            list_w_crop = [4]
        else:
            raise ValueError(f'unsupported depth: {self.depth}')

        for i_d in range(self.depth)[::-1]:
            f = 2**i_d * k if b_double else k
            l = Conv2D(f, (3, 3),
                       activation='elu',
                       padding=padding,
                       name=f'dec{i_d+1}')(l)

            if self.batch_norm:
                l = BatchNormalization(name=f'batchnorm_dec{i_d+1}')(l)

            # Learned upsampling; UpSampling2D(2) would be the parameter-free
            # alternative to this transposed convolution.
            l = Conv2DTranspose(f, (2, 2), strides=(2, 2))(l)
            if self.batch_norm:
                l = BatchNormalization(name=f'batchnorm_up{i_d}')(l)

            # Combine
            l_left_crop = Cropping2D(list_w_crop[i_d], name=f'crop_enc{i_d}')(
                model_encoder.get_layer(f'enc{i_d}').output)
            l = Concatenate(name=f'conc_dec{i_d}')([l, l_left_crop])

        l = Conv2D(k, (3, 3),
                   activation='elu',
                   padding=padding,
                   name=f'dec{0}')(l)
        if self.batch_norm:
            l = BatchNormalization(name=f'batchnorm_dec{0}')(l)
        decoder_outputs = Conv2D(f_out, (1, 1),
                                 activation='softmax',
                                 padding=padding)(l)

        model_pretrained_unet = Model(model_encoder.input, decoder_outputs)
        compile_segm(model_pretrained_unet, lr=lr)

        model_pretrained_unet.summary()

        return model_pretrained_unet
Example #23
    def build(self):
        """
            Build the model for training
        """

        print('. . . . .Building ENet. . . . .')

        img_input = Input(shape=(self.im_height, self.im_width, 3),
                          name='image_input')

        x = self.initial_block(img_input)

        x = self.bottleneck_encoder(x,
                                    64,
                                    downsampling=True,
                                    normal=True,
                                    name='1.0',
                                    drate=0.01)
        for i in range(1, 5):
            x = self.bottleneck_encoder(x,
                                        64,
                                        normal=True,
                                        name=f'1.{i}',
                                        drate=0.01)

        # Encoder Block
        x = self.bottleneck_encoder(x,
                                    128,
                                    downsampling=True,
                                    normal=True,
                                    name=f'2.0')
        x = self.bottleneck_encoder(x, 128, normal=True, name=f'2.1')
        x = self.bottleneck_encoder(x, 128, dilated=True, name=f'2.2')
        x = self.bottleneck_encoder(x, 128, asymmetric=True, name=f'2.3')
        x = self.bottleneck_encoder(x, 128, dilated=True, name=f'2.4')
        x = self.bottleneck_encoder(x, 128, normal=True, name=f'2.5')
        x = self.bottleneck_encoder(x, 128, dilated=True, name=f'2.6')
        x = self.bottleneck_encoder(x, 128, asymmetric=True, name=f'2.7')
        x = self.bottleneck_encoder(x, 128, dilated=True, name=f'2.8')

        x = self.bottleneck_encoder(x, 128, normal=True, name=f'3.0')
        x = self.bottleneck_encoder(x, 128, dilated=True, name=f'3.1')
        x = self.bottleneck_encoder(x, 128, asymmetric=True, name=f'3.2')
        x = self.bottleneck_encoder(x, 128, dilated=True, name=f'3.3')
        x = self.bottleneck_encoder(x, 128, normal=True, name=f'3.4')
        x = self.bottleneck_encoder(x, 128, dilated=True, name=f'3.5')
        x = self.bottleneck_encoder(x, 128, asymmetric=True, name=f'3.6')
        x = self.bottleneck_encoder(x, 128, dilated=True, name=f'3.7')

        # Decoder Block
        x = self.bottleneck_decoder(x, 64, upsampling=True, name='4.0')
        x = self.bottleneck_decoder(x, 64, normal=True, name='4.1')
        x = self.bottleneck_decoder(x, 64, normal=True, name='4.2')

        x = self.bottleneck_decoder(x, 16, upsampling=True, name='5.0')
        x = self.bottleneck_decoder(x, 16, normal=True, name='5.1')

        img_output = Conv2DTranspose(self.nclasses,
                                     kernel_size=(2, 2),
                                     strides=(2, 2),
                                     kernel_initializer='he_normal',
                                     padding='same',
                                     name='image_output')(x)
        img_output = Activation('softmax')(img_output)

        model = Model(inputs=img_input, outputs=img_output, name='ENET')
        print('. . . . .Build Compeleted. . . . .')
        return model
Example #24
def load_generator_network(batch_size,
                           sequence_class,
                           n_classes=1,
                           seq_length=205,
                           supply_inputs=False):

    sequence_class_onehots = np.eye(n_classes)

    #Generator network parameters
    latent_size = 100

    #Generator inputs
    latent_input_1, latent_input_2, latent_input_1_out, latent_input_2_out = None, None, None, None
    if not supply_inputs:
        latent_input_1 = Input(tensor=K.ones((batch_size, latent_size)),
                               name='noise_input_1')
        latent_input_2 = Input(tensor=K.ones((batch_size, latent_size)),
                               name='noise_input_2')
        latent_input_1_out = Lambda(lambda inp: inp * K.random_uniform(
            (batch_size, latent_size), minval=-1.0, maxval=1.0),
                                    name='lambda_rand_input_1')(latent_input_1)
        latent_input_2_out = Lambda(lambda inp: inp * K.random_uniform(
            (batch_size, latent_size), minval=-1.0, maxval=1.0),
                                    name='lambda_rand_input_2')(latent_input_2)
    else:
        latent_input_1 = Input(batch_shape=(batch_size, latent_size),
                               name='noise_input_1')
        latent_input_2 = Input(batch_shape=(batch_size, latent_size),
                               name='noise_input_2')
        latent_input_1_out = Lambda(lambda inp: inp,
                                    name='lambda_rand_input_1')(latent_input_1)
        latent_input_2_out = Lambda(lambda inp: inp,
                                    name='lambda_rand_input_2')(latent_input_2)

    class_embedding = Lambda(lambda x: K.gather(
        K.constant(sequence_class_onehots), K.cast(x[:, 0], dtype='int32')))(
            sequence_class)

    seed_input_1 = Concatenate(axis=-1)([latent_input_1_out, class_embedding])
    seed_input_2 = Concatenate(axis=-1)([latent_input_2_out, class_embedding])

    #Policy network definition
    policy_dense_1 = Dense(21 * 384,
                           activation='relu',
                           kernel_initializer='glorot_uniform',
                           name='policy_dense_1')

    policy_dense_1_reshape = Reshape((21, 1, 384))

    policy_deconv_0 = Conv2DTranspose(256, (7, 1),
                                      strides=(2, 1),
                                      padding='valid',
                                      activation='linear',
                                      kernel_initializer='glorot_normal',
                                      name='policy_deconv_0')

    policy_deconv_1 = Conv2DTranspose(192, (8, 1),
                                      strides=(2, 1),
                                      padding='valid',
                                      activation='linear',
                                      kernel_initializer='glorot_normal',
                                      name='policy_deconv_1')

    policy_deconv_2 = Conv2DTranspose(128, (7, 1),
                                      strides=(2, 1),
                                      padding='valid',
                                      activation='linear',
                                      kernel_initializer='glorot_normal',
                                      name='policy_deconv_2')

    policy_conv_3 = Conv2D(128, (8, 1),
                           strides=(1, 1),
                           padding='same',
                           activation='linear',
                           kernel_initializer='glorot_normal',
                           name='policy_conv_3')

    policy_conv_4 = Conv2D(64, (8, 1),
                           strides=(1, 1),
                           padding='same',
                           activation='linear',
                           kernel_initializer='glorot_normal',
                           name='policy_conv_4')

    policy_conv_5 = Conv2D(4, (8, 1),
                           strides=(1, 1),
                           padding='same',
                           activation='linear',
                           kernel_initializer='glorot_normal',
                           name='policy_conv_5')

    #policy_deconv_3 = Conv2DTranspose(4, (7, 1), strides=(1, 1), padding='valid', activation='linear', kernel_initializer='glorot_normal', name='policy_deconv_3')

    batch_norm_0 = BatchNormalization(name='policy_batch_norm_0')
    relu_0 = Lambda(lambda x: K.relu(x))
    batch_norm_1 = BatchNormalization(name='policy_batch_norm_1')
    relu_1 = Lambda(lambda x: K.relu(x))
    batch_norm_2 = BatchNormalization(name='policy_batch_norm_2')
    relu_2 = Lambda(lambda x: K.relu(x))

    batch_norm_3 = BatchNormalization(name='policy_batch_norm_3')
    relu_3 = Lambda(lambda x: K.relu(x))

    batch_norm_4 = BatchNormalization(name='policy_batch_norm_4')
    relu_4 = Lambda(lambda x: K.relu(x))

    policy_out_1 = Reshape((seq_length, 4, 1))(policy_conv_5(
        relu_4(
            batch_norm_4(
                policy_conv_4(
                    relu_3(
                        batch_norm_3(
                            policy_conv_3(
                                relu_2(
                                    batch_norm_2(
                                        policy_deconv_2(
                                            relu_1(
                                                batch_norm_1(
                                                    policy_deconv_1(
                                                        relu_0(
                                                            batch_norm_0(
                                                                policy_deconv_0(
                                                                    policy_dense_1_reshape(
                                                                        policy_dense_1(
                                                                            seed_input_1
                                                                        )))))))
                                            ))))))))))))
    policy_out_2 = Reshape((seq_length, 4, 1))(policy_conv_5(
        relu_4(
            batch_norm_4(
                policy_conv_4(
                    relu_3(
                        batch_norm_3(
                            policy_conv_3(
                                relu_2(
                                    batch_norm_2(
                                        policy_deconv_2(
                                            relu_1(
                                                batch_norm_1(
                                                    policy_deconv_1(
                                                        relu_0(
                                                            batch_norm_0(
                                                                policy_deconv_0(
                                                                    policy_dense_1_reshape(
                                                                        policy_dense_1(
                                                                            seed_input_2
                                                                        )))))))
                                            ))))))))))))

    return [latent_input_1, latent_input_2], [policy_out_1, policy_out_2], []
Example #25
    def _build(self):
        vae_x = Input(shape=self.input_dim)
        vae_c1 = Conv2D(filters=32,
                        kernel_size=4,
                        strides=2,
                        activation="relu")(vae_x)
        vae_c2 = Conv2D(filters=64,
                        kernel_size=4,
                        strides=2,
                        activation="relu")(vae_c1)
        vae_c3 = Conv2D(filters=64,
                        kernel_size=4,
                        strides=2,
                        activation="relu")(vae_c2)
        vae_c4 = Conv2D(filters=128,
                        kernel_size=4,
                        strides=2,
                        activation="relu")(vae_c3)

        vae_z_in = Flatten()(vae_c4)

        vae_z_mean = Dense(self.z_dim)(vae_z_in)
        vae_z_log_var = Dense(self.z_dim)(vae_z_in)

        vae_z = Lambda(sampling)([vae_z_mean, vae_z_log_var])
        vae_z_input = Input(shape=(self.z_dim, ))

        vae_dense = Dense(1024)
        vae_dense_model = vae_dense(vae_z)

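        # VAE_DENSE_SIZE is a module-level constant; for the Reshape below to
        # be valid it must match the 1024 units of vae_dense above.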
        vae_z_out = Reshape((1, 1, VAE_DENSE_SIZE))
        vae_z_out_model = vae_z_out(vae_dense_model)

        vae_d1 = Conv2DTranspose(filters=64,
                                 kernel_size=5,
                                 strides=2,
                                 activation="relu")
        vae_d1_model = vae_d1(vae_z_out_model)
        vae_d2 = Conv2DTranspose(filters=64,
                                 kernel_size=5,
                                 strides=2,
                                 activation="relu")
        vae_d2_model = vae_d2(vae_d1_model)
        vae_d3 = Conv2DTranspose(filters=32,
                                 kernel_size=6,
                                 strides=2,
                                 activation="relu")
        vae_d3_model = vae_d3(vae_d2_model)
        vae_d4 = Conv2DTranspose(filters=3,
                                 kernel_size=6,
                                 strides=2,
                                 activation="sigmoid")
        vae_d4_model = vae_d4(vae_d3_model)

        # Decoder

        vae_dense_decoder = vae_dense(vae_z_input)
        vae_z_out_decoder = vae_z_out(vae_dense_decoder)

        vae_d1_decoder = vae_d1(vae_z_out_decoder)
        vae_d2_decoder = vae_d2(vae_d1_decoder)
        vae_d3_decoder = vae_d3(vae_d2_decoder)
        vae_d4_decoder = vae_d4(vae_d3_decoder)

        # Models
        vae = Model(vae_x, vae_d4_model)
        vae_encoder = Model(vae_x, vae_z)
        vae_decoder = Model(vae_z_input, vae_d4_decoder)

        def vae_r_loss(y_true, y_pred):
            return K.sum(K.square(y_true - y_pred), axis=[1, 2, 3])

        def vae_kl_loss(y_true, y_pred):
            return -0.5 * K.sum(1 + vae_z_log_var - K.square(vae_z_mean) -
                                K.exp(vae_z_log_var),
                                axis=-1)

        def vae_loss(y_true, y_pred):
            return vae_r_loss(y_true, y_pred) + vae_kl_loss(y_true, y_pred)

        vae.compile(optimizer='rmsprop',
                    loss=vae_loss,
                    metrics=[vae_r_loss, vae_kl_loss])

        return vae, vae_encoder, vae_decoder
Example #26
    def fit(self, training_set, training_data_name=None):

        import tensorflow as tf
        from keras.backend.tensorflow_backend import set_session

        #         config = tf.ConfigProto()
        #         #config.gpu_options.allow_growth = True
        #         config.gpu_options.per_process_gpu_memory_fraction = 0.8
        #         sess = tf.Session(config=config)
        #         set_session(sess)

        temp_training_set = []
        if training_data_name is not None:
            self.training_data_name = training_data_name

        if any(isinstance(el, list) for el in
               training_set):  # if training set is a sequence of frames
            for sequence in training_set:
                transformed_frames = self.input_frames_transform(
                    np.array(sequence))
                temp_training_set += self.get_training_set(transformed_frames)
        else:
            transformed_frames = self.input_frames_transform(
                np.array(training_set))
            temp_training_set = self.get_training_set(transformed_frames)

        final_training_set = np.array(temp_training_set)

        seq = Sequential()
        seq.add(
            TimeDistributed(Conv2D(128, (11, 11), strides=4, padding="same"),
                            batch_input_shape=(None, 10, 256, 256, 1)))
        seq.add(LayerNormalization())
        seq.add(TimeDistributed(Conv2D(64, (5, 5), strides=2, padding="same")))
        seq.add(LayerNormalization())
        # # # # #
        seq.add(ConvLSTM2D(64, (3, 3), padding="same", return_sequences=True))
        seq.add(LayerNormalization())
        seq.add(ConvLSTM2D(32, (3, 3), padding="same", return_sequences=True))
        seq.add(LayerNormalization())
        seq.add(ConvLSTM2D(64, (3, 3), padding="same", return_sequences=True))
        seq.add(LayerNormalization())
        # # # # #
        seq.add(
            TimeDistributed(
                Conv2DTranspose(64, (5, 5), strides=2, padding="same")))
        seq.add(LayerNormalization())
        seq.add(
            TimeDistributed(
                Conv2DTranspose(128, (11, 11), strides=4, padding="same")))
        seq.add(LayerNormalization())
        seq.add(
            TimeDistributed(
                Conv2D(1, (11, 11), activation="sigmoid", padding="same")))
        print(seq.summary())
        seq.compile(loss='mse',
                    optimizer=keras.optimizers.Adam(lr=1e-4,
                                                    decay=1e-5,
                                                    epsilon=1e-6))
        seq.fit(final_training_set,
                final_training_set,
                batch_size=self.batch_size,
                epochs=self.epochs,
                shuffle=False)

        self.model = seq
        self.save_model()
Example #27
def get_unet_extended():
    metrics = dice_coef
    include_label_wise_dice_coefficients = True
    inputs = Input((patch_size, patch_size, 1))
    conv1 = Conv2D(BASE, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(BASE, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(BASE*2, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(BASE*2, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(BASE*4, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(BASE*4, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(BASE*8, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(BASE*8, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2D(BASE*16, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(BASE*16, (3, 3), activation='relu', padding='same')(conv5)
    pool5 = MaxPooling2D(pool_size=(2, 2))(conv5)
    
    conv5_extend = Conv2D(BASE*32, (3, 3), activation='relu', padding='same')(pool5)
    conv5_extend = Conv2D(BASE*32, (3, 3), activation='relu', padding='same')(conv5_extend)

    up6_extend = concatenate([Conv2DTranspose(BASE*16, (2, 2), strides=(2, 2), padding='same')(conv5_extend), conv5], axis=3)
    conv6_extend = Conv2D(BASE*16, (3, 3), activation='relu', padding='same')(up6_extend)
    conv6_extend = Conv2D(BASE*16, (3, 3), activation='relu', padding='same')(conv6_extend)
    
    up6 = concatenate([Conv2DTranspose(BASE*8, (2, 2), strides=(2, 2), padding='same')(conv6_extend), conv4], axis=3)
    conv6 = Conv2D(BASE*8, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(BASE*8, (3, 3), activation='relu', padding='same')(conv6)

    up7 = concatenate([Conv2DTranspose(BASE*4, (2, 2),strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Conv2D(BASE*4, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(BASE*4, (3, 3), activation='relu', padding='same')(conv7)

    up8 = concatenate([Conv2DTranspose(BASE*2, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Conv2D(BASE*2, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(BASE*2, (3, 3), activation='relu', padding='same')(conv8)

    up9 = concatenate([Conv2DTranspose(BASE, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Conv2D(BASE, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(BASE, (3, 3), activation='relu', padding='same')(conv9)

    conv10 = Conv2D(num_classes, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    
    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and num_classes > 1:
        label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(num_classes)]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics
            
    model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=metrics)
    return model
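get_label_dice_coefficient_function is imported from elsewhere in this project; a minimal sketch, assuming the per-channel wrapper pattern common in multilabel U-Nets (dice_coef is the metric already referenced above):

from functools import partial

def label_wise_dice_coefficient(y_true, y_pred, label_index):
    # Dice restricted to a single channel of the one-hot masks
    return dice_coef(y_true[..., label_index], y_pred[..., label_index])

def get_label_dice_coefficient_function(label_index):
    f = partial(label_wise_dice_coefficient, label_index=label_index)
    f.__name__ = 'label_{0}_dice_coef'.format(label_index)  # metric name in logs
    return f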
Example #28
def mkPretrainedVGG16():
    model = keras.applications.VGG16(input_shape=(targetShape[0],
                                                  targetShape[1], 3),
                                     include_top=False,
                                     weights='imagenet')

    #for l in model.layers:
    #    l.trainable = False

    input = model.inputs[0]
    vggOutput = model.outputs[0]
    x = vggOutput

    block4Pool = model.get_layer('block4_pool').output
    block4Shortcut = Conv2D(2,
                            kernel_size=(1, 1),
                            kernel_initializer='he_normal')(block4Pool)
    block3Pool = model.get_layer('block3_pool').output
    block3Shortcut = Conv2D(2,
                            kernel_size=(1, 1),
                            kernel_initializer='he_normal')(block3Pool)

    x = Conv2D(2,
               kernel_size=(1, 1),
               strides=(1, 1),
               kernel_initializer='he_normal',
               padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(2,
                        kernel_size=(4, 4),
                        strides=(2, 2),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Add()([x, block4Shortcut])

    x = Conv2DTranspose(2,
                        kernel_size=(4, 4),
                        strides=(2, 2),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Add()([x, block3Shortcut])

    x = Conv2DTranspose(2,
                        kernel_size=(16, 16),
                        strides=(8, 8),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    fcnOutput = x

    x = vggOutput
    x = Flatten()(x)
    x = Dense(4 * 4 * 512)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((4, 4, 512))(x)

    x = Conv2DTranspose(512,
                        kernel_size=(4, 4),
                        strides=(1, 1),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = UpSampling2D()(x)  #8 8
    x = Conv2DTranspose(512,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(512,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(512,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = UpSampling2D()(x)  # 16x16
    x = Conv2DTranspose(256,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(256,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(256,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = UpSampling2D()(x)  # 32x32
    x = Conv2DTranspose(128,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(128,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(128,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = UpSampling2D()(x)  # 64x64
    x = Conv2DTranspose(64,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(64,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = UpSampling2D()(x)  # 128x128
    x = Conv2DTranspose(64,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2DTranspose(64,
                        kernel_size=(3, 3),
                        kernel_initializer='he_normal',
                        padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(2,
               kernel_size=(1, 1),
               kernel_initializer='he_normal',
               padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    deconvOutput = x

    # Fuse the dense-decoder branch with the FCN branch before final scoring
    x = Concatenate()([deconvOutput, fcnOutput])

    #x = Dropout(0.2)(x)
    x = Conv2D(1,
               kernel_size=(1, 1),
               strides=(1, 1),
               kernel_initializer='he_normal',
               padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('sigmoid')(x)

    model = Model(inputs=[inp], outputs=[x])
    model.summary()  # prints the layer-by-layer architecture
    return model
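
The two decoder branches in mkPretrainedVGG16 only meet at a single input size: the Reshape((4, 4, 512)) bottleneck plus five upsampling steps pin the dense branch at 128x128, while the FCN branch returns to the input resolution, so the free variable targetShape must be (128, 128). A hypothetical driver under that assumption; the freezing loop, compile settings, and data shapes are illustrative, not from the source:

targetShape = (128, 128)  # assumed; a free variable read inside mkPretrainedVGG16

model = mkPretrainedVGG16()

# Optionally freeze the pretrained encoder, as the commented-out loop suggests;
# only the VGG16 layers carry 'block*' names
for layer in model.layers:
    if layer.name.startswith('block'):
        layer.trainable = False

model.compile(optimizer='adam',
              loss='binary_crossentropy',  # single sigmoid channel -> binary mask
              metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=8, epochs=10)
#   x_train: (N, 128, 128, 3), y_train: (N, 128, 128, 1)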
Example #29
0
           strides=(1, 1),
           activation='relu',
           name='Conv7'))
model.add(Dropout(0.2))

# Pooling 3
model.add(MaxPooling2D(pool_size=pool_size))

# Upsample 1
model.add(UpSampling2D(size=pool_size))

# Deconv 1
model.add(
    Conv2DTranspose(64, (3, 3),
                    padding='valid',
                    strides=(1, 1),
                    activation='relu',
                    name='Deconv1'))
model.add(Dropout(0.2))

# Deconv 2
model.add(
    Conv2DTranspose(64, (3, 3),
                    padding='valid',
                    strides=(1, 1),
                    activation='relu',
                    name='Deconv2'))
model.add(Dropout(0.2))

# Upsample 2
model.add(UpSampling2D(size=pool_size))
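
In the fragment above the Conv2DTranspose layers use padding='valid' with unit strides, so they do not upsample; each one grows the spatial dimensions by kernel_size - 1, mirroring earlier 'valid' 3x3 convolutions, while the UpSampling2D layers do the actual x2 scaling. A quick shape check, assuming the TensorFlow 2.x Keras API (the 30x30 input is arbitrary):

import tensorflow as tf

x = tf.zeros((1, 30, 30, 64))
y = tf.keras.layers.Conv2DTranspose(64, (3, 3), strides=(1, 1), padding='valid')(x)
print(y.shape)  # (1, 32, 32, 64): each spatial dim grows by kernel_size - 1 = 2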
Example #30
0
# Contracting path (continued): p2 and act_fn are defined earlier in this example
c3 = Conv2D(40, (2, 2), activation=act_fn, kernel_initializer='he_normal', padding='same')(p2)
c3 = Dropout(0.2)(c3)
c3 = Conv2D(40, (2, 2), activation=act_fn, kernel_initializer='he_normal', padding='same')(c3)
p3 = MaxPooling2D((2, 2))(c3)

c4 = Conv2D(40, (2, 2), activation=act_fn, kernel_initializer='he_normal', padding='same')(p3)
c4 = Dropout(0.2)(c4)
c4 = Conv2D(40, (2, 2), activation=act_fn, kernel_initializer='he_normal', padding='same')(c4)
p4 = MaxPooling2D(pool_size=(1, 1))(c4)

c5 = Conv2D(80, (2, 2), activation=act_fn, kernel_initializer='he_normal', padding='same')(p4)
c5 = Dropout(0.2)(c5)
c5 = Conv2D(80, (2, 2), activation=act_fn, kernel_initializer='he_normal', padding='same')(c5)

# Expanding path: transpose convolutions with skip connections to the encoder
u6 = Conv2DTranspose(40, (2, 2), strides=(1, 1), padding='same')(c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(40, (3, 3), activation=act_fn, kernel_initializer='he_normal', padding='same')(u6)
c6 = Dropout(0.2)(c6)
c6 = Conv2D(40, (3, 3), activation=act_fn, kernel_initializer='he_normal', padding='same')(c6)

u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(64, (3, 3), activation=act_fn, kernel_initializer='he_normal', padding='same')(u7)
c7 = Dropout(0.2)(c7)
c7 = Conv2D(64, (3, 3), activation=act_fn, kernel_initializer='he_normal', padding='same')(c7)

u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(32, (3, 3), activation=act_fn, kernel_initializer='he_normal', padding='same')(u8)
c8 = Dropout(0.1)(c8)
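
Note the unusual pool_size=(1, 1) at p4: spatially it is a no-op, so the strides=(1, 1) transpose convolution at u6 keeps c5 at the same resolution as c4, and the skip concatenation lines up without any real upsampling. A small shape check under an assumed 16x16 feature map (the true sizes depend on the truncated start of this example):

import tensorflow as tf
from tensorflow.keras import layers

c4 = tf.zeros((1, 16, 16, 40))                               # stand-in for c4
p4 = layers.MaxPooling2D(pool_size=(1, 1))(c4)               # spatial no-op
c5 = layers.Conv2D(80, (2, 2), padding='same')(p4)           # (1, 16, 16, 80)
u6 = layers.Conv2DTranspose(40, (2, 2), strides=(1, 1), padding='same')(c5)
print(layers.concatenate([u6, c4]).shape)                    # (1, 16, 16, 80)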