# ---- Beispiel #1 ----
    def u_net(self, inputs, prefix, squeeze=4):
        """Build a U-Net sub-graph on `inputs`.

        All layer names are prefixed with `prefix` so several copies of this
        sub-net can coexist in one model. `squeeze` is the number of
        down-/up-sampling levels.
        """
        def conv_bn_relu(t, width, conv_name, bn_name, relu_name):
            # 1x1 conv (no bias, linear) -> BN -> ReLU.
            t = Conv2D(width,
                       kernel_size=1,
                       padding='same',
                       use_bias=False,
                       activation=None,
                       name=conv_name)(t)
            t = BatchNormalization(epsilon=1e-3,
                                   momentum=0.999,
                                   name=bn_name)(t)
            return Activation('relu', name=relu_name)(t)

        x = inputs
        skips = []

        # Contracting path.
        for i in range(squeeze):
            width = self.filters * (i + 1)
            x = conv_bn_relu(x, width,
                             prefix + f'contract_Conv1_{i}',
                             prefix + f'contract_BN1_{i}',
                             prefix + f'contract_relu1_{i}')
            x = conv_bn_relu(x, width,
                             prefix + f'contract_Conv2_{i}',
                             prefix + f'contract_BN2_{i}',
                             prefix + f'contract_relu2_{i}')
            skips.append(x)

            # Strided 4x4 conv halves the spatial dims and doubles the width.
            x = Conv2D(width * 2,
                       kernel_size=4,
                       strides=2,
                       padding='same',
                       use_bias=False,
                       activation='relu',
                       name=prefix + f'contract_Conv3_{i}')(x)
            x = BatchNormalization(epsilon=1e-3,
                                   momentum=0.999,
                                   name=prefix + f'contract_BN3_{i}')(x)

        # Expand out width at the bottleneck.
        for mid_name in ('mid_1', 'mid_2'):
            x = Conv2D(self.filters * squeeze**2,
                       kernel_size=1,
                       padding='same',
                       use_bias=False,
                       activation=None,
                       name=prefix + mid_name)(x)

        # Expanding path: up-conv, merge skip, two conv-BN-ReLU stacks.
        for i in range(squeeze):
            width = self.filters * (squeeze - i)
            x = Conv2DTranspose(width,
                                kernel_size=4,
                                strides=2,
                                padding='same',
                                activation='relu',
                                name=prefix + f'expand_Conv1_{i}')(x)
            x = Concatenate(name=prefix + f'concat{i}',
                            axis=-1)([skips.pop(), x])
            x = conv_bn_relu(x, width,
                             prefix + f'expand_Conv2_{i}',
                             prefix + f'expand_BN1_{i}',
                             prefix + f'expand_relu_1_{i}')
            x = conv_bn_relu(x, width,
                             prefix + f'expand_Conv3_{i}',
                             prefix + f'expand_BN2_{i}',
                             prefix + f'expand_relu_2_{i}')
        return x
# ---- Beispiel #2 ----
# Small U-Net (channels-last). Encoder: two 3x3 ReLU convs per level followed
# by a 2x2 max-pool that halves H and W.
# NOTE(review): `inputs` is defined outside this snippet — presumably a Keras
# Input tensor.
c1 = Conv2D(8, (3, 3), activation='relu', padding='same')(inputs)
c1 = Conv2D(8, (3, 3), activation='relu', padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)

c2 = Conv2D(16, (3, 3), activation='relu', padding='same')(p1)
c2 = Conv2D(16, (3, 3), activation='relu', padding='same')(c2)
p2 = MaxPooling2D((2, 2))(c2)

c3 = Conv2D(32, (3, 3), activation='relu', padding='same')(p2)
c3 = Conv2D(32, (3, 3), activation='relu', padding='same')(c3)
p3 = MaxPooling2D((2, 2))(c3)

# Bottleneck: no pooling after this level.
c4 = Conv2D(64, (3, 3), activation='relu', padding='same')(p3)
c4 = Conv2D(64, (3, 3), activation='relu', padding='same')(c4)

# Decoder: 2x2 transposed convs upsample x2; encoder features are merged back
# in via channel concatenation (skip connections).
u5 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c4)
u5 = concatenate([u5, c3])
c5 = Conv2D(32, (3, 3), activation='relu', padding='same')(u5)
c5 = Conv2D(32, (3, 3), activation='relu', padding='same')(c5)

u6 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = concatenate([u6, c2])
c6 = Conv2D(16, (3, 3), activation='relu', padding='same')(u6)
c6 = Conv2D(16, (3, 3), activation='relu', padding='same')(c6)

u7 = Conv2DTranspose(8, (2, 2), strides=(2, 2), padding='same')(c6)
# axis=3 is the channel axis of a 4D channels-last tensor — equivalent to the
# default axis=-1 used in the two concatenates above.
u7 = concatenate([u7, c1], axis=3)
c7 = Conv2D(8, (3, 3), activation='relu', padding='same')(u7)
c7 = Conv2D(8, (3, 3), activation='relu', padding='same')(c7)

# 1x1 sigmoid head: per-pixel binary prediction.
outputs = Conv2D(1, (1, 1), activation='sigmoid')(c7)
# ---- Beispiel #3 ----
    conv2d_6 = Conv2D(128, 1, padding='same')(ac_5)
    bn_6 = BatchNormalization()(conv2d_6)
    ac_6 = ReLU()(bn_6)

    flat2 = Flatten()(ac_6)

    return flat2


# Generator layers, created once at module level so that repeated calls to
# generate() below reuse the same layer instances (and thus the same weights).
dense1 = Dense(128)
dense2 = Dense(128 * 8 * 8)
reshape = Reshape((8, 8, 128))
conv1 = Conv2D(256, 3, padding='same')
conv2 = Conv2D(1, 3, activation='relu', padding='same')
relu = ReLU()  # stateless, so one instance can safely be applied in several places
convtans1 = Conv2DTranspose(128, 3, strides=2, padding='same')
convtans2 = Conv2DTranspose(64, 3, strides=2, padding='same')

def generate(inp3):
    """Map a latent vector `inp3` through the module-level generator layers.

    NOTE(review): this snippet appears truncated — `conv2` and `convtans2`
    are defined above but never applied here, and no value is returned in the
    visible portion. Confirm against the full source.
    """

    x = dense1(inp3)
    x = dense2(x)
    x = relu(x)
    x = reshape(x)  # project to an 8x8 feature map with 128 channels

    x = conv1(x)
    x = relu(x)

    x = convtans1(x)  # strides=2 transposed conv: upsamples 8x8 -> 16x16
    x = relu(x)
# ---- Beispiel #4 ----
# GENERATOR
# DCGAN-style generator: latent vector -> 7x7x512 tensor -> two stride-2
# transposed convolutions (7 -> 14 -> 28 spatially).
# NOTE(review): latent_dim, LEAKY_RELU_ALPHA and DROPOUT_RATE are defined
# elsewhere in the file.
g = Sequential()

g.add(
    Dense(units=7 * 7 * 512,
          input_dim=latent_dim,
          activation=LeakyReLU(alpha=LEAKY_RELU_ALPHA)))

g.add(BatchNormalization())

g.add(Reshape(target_shape=(7, 7, 512)))

g.add(
    Conv2DTranspose(filters=128,
                    kernel_size=[4, 4],
                    strides=2,
                    padding="same",
                    activation=LeakyReLU(alpha=LEAKY_RELU_ALPHA)))

g.add(BatchNormalization())

g.add(Dropout(rate=DROPOUT_RATE))

g.add(
    Conv2DTranspose(filters=32,
                    kernel_size=[4, 4],
                    strides=2,
                    padding="same",
                    activation=LeakyReLU(alpha=LEAKY_RELU_ALPHA)))

g.add(BatchNormalization())
def upsample_conv(filters, kernel_size, strides, padding):
    """Return a learnable upsampling layer (a transposed convolution)."""
    return Conv2DTranspose(filters,
                           kernel_size,
                           strides=strides,
                           padding=padding)
# ---- Beispiel #6 ----
def conv2dtranspose(filters: int):
    """Return a 2x2, stride-2 transposed convolution with 'same' padding."""
    layer = Conv2DTranspose(filters=filters,
                            kernel_size=(2, 2),
                            strides=(2, 2),
                            padding='same')
    return layer
 def __init__(
     self,
     n_scales=4,
     n_filters=128,
     kernel_size=3,
     bn=False,
     n_convs_per_scale=DEFAULT_N_CONVS_PER_SCALE,
     communications_between_scales=DEFAULT_COMMUNICATION_BETWEEN_SCALES,
     beta=0.2,
     n_outputs=1,
     **kwargs,
 ):
     """Build the FocNet building blocks.

     Args:
         n_scales: number of resolution scales in the network.
         n_filters: filter count shared by every conv block.
         kernel_size: kernel size of the convolutions.
         bn: whether the conv blocks use batch normalization.
         beta: coefficient stored on the instance (not used in __init__).
         n_outputs: number of channels of the final 1x1 convolution.
         **kwargs: forwarded to the parent constructor.
     """
     super(FocNet, self).__init__(**kwargs)
     self.n_scales = n_scales
     self.n_filters = n_filters
     self.kernel_size = kernel_size
     self.bn = bn
     self.n_convs_per_scale = n_convs_per_scale
     self.communications_between_scales = communications_between_scales
     self.beta = beta
     self.n_outputs = n_outputs
     self.pooling = AveragePooling2D(padding='same')
     # Unpooling is not specified in the paper, but in the reference code a
     # deconv is used:
     # https://github.com/hsijiaxidian/FOCNet/blob/master/FracDCNN.m#L415
     # Half of each scale's communication entries are up-sampling links,
     # hence the `// 2` below — TODO confirm against the communication spec.
     self.unpoolings_per_scale = [[
         Conv2DTranspose(
             self.n_filters,
             self.kernel_size,
             strides=(2, 2),
             padding='same',
         ) for _ in range(
             len(self.communications_between_scales[i_scale]) // 2)
     ] for i_scale in range(self.n_scales - 1)]
     self.n_switches_per_scale = []
     self.compute_n_switches_per_scale()
     self.switches_per_scale = [[
         SwitchLayer() for _ in range(self.n_switches_per_scale[i_scale])
     ] for i_scale in range(self.n_scales)]
     # NOTE(review): the original trailing comments here ("we output a
     # grayscale image" / "linear combination") looked copy-pasted from
     # final_conv; first_conv is a full n_filters, kernel_size convolution.
     self.first_conv = Conv2D(
         self.n_filters,
         self.kernel_size,
         padding='same',
         use_bias=True,
     )
     self.conv_blocks_per_scale = [[
         FocConvBlock(
             n_filters=self.n_filters,
             kernel_size=self.kernel_size,
             bn=self.bn,
         ) for _ in range(n_conv_blocks)
     ] for n_conv_blocks in self.n_convs_per_scale]
     # 1x1 conv: a linear combination of features into n_outputs channels.
     self.final_conv = Conv2D(
         self.n_outputs,
         1,
         padding='same',
         use_bias=True,
     )
     self.needs_to_compute = {}
     self.build_needs_to_compute()
def SE_U_Net_2D(image_shape,
                activation='elu',
                feature_maps=[16, 32, 64, 128, 256],
                depth=4,
                drop_values=[0.1, 0.1, 0.2, 0.2, 0.3],
                spatial_dropout=False,
                batch_norm=False,
                k_init='he_normal',
                loss_type="bce",
                optimizer="sgd",
                lr=0.002,
                n_classes=1):
    """Create a 2D U-Net with squeeze-excite blocks.

       Reference `Squeeze and Excitation Networks <https://arxiv.org/abs/1709.01507>`_.

       Parameters
       ----------
       image_shape : 2D tuple
           Dimensions of the input image.

       activation : str, optional
           Keras available activation type.

       feature_maps : array of ints, optional
           Feature maps to use on each level. Must have length ``depth+1``.

       depth : int, optional
           Depth of the network.

       drop_values : array of floats, optional
           Dropout value of each level. Must have length ``depth+1``. The
           default is a pyramidal schedule from ``0.1`` up to ``0.3``.

       spatial_dropout : bool, optional
           Use spatial dropout instead of the normal dropout.

       batch_norm : bool, optional
           Apply batch normalization after each convolution.

       k_init : string, optional
           Kernel initialization for convolutional layers.

       loss_type : str, optional
           Loss type to use, three types available: ``bce`` (Binary Cross
           Entropy), ``w_bce`` (Weighted BCE, based on weight maps) and
           ``w_bce_dice`` (Weighted loss: ``weight1*BCE + weight2*Dice``).

       optimizer : str, optional
           Optimizer used to minimize the loss function. Possible options:
           ``sgd`` or ``adam``.

       lr : float, optional
           Learning rate value.

       n_classes : int, optional
           Number of classes.

       Returns
       -------
       model : Keras model
           Model containing the U-Net.
    """

    if len(feature_maps) != depth + 1:
        raise ValueError("feature_maps dimension must be equal depth+1")
    if len(drop_values) != depth + 1:
        raise ValueError("'drop_values' dimension must be equal depth+1")

    def _conv(t, n_feat, use_se):
        # 3x3 conv -> optional BN -> activation -> optional squeeze-excite.
        t = Conv2D(n_feat, (3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(t)
        t = BatchNormalization()(t) if batch_norm else t
        t = Activation(activation)(t)
        if use_se:
            t = squeeze_excite_block(t)
        return t

    def _dropout(t, level):
        # Optional (spatial) dropout between the two convs of a level.
        # The `is not None` guard mirrors the original code; in practice the
        # length check above already requires drop_values to be a sequence.
        if drop_values is not None:
            if spatial_dropout:
                t = SpatialDropout2D(drop_values[level])(t)
            else:
                t = Dropout(drop_values[level])(t)
        return t

    # Dynamic spatial dims so the net accepts any input size; only the channel
    # count is fixed from image_shape.
    dinamic_dim = (None, ) * (len(image_shape) - 1) + (image_shape[-1], )
    x = Input(dinamic_dim)
    #x = Input(image_shape)
    inputs = x

    if loss_type == "w_bce":
        weights = Input(image_shape)

    # Encoder outputs kept for the skip connections of the U-Net.
    skips = []

    # ENCODER
    for i in range(depth):
        x = _conv(x, feature_maps[i], use_se=True)
        x = _dropout(x, i)
        x = _conv(x, feature_maps[i], use_se=True)
        skips.append(x)
        x = MaxPooling2D((2, 2))(x)

    # BOTTLENECK (no squeeze-excite blocks here, as in the original design)
    x = _conv(x, feature_maps[depth], use_se=False)
    x = _dropout(x, depth)
    x = _conv(x, feature_maps[depth], use_se=False)

    # DECODER
    for i in range(depth - 1, -1, -1):
        x = Conv2DTranspose(feature_maps[i], (2, 2),
                            strides=(2, 2),
                            padding='same')(x)
        x = concatenate([x, skips[i]])
        x = _conv(x, feature_maps[i], use_se=True)
        x = _dropout(x, i)
        x = _conv(x, feature_maps[i], use_se=True)

    outputs = Conv2D(n_classes, (1, 1), activation='sigmoid')(x)

    # Loss type decides whether the weight map is a second model input.
    if loss_type == "w_bce":
        model = Model(inputs=[inputs, weights], outputs=[outputs])
    else:
        model = Model(inputs=[inputs], outputs=[outputs])

    # Select the optimizer.
    # NOTE(review): `lr`, `decay` and `epsilon=None` are the legacy Keras
    # argument names; recent tf.keras versions use `learning_rate` — confirm
    # the TF version this project targets before modernizing.
    if optimizer == "sgd":
        opt = tf.keras.optimizers.SGD(lr=lr,
                                      momentum=0.99,
                                      decay=0.0,
                                      nesterov=False)
    elif optimizer == "adam":
        opt = tf.keras.optimizers.Adam(lr=lr,
                                       beta_1=0.9,
                                       beta_2=0.999,
                                       epsilon=None,
                                       decay=0.0,
                                       amsgrad=False)
    else:
        raise ValueError("Error: optimizer value must be 'sgd' or 'adam'")

    # Compile the model
    if loss_type == "bce":
        if n_classes > 1:
            model.compile(optimizer=opt,
                          loss='categorical_crossentropy',
                          metrics=[jaccard_index_softmax])
        else:
            model.compile(optimizer=opt,
                          loss='binary_crossentropy',
                          metrics=[jaccard_index])
    elif loss_type == "w_bce":
        model.compile(optimizer=opt,
                      loss=binary_crossentropy_weighted(weights),
                      metrics=[jaccard_index])
    elif loss_type == "w_bce_dice":
        model.compile(optimizer=opt,
                      loss=weighted_bce_dice_loss(w_dice=0.66, w_bce=0.33),
                      metrics=[jaccard_index])
    else:
        raise ValueError("'loss_type' must be 'bce', 'w_bce' or 'w_bce_dice'")

    return model
# ---- Beispiel #9 ----
def unet(num_classes, input_shape, lr_init, lr_decay):
    """Build and compile a VGG16-style U-Net for semantic segmentation.

    Parameters
    ----------
    num_classes : int
        Number of output classes (softmax channels).
    input_shape : tuple
        Shape of the input images.
    lr_init, lr_decay : float
        Initial learning rate and decay passed to Adam.

    Returns
    -------
    Compiled Keras model.
    """
    def _conv_bn_relu(t, filters, name=None):
        # 3x3 'same' conv -> BN -> ReLU. `name` is only set for the
        # VGG-style encoder convs, exactly as in the original layout.
        t = Conv2D(filters, (3, 3), padding='same', name=name)(t)
        t = BatchNormalization()(t)
        return Activation('relu')(t)

    def _up_block(t, skip, filters):
        # 2x2 stride-2 transposed conv (x2 upsample) -> BN -> ReLU,
        # then concatenate the encoder skip tensor and run two conv blocks.
        t = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(t)
        t = BatchNormalization()(t)
        t = Activation('relu')(t)
        t = concatenate([t, skip])
        t = _conv_bn_relu(t, filters)
        return _conv_bn_relu(t, filters)

    img_input = Input(input_shape)

    # Encoder (VGG16-like: 2-2-3-3-3 convs, pooling between blocks)
    x = _conv_bn_relu(img_input, 64, 'block1_conv1')
    block_1_out = _conv_bn_relu(x, 64, 'block1_conv2')
    x = MaxPooling2D()(block_1_out)

    x = _conv_bn_relu(x, 128, 'block2_conv1')
    block_2_out = _conv_bn_relu(x, 128, 'block2_conv2')
    x = MaxPooling2D()(block_2_out)

    x = _conv_bn_relu(x, 256, 'block3_conv1')
    x = _conv_bn_relu(x, 256, 'block3_conv2')
    block_3_out = _conv_bn_relu(x, 256, 'block3_conv3')
    x = MaxPooling2D()(block_3_out)

    x = _conv_bn_relu(x, 512, 'block4_conv1')
    x = _conv_bn_relu(x, 512, 'block4_conv2')
    block_4_out = _conv_bn_relu(x, 512, 'block4_conv3')
    x = MaxPooling2D()(block_4_out)

    # Block 5 (no pooling afterwards — it feeds the decoder directly)
    x = _conv_bn_relu(x, 512, 'block5_conv1')
    x = _conv_bn_relu(x, 512, 'block5_conv2')
    x = _conv_bn_relu(x, 512, 'block5_conv3')

    # Decoder: four upsampling stages with skip connections
    x = _up_block(x, block_4_out, 512)
    x = _up_block(x, block_3_out, 256)
    x = _up_block(x, block_2_out, 128)
    x = _up_block(x, block_1_out, 64)

    # Per-pixel class probabilities
    x = Conv2D(num_classes, (3, 3), activation='softmax', padding='same')(x)

    model = Model(img_input, x)
    # NOTE(review): `lr`/`decay` are legacy Keras optimizer kwargs
    # (`learning_rate` in newer tf.keras) — keep while targeting old TF.
    model.compile(optimizer=Adam(lr=lr_init, decay=lr_decay),
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    model.summary()
    return model
# NOTE(review): fragment — c1..c4 above and the remaining decoder stages below
# are outside this snippet.
c4 = BatchNormalization()(c4)
p4 = MaxPooling2D(pool_size=(2, 2))(c4)

# Bottleneck: two 512-filter convs with dropout in between.
c5 = Conv2D(512, (3, 3),
            activation='relu',
            kernel_initializer='he_normal',
            padding='same')(p4)
c5 = BatchNormalization()(c5)
c5 = Dropout(0.3)(c5)
c5 = Conv2D(512, (3, 3),
            activation='relu',
            kernel_initializer='he_normal',
            padding='same')(c5)
c5 = BatchNormalization()(c5)

# First decoder stage: x2 upsample, merge skip, two convs.
# NOTE(review): 128 transpose filters feeding 256-filter convs breaks the
# usual halving pattern (512 -> 256); confirm whether
# Conv2DTranspose(256, ...) was intended.
u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(256, (3, 3),
            activation='relu',
            kernel_initializer='he_normal',
            padding='same')(u6)
c6 = BatchNormalization()(c6)
c6 = Dropout(0.2)(c6)
c6 = Conv2D(256, (3, 3),
            activation='relu',
            kernel_initializer='he_normal',
            padding='same')(c6)
c6 = BatchNormalization()(c6)

# Second decoder stage (continues past the visible snippet).
u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
u7 = concatenate([u7, c3])
# ---- Beispiel #11 ----
def get_unet(hyperparameters):
    """Build and compile a configurable 4-level 2D U-Net.

    Expected `hyperparameters` keys: 'input_shape', 'base' (filter count of
    the first level), 'batch_norm', 'dropout' (0 disables it),
    'last_layer_units', 'last_layer_activation', 'loss',
    'optimizer' (optimizer class), 'lr' and 'metrics_func'.

    Returns
    -------
    Compiled Keras model.
    """
    base = hyperparameters['base']

    def _double_conv(t, filters):
        # Two 3x3 'same' convs, each optionally followed by BN, then ReLU.
        for _ in range(2):
            t = Conv2D(filters, (3, 3), padding='same')(t)
            if hyperparameters['batch_norm']:
                t = BatchNormalization()(t)
            t = Activation('relu')(t)
        return t

    def _down(t):
        # 2x2 max-pool plus optional dropout between encoder levels.
        t = MaxPooling2D(pool_size=(2, 2))(t)
        if hyperparameters['dropout'] != 0:
            t = Dropout(hyperparameters['dropout'])(t)
        return t

    def _up(t, skip, filters):
        # x2 learned upsampling, then concatenate the encoder skip tensor
        # on the channel axis.
        up = Conv2DTranspose(filters, (2, 2), strides=(2, 2),
                             padding='same')(t)
        return concatenate([up, skip], axis=3)

    inputs = Input(hyperparameters['input_shape'])

    # Encoder
    conv1 = _double_conv(inputs, base)
    conv2 = _double_conv(_down(conv1), base * 2)
    conv3 = _double_conv(_down(conv2), base * 4)
    conv4 = _double_conv(_down(conv3), base * 8)

    # Bottleneck
    conv5 = _double_conv(_down(conv4), base * 16)

    # Decoder
    conv6 = _double_conv(_up(conv5, conv4, base * 8), base * 8)
    # FIX: this level previously hard-coded 128 transpose filters; use
    # base * 4 so it scales with 'base' like every other level (identical
    # behaviour for the original base == 32).
    conv7 = _double_conv(_up(conv6, conv3, base * 4), base * 4)
    conv8 = _double_conv(_up(conv7, conv2, base * 2), base * 2)
    conv9 = _double_conv(_up(conv8, conv1, base), base)

    # 1x1 head with configurable unit count and activation.
    conv10 = Conv2D(hyperparameters['last_layer_units'], (1, 1),
                    activation=hyperparameters['last_layer_activation'])(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    print(model.summary())
    model.compile(
        loss=hyperparameters['loss'],
        optimizer=hyperparameters['optimizer'](lr=hyperparameters['lr']),
        metrics=hyperparameters['metrics_func'])
    return model
# ---- Beispiel #12 ----
def Nest_Net2(input_shape, num_class=1, deep_supervision=False):
    """Build a UNet++ ("nested U-Net") with optional deep supervision.

    `standard_unit` and `weighted_bce_dice_loss` are defined elsewhere in
    this file. conv{i}_{j} denotes the node at encoder depth i reached after
    j-1 up-sampling steps; the trailing shape comments assume a 256x256 input.
    """
    nb_filter = [32, 64, 128, 256, 512]
    # nb_filter = [16, 32, 64, 128, 256]
    mode = 'residual'  # mode='residual' seems to improve better than DS
    # Handle Dimension Ordering for different backends
    bn_axis = 3
    inputs = Input(shape=input_shape)
    # The first unit omits mode: a residual add requires matching input and
    # output channel counts, which the raw input does not have.
    conv1_1 = standard_unit(inputs, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)  # (?,128,128,32)

    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1], mode=mode)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)  # (?,64,64,64)

    up1_2 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up12', padding='same')(conv2_1)
    conv1_2 = concatenate([up1_2, conv1_1], name='merge12', axis=bn_axis)  # (?,256,256,64)
    conv1_2 = standard_unit(conv1_2, stage='12', nb_filter=nb_filter[0], mode=mode)  # (?,256,256,32)

    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2], mode=mode)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)

    up2_2 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up22', padding='same')(conv3_1)
    conv2_2 = concatenate([up2_2, conv2_1], name='merge22', axis=bn_axis)
    conv2_2 = standard_unit(conv2_2, stage='22', nb_filter=nb_filter[1], mode=mode)

    up1_3 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up13', padding='same')(conv2_2)
    # Dense skip pathway: each node concatenates ALL same-depth predecessors.
    conv1_3 = concatenate([up1_3, conv1_1, conv1_2], name='merge13', axis=bn_axis)
    conv1_3 = standard_unit(conv1_3, stage='13', nb_filter=nb_filter[0], mode=mode)  # (?,256,256,32)

    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3], mode=mode)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)

    up3_2 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up32', padding='same')(conv4_1)
    conv3_2 = concatenate([up3_2, conv3_1], name='merge32', axis=bn_axis)
    conv3_2 = standard_unit(conv3_2, stage='32', nb_filter=nb_filter[2], mode=mode)

    up2_3 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up23', padding='same')(conv3_2)
    conv2_3 = concatenate([up2_3, conv2_1, conv2_2], name='merge23', axis=bn_axis)
    conv2_3 = standard_unit(conv2_3, stage='23', nb_filter=nb_filter[1], mode=mode)

    up1_4 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up14', padding='same')(conv2_3)
    conv1_4 = concatenate([up1_4, conv1_1, conv1_2, conv1_3], name='merge14', axis=bn_axis)
    conv1_4 = standard_unit(conv1_4, stage='14', nb_filter=nb_filter[0], mode=mode)

    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4], mode=mode)

    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2), strides=(2, 2), name='up42', padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3], mode=mode)

    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up33', padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1, conv3_2], name='merge33', axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2], mode=mode)

    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up24', padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1, conv2_2, conv2_3], name='merge24', axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1], mode=mode)

    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up15', padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1, conv1_2, conv1_3, conv1_4], name='merge15', axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0], mode=mode)

    # One 1x1 sigmoid head per full-resolution node (deep supervision heads).
    nestnet_output_1 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_1',
                              kernel_initializer='he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_2)
    nestnet_output_2 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_2',
                              kernel_initializer='he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_3)
    nestnet_output_3 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_3',
                              kernel_initializer='he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_4)
    nestnet_output_4 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_4',
                              kernel_initializer='he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_5)
    # Fifth head fuses all full-resolution nodes (used with deep supervision).
    conv_fuse = concatenate([conv1_2, conv1_3, conv1_4, conv1_5], name='merge_fuse', axis=bn_axis)
    nestnet_output_5 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_5',
                              kernel_initializer='he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv_fuse)


    if deep_supervision:
        # Five supervised outputs with per-output loss weights.
        model = Model(inputs=inputs, outputs=[nestnet_output_1,
                                            nestnet_output_2,
                                            nestnet_output_3,
                                            nestnet_output_4, nestnet_output_5])
        model.compile(optimizer=Adam(lr=1e-4),
                      loss=[weighted_bce_dice_loss, weighted_bce_dice_loss, weighted_bce_dice_loss,
                            weighted_bce_dice_loss, weighted_bce_dice_loss],
                      loss_weights=[0.5, 0.5, 0.75, 0.5, 1.0],
                      metrics=[Recall(), Precision()]
                      )
    else:
        # Only the deepest head is trained when deep supervision is off.
        model = Model(inputs=inputs, outputs=[nestnet_output_4])
        model.compile(optimizer=Adam(lr=1e-4), loss=weighted_bce_dice_loss,
                      metrics=[Recall(), Precision()])
    model.summary()
    return model
x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(8, (1, 1), activation='relu', padding='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Conv2D(4, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(2, (1, 1), activation='relu', padding='same')(x)
B = Conv2D(1, (1, 1), activation='relu', padding='same')(x)

encoder = Model(A, B)
encoder.compile(loss='mean_squared_error',
                optimizer=tf.keras.optimizers.Adam(lr=0.1),
                metrics=['accuracy'])
encoder.summary()

# Decoder: mirrors the encoder above, expanding a 7x7x1 bottleneck back
# to a single-channel image via transposed convs and two 2x2 upsamplings.
A0 = tf.keras.layers.Input(shape=(7, 7, 1))
h = Conv2DTranspose(2, (1, 1), activation='relu', padding='same')(A0)
# Grow channels 4 -> 8, then upsample spatially.
for n_filters in (4, 8):
    h = Conv2DTranspose(n_filters, (3, 3), activation='relu', padding='same')(h)
h = UpSampling2D((2, 2))(h)
# Grow channels 16 -> 32 -> 64, then upsample again.
for n_filters in (16, 32, 64):
    h = Conv2DTranspose(n_filters, (3, 3), activation='relu', padding='same')(h)
h = UpSampling2D((2, 2))(h)
# Final widening 128 -> 256 before the tanh output layer.
for n_filters in (128, 256):
    h = Conv2DTranspose(n_filters, (3, 3), activation='relu', padding='same')(h)
B0 = Conv2DTranspose(1, (3, 3), activation='tanh', padding='same')(h)

decoder = Model(A0, B0)
decoder.compile(loss='mean_squared_error',
                optimizer=tf.keras.optimizers.Adam(lr=0.1),
                metrics=['accuracy'])
Beispiel #14
0
# `merged`, the three input tensors, `latent_dim`, `shape`, `filters`,
# `kernel_size`, `strides` and `clip_activation` are all defined earlier
# in the file, above this excerpt.
print(merged)
merged_model = Model([first_input, second_input, third_input],
                     merged,
                     name='merged_model')

# build decoder model
# Latent vector plus 3 conditioning values -> dense -> conv feature map.
latent_inputs = Input(shape=(latent_dim + 3, ), name='z_sampling')
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)

# Halve the filter count once up front and once per deconv stage
# (mirrors the encoder's doubling; `filters` holds the encoder's last width).
filters //= 2
for i in range(3 - 1):
    filters //= 2
    x = Conv2DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        activation='relu',
                        strides=strides,
                        padding='same')(x)
# clip_activation — presumably clamps outputs to [0, 1]; defined above.
outputs = Conv2DTranspose(filters=1,
                          kernel_size=kernel_size,
                          activation=clip_activation(0, 1),
                          padding='same',
                          strides=strides,
                          name='decoder_output')(x)

# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True)
Beispiel #15
0
# Small Sequential conv / deconv stack. No input_shape is given on the
# first layer, so the model is only built on first call — TODO confirm
# that is intended.
model = Sequential()
# NOTE(review): RandomUniform(minval=0.05, maxval=0.05) is a degenerate
# range (constant 0.05 init); minval was probably meant to be -0.05 —
# confirm before changing.
model.add(
    Conv2D(strides=1,
           kernel_size=3,
           filters=12,
           use_bias=True,
           bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,
                                                                maxval=0.05),
           padding="valid",
           activation=tf.nn.relu))
# 'valid' deconv with kernel 3 restores the 2 pixels lost by the conv above.
model.add(
    Conv2DTranspose(strides=1,
                    kernel_size=3,
                    filters=12,
                    use_bias=True,
                    bias_initializer=tf.keras.initializers.RandomUniform(
                        minval=0.05, maxval=0.05),
                    padding="valid",
                    activation=tf.nn.relu))
# LeakyReLU applied on top of an already-ReLU'd output; its negative
# slope therefore never fires — presumably harmless, verify intent.
model.add(LeakyReLU(0.6))
model.add(Dropout(0.4))

model.add(
    Conv2D(strides=1,
           kernel_size=3,
           filters=12,
           use_bias=True,
           bias_initializer=tf.keras.initializers.RandomUniform(minval=0.05,
                                                                maxval=0.05),
           padding="valid",
           activation=tf.nn.relu))
Beispiel #16
0
def build_model(inputs):
    """Assemble a 5-level U-Net graph on top of ``inputs``.

    Contracting path: five stages of two 3x3 'relu' convolutions
    (he_normal init, 'same' padding) with Dropout in between, each
    followed by 2x2 max-pooling.  Expansive path mirrors it with
    2x2-stride transposed convolutions and skip connections from the
    matching contracting stage.

    :param inputs: float input tensor (images)
    :return: single-channel sigmoid output tensor
    """

    def _double_conv(n_filters, drop_rate, tensor):
        # One U-Net stage: Conv -> Dropout -> Conv, all 3x3 / relu /
        # he_normal / 'same' padding.
        y = Conv2D(n_filters, (3, 3),
                   activation='relu',
                   kernel_initializer='he_normal',
                   padding='same')(tensor)
        y = Dropout(drop_rate)(y)
        y = Conv2D(n_filters, (3, 3),
                   activation='relu',
                   kernel_initializer='he_normal',
                   padding='same')(y)
        return y

    # ---------contraction path----------
    c1 = _double_conv(16, 0.1, inputs)
    p1 = MaxPooling2D((2, 2))(c1)

    c2 = _double_conv(32, 0.1, p1)
    p2 = MaxPooling2D((2, 2))(c2)

    c3 = _double_conv(64, 0.2, p2)
    p3 = MaxPooling2D((2, 2))(c3)

    c4 = _double_conv(128, 0.2, p3)
    p4 = MaxPooling2D((2, 2))(c4)

    c5 = _double_conv(256, 0.3, p4)
    p5 = MaxPooling2D((2, 2))(c5)

    # ---------expansive path----------
    u6x = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(p5)
    u6x = concatenate([u6x, c5])
    c6x = _double_conv(256, 0.2, u6x)

    u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c6x)
    u6 = concatenate([u6, c4])
    c6 = _double_conv(128, 0.2, u6)

    u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    c7 = _double_conv(64, 0.2, u7)

    u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    c8 = _double_conv(32, 0.1, u8)

    u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)  # explicit channel axis, as before
    c9 = _double_conv(16, 0.1, u9)

    # 1x1 sigmoid conv -> per-pixel probability map.
    return Conv2D(1, (1, 1), activation='sigmoid')(c9)
    Dense(1, activation='sigmoid')
])

# DCGAN-style setup; `discriminator` and `noise_dim` are defined earlier
# in the file, above this excerpt.  lr/beta_1 follow the common DCGAN
# Adam settings (2e-4, 0.5).
opt = tf.keras.optimizers.Adam(lr=2e-4, beta_1=0.5)

discriminator.compile(loss='binary_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
discriminator.summary()

#Generator

# Maps a noise vector to an image: dense -> 1x1x256 feature map, then a
# stack of transposed convs (only one has strides=2) ending in a
# single-channel sigmoid output.
generator = Sequential([
    Dense(256, activation='relu', input_shape=(noise_dim, )),
    Reshape((1, 1, 256)),
    Conv2DTranspose(256, 5, activation='relu'),
    BatchNormalization(),
    Conv2DTranspose(128, 5, activation='relu'),
    BatchNormalization(),
    Conv2DTranspose(64, 5, strides=2, activation='relu'),
    BatchNormalization(),
    Conv2DTranspose(32, 5, activation='relu'),
    BatchNormalization(),
    Conv2DTranspose(1, 4, activation='sigmoid')
])

generator.summary()

# Smoke-test the generator with a single random latent vector.
noise = np.random.randn(1, noise_dim)
gen_image = generator.predict(noise)[0]
Beispiel #18
0
# FCN-style segmentation head; `c13` and the model input `c0` are
# defined earlier in the file, above this excerpt.  11 = number of
# classes (including background, presumably — verify against labels).
c14 = SpatialDropout2D(0.5)(c13)
c15 = Conv2D(filters=11,
             kernel_size=(1, 1),
             strides=(1, 1),
             use_bias=True,
             data_format="channels_last",
             padding="same",
             activation=None,
             activity_regularizer=None,
             kernel_initializer='glorot_normal',
             name='conv10')(c14)

# 16x upsampling deconv (kernel 2x stride, 'same'), the classic FCN
# bilinear-like upscore layer.
c16 = Conv2DTranspose(filters=11,
                      kernel_size=(32, 32),
                      strides=(16, 16),
                      data_format="channels_last",
                      padding="same",
                      activation=None,
                      kernel_initializer='glorot_normal',
                      name='conv11')(c15)

# Flatten spatial dims to (pixels, classes) so sparse CE can be applied.
c17 = Reshape((-1, 11))(c16)
c18 = Activation('softmax')(c17)

# NOTE(review): variable is named `rms` but holds an Adam optimizer —
# misleading; consider renaming at the next behavioral change.
rms = Adam(lr=1e-5)
mymodel = Model(inputs=c0, outputs=c18)
mymodel.compile(loss='sparse_categorical_crossentropy',
                optimizer=rms,
                metrics=['sparse_categorical_accuracy'])

print('ready to goo!!!')
mymodel.summary()
Beispiel #19
0
def get_model(IMG_HEIGHT,
              IMG_WIDTH,
              IMG_CHANNELS,
              do_compile=False,
              out_activation='sigmoid'):
    """Build a 5-level U-Net with BatchNormalization for segmentation.

    :param IMG_HEIGHT: input image height in pixels
    :param IMG_WIDTH: input image width in pixels
    :param IMG_CHANNELS: number of input channels
    :param do_compile: whether or not to compile the model yet
    :param out_activation: activation of the final 1x1 output conv
    :return: the (optionally compiled) Keras Model
    """
    # FIX: this docstring previously appeared *after* the three statements
    # below, making it a no-op string expression rather than the function
    # docstring; it has been moved to the top (no behavior change).
    # Reset the graph and fix all TF seeds for reproducible builds.
    tf.compat.v1.reset_default_graph()
    tf.random.set_seed(42424242)
    tf.compat.v1.set_random_seed(42424242)

    inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
    #s = Lambda(lambda x: x / 255)(inputs)

    # ---- contracting path: (Conv-BN)x2 with Dropout, then 2x2 max-pool ----
    c1 = Conv2D(64, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(inputs)
    c1 = BatchNormalization()(c1)
    c1 = Dropout(0.1)(c1)
    c1 = Conv2D(64, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(c1)
    c1 = BatchNormalization()(c1)
    p1 = MaxPooling2D((2, 2))(c1)

    c2 = Conv2D(128, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(p1)
    c2 = BatchNormalization()(c2)
    c2 = Dropout(0.1)(c2)
    c2 = Conv2D(128, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(c2)
    c2 = BatchNormalization()(c2)
    p2 = MaxPooling2D((2, 2))(c2)

    c3 = Conv2D(256, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(p2)
    c3 = BatchNormalization()(c3)
    c3 = Dropout(0.2)(c3)
    c3 = Conv2D(256, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(c3)
    c3 = BatchNormalization()(c3)
    p3 = MaxPooling2D((2, 2))(c3)

    c4 = Conv2D(512, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(p3)
    c4 = BatchNormalization()(c4)
    c4 = Dropout(0.2)(c4)
    c4 = Conv2D(512, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(c4)
    c4 = BatchNormalization()(c4)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)

    # Bottleneck (no pooling afterwards).
    c5 = Conv2D(1024, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(p4)
    c5 = BatchNormalization()(c5)
    c5 = Dropout(0.3)(c5)
    c5 = Conv2D(1024, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(c5)
    c5 = BatchNormalization()(c5)

    # ---- expansive path: deconv, concat skip, (Conv-BN)x2 with Dropout ----
    u6 = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(512, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(u6)
    c6 = BatchNormalization()(c6)
    c6 = Dropout(0.2)(c6)
    c6 = Conv2D(512, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(c6)
    c6 = BatchNormalization()(c6)

    u7 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    c7 = Conv2D(256, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(u7)
    c7 = BatchNormalization()(c7)
    c7 = Dropout(0.2)(c7)
    c7 = Conv2D(256, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(c7)
    c7 = BatchNormalization()(c7)

    u8 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    c8 = Conv2D(128, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(u8)
    c8 = BatchNormalization()(c8)
    c8 = Dropout(0.1)(c8)
    c8 = Conv2D(128, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(c8)
    c8 = BatchNormalization()(c8)

    u9 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = Conv2D(64, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(u9)
    c9 = BatchNormalization()(c9)
    c9 = Dropout(0.1)(c9)
    c9 = Conv2D(64, (3, 3),
                activation='relu',
                kernel_initializer='he_normal',
                padding='same')(c9)
    c9 = BatchNormalization()(c9)

    # 1x1 conv -> per-pixel prediction with the requested activation.
    outputs = Conv2D(1, (1, 1), activation=out_activation)(c9)

    model = Model(inputs=[inputs], outputs=[outputs])
    if do_compile:
        model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

    return model
Beispiel #20
0
def PSPNet50(
    input_shape=(512, 512, 3),
    n_labels=20,
    output_stride=16,
    num_blocks=4,
    multigrid=[1, 1, 1],
    levels=[6, 3, 2, 1],
    use_se=True,
    output_mode="softmax",
    upsample_type="deconv",
):
    """Build a PSPNet on a ResNet50-style backbone with dilated stages.

    Args:
        input_shape: (H, W, C) of the input image.
        n_labels: number of output classes.
        output_stride: backbone output stride; must be 8 or 16.
        num_blocks: ResNet stage count; stage 5 is repeated with doubling
            dilation for values above 4.
        multigrid: dilation multipliers forwarded to the blocks.  (List
            default is shared across calls but only read here, never
            mutated.)
        levels: pooling levels for the pyramid pooling module.
        use_se: forward squeeze-and-excitation flag to all blocks.
        output_mode: final activation, default "softmax".
        upsample_type: one of "duc", "bilinear", "deconv".

    Returns:
        Model mapping images to (H*W, n_labels) per-pixel class scores.

    Raises:
        ValueError: on an unsupported ``output_stride`` or
            ``upsample_type`` (the original code silently fell through
            to a later NameError in those cases).
    """

    # Input shape
    img_input = Input(shape=input_shape)

    # BatchNorm axis depends on the backend's image data format.
    if K.image_data_format() == "channels_last":
        bn_axis = 3
    else:
        bn_axis = 1

    # Validate early instead of leaving rate_scale undefined (NameError).
    if output_stride == 8:
        rate_scale = 2
    elif output_stride == 16:
        rate_scale = 1
    else:
        raise ValueError(
            "output_stride must be 8 or 16, got %r" % (output_stride,))

    # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
    x = Conv2D(64, (7, 7), strides=(2, 2), padding="same", name="conv1")(img_input)
    x = BatchNormalization(axis=bn_axis, name="bn_conv1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # Stage 2.
    x = conv_block(
        x, 3, [64, 64, 256], stage=2, block="a", strides=(1, 1), use_se=use_se
    )
    for blk in ("b", "c"):
        x = identity_block(x, 3, [64, 64, 256], stage=2, block=blk, use_se=use_se)

    # Stage 3.
    x = conv_block(x, 3, [128, 128, 512], stage=3, block="a", use_se=use_se)
    for blk in ("b", "c", "d"):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block=blk, use_se=use_se)

    # Stage 4: one conv block plus five dilated identity blocks.
    x = conv_block(
        x,
        3,
        [256, 256, 1024],
        stage=4,
        block="a",
        dilation_rate=1 * rate_scale,
        multigrid=multigrid,
        use_se=use_se,
    )
    for blk in ("b", "c", "d", "e", "f"):
        x = identity_block(
            x,
            3,
            [256, 256, 1024],
            stage=4,
            block=blk,
            dilation_rate=1 * rate_scale,
            multigrid=multigrid,
            use_se=use_se,
        )

    # Stage 5, repeated with doubling dilation when num_blocks > 4.
    # Keeps the original naming quirk: the first repetition uses block
    # suffixes "a"/"b"/"c" (empty numeric part).
    init_rate = 2
    for block in range(4, num_blocks + 1):
        if block == 4:
            block = ""
        x = conv_block(
            x,
            3,
            [512, 512, 2048],
            stage=5,
            block="a%s" % block,
            dilation_rate=init_rate * rate_scale,
            multigrid=multigrid,
            use_se=use_se,
        )
        x = identity_block(
            x,
            3,
            [512, 512, 2048],
            stage=5,
            block="b%s" % block,
            dilation_rate=init_rate * rate_scale,
            multigrid=multigrid,
            use_se=use_se,
        )
        x = identity_block(
            x,
            3,
            [512, 512, 2048],
            stage=5,
            block="c%s" % block,
            dilation_rate=init_rate * rate_scale,
            multigrid=multigrid,
            use_se=use_se,
        )
        init_rate *= 2

    x = pyramid_pooling_module(
        x,
        num_filters=512,
        input_shape=input_shape,
        output_stride=output_stride,
        levels=levels,
    )

    # Decoder head: upsample back to full resolution by the chosen method.
    if upsample_type == "duc":
        x = duc(
            x,
            factor=output_stride,
            output_shape=(input_shape[0], input_shape[1], n_labels),
        )
        out = _conv(
            filters=n_labels,
            kernel_size=(1, 1),
            padding="same",
            block="out_duc_%s" % output_stride,
        )(x)

    elif upsample_type == "bilinear":
        x = _conv(
            filters=n_labels,
            kernel_size=(1, 1),
            padding="same",
            block="out_bilinear_%s" % output_stride,
        )(x)
        out = BilinearUpSampling2D(
            (n_labels, input_shape[0], input_shape[1]), factor=output_stride
        )(x)

    elif upsample_type == "deconv":
        out = Conv2DTranspose(
            filters=n_labels,
            kernel_size=(output_stride * 2, output_stride * 2),
            strides=(output_stride, output_stride),
            padding="same",
            kernel_initializer="he_normal",
            kernel_regularizer=None,
            use_bias=False,
            name="upscore_{}".format("out"),
        )(x)

    else:
        raise ValueError(
            "upsample_type must be 'duc', 'bilinear' or 'deconv', got %r"
            % (upsample_type,))

    # Flatten spatial dims to (pixels, classes) for the final activation.
    out = Reshape(
        (input_shape[0] * input_shape[1], n_labels),
        input_shape=(input_shape[0], input_shape[1], n_labels),
    )(out)
    # default "softmax"
    out = Activation(output_mode)(out)

    model = Model(inputs=img_input, outputs=out)

    return model
Beispiel #21
0
def get_encoder_model(input_shape=(None, 1566, 257)):
  """
    Build a Conv/BiLSTM encoder plus a DeConv decoder over spectrograms.

    @param input_shape: per-example shape (frames axis first may be None);
      input_shape[2] is the number of frequency bins.
      NOTE(review): the original comment described it as
      [BATCH_SIZE, frames, freq bins, 1 channel], which is off by one
      versus the 3-element default — confirm against callers.
    @return: the combined ConvDeConv tf.keras Model (the intermediate
      ``encoder`` Model is built but not returned).
  """

  # 257 bins -> NFFT of 512 (bins = NFFT/2 + 1).
  NFFT = (input_shape[2] - 1) * 2
  SAMPLES = input_shape[1]
  BATCH_SIZE = input_shape[0]

  # Conv2D with 32 kernels and ReLu, 3x3in time
  # A singleton axis is inserted so TimeDistributed applies the Conv1D
  # stack per (dummy) time step.
  input_layer = tf.keras.layers.Input(shape=input_shape, name='encoder_input')
  il_expand_dims = tf.expand_dims(input_layer, axis=1)
  enc_C1D_1 = TimeDistributed(Conv1D(filters=32, kernel_size=3, strides=2, use_bias=True, name='Enc_Conv_1'))(il_expand_dims)
  enc_BN_1 = TimeDistributed(BatchNormalization(name='Enc_Batch_Norm_1'))(enc_C1D_1)
  enc_Act_1 = TimeDistributed(Activation("relu", name='Enc_ReLU_1'))(enc_BN_1)
  enc_C1D_2 = TimeDistributed(Conv1D(filters=32, kernel_size=3, strides=2, use_bias=True, name='Enc_Conv_2'))(enc_Act_1)
  enc_BN_2 = TimeDistributed(BatchNormalization(name='Enc_Batch_Norm_2'))(enc_C1D_2)
  enc_Act_2 = TimeDistributed(Activation("relu", name='Enc_ReLU_2'))(enc_BN_2)

  # ConvLSTM1D -> Try and make this Bidirectional
  # NOTE(review): despite the name, this is a plain channels-first Conv2D
  # collapsing the singleton axis, not an actual ConvLSTM — verify intent.
  # int_input_layer = tf.reshape(tf.expand_dims(enc_Act_2, axis=1), [-1, enc_Act_2.shape[1], enc_Act_2.shape[2], 1], name='Enc_Expand_Dims')
  ConvLSTM1D = Conv2D(1, (1, 3), use_bias=False, name='Enc_ConvLSTM1D', data_format='channels_first')(enc_Act_2)
  print(ConvLSTM1D.shape)
  int_C1DLSTM_out = tf.squeeze(ConvLSTM1D, axis=[1])

  # 3 Stacked Bidirectional LSTMs (two of the three are currently disabled)
  enc_BiLSTM_1 = Bidirectional(LSTM(NFFT // 4, return_sequences=True), name='Enc_BiLSTM_1')(int_C1DLSTM_out)
  # enc_BiLSTM_2 = Bidirectional(LSTM(NFFT // 4, return_sequences=True), name='Enc_BiLSTM_2')(enc_BiLSTM_1)
  # enc_BiLSTM_3 = Bidirectional(LSTM(NFFT // 4, return_sequences=True), name='Enc_BiLSTM_3')(enc_BiLSTM_2)

  # Linear Projection into NFFT/2 and batchnorm and ReLU
  enc_Dense_1 = Dense(NFFT // 8, name='Enc_Linear_Projection')(enc_BiLSTM_1)
  enc_BN_3 = BatchNormalization(name='Enc_Batch_Norm_3')(enc_Dense_1)
  enc_Act_3 = Activation("relu", name='Enc_ReLU_3')(enc_BN_3)

  # Encoder-only sub-model (built for inspection; not returned).
  encoder = tf.keras.Model(inputs=input_layer, outputs=[enc_Act_3], name='Encoder')

  # Begin DeConvolution
  # Re-insert a singleton axis so the deconvs operate on (1, time, feat).
  deConv_input_expand_dims = tf.reshape(tf.expand_dims(enc_Act_3, axis=1), [-1, 1, enc_Act_3.shape[1], enc_Act_3.shape[2]])
  DeC1D_filters = enc_Act_3.shape[2]
  Act = deConv_input_expand_dims
  # Two deconv stages, doubling the filter count each time (stride 2 on
  # the feature axis undoes the encoder's stride-2 convs).
  for i in range(2):
    DeC1D = Conv2DTranspose(
      filters=DeC1D_filters * 2,
      kernel_size=(1,
                   3),
      strides=(1,
               2),
      data_format='channels_last',
      output_padding=(0,
                      1),
      padding='valid',
      name='DeConv1D_{}'.format(i + 1)
    )(Act)
    # DeC1D = TimeDistributed()
    BN = TimeDistributed(BatchNormalization(name='DeConv_Batch_norm_{}'.format(i + 1)))(DeC1D)
    Act = TimeDistributed(Activation("relu", name='DeConv_ReLU_{}'.format(i + 1)))(BN)
    DeC1D_filters *= 2

  # DeConvReshape = Conv2D(filters=1, kernel_size=(1, 1), data_format='channels_first', name='DC1D_Reshape')(Act)
  int_DeConv_out = tf.squeeze(Act, axis=[1])
  # Linear Projection into NFFT/2 and batchnorm and ReLU
  deConv_Dense_1 = Dense(NFFT//2 + 1, name='DeConv_Linear_Projection')(int_DeConv_out)
  # Reuses loop variable `i` (== 1 here), so this layer is named
  # 'DeConv_Batch_Norm_2'.
  deConv_BN_3 = BatchNormalization(name='DeConv_Batch_Norm_{}'.format(i + 1))(deConv_Dense_1)
  # deConv_Act_3 = Activation("tanh", name='DeConv_Tanh')(deConv_BN_3)
  output_layer = deConv_BN_3
  # if input_layer.shape[1] > output_layer.shape[1]:
  #   shape = [input_layer.shape[1] - output_layer.shape[1], output_layer.shape[2]]
  #   zero_padding = tf.zeros(shape, dtype=output_layer.dtype)
  #   output_layer = tf.reshape(tf.concat([output_layer, zero_padding], 1), input_layer.shape)

  ConvDeConvModel = tf.keras.Model(inputs=input_layer, outputs=[output_layer], name='ConvDeConv')
  ConvDeConvModel.summary()

  return ConvDeConvModel
Beispiel #22
0
def PoseMobileNetV2(cfg, alpha):
    """Pose-estimation head on a frozen MobileNetV2 backbone.

    Args:
        cfg: config object providing MODEL.IMAGE_SIZE, MODEL.NUM_JOINTS
            and MODEL.EXTRA (DECONV_WITH_BIAS, NUM_DECONV_FILTERS,
            FINAL_CONV_KERNEL).
        alpha: MobileNetV2 width multiplier.

    Returns:
        A Keras Model mapping images to per-joint heatmaps.
    """
    # Setup base Mobilenet V2 NN (ImageNet weights, classifier removed).
    IMG_SHAPE = (cfg.MODEL.IMAGE_SIZE[0], cfg.MODEL.IMAGE_SIZE[1], 3)
    base_model = MobileNetV2(input_shape=IMG_SHAPE,
                             include_top=False,
                             alpha=alpha,
                             weights='imagenet')

    # Backbone stays frozen; only the deconv head trains.
    base_model.trainable = False

    # Drop final set of layers to reduce output from 1280 to 320
    # Reduces trainable parameters from 9.5m to 5.2m
    net = base_model.get_layer('block_16_project_BN').output

    extra = cfg.MODEL.EXTRA
    deconv_with_bias = extra.DECONV_WITH_BIAS

    kernel_initializer = RandomNormal(stddev=0.001)
    bias_initializer = Constant(0.)

    # Three identical upsampling stages (Conv2DTranspose -> BN -> ReLU),
    # widths taken from NUM_DECONV_FILTERS[0..2].
    for stage in range(3):
        net = Conv2DTranspose(
            filters=extra.NUM_DECONV_FILTERS[stage],
            kernel_size=4,
            strides=2,
            padding='same',
            use_bias=deconv_with_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer)(net)
        net = BatchNormalization(momentum=BN_MOMENTUM)(net)
        net = ReLU()(net)

    # Final heatmap layer: one channel per joint.
    padding = 'same' if extra.FINAL_CONV_KERNEL == 3 else 'valid'
    final_layer = Conv2D(filters=cfg.MODEL.NUM_JOINTS,
                         kernel_size=extra.FINAL_CONV_KERNEL,
                         strides=1,
                         padding=padding,
                         kernel_initializer=kernel_initializer,
                         bias_initializer=bias_initializer)(net)

    # Build complete model
    return Model(inputs=base_model.inputs,
                 outputs=final_layer,
                 name='pose_mobilenetv2_%0.2f' % alpha)
Beispiel #23
0
def runAutoencoder(x_train, x_test, x_train_noisy, x_test_noisy):
    """Train a denoising conv autoencoder and show/save a result grid.

    Expects square grayscale images (MNIST-like); the noisy arrays are
    the corrupted counterparts of the clean ones.  Trains for 30 epochs,
    predicts on the noisy test set, displays a comparison grid and saves
    it to 'corrupted_and_denoised.png'.
    """
    np.random.seed(1337)

    # MNIST dataset
    # (x_train, _), (x_test, _) = mnist.load_data()

    # Reshape to NHWC with a single channel and scale pixels to [0, 1].
    image_size = x_train.shape[1]
    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_test = np.reshape(x_test, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255

    x_train_noisy = np.reshape(x_train_noisy, [-1, image_size, image_size, 1])
    x_test_noisy = np.reshape(x_test_noisy, [-1, image_size, image_size, 1])
    x_train_noisy = x_train_noisy.astype('float32') / 255
    x_test_noisy = x_test_noisy.astype('float32') / 255

    # Generate corrupted MNIST images by adding noise with normal dist
    # centered at 0.5 and std=0.5
    # noise = np.random.normal(loc=0.5, scale=0.5, size=x_train.shape)
    # x_train_noisy = x_train + noise
    # noise = np.random.normal(loc=0.5, scale=0.5, size=x_test.shape)
    # x_test_noisy = x_test + noise

    # x_train_noisy = np.clip(x_train_noisy, 0., 1.)
    # x_test_noisy = np.clip(x_test_noisy, 0., 1.)

    # Network parameters
    input_shape = (image_size, image_size, 1)
    batch_size = 128
    kernel_size = 3
    latent_dim = 16
    # Encoder/Decoder number of CNN layers and filters per layer
    layer_filters = [32, 64]

    # Build the Autoencoder Model
    # First build the Encoder Model
    inputs = Input(shape=input_shape, name='encoder_input')
    x = inputs
    # Stack of Conv2D blocks
    # Notes:
    # 1) Use Batch Normalization before ReLU on deep networks
    # 2) Use MaxPooling2D as alternative to strides>1
    # - faster but not as good as strides>1
    for filters in layer_filters:
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=2,
                   activation='relu',
                   padding='same')(x)

    # Shape info needed to build Decoder Model
    shape = K.int_shape(x)

    # Generate the latent vector
    x = Flatten()(x)
    latent = Dense(latent_dim, name='latent_vector')(x)

    # Instantiate Encoder Model
    encoder = Model(inputs, latent, name='encoder')
    encoder.summary()

    # Build the Decoder Model
    latent_inputs = Input(shape=(latent_dim, ), name='decoder_input')
    x = Dense(shape[1] * shape[2] * shape[3])(latent_inputs)
    x = Reshape((shape[1], shape[2], shape[3]))(x)

    # Stack of Transposed Conv2D blocks
    # Notes:
    # 1) Use Batch Normalization before ReLU on deep networks
    # 2) Use UpSampling2D as alternative to strides>1
    # - faster but not as good as strides>1
    for filters in layer_filters[::-1]:
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            strides=2,
                            activation='relu',
                            padding='same')(x)

    x = Conv2DTranspose(filters=1, kernel_size=kernel_size, padding='same')(x)

    outputs = Activation('sigmoid', name='decoder_output')(x)

    # Instantiate Decoder Model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()

    # Autoencoder = Encoder + Decoder
    # Instantiate Autoencoder Model
    autoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder')
    autoencoder.summary()

    autoencoder.compile(loss='mse', optimizer='adam')

    # Train the autoencoder: noisy input, clean target.
    autoencoder.fit(x_train_noisy,
                    x_train,
                    validation_data=(x_test_noisy, x_test),
                    epochs=30,
                    batch_size=batch_size)

    # Predict the Autoencoder output from corrupted test images
    x_decoded = autoencoder.predict(x_test_noisy)

    # Display a rows x cols grid of original / corrupted / denoised images
    # stacked as three row-bands.
    rows, cols = 10, 30
    num = rows * cols
    imgs = np.concatenate([x_test[:num], x_test_noisy[:num], x_decoded[:num]])
    imgs = imgs.reshape((rows * 3, cols, image_size, image_size))
    imgs = np.vstack(np.split(imgs, rows, axis=1))
    imgs = imgs.reshape((rows * 3, -1, image_size, image_size))
    imgs = np.vstack([np.hstack(i) for i in imgs])
    imgs = (imgs * 255).astype(np.uint8)
    plt.figure()
    plt.axis('off')
    plt.title('Original images: top rows, '
              'Corrupted Input: middle rows, '
              'Denoised Input:  third rows')
    plt.imshow(imgs, interpolation='none', cmap='gray')
    Image.fromarray(imgs).save('corrupted_and_denoised.png')
    plt.show()
Beispiel #24
0
def unet(inputs, n=32):
    """Build a 4-level U-Net (arXiv:1505.04597-style) on top of `inputs`.

    Args:
        inputs: Keras input tensor (H and W must be divisible by 16 so the
            four 2x2 poolings can be undone by the transposed convolutions).
        n: base number of filters; each level down doubles it.

    Returns:
        A Keras `Model` mapping `inputs` to a single-channel sigmoid mask.
    """
    def conv_block(x, filters):
        # Two 3x3 'same' convolutions with ReLU, as in the standard U-Net.
        for _ in range(2):
            x = Conv2D(filters, (3, 3),
                       activation='relu',
                       kernel_initializer='he_normal',
                       padding='same')(x)
        return x

    def down(x, filters):
        # Contracting step: conv pair, then 2x2 pool + light dropout.
        # Returns (pre-pool features for the skip connection, pooled output).
        c = conv_block(x, filters)
        p = MaxPooling2D(pool_size=(2, 2))(c)
        p = Dropout(0.1)(p)
        return c, p

    def up(x, skip, filters):
        # Expanding step: 2x upsample, concat the skip, dropout, conv pair.
        u = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(x)
        u = concatenate([u, skip])
        u = Dropout(0.1)(u)
        return conv_block(u, filters)

    # Normalize raw inputs before the first convolution.
    bn = BatchNormalization()(inputs)

    # Encoder: n -> 2n -> 4n -> 8n filters.
    conv1, pool1 = down(bn, n)
    conv2, pool2 = down(pool1, n * 2)
    conv3, pool3 = down(pool2, n * 4)
    conv4, pool4 = down(pool3, n * 8)

    # Bottleneck at 16n filters (no pooling).
    convm = conv_block(pool4, n * 16)

    # Decoder mirrors the encoder, consuming the skips in reverse order.
    x = up(convm, conv4, n * 8)
    x = up(x, conv3, n * 4)
    x = up(x, conv2, n * 2)
    x = up(x, conv1, n)

    # 1x1 conv to a single-channel probability map.
    output = Conv2D(1, (1, 1), activation='sigmoid')(x)

    return Model(inputs=[inputs], outputs=[output])
Beispiel #25
0
def FCN8_VGG16(mean, stddev, config):
    """Implementation of FCN8 with VGG16 backend based on arXiv:1411.4038 [cs.CV].

    Args:
        mean, stddev: accepted for interface compatibility with sibling model
            builders; they do not influence the constructed graph.
        config: object providing ``input_shape``, ``init_weights``,
            ``l2_constant`` and ``dropout`` attributes.

    Returns:
        A Keras ``Model`` mapping an image to per-pixel 2-class softmax scores.
    """
    inputs = Input(shape=config.input_shape)

    # Pad by 100 px so the 7x7 "fc" convolution below has enough context at
    # the borders, as in the original FCN reference implementation.
    vgg16_input = ZeroPadding2D(padding=(100, 100))(inputs)

    vgg16 = VGG16(include_top=False,
                  weights=config.init_weights,
                  input_tensor=vgg16_input,
                  input_shape=config.input_shape,
                  pooling=None)

    # Tap the three pooling stages used by the FCN8 skip architecture.
    vgg16_block3 = vgg16.get_layer(name="block3_pool").output
    vgg16_block4 = vgg16.get_layer(name="block4_pool").output
    vgg16_block5 = vgg16.get_layer(name="block5_pool").output

    # VGG16's fully connected layers, expressed as convolutions.
    fc1_conv = BatchNormalization()(Conv2D(
        4096, (7, 7),
        activation="relu",
        padding="valid",
        kernel_regularizer=l2(config.l2_constant))(vgg16_block5))
    dropout1 = Dropout(config.dropout)(fc1_conv)
    fc2_conv = BatchNormalization()(Conv2D(4096, (1, 1),
                                           activation="relu",
                                           padding="valid",
                                           kernel_regularizer=l2(
                                               config.l2_constant))(dropout1))
    dropout2 = Dropout(config.dropout)(fc2_conv)

    # 2 = number of classes; score then upsample 2x (stride-2 deconv).
    fcn32_conv = BatchNormalization()(Conv2D(
        2, (1, 1), kernel_regularizer=l2(config.l2_constant))(dropout2))
    fcn32_deconv = BatchNormalization()(Conv2DTranspose(
        2,
        kernel_size=(4, 4),
        strides=(2, 2),
        use_bias=False,
        kernel_regularizer=l2(config.l2_constant))(fcn32_conv))

    # Fuse block4 skip: score, crop to match, add, upsample 2x.
    fcn16_conv = BatchNormalization()(Conv2D(
        2, (1, 1), kernel_regularizer=l2(config.l2_constant))(vgg16_block4))
    fcn16_crop = crop(fcn16_conv, fcn32_deconv)
    fcn16_add = Add()([fcn32_deconv, fcn16_crop])
    fcn16_deconv = BatchNormalization()(Conv2DTranspose(
        2,
        kernel_size=(4, 4),
        strides=(2, 2),
        use_bias=False,
        kernel_regularizer=l2(config.l2_constant))(fcn16_add))

    # Fuse block3 skip, then the final 8x upsampling back to image scale.
    fcn8_conv = BatchNormalization()(Conv2D(
        2, (1, 1), kernel_regularizer=l2(config.l2_constant))(vgg16_block3))
    fcn8_crop = crop(fcn8_conv, fcn16_deconv)
    fcn8_add = Add()([fcn16_deconv, fcn8_crop])
    fcn8_deconv = BatchNormalization()(Conv2DTranspose(
        2,
        kernel_size=(16, 16),
        strides=(8, 8),
        use_bias=False,
        kernel_regularizer=l2(config.l2_constant))(fcn8_add))

    # Crop the upsampled scores back to the input size, then softmax.
    final = crop(fcn8_deconv, inputs)
    final = Activation("softmax")(final)

    return Model(inputs, final)
Beispiel #26
0
def BaseUnet_modeling(kernel_size=4, dropout=0.5):
    """Build a pix2pix-style U-Net generator for 256x256x3 images.

    Eight stride-2 conv stages encode down to 1x1, eight stride-2 transposed
    convs decode back up; skip connections merge encoder features into the
    decoder (mostly by addition, one stage by concatenation). Output is a
    256x256x3 tanh image.
    """
    initializer = tf.random_normal_initializer(0., 0.02)

    def down(x, filters):
        # Strided conv halves spatial size; returns (activated output,
        # pre-activation BatchNorm output used as the skip tensor).
        c = Conv2D(filters=filters, kernel_size=kernel_size, strides=2,
                   padding='same', use_bias=False,
                   kernel_initializer=initializer)(x)
        b = BatchNormalization()(c)
        return LeakyReLU()(b), b

    def up(x, filters):
        # Strided transposed conv doubles spatial size, then BatchNorm.
        c = Conv2DTranspose(filters=filters, kernel_size=kernel_size,
                            strides=2, padding='same', use_bias=False,
                            kernel_initializer=initializer)(x)
        return BatchNormalization()(c)

    inputs = Input(shape=(256, 256, 3))

    # First encoder stage has no BatchNorm; its skip is taken after the
    # activation (unlike the later stages, which use the pre-activation BN).
    x = Conv2D(filters=64, kernel_size=kernel_size, strides=2, padding='same',
               use_bias=False, kernel_initializer=initializer)(inputs)
    x = LeakyReLU()(x)
    skip1 = x

    x, skip2 = down(x, 128)
    x, skip3 = down(x, 256)
    x, skip4 = down(x, 512)
    x, skip5 = down(x, 512)
    x, skip6 = down(x, 512)
    x, skip7 = down(x, 512)
    x, _ = down(x, 512)  # bottleneck; its BN output is not used as a skip

    # Decoder. The first three stages apply dropout; the rest do not.
    y = up(x, 512) + skip7
    y = Dropout(dropout)(y)
    y = ReLU()(y)

    y = up(y, 512)
    # This stage merges by channel concatenation rather than addition.
    y = concatenate([y, skip6])
    y = Dropout(dropout)(y)
    y = ReLU()(y)

    y = up(y, 512) + skip5
    y = Dropout(dropout)(y)
    y = ReLU()(y)

    y = ReLU()(up(y, 512) + skip4)
    y = ReLU()(up(y, 256) + skip3)
    y = ReLU()(up(y, 128) + skip2)
    y = ReLU()(up(y, 64) + skip1)

    # Final upsample back to 256x256, 3 channels, tanh range [-1, 1].
    outputs = Conv2DTranspose(3, 4, strides=2, padding='same',
                              kernel_initializer=initializer,
                              activation='tanh')(y)

    return Model(inputs=inputs, outputs=outputs)
Beispiel #27
0
def default_latent(num_outputs, input_shape):
    # TODO: this auto-encoder should run the standard cnn in encoding and
    #  have corresponding decoder. Also outputs should be reversed with
    #  images at end.
    """Build a CNN with a latent bottleneck, an image-reconstruction head
    and `num_outputs` scalar regression heads.

    Returns a Keras `Model` whose outputs are [reconstructed image,
    n_outputs0, n_outputs1, ...].
    """
    drop = 0.2
    img_in = Input(shape=input_shape, name='img_in')

    # Encoder: (filters, kernel, strides) per stage, dropout after each.
    encoder_specs = [
        (24, (5, 5), (2, 2)),
        (32, (5, 5), (2, 2)),
        (32, (5, 5), (2, 2)),
        (32, (3, 3), (1, 1)),
        (32, (3, 3), (1, 1)),
        (64, (3, 3), (2, 2)),
        (64, (3, 3), (2, 2)),
    ]
    x = img_in
    for idx, (filters, kernel, strides) in enumerate(encoder_specs, start=1):
        x = Convolution2D(filters, kernel,
                          strides=strides,
                          activation='relu',
                          name=f"conv2d_{idx}")(x)
        x = Dropout(drop)(x)
    # Bottleneck layer; no dropout afterwards.
    x = Convolution2D(64, (1, 1),
                      strides=(2, 2),
                      activation='relu',
                      name="latent")(x)

    # Decoder head: stack of stride-2 transposed convs from the latent map.
    y = x
    for idx, filters in enumerate((64, 64, 32, 32, 32), start=1):
        y = Conv2DTranspose(filters=filters,
                            kernel_size=(3, 3),
                            strides=2,
                            name=f"deconv2d_{idx}")(y)
    y = Conv2DTranspose(filters=1,
                        kernel_size=(3, 3),
                        strides=2,
                        name="img_out")(y)

    # Regression head: dense stack on the flattened latent features.
    z = Flatten(name='flattened')(x)
    for units in (256, 100, 50):
        z = Dense(units, activation='relu')(z)
        z = Dropout(drop)(z)

    outputs = [y]
    for i in range(num_outputs):
        outputs.append(
            Dense(1, activation='linear', name=f'n_outputs{i}')(z))

    return Model(inputs=[img_in], outputs=outputs)
Beispiel #28
0
def vae_model(input_shape):
    """Build a small convolutional VAE (encoder + decoder).

    Returns:
        (vae, z_mean, z_log_var): the end-to-end model plus the latent
        statistic tensors needed to assemble the KL term of the loss.
    """
    # Architecture hyper-parameters.
    n_blocks = 2
    kernel = 5
    filters = 16
    latent_dim = 2

    # ----- Encoder -----
    inputs = Input(shape=input_shape, name='encoder_input')
    x = inputs
    for _ in range(n_blocks):
        filters *= 2
        x = Conv2D(filters=filters,
                   kernel_size=kernel,
                   activation='relu',
                   strides=2,
                   padding='same')(x)

    # Remember the conv feature-map shape so the decoder can reshape back.
    shape = K.int_shape(x)

    # Latent statistics Q(z|X).
    flat = Flatten()(x)
    hidden = Dense(16, activation='relu')(flat)
    z_mean = Dense(latent_dim, name='z_mean')(hidden)
    z_log_var = Dense(latent_dim, name='z_log_var')(hidden)

    # Reparameterization trick; "output_shape" is not strictly needed on
    # the TensorFlow backend but is kept for clarity.
    z = Lambda(sampling, output_shape=(latent_dim,),
               name='z')([z_mean, z_log_var])

    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()

    # ----- Decoder -----
    latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
    d = Dense(shape[1] * shape[2] * shape[3],
              activation='relu')(latent_inputs)
    d = Reshape((shape[1], shape[2], shape[3]))(d)

    # Mirror the encoder, halving the filter count each block.
    for _ in range(n_blocks):
        d = Conv2DTranspose(filters=filters,
                            kernel_size=kernel,
                            activation='relu',
                            strides=2,
                            padding='same')(d)
        filters //= 2

    outputs = Conv2DTranspose(filters=1,
                              kernel_size=kernel,
                              activation='sigmoid',
                              padding='same',
                              name='decoder_output')(d)

    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()

    # ----- End-to-end VAE: decode the sampled z (index 2 of encoder out) --
    vae = Model(inputs, decoder(encoder(inputs)[2]), name='vae')

    return vae, z_mean, z_log_var
Beispiel #29
0
    def build_generator(self):
        """Build the generator network and store it as ``self.G``.

        Maps a 100-d noise vector to a 64x64x3 image in [-1, 1] (tanh):
        Dense -> reshape to 32x32x64 -> conv stages -> one stride-2
        transposed conv (32x32 -> 64x64) -> tanh output.

        Side effects: sets ``self.G`` and prints the network summary.
        """
        dropout = 0.4
        init = RandomNormal(stddev=0.02)

        model = Sequential()

        # Project noise to a 32x32x64 feature map.
        model.add(Dense(64 * 32 * 32, kernel_initializer=init))
        model.add(BatchNormalization(momentum=0.9))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(dropout))
        model.add(Reshape((32, 32, 64)))

        # Refine at 32x32.
        model.add(Conv2D(64, kernel_size=5, strides=1, padding='same',
                         kernel_initializer=init))
        model.add(BatchNormalization(momentum=0.9))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(dropout))

        # Upsample 32x32 -> 64x64, then refine.
        model.add(Conv2DTranspose(64, kernel_size=5, strides=2,
                                  padding='same', kernel_initializer=init))
        model.add(Conv2D(64, kernel_size=5, strides=1, padding='same',
                         kernel_initializer=init))
        model.add(BatchNormalization(momentum=0.9))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(dropout))

        # Narrow to 32 channels.
        model.add(Conv2D(32, kernel_size=5, strides=1, padding='same',
                         kernel_initializer=init))
        model.add(BatchNormalization(momentum=0.9))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(dropout))

        # 3-channel output in [-1, 1].
        model.add(Conv2DTranspose(3, kernel_size=5, strides=1,
                                  padding='same', kernel_initializer=init))
        model.add(Activation('tanh'))

        noiseInput = Input(shape=(100, ))
        imageOutput = model(noiseInput)

        self.G = Model(noiseInput, imageOutput)
        print('==============Generator=============')
        model.summary()
        print('====================================')
def generator():
    """DCGAN-style generator: maps a NOISE_DIM-d noise vector to a
    416x416x3 image in [-1, 1] (tanh).

    A dense projection produces a 26x26x512 map, four stride-2 transposed
    convolutions double the resolution each time (26 -> 52 -> 104 -> 208
    -> 416), and a final 'same' convolution emits 3 channels.
    """
    init = TruncatedNormal(mean=0.0, stddev=0.02)

    model = tf.keras.Sequential()
    # ----------------------------------------------------------------
    # Project the noise vector to a 26x26x512 feature map.
    size = 26
    filters = 512
    model.add(Dense(units=size*size*filters,
                    use_bias=False,
                    kernel_initializer=init,
                    input_shape=(NOISE_DIM,)))
    model.add(LeakyReLU(alpha=0.2))
    # ----------------------------------------------------------------

    model.add(Reshape(target_shape=(size, size, filters)))
    # ----------------------------------------------------------------

    # Upsample 26x26 -> 52x52
    model.add(Conv2DTranspose(filters=512,
                              kernel_size=(4, 4),
                              kernel_initializer=init,
                              strides=(2, 2),
                              padding='same',
                              use_bias=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    # ----------------------------------------------------------------

    # Upsample 52x52 -> 104x104 (with dropout on this stage only)
    model.add(Conv2DTranspose(filters=128,
                              kernel_size=(4, 4),
                              kernel_initializer=init,
                              strides=(2, 2),
                              padding='same',
                              use_bias=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(rate=0.4))
    # ----------------------------------------------------------------
    # Upsample 104x104 -> 208x208
    model.add(Conv2DTranspose(filters=128,
                              kernel_size=(4, 4),
                              kernel_initializer=init,
                              strides=(2, 2),
                              padding='same',
                              use_bias=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    # Upsample 208x208 -> 416x416
    model.add(Conv2DTranspose(filters=128,
                              kernel_size=(4, 4),
                              kernel_initializer=init,
                              strides=(2, 2),
                              padding='same',
                              use_bias=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    # ----------------------------------------------------------------
    # Output: 416x416x3 in [-1, 1]
    model.add(Conv2D(filters=3,
                     kernel_size=(4, 4),
                     kernel_initializer=init,
                     activation='tanh',
                     padding='same'))
    # Sanity-check the architecture produces the expected output shape.
    assert model.output_shape == (None, 416, 416, 3)

    return model