def Convolutional(
    input_shape=(51, 51, 1),
    conv_layers_dimensions=(16, 32, 64, 128),
    dense_layers_dimensions=(32, 32),
    steps_per_pooling=1,
    dropout=(),
    dense_top=True,
    number_of_outputs=3,
    output_activation=None,
    output_kernel_size=3,
    loss=nd_mean_absolute_error,
    convolution_block="convolutional",
    pooling_block="pooling",
    dense_block="dense",
    **kwargs
):
    """Creates and compiles a convolutional neural network.

    A convolutional backbone followed either by a dense top
    (``dense_top=True``) or by a convolutional output layer.

    Parameters
    ----------
    input_shape : tuple of ints, or list of such tuples
        Size of the images to be analyzed. If a list, one input layer is
        created per shape and the inputs are concatenated along the
        channel axis.
    conv_layers_dimensions : tuple of ints
        Number of convolutions in each convolutional layer.
    dense_layers_dimensions : tuple of ints
        Number of units in each dense layer.
    steps_per_pooling : int
        Number of convolutional layers between each pooling step.
    dropout : tuple of float
        Adds a spatial dropout between the convolutional layers; one rate
        is consumed per convolutional step until the tuple is exhausted.
    dense_top : bool
        Whether to finish with a flatten + dense top (True) or a
        convolutional output layer (False).
    number_of_outputs : int
        Number of units (or convolutions) in the output layer.
    output_activation : str or keras activation
        The activation function of the output.
    output_kernel_size : int
        Kernel size of the convolutional output layer; only used when
        ``dense_top`` is False.
    loss : str or keras loss function
        The loss function of the network.
    convolution_block, pooling_block, dense_block : str or callable
        Block specifiers resolved through ``as_block``; each returns a
        layer factory parameterized by its dimension. Can be used to
        further customize the network.

    Returns
    -------
    keras.models.Model
        Deep learning network.
    """

    # Resolve block specifiers into layer factories.
    dense_block = as_block(dense_block)
    convolution_block = as_block(convolution_block)
    pooling_block = as_block(pooling_block)

    ### INITIALIZE DEEP LEARNING NETWORK
    if isinstance(input_shape, list):
        # BUGFIX: the original called layers.Input once with a generator
        # expression as the shape. Create one Input per shape instead, and
        # keep the list so the Model can be built on the true graph inputs.
        network_input = [layers.Input(shape) for shape in input_shape]
        inputs = layers.Concatenate(axis=-1)(network_input)
    else:
        network_input = layers.Input(input_shape)
        inputs = network_input

    layer = inputs

    ### CONVOLUTIONAL BASIS
    for conv_layer_dimension in conv_layers_dimensions:

        for _ in range(steps_per_pooling):
            layer = convolution_block(conv_layer_dimension)(layer)

        if dropout:
            layer = layers.SpatialDropout2D(dropout[0])(layer)
            dropout = dropout[1:]

        # add pooling layer
        layer = pooling_block(conv_layer_dimension)(layer)

    # DENSE TOP
    if dense_top:
        layer = layers.Flatten()(layer)
        for dense_layer_dimension in dense_layers_dimensions:
            layer = dense_block(dense_layer_dimension)(layer)
        output_layer = layers.Dense(
            number_of_outputs, activation=output_activation
        )(layer)
    else:
        output_layer = layers.Conv2D(
            number_of_outputs,
            kernel_size=output_kernel_size,
            activation=output_activation,
            padding="same",
            name="output",
        )(layer)

    # BUGFIX: build the Model from the actual input layer(s) rather than
    # the concatenated tensor, which is not a graph input.
    model = models.Model(network_input, output_layer)

    return KerasModel(model, loss=loss, **kwargs)
def UNet(
    input_shape=(None, None, 1),
    conv_layers_dimensions=(16, 32, 64, 128),
    base_conv_layers_dimensions=(128, 128),
    output_conv_layers_dimensions=(16, 16),
    dropout=(),
    steps_per_pooling=1,
    number_of_outputs=1,
    output_kernel_size=3,
    output_activation=None,
    loss=nd_mean_absolute_error,
    encoder_convolution_block="convolutional",
    base_convolution_block="convolutional",
    decoder_convolution_block="convolutional",
    output_convolution_block="convolutional",
    pooling_block="pooling",
    upsampling_block="deconvolutional",
    **kwargs
):
    """Creates and compiles a U-Net.

    Encoder/decoder architecture with skip connections: each encoder
    stage's output is concatenated with the matching decoder stage.

    Parameters
    ----------
    input_shape : tuple of ints
        Size of the images to be analyzed.
    conv_layers_dimensions : tuple of ints
        Number of convolutions in each convolutional layer during down-
        and upsampling.
    base_conv_layers_dimensions : tuple of ints
        Number of convolutions in each convolutional layer at the base of
        the unet, where the image is the most downsampled.
    output_conv_layers_dimensions : tuple of ints
        Number of convolutions in each convolutional layer after the
        upsampling.
    dropout : tuple of float
        Spatial dropout rates applied after each encoder stage; one rate
        is consumed per stage until the tuple is exhausted.
    steps_per_pooling : int
        Number of convolutional layers between each pooling and upsampling
        step.
    number_of_outputs : int
        Number of convolutions in output layer.
    output_kernel_size : int
        Kernel size of the output convolution.
    output_activation : str or keras activation
        The activation function of the output.
    loss : str or keras loss function
        The loss function of the network.
    encoder_convolution_block, base_convolution_block,
    decoder_convolution_block, output_convolution_block,
    pooling_block, upsampling_block : str or callable
        Block specifiers resolved through ``as_block``; each returns a
        layer factory parameterized by its dimension. Can be used to
        further customize the network.

    Returns
    -------
    keras.models.Model
        Deep learning network.
    """

    # Resolve every block specifier into a layer factory.
    encoder_convolution_block = as_block(encoder_convolution_block)
    base_convolution_block = as_block(base_convolution_block)
    output_convolution_block = as_block(output_convolution_block)
    decoder_convolution_block = as_block(decoder_convolution_block)
    pooling_block = as_block(pooling_block)
    upsampling_block = as_block(upsampling_block)

    unet_input = layers.Input(input_shape)

    skip_tensors = []
    tensor = unet_input

    # Encoder: convolve, remember the skip tensor, optionally drop, pool.
    for dimension in conv_layers_dimensions:
        for _ in range(steps_per_pooling):
            tensor = encoder_convolution_block(dimension)(tensor)

        skip_tensors.append(tensor)

        if dropout:
            tensor = layers.SpatialDropout2D(dropout[0])(tensor)
            dropout = dropout[1:]

        tensor = pooling_block(dimension)(tensor)

    # Bottleneck at maximal downsampling.
    for dimension in base_conv_layers_dimensions:
        tensor = base_convolution_block(dimension)(tensor)

    # Decoder: upsample, fuse with the matching skip, convolve.
    decoder_stages = zip(
        reversed(conv_layers_dimensions), reversed(skip_tensors)
    )
    for dimension, skip in decoder_stages:
        tensor = upsampling_block(dimension)(tensor)
        tensor = layers.Concatenate(axis=-1)([tensor, skip])
        for _ in range(steps_per_pooling):
            tensor = decoder_convolution_block(dimension)(tensor)

    # Refinement convolutions before the output projection.
    for dimension in output_conv_layers_dimensions:
        tensor = output_convolution_block(dimension)(tensor)

    output_layer = layers.Conv2D(
        number_of_outputs,
        kernel_size=output_kernel_size,
        activation=output_activation,
        padding="same",
    )(tensor)

    model = models.Model(unet_input, output_layer)

    return KerasModel(model, loss=loss, **kwargs)
def FullyConnected(
    input_shape,
    dense_layers_dimensions=(32, 32),
    dropout=(),
    flatten_input=True,
    number_of_outputs=3,
    output_activation=None,
    dense_block="dense",
    **kwargs
):
    """Creates and compiles a fully connected neural network.

    A stack of dense layers with an optional flattening of the input,
    finished by a dense output layer.

    Parameters
    ----------
    input_shape : tuple of ints
        Size of the input to be analyzed.
    dense_layers_dimensions : tuple of ints
        Number of units in each dense layer.
    dropout : tuple of float
        Dropout rates applied after each dense layer; one rate is consumed
        per layer until the tuple is exhausted.
    flatten_input : bool
        Whether to add a flattening layer to the input.
    number_of_outputs : int
        Number of units in the output layer.
    output_activation : str or keras activation
        The activation function of the output.
    dense_block : str or callable
        Block specifier resolved through ``as_block``; returns a layer
        factory parameterized by its dimension.
    **kwargs
        Forwarded to ``KerasModel`` (e.g. the loss function).

    Returns
    -------
    keras.models.Model
        Deep learning network.
    """

    dense_block = as_block(dense_block)

    ### INITIALIZE DEEP LEARNING NETWORK
    input_layer = layers.Input(shape=input_shape)

    layer = input_layer
    if flatten_input:
        layer = layers.Flatten()(layer)

    # DENSE TOP
    # Idiom: enumerate instead of zip(range(len(...)), ...).
    for dense_layer_number, dense_layer_dimension in enumerate(
        dense_layers_dimensions
    ):
        # BUGFIX: was `dense_layer_number is 0` — identity comparison on
        # an int is implementation-defined; use equality instead.
        if dense_layer_number == 0 and not flatten_input:
            # The first dense layer needs the input shape when no
            # flattening layer precedes it.
            layer = dense_block(
                dense_layer_dimension, input_shape=input_shape
            )(layer)
        else:
            layer = dense_block(dense_layer_dimension)(layer)

        if dropout:
            layer = layers.Dropout(dropout[0])(layer)
            dropout = dropout[1:]

    # OUTPUT LAYER
    output_layer = layers.Dense(
        number_of_outputs, activation=output_activation
    )(layer)

    model = models.Model(input_layer, output_layer)

    return KerasModel(model, **kwargs)