import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Conv3D, MaxPooling3D, Activation, concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import multi_gpu_model

# The concatenations below use axis=1, so the model expects channels-first data.
K.set_image_data_format("channels_first")

# dice_coefficient, dice_coefficient_loss, get_label_dice_coefficient_function,
# create_convolution_block and get_up_convolution are assumed to be provided by
# the project's metrics and building-block modules.


def get_multiGPUmodel(base_model, n_labels=1, initial_learning_rate=0.00001,
                      include_label_wise_dice_coefficients=False, metrics=dice_coefficient, GPU=1):
    """
    Compiles base_model, replicating it across multiple GPUs with multi_gpu_model
    when GPU > 1. (A usage sketch is given at the bottom of this module.)
    """
    if GPU <= 1:
        print("[INFO] training with 1 GPU...")
        with tf.device("/gpu:0"):  # pin the model to the single available GPU
            model = base_model
    else:
        print("[INFO] training with {} GPUs...".format(GPU))
        # we'll store a copy of the model on *every* GPU and then combine
        # the results from the gradient updates on the CPU
        model = multi_gpu_model(base_model, gpus=GPU)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(n_labels)]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model
def unet_model_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001, deconvolution=False,
                  depth=4, n_base_filters=32, include_label_wise_dice_coefficients=False, metrics=dice_coefficient,
                  batch_normalization=False, activation_name="sigmoid"):
    """
    Builds the 3D UNet Keras model.
    :param metrics: List of metrics to be calculated during model training (default is dice coefficient).
    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
    coefficient for each label as a metric.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory
    required to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_channels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution (deconvolution) instead of up-sampling. This
    increases the amount of memory required during training.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling (contracting path)
    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters*(2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters*(2**layer_depth)*2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling (expanding path)
    for layer_depth in range(depth-2, -1, -1):
        up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,
                                            n_filters=current_layer._keras_shape[1])(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(n_labels)]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model
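
# Minimal usage sketch (not part of the original code): builds a small 3-label
# 3D U-Net and passes it through the multi-GPU helper above. The input shape
# (channels first: 4 modalities, 64**3 voxels) and the GPU count are
# illustrative values only, not values prescribed by this project.
if __name__ == "__main__":
    # x, y and z sizes must be divisible by pool_size**depth (here 2**4 = 16)
    base_model = unet_model_3d(input_shape=(4, 64, 64, 64), n_labels=3,
                               depth=4, n_base_filters=16,
                               include_label_wise_dice_coefficients=True)
    base_model.summary()

    # Pass GPU=2 (or more) on a multi-GPU machine to replicate the model with
    # multi_gpu_model; GPU=1 simply compiles the single-GPU model.
    parallel_model = get_multiGPUmodel(base_model, n_labels=3,
                                       include_label_wise_dice_coefficients=True,
                                       GPU=1)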