Example #1
from keras.models import Model
from keras.layers import Input, MaxPooling3D, SpatialDropout3D, Conv3D, concatenate
from keras.optimizers import SGD, Adam
from keras.regularizers import l2
from keras.utils import multi_gpu_model, plot_model

# Assumes project-local helpers: double_conv, get_up_convolution, dice_coefficient,
# dice_coefficient_loss, get_label_dice_coefficient_function.
def Unet(input_size,
         first_filter_num,
         weight_decay,
         batch_norm=False,
         deconv=False,
         dropout_rate=0.3,
         deeper=False,
         nb_classes=1,
         initial_learning_rate=0.005,
         activate_fun='sigmoid',
         metrics=[dice_coefficient],
         multi_gpu=None,
         use_Adam=False):
    inputs = Input(input_size)
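    # Encoder path: double-conv blocks, each followed by 2x2x2 max pooling;
    # the channel count doubles at every level (see the inline #a-b notes).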
    conv1 = double_conv(inputs,
                        filter_num_1=first_filter_num,
                        filter_num_2=2 * first_filter_num,
                        weight_decay=weight_decay,
                        batch_norm=batch_norm)  #32-64
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)

    conv2 = double_conv(pool1,
                        filter_num_1=2 * first_filter_num,
                        filter_num_2=4 * first_filter_num,
                        weight_decay=weight_decay,
                        batch_norm=batch_norm)  #64-128
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)

    conv3 = double_conv(pool2,
                        filter_num_1=4 * first_filter_num,
                        filter_num_2=8 * first_filter_num,
                        weight_decay=weight_decay,
                        batch_norm=batch_norm)  #128-256
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)

    conv4 = double_conv(pool3,
                        filter_num_1=8 * first_filter_num,
                        filter_num_2=16 * first_filter_num,
                        weight_decay=weight_decay,
                        batch_norm=batch_norm)  #256-512
    if dropout_rate != 0.0:
        conv4 = SpatialDropout3D(dropout_rate)(conv4)

    # optionally deepen the U to a 4th skip connection (increases the parameter count)
    if deeper:
        pool4 = MaxPooling3D(pool_size=(2, 2, 2))(conv4)

        conv0 = double_conv(pool4,
                            filter_num_1=16 * first_filter_num,
                            filter_num_2=32 * first_filter_num,
                            weight_decay=weight_decay,
                            batch_norm=batch_norm)  #512-1024
        if dropout_rate != 0.0:
            conv0 = SpatialDropout3D(dropout_rate)(conv0)

        up0 = get_up_convolution(conv0,
                                 filter_num=32 * first_filter_num,
                                 deconv=deconv,
                                 weight_decay=weight_decay,
                                 batch_norm=batch_norm)  #up:1024
        merge0 = concatenate([conv4, up0], axis=4)
        conv0 = double_conv(merge0,
                            filter_num_1=16 * first_filter_num,
                            filter_num_2=16 * first_filter_num,
                            weight_decay=weight_decay,
                            batch_norm=batch_norm)  #512-512
        conv4 = conv0  # reuse the conv4 name so the up5 path below needs no change

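    # Decoder path: upsample, concatenate with the matching encoder output
    # (skip connection), then refine with a double convolution.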
    up5 = get_up_convolution(conv4,
                             filter_num=16 * first_filter_num,
                             deconv=deconv,
                             weight_decay=weight_decay,
                             batch_norm=batch_norm)  #up:512
    merge5 = concatenate([conv3, up5], axis=4)
    conv5 = double_conv(merge5,
                        filter_num_1=8 * first_filter_num,
                        filter_num_2=8 * first_filter_num,
                        weight_decay=weight_decay,
                        batch_norm=batch_norm)  #256-256

    up6 = get_up_convolution(conv5,
                             filter_num=8 * first_filter_num,
                             deconv=deconv,
                             weight_decay=weight_decay,
                             batch_norm=batch_norm)  #up:256
    merge6 = concatenate([conv2, up6], axis=4)
    conv6 = double_conv(merge6,
                        filter_num_1=4 * first_filter_num,
                        filter_num_2=4 * first_filter_num,
                        weight_decay=weight_decay,
                        batch_norm=batch_norm)  #128-128

    up7 = get_up_convolution(conv6,
                             filter_num=4 * first_filter_num,
                             deconv=deconv,
                             weight_decay=weight_decay,
                             batch_norm=batch_norm)  #up:128
    merge7 = concatenate([conv1, up7], axis=4)
    conv7 = double_conv(merge7,
                        filter_num_1=2 * first_filter_num,
                        filter_num_2=2 * first_filter_num,
                        weight_decay=weight_decay,
                        batch_norm=batch_norm)  #64-64

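    # Output head: a 1x1x1 convolution mapping to nb_classes channels.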
    conv8 = Conv3D(nb_classes,
                   1,
                   activation=activate_fun,
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(conv7)

    model = Model(inputs=inputs, outputs=conv8)
    if multi_gpu:
        model = multi_gpu_model(model, multi_gpu)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if nb_classes > 1:  # note: also update the default nb_class used by the loss in the metrics functions
        label_wise_dice_metrics = [
            get_label_dice_coefficient_function(index)
            for index in range(0, nb_classes)
        ]
        metrics = label_wise_dice_metrics
    if not use_Adam:
        model.compile(optimizer=SGD(initial_learning_rate),
                      loss=dice_coefficient_loss,
                      metrics=metrics)
        print('using the SGD optimizer...')
    else:
        model.compile(optimizer=Adam(lr=initial_learning_rate,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=1e-08),
                      loss=dice_coefficient_loss,
                      metrics=metrics)
        print('using the Adam optimizer...')
#    model.compile(optimizer = SGD(initial_learning_rate), loss = connection_loss, metrics = [dice_coef])
#     la=[layer for layer in model.layers]
#     print(la)
#     model.summary()
    plot_model(model, to_file='Model.png', show_shapes=True)
    return model
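
The snippet above relies on project-local helpers that are not shown (double_conv, get_up_convolution, dice_coefficient, dice_coefficient_loss, get_label_dice_coefficient_function). Below is a minimal sketch of the two layer-building helpers, assuming 3x3x3 convolutions with ReLU activations under Keras 2.x; the names match the calls above, but the exact layer choices are assumptions, not the original implementation:

from keras.layers import Conv3D, Conv3DTranspose, UpSampling3D, BatchNormalization, Activation
from keras.regularizers import l2

def double_conv(x, filter_num_1, filter_num_2, weight_decay, batch_norm=False):
    # two 3x3x3 convolutions; channel count goes filter_num_1 -> filter_num_2
    for n in (filter_num_1, filter_num_2):
        x = Conv3D(n, 3, padding='same', kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return x

def get_up_convolution(x, filter_num, deconv=False, weight_decay=0.0, batch_norm=False):
    # transpose convolution when deconv=True, otherwise upsampling plus a 2x2x2 conv
    if deconv:
        x = Conv3DTranspose(filter_num, 2, strides=(2, 2, 2),
                            kernel_regularizer=l2(weight_decay))(x)
    else:
        x = UpSampling3D(size=(2, 2, 2))(x)
        x = Conv3D(filter_num, 2, padding='same', kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(x)
    if batch_norm:
        x = BatchNormalization()(x)
    return x

A hypothetical call for a channels-last single-channel 64x64x64 volume (values illustrative only): model = Unet(input_size=(64, 64, 64, 1), first_filter_num=16, weight_decay=1e-5).
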
Example #2
from keras.models import Model
from keras.layers import Input, MaxPooling3D, Conv3D, Reshape, Activation, concatenate
from keras.optimizers import Adam

# Assumes project-local helpers: create_convolution_block, get_up_convolution,
# dice_coefficient, get_label_dice_coefficient_function.
def unet_model_3d(n_labels, shape, W, lr=1e-5, pool_size=(2, 2, 2), initial_learning_rate=0.00001,
                  deconvolution=False, depth=3, n_base_filters=16,
                  include_label_wise_dice_coefficients=False, metrics=dice_coefficient,
                  batch_normalization=False, activation_name="sigmoid"):
    """
    Builds the 3D UNet Keras model.f
    :param metrics: List metrics to be calculated during model training (default is dice coefficient).
    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
    coefficient for each label as metric.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
    to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_chanels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution(deconvolution) instead of up-sampling. This
    increases the amount memory required during training.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(shape)
    print('Input shape:', shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling
    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters*(2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters*(2**layer_depth)*2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling
    for layer_depth in range(depth-2, -1, -1):
        up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,
                                            n_filters=current_layer._keras_shape[4])(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=4)
        # channels-last tensors: the filter count lives at index 4 of _keras_shape
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[4],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[4],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    if n_labels > 1:
        final_convolution = Conv3D(n_labels, 1)(current_layer)
        # flatten the spatial dimensions so the softmax runs per voxel over the label channels
        o = Reshape((shape[0] * shape[1] * shape[2], n_labels))(final_convolution)
        activation_name = "softmax"
#        o = (Permute((2, 1)))(o)
    else:
        o = Conv3D(n_labels, (1, 1, 1))(current_layer)
        activation_name = "sigmoid"
    act = Activation(activation_name)(o)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(n_labels)]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics
    if W != '':
        model.load_weights(W)
    # note: the `metrics` list assembled above is not passed to compile();
    # swap it in for ['accuracy'] to report the dice-based metrics
    if n_labels > 1:
#        model.compile(loss=weighted_dice_coefficient_loss, optimizer=Adam(lr=initial_learning_rate), metrics=metrics)
        model.compile(loss="categorical_crossentropy", optimizer=Adam(lr=lr), metrics=['accuracy'])
    else:
        model.compile(loss="binary_crossentropy", optimizer=Adam(lr=lr), metrics=['accuracy'])
#    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    model.summary()
    return model
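
A hypothetical invocation, assuming a channels-last 64x64x64 single-channel volume and no pretrained weights (the argument values are illustrative only, chosen so each spatial dimension is divisible by pool_size**(depth-1) = 4):

model = unet_model_3d(n_labels=3, shape=(64, 64, 64, 1), W='', lr=1e-4, depth=3, n_base_filters=16)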