Example #1
def keras_dropout(layer, rate):
    from keras import layers

    # Pick the spatial dropout variant that matches the rank of the layer's
    # input; the mapping assumes the shape carries no batch axis, so a rank-3
    # input (e.g. height, width, channels) gets SpatialDropout2D.
    input_dim = len(layer.input.shape)
    if input_dim == 2:
        return layers.SpatialDropout1D(rate)
    elif input_dim == 3:
        return layers.SpatialDropout2D(rate)
    elif input_dim == 4:
        return layers.SpatialDropout3D(rate)
    else:
        return layers.Dropout(rate)
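The helper dispatches purely on the rank of `layer.input.shape`. A minimal sketch of that dispatch, using hypothetical stub objects in place of whatever graph nodes the real caller passes (and assuming, as above, that the shapes carry no batch axis):

from collections import namedtuple

Stub = namedtuple('Stub', ['input'])   # hypothetical stand-in for the real layer object
Node = namedtuple('Node', ['shape'])

image_like = Stub(input=Node(shape=(32, 32, 3)))        # rank 3 -> SpatialDropout2D
volume_like = Stub(input=Node(shape=(16, 32, 32, 3)))   # rank 4 -> SpatialDropout3D

print(type(keras_dropout(image_like, 0.25)).__name__)   # SpatialDropout2D
print(type(keras_dropout(volume_like, 0.25)).__name__)  # SpatialDropout3D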
Example #2
def keras_dropout(layer, rate):
    '''Return the Keras dropout variant matching the rank of ``layer``'s input.
    '''

    from keras import layers

    input_dim = len(layer.input.shape)
    if input_dim == 2:
        return layers.SpatialDropout1D(rate)
    elif input_dim == 3:
        return layers.SpatialDropout2D(rate)
    elif input_dim == 4:
        return layers.SpatialDropout3D(rate)
    else:
        return layers.Dropout(rate)
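What the spatial variants buy over plain Dropout: they zero entire feature maps rather than individual activations. A minimal sketch of SpatialDropout3D on a dummy 5-D tensor, assuming a TensorFlow-backed Keras install:

import numpy as np
from keras import layers

x = np.ones((2, 4, 8, 8, 3), dtype='float32')     # (batch, dim1, dim2, dim3, channels)
out = layers.SpatialDropout3D(0.5)(x, training=True)
# With channels_last data, each dropped channel is zeroed across all of its
# spatial positions instead of element by element.
print(out.shape)  # (2, 4, 8, 8, 3)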
Example #3
 # [Fragment captured mid-function: two parallel 3-D towers built from the same
 #  pattern, Conv3D -> MaxPooling3D -> BatchNormalization -> SpatialDropout3D(0.3)
 #  -> Conv3D(32, (3, 5, 5)) -> MaxPooling3D -> BatchNormalization.
 #  Tower 1 (input x1, layers conv1t_1/conv1_1) pools with (1, 4, 4) and (1, 4, 2);
 #  tower 2 (input x, layers conv1t/conv1) pools with (1, 3, 4) and (1, 4, 3).
 #  BatchNormalization is called with Keras 1 arguments (weights=None,
 #  beta_init='zero', gamma_init='one'); the opening Conv3D calls and the rest of
 #  the function were not captured.]
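The block that repeats throughout the fragment is Conv3D -> MaxPooling3D -> BatchNormalization -> SpatialDropout3D. A minimal sketch of one such stage with current Keras argument names (beta_init/gamma_init are Keras 1 spellings; Keras 2 uses beta_initializer/gamma_initializer); the filter count below is a placeholder, and the input shape is borrowed from D3GenerateModel's default:

from keras import layers

def conv_block(x, filters, kernel_size, pool_size, name, rate=0.3):
    # One tower stage: Conv3D -> MaxPooling3D -> BatchNormalization -> SpatialDropout3D
    x = layers.Conv3D(filters=filters, kernel_size=kernel_size, strides=1,
                      padding='valid', activation='relu', name=name)(x)
    x = layers.MaxPooling3D(pool_size=pool_size, strides=None, padding='valid')(x)
    x = layers.BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99,
                                  beta_initializer='zeros',    # Keras 2 for beta_init='zero'
                                  gamma_initializer='ones')(x)  # Keras 2 for gamma_init='one'
    return layers.SpatialDropout3D(rate)(x)

# e.g. the first stage of tower 1 (filters=16 is a placeholder value):
inp = layers.Input(shape=(16, 144, 144, 1))
stage1 = conv_block(inp, filters=16, kernel_size=(3, 5, 5),
                    pool_size=(1, 4, 4), name='conv1t_1')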
# Assumes `from keras import layers, optimizers` and `from keras.models import Model`,
# plus project-specific helpers (dice_coef, recall_at_thresholds,
# precision_at_thresholds, auc_roc, add_gradient_noise) and a learning rate `lr`
# defined elsewhere in the source module.
def D3GenerateModel(n_filter=16,
                    number_of_class=1,
                    input_shape=(16, 144, 144, 1),
                    activation_last='softmax',
                    metrics=[
                        'mse', 'acc', dice_coef, recall_at_thresholds,
                        precision_at_thresholds, auc_roc
                    ],
                    loss='binary_crossentropy',
                    dropout=0.05,
                    init='glorot_uniform',
                    two_output=False):
    filter_size = n_filter
    input_x = layers.Input(shape=input_shape,
                           name='Input_layer',
                           dtype='float32')
    # Level 1: two same-padded Conv3D + LeakyReLU blocks, then 2x2x2 max-pooling
    x = layers.Conv3D(filters=filter_size,
                      kernel_size=(3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same')(input_x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv3D(filters=filter_size * 2,
                      kernel_size=(3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), padding='same')(x)
    # Level 2: six Conv3D/BatchNorm/LeakyReLU/SpatialDropout3D blocks in sequence;
    # the running output is collected after every second block and the three
    # collected tensors are summed below
    conv_list = []
    counter = 0
    for index, kernel_sizes in enumerate([[(1, 3, 3), (1, 1, 3)],
                                          [(3, 3, 3), (3, 1, 3)],
                                          [(3, 3, 1), (1, 3, 1)]]):
        for kernel_size in (kernel_sizes):
            x = layers.Conv3D(filters=(filter_size * 4),
                              kernel_size=kernel_size,
                              strides=(1, 1, 1),
                              padding='same',
                              name='Conv3D_%s' % (counter))(x)
            x = layers.BatchNormalization()(x)
            x = layers.LeakyReLU()(x)
            x = layers.SpatialDropout3D(dropout)(x)
            counter = counter + 1
        conv_list.append(x)
    x = layers.add(conv_list)
    x = layers.Conv3D(filters=filter_size * 8,
                      kernel_size=(3, 3, 3),
                      strides=(2, 2, 2),
                      padding='same')(x)
    x = layers.Reshape(target_shape=[4, -1, filter_size * 8])(x)
    x = layers.Conv2D(filters=filter_size * 8,
                      kernel_size=(1, 1296),
                      strides=(1, 1296))(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    #x = layers.SpatialDropout2D(dropout)(x)
    #x = layers.Lambda(squash)(x)
    #x = layers.Softmax()(x)
    x = layers.Reshape(target_shape=[filter_size * 8, -1])(x)
    x = layers.Conv1D(filters=2,
                      kernel_size=filter_size * 8,
                      strides=filter_size * 8,
                      activation='softmax')(
                          x)  #, kernel_regularizer=l2(0.001))(x)
    y = layers.Flatten()(x)
    # Classification head: build and compile the model
    model = Model(inputs=input_x, outputs=y)
    #keras.optimizers.SGD(lr=lr, momentum=0.90, decay=decay, nesterov=False)
    opt_noise = add_gradient_noise(optimizers.Adam)
    # `lr` is not defined in this snippet; the original module is assumed to
    # provide it (e.g. as a module-level constant).
    optimizer = opt_noise(
        lr, amsgrad=True)  #, nesterov=True)#opt_noise(lr, amsgrad=True)
    model.compile(optimizer=optimizer, loss=loss,
                  metrics=metrics)  #categorical_crossentropy
    return model
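The two Reshape layers together with the wide Conv2D and Conv1D at the end act as fully connected layers over the pooled features. A minimal sketch tracing only that tail, with the per-sample shape (4, 36, 36, 128) that the strided Conv3D produces for the default n_filter=16 and input_shape=(16, 144, 144, 1); the Input below is a stand-in for that intermediate tensor, not part of the original code:

from keras import layers
from keras.models import Model

t = layers.Input(shape=(4, 36, 36, 128))                   # after the strided Conv3D
y = layers.Reshape(target_shape=[4, -1, 128])(t)           # -> (4, 1296, 128)
y = layers.Conv2D(128, kernel_size=(1, 1296),
                  strides=(1, 1296))(y)                    # -> (4, 1, 128)
y = layers.Reshape(target_shape=[128, -1])(y)              # -> (128, 4)
y = layers.Conv1D(2, kernel_size=128, strides=128,
                  activation='softmax')(y)                 # -> (1, 2)
y = layers.Flatten()(y)                                    # -> (2,)
Model(inputs=t, outputs=y).summary()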