Example #1
def __create_dense_net(nb_classes,
                       img_input,
                       include_top,
                       depth=40,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_filter=-1,
                       nb_layers_per_block=-1,
                       bottleneck=False,
                       reduction=0.0,
                       dropout_rate=None,
                       weight_decay=1e-4,
                       subsample_initial_block=False,
                       pooling=None,
                       activation='softmax',
                       transition_pooling='avg'):
    ''' Build the DenseNet model

    # Arguments
        nb_classes: number of classes
        img_input: input tensor of shape (channels, depth, rows, cols) or (depth, rows, cols, channels)
        include_top: flag to include the final Dense layer
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. Default -1 indicates initial number
            of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
                Can be -1, a positive integer, or a list.
                If -1, calculates nb_layers_per_block from the depth of the network.
                If a positive integer, a fixed number of layers per dense block.
                If a list, nb_layers is used as provided. Note that the list length
                must equal nb_dense_block.
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note : reduction value is
            inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Changes model type to suit different datasets.
            Should be set to True for ImageNet, and False for CIFAR datasets.
            When set to True, the initial convolution will be strided and
            a MaxPooling3D will be added before the initial dense block.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 5D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        activation: Type of activation at the top layer. Can be one of 'softmax' or
            'sigmoid'. Note that if sigmoid is used, classes must be 1.
        transition_pooling: `avg` for avg pooling (default), `max` for max pooling,
            None for no pooling during scale transition blocks. Please note that this
            default differs from the DenseNetFCN paper in accordance with the DenseNet
            paper.

    # Returns
        a keras tensor

    # Raises
        ValueError: in case of invalid argument for `reduction`
            or `nb_dense_block`
    '''
    with K.name_scope('DenseNet'):
        concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

        if reduction != 0.0:
            if not (reduction <= 1.0 and reduction > 0.0):
                raise ValueError(
                    '`reduction` value must lie between 0.0 and 1.0')

        # layers in each dense block
        if type(nb_layers_per_block) is list or type(
                nb_layers_per_block) is tuple:
            nb_layers = list(nb_layers_per_block)  # Convert tuple to list

            if len(nb_layers) != nb_dense_block:
                raise ValueError(
                    'If `nb_layers_per_block` is a list, its length '
                    'must match `nb_dense_block`.')

            final_nb_layer = nb_layers[-1]
            nb_layers = nb_layers[:-1]
        else:
            if nb_layers_per_block == -1:
                assert (depth - 4) % 3 == 0, ('Depth must be 3 N + 4 '
                                              'if nb_layers_per_block == -1')
                count = int((depth - 4) / 3)

                if bottleneck:
                    count = count // 2

                nb_layers = [count for _ in range(nb_dense_block)]
                final_nb_layer = count
            else:
                final_nb_layer = nb_layers_per_block
                nb_layers = [nb_layers_per_block] * nb_dense_block

        # compute initial nb_filter if -1, else accept the user's initial nb_filter
        if nb_filter <= 0:
            nb_filter = 2 * growth_rate

        # compute compression factor
        compression = 1.0 - reduction

        # Initial convolution
        if subsample_initial_block:
            initial_kernel = (7, 7, 7)
            initial_strides = (2, 2, 2)
        else:
            initial_kernel = (3, 3, 3)
            initial_strides = (1, 1, 1)

        x = Conv3D(nb_filter,
                   initial_kernel,
                   kernel_initializer='he_normal',
                   padding='same',
                   name='initial_Conv3D',
                   strides=initial_strides,
                   use_bias=False,
                   kernel_regularizer=l2(weight_decay))(img_input)

        if subsample_initial_block:
            x = BatchNormalization(axis=concat_axis,
                                   epsilon=1.1e-5,
                                   name='initial_bn')(x)
            x = Activation('relu')(x)
            x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same')(x)

        # Add dense blocks
        for block_idx in range(nb_dense_block - 1):
            x, nb_filter = __dense_block(x,
                                         nb_layers[block_idx],
                                         nb_filter,
                                         growth_rate,
                                         bottleneck=bottleneck,
                                         dropout_rate=dropout_rate,
                                         weight_decay=weight_decay,
                                         block_prefix='dense_%i' % block_idx)
            # add transition_block
            x = __transition_block(x,
                                   nb_filter,
                                   compression=compression,
                                   weight_decay=weight_decay,
                                   block_prefix='tr_%i' % block_idx,
                                   transition_pooling=transition_pooling)
            nb_filter = int(nb_filter * compression)

        # The last dense_block does not have a transition_block
        x, nb_filter = __dense_block(x,
                                     final_nb_layer,
                                     nb_filter,
                                     growth_rate,
                                     bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay,
                                     block_prefix='dense_%i' %
                                     (nb_dense_block - 1))

        x = BatchNormalization(axis=concat_axis,
                               epsilon=1.1e-5,
                               name='final_bn')(x)
        x = Activation('relu')(x)

        if include_top:
            if pooling == 'avg':
                x = GlobalAveragePooling3D()(x)
            elif pooling == 'max':
                x = GlobalMaxPooling3D()(x)
            x = Dense(nb_classes, activation=activation)(x)
        else:
            if pooling == 'avg':
                x = GlobalAveragePooling3D()(x)
            elif pooling == 'max':
                x = GlobalMaxPooling3D()(x)
        return x
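
As a rough illustration of how this builder might be wired into a model (not part of the original source; it assumes the module also defines __dense_block and __transition_block, and that Input and Model are imported from Keras as in the other examples here):

# Hypothetical usage sketch for the 3D DenseNet builder above.
volume_input = Input(shape=(32, 32, 32, 3))   # (depth, rows, cols, channels); illustrative size
dense_output = __create_dense_net(nb_classes=10,
                                  img_input=volume_input,
                                  include_top=True,
                                  depth=40,
                                  nb_dense_block=3,
                                  growth_rate=12,
                                  pooling='avg')
model = Model(volume_input, dense_output, name='densenet_3d')
model.summary()
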
Example #2
def get_unet(nClasses, img_depth=19, img_rows=496, img_cols=512):
    inputs = Input((img_depth, img_rows, img_cols, 1))

    # Block 1
    conv1 = myConv3D(inputs, weights[0], (3, 3, 3))
    conv1 = myConv3D(conv1, weights[0], (3, 3, 3))
    pool1 = MaxPooling3D(pool_size=(1, 2, 2), name='block1_pool')(conv1)
    # print('conv1: ', np.shape(conv1))
    # print('pool1: ', np.shape(pool1))

    # Block 2
    conv2 = myConv3D(pool1, weights[1], (3, 3, 3))
    conv2 = myConv3D(conv2, weights[1], (3, 3, 3))
    pool2 = MaxPooling3D(pool_size=(1, 2, 2), name='block2_pool')(conv2)
    # print('conv2: ', np.shape(conv2))
    # print('pool2: ', np.shape(pool2))

    # Block 3
    conv3 = myConv3D(pool2, weights[2], (3, 3, 3))
    conv3 = myConv3D(conv3, weights[2], (3, 3, 3))
    conv3 = myConv3D(conv3, weights[2], (3, 3, 3))
    pool3 = MaxPooling3D(pool_size=(1, 2, 2), name='block3_pool')(conv3)
    # print('conv3: ', np.shape(conv3))
    # print('pool3: ', np.shape(pool3))

    conv4 = myConv3D(pool3, weights[3], (3, 3, 3))
    conv4 = myConv3D(conv4, weights[3], (3, 3, 3))
    conv4 = myConv3D(conv4, weights[3], (3, 3, 3))
    pool4 = MaxPooling3D(pool_size=(1, 2, 2), name='block4_pool')(conv4)
    # print('conv4: ', np.shape(conv4))
    # print('pool4: ', np.shape(pool4))

    conv5 = myConv3D(pool4, weights[4], (3, 3, 3))
    conv5 = Dropout(0.5)(conv5)
    conv5 = myConv3D(conv5, weights[4], (3, 3, 3))
    conv5 = Dropout(0.5)(conv5)
    # print('conv5: ', np.shape(conv5))

    up6 = UpSampling3D(size=(1, 2, 2), name='up_block4')(conv5)
    up6 = myConv3D(up6, weights[3], (3, 3, 3))
    up6 = concatenate([up6, conv4], axis=4)
    # print('up6: ', np.shape(up6))
    conv6 = myConv3D(up6, weights[3], (3, 3, 3))
    conv6 = myConv3D(conv6, weights[3], (3, 3, 3))
    conv6 = myConv3D(conv6, weights[3], (3, 3, 3))
    # print('conv6: ', np.shape(conv6))

    up7 = UpSampling3D(size=(1, 2, 2), name='up_block3')(conv6)
    up7 = myConv3D(up7, weights[2], (3, 3, 3))
    up7 = concatenate([up7, conv3], axis=4)
    # print('up7: ', np.shape(up7))
    conv7 = myConv3D(up7, weights[2], (3, 3, 3))
    conv7 = myConv3D(conv7, weights[2], (3, 3, 3))
    conv7 = myConv3D(conv7, weights[2], (3, 3, 3))
    # print('conv7: ', np.shape(conv7))

    up8 = UpSampling3D(size=(1, 2, 2), name='up_block2')(conv7)
    up8 = myConv3D(up8, weights[1], (3, 3, 3))
    up8 = concatenate([up8, conv2], axis=4)
    # print('up8: ', np.shape(up8))
    conv8 = myConv3D(up8, weights[1], (3, 3, 3))
    conv8 = myConv3D(conv8, weights[1], (3, 3, 3))
    # print('conv8: ', np.shape(conv8))

    up9 = UpSampling3D(size=(1, 2, 2), name='up_block1')(conv8)
    up9 = myConv3D(up9, weights[0], (3, 3, 3))
    up9 = concatenate([up9, conv1], axis=4)
    # print('up9: ', np.shape(up9))
    conv9 = myConv3D(up9, weights[0], (3, 3, 3))
    # print('conv9: ', np.shape(conv9))
    conv9 = myConv3D(conv9, weights[0], (3, 3, 3))
    print('conv9: ', np.shape(conv9))

    # conv10 = Conv3D(4, (1, 1, 1), name='up_out_conv')(conv9)
    # conv10 = Conv3D(nClasses, (1, 1, 1), activation='softmax', padding='same')(conv9)
    conv10 = Conv3D(nClasses, (1, 1, 1), activation='softmax',
                    name='up_out')(conv9)
    print('conv10: ', np.shape(conv10))

    model = Model(inputs, conv10)

    return model
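
get_unet relies on a module-level `weights` list and a `myConv3D` helper that are not shown above. A minimal sketch of one plausible interpretation (an assumption, not the original author's code), where weights[i] is simply the filter count for level i:

# Hypothetical stand-ins for the helper and filter list used by get_unet above.
weights = [64, 128, 256, 512, 1024]   # assumed filter counts per U-Net level

def myConv3D(x, n_filters, kernel_size):
    # Conv3D -> ReLU with 'same' padding, so spatial shapes are preserved.
    return Conv3D(n_filters, kernel_size, activation='relu', padding='same')(x)
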
Example #3
def get_model_new(Shape,
                  Do2D=False,
                  filters=([64, 2], [128, 2], [256, 2]),
                  dense=([500, 0.5], [100, 0.25], [20, 0]),
                  batchNorm=True):
    # Returns a Keras regression CNN model.
    # Good ref on CNN regression: https://www.pyimagesearch.com/2019/01/28/keras-regression-and-cnns/
    #___________________________________________________________________

    # initialize the input shape and channel dimension, assuming
    # TensorFlow/channels-last ordering
    inputShape = Shape
    chanDim = -1

    # define the model input
    inputs = Input(shape=inputShape)

    # loop over the number of filters
    for (i, f) in enumerate(filters):
        # if this is the first CONV layer then set the input
        # appropriately
        if i == 0:
            x = inputs

        # CONV => RELU => BN => POOL
        for i1 in range(0, f[1]):
            if Do2D:
                x = Conv2D(f[0],
                           3,
                           padding="same",
                           kernel_initializer='he_normal')(x)
            else:
                x = Conv3D(f[0],
                           3,
                           padding="same",
                           kernel_initializer='he_normal')(x)
            x = Activation("relu")(x)

        if batchNorm:
            x = BatchNormalization(axis=chanDim)(x)

        if Do2D:
            x = MaxPooling2D(pool_size=(2, 2))(x)
        else:
            x = MaxPooling3D(pool_size=(2, 2, 2))(x)

    # Flatten the volume...
    x = Flatten()(x)

    # Then do repetitions of FC => RELU => BN => DROPOUT
    if isinstance(dense, tuple):
        for (i, f) in enumerate(dense):
            x = Dense(f[0])(x)
            x = Activation("relu")(x)

            if batchNorm:
                x = BatchNormalization(axis=chanDim)(x)

            if f[1] > 0:
                x = Dropout(f[1])(x)
    else:
        x = Dense(dense[0])(x)
        x = Activation("relu")(x)

        if batchNorm:
            x = BatchNormalization(axis=chanDim)(x)

        if dense[1] > 0:
            x = Dropout(dense[1])(x)

    # Add the regression node
    x = Dense(1, activation="linear")(x)

    # construct the CNN
    model = Model(inputs, x)

    # return the CNN
    return model
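
A hedged usage example for the 3D variant; the input shape, filter/dense settings and the compile step below are illustrative, not taken from the original source:

# Build a small 3D regression CNN and compile it (settings are assumptions).
model = get_model_new(Shape=(32, 32, 32, 1),              # single-channel 3D volume
                      Do2D=False,
                      filters=([32, 2], [64, 2]),          # two conv stages, two convs each
                      dense=([128, 0.5], [32, 0.25], [8, 0]),
                      batchNorm=True)
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.summary()
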
Example #4
    training_gen = indexPenDataGen(partition['train'],
                                   labels,
                                   dataset_path=dataset_path,
                                   **dataGenParams)
    validation_gen = indexPenDataGen(partition['validation'],
                                     labels,
                                     dataset_path=dataset_path,
                                     **dataGenParams)

    # Build the RNN ###############################################
    if not is_use_pre_train:
        classifier = Sequential()
        classifier.add(
            TimeDistributed(Conv3D(filters=32,
                                   kernel_size=(3, 3, 3),
                                   data_format='channels_first',
                                   input_shape=(1, 25, 25, 25),
                                   kernel_regularizer=l2(0.0005),
                                   kernel_initializer='random_uniform'),
                            input_shape=(timesteps, 1, 25, 25, 25)))
        # classifier.add(TimeDistributed(LeakyReLU(alpha=0.1)))
        classifier.add(TimeDistributed(BatchNormalization()))

        classifier.add(
            TimeDistributed(
                Conv3D(filters=32,
                       kernel_size=(3, 3, 3),
                       data_format='channels_first',
                       kernel_regularizer=l2(0.0005))))
        # classifier.add(TimeDistributed(LeakyReLU(alpha=0.1)))
        classifier.add(TimeDistributed(BatchNormalization()))
Example #5
def get_3d_plainnet_34():

  k = 64

  # Input
  inputs = Input((cm.slices_3d, cm.img_rows_3d, cm.img_cols_3d, 1))

  # Convolution 1
  bn1 = BatchNormalization(axis=-1)(inputs)
  act1 = Activation('relu')(bn1)
  conv1 = Conv3D(filters=k, kernel_size=(7, 7, 7), strides=(1, 2, 2), activation='relu', padding='same')(act1)

  # Max pooling 1
  pool1 = MaxPooling3D(pool_size=(3, 3, 3), strides=(1, 2, 2))(conv1)

  # Residual block 1-1
  bn2 = BatchNormalization(axis=-1)(pool1)
  act2 = Activation('relu')(bn2)
  conv2_1 = Conv3D(filters=(k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act2)
  bn3 = BatchNormalization(axis=-1)(conv2_1)
  act3 = Activation('relu')(bn3)
  conv2_1 = Conv3D(filters=(k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act3)

  # Residual block 1-2
  bn4 = BatchNormalization(axis=-1)(conv2_1)
  act4 = Activation('relu')(bn4)
  conv2_2 = Conv3D(filters=(k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act4)
  bn5 = BatchNormalization(axis=-1)(conv2_2)
  act5 = Activation('relu')(bn5)
  conv2_2 = Conv3D(filters=(k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act5)

  # Residual block 1-3
  bn6 = BatchNormalization(axis=-1)(conv2_2)
  act6 = Activation('relu')(bn6)
  conv2_3 = Conv3D(filters=(k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act6)
  bn7 = BatchNormalization(axis=-1)(conv2_3)
  act7 = Activation('relu')(bn7)
  conv2_3 = Conv3D(filters=(k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act7)

  # Residual block 2-1
  bn8 = BatchNormalization(axis=-1)(conv2_3)
  act8 = Activation('relu')(bn8)
  conv3_1 = Conv3D(filters=(2 * k), kernel_size=(3, 3, 3), strides=(1, 2, 2), activation='relu')(act8)
  bn9 = BatchNormalization(axis=-1)(conv3_1)
  act9 = Activation('relu')(bn9)
  conv3_1 = Conv3D(filters=(2 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act9)

  # Residual block 2-2
  bn10 = BatchNormalization(axis=-1)(conv3_1)
  act10 = Activation('relu')(bn10)
  conv3_2 = Conv3D(filters=(2 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act10)
  bn11 = BatchNormalization(axis=-1)(conv3_2)
  act11 = Activation('relu')(bn11)
  conv3_2 = Conv3D(filters=(2 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act11)

  # Residual block 2-3
  bn12 = BatchNormalization(axis=-1)(conv3_2)
  act12 = Activation('relu')(bn12)
  conv3_3 = Conv3D(filters=(2 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act12)
  bn13 = BatchNormalization(axis=-1)(conv3_3)
  act13 = Activation('relu')(bn13)
  conv3_3 = Conv3D(filters=(2 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act13)

  # Residual block 2-4
  bn14 = BatchNormalization(axis=-1)(conv3_3)
  act14 = Activation('relu')(bn14)
  conv3_4 = Conv3D(filters=(2 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act14)
  bn15 = BatchNormalization(axis=-1)(conv3_4)
  act15 = Activation('relu')(bn15)
  conv3_4 = Conv3D(filters=(2 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act15)

  # Residual block 3-1
  bn16 = BatchNormalization(axis=-1)(conv3_4)
  act16 = Activation('relu')(bn16)
  conv4_1 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 2, 2), activation='relu')(act16)
  bn17 = BatchNormalization(axis=-1)(conv4_1)
  act17 = Activation('relu')(bn17)
  conv4_1 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act17)

  # Residual block 3-2
  bn18 = BatchNormalization(axis=-1)(conv4_1)
  act18 = Activation('relu')(bn18)
  conv4_2 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act18)
  bn19 = BatchNormalization(axis=-1)(conv4_2)
  act19 = Activation('relu')(bn19)
  conv4_2 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act19)

  # Residual block 3-3
  bn20 = BatchNormalization(axis=-1)(conv4_2)
  act20 = Activation('relu')(bn20)
  conv4_3 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act20)
  bn21 = BatchNormalization(axis=-1)(conv4_3)
  act21 = Activation('relu')(bn21)
  conv4_3 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act21)

  # Residual block 3-4
  bn22 = BatchNormalization(axis=-1)(conv4_3)
  act22 = Activation('relu')(bn22)
  conv4_4 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act22)
  bn23 = BatchNormalization(axis=-1)(conv4_4)
  act23 = Activation('relu')(bn23)
  conv4_4 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act23)

  # Residual block 3-5
  bn24 = BatchNormalization(axis=-1)(conv4_4)
  act24 = Activation('relu')(bn24)
  conv4_5 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act24)
  bn25 = BatchNormalization(axis=-1)(conv4_5)
  act25 = Activation('relu')(bn25)
  conv4_5 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act25)

  # Residual block 3-6
  bn26 = BatchNormalization(axis=-1)(conv4_5)
  act26 = Activation('relu')(bn26)
  conv4_6 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act26)
  bn27 = BatchNormalization(axis=-1)(conv4_6)
  act27 = Activation('relu')(bn27)
  conv4_6 = Conv3D(filters=(4 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act27)

  # Residual block 4-1
  bn28 = BatchNormalization(axis=-1)(conv4_6)
  act28 = Activation('relu')(bn28)
  conv5_1 = Conv3D(filters=(8 * k), kernel_size=(3, 3, 3), strides=(1, 2, 2), activation='relu')(act28)
  bn29 = BatchNormalization(axis=-1)(conv5_1)
  act29 = Activation('relu')(bn29)
  conv5_1 = Conv3D(filters=(8 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act29)

  # Residual block 4-2
  bn30 = BatchNormalization(axis=-1)(conv5_1)
  act30 = Activation('relu')(bn30)
  conv5_2 = Conv3D(filters=(8 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act30)
  bn31 = BatchNormalization(axis=-1)(conv5_2)
  act31 = Activation('relu')(bn31)
  conv5_2 = Conv3D(filters=(8 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act31)

  # Residual block 4-3
  bn32 = BatchNormalization(axis=-1)(conv5_2)
  act32 = Activation('relu')(bn32)
  conv5_3 = Conv3D(filters=(8 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act32)
  bn33 = BatchNormalization(axis=-1)(conv5_3)
  act33 = Activation('relu')(bn33)
  conv5_3 = Conv3D(filters=(8 * k), kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', padding='same')(act33)

  # Average pooling + FC + sigmoid
  pool2 = AveragePooling3D(pool_size=(3, 3, 3), strides=(1, 2, 2))(conv5_3)
  bn34 = BatchNormalization(axis=-1)(pool2)
  act34 = Activation('relu')(bn34)
  conv6 = Conv3D(filters=3, kernel_size=(1, 1, 1), strides=(1, 1, 1), activation='sigmoid')(act34)

  # Compile
  model = Model(inputs=inputs, outputs=conv6)
  model.compile(optimizer=Adam(lr=1.0e-5), loss="categorical_crossentropy", metrics=["categorical_accuracy"])

  return model
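
Note that despite the "Residual block" comments, the blocks above contain no skip connections (hence the name plain net). For reference, a pre-activation block with an identity shortcut could be sketched as follows (an illustration, not part of the original function; it assumes the input already has n_filters channels):

from keras.layers import Add

def residual_block_3d(x, n_filters):
  # Pre-activation residual block: BN -> ReLU -> Conv3D, twice, plus an identity skip.
  y = BatchNormalization(axis=-1)(x)
  y = Activation('relu')(y)
  y = Conv3D(filters=n_filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same')(y)
  y = BatchNormalization(axis=-1)(y)
  y = Activation('relu')(y)
  y = Conv3D(filters=n_filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same')(y)
  return Add()([x, y])   # identity shortcut; x must already have n_filters channels
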
Example #6
def GUM(img_shape=None):
    if img_shape is None:
        img_shape = [32, 32, 32]
    input_shape = (img_shape[0], img_shape[1], img_shape[2], 1)
    channel_axis = -1

    # feature extractors share the same weights
    shared_conv1 = Conv3D(32, (3, 3, 3), padding='valid')
    shared_conv2 = Conv3D(64, (3, 3, 3), padding='valid')
    shared_conv3 = Conv3D(128, (3, 3, 3), padding='valid')
    shared_conv4 = Conv3D(256, (3, 3, 3), padding='valid')
    shared_conv5 = Conv3D(512, (3, 3, 3), padding='valid')

    # feature extraction for s_b
    main_input = Input(shape=input_shape, name='main_input')

    v_a = shared_conv1(main_input)
    v_a = Activation('relu')(v_a)
    v_a = BatchNormalization(axis=channel_axis)(v_a)
    v_a = SpectralPooling((26, 26, 26), (22, 22, 22))(v_a)

    v_a = shared_conv2(v_a)
    v_a = Activation('relu')(v_a)
    v_a = BatchNormalization(axis=channel_axis)(v_a)
    v_a = SpectralPooling((18, 18, 18), (15, 15, 15))(v_a)

    v_a = shared_conv3(v_a)
    v_a = Activation('relu')(v_a)
    v_a = BatchNormalization(axis=channel_axis)(v_a)
    v_a = SpectralPooling((12, 12, 12), (10, 10, 10))(v_a)

    v_a = shared_conv4(v_a)
    v_a = Activation('relu')(v_a)
    v_a = BatchNormalization(axis=channel_axis)(v_a)
    v_a = SpectralPooling((8, 8, 8), (7, 7, 7))(v_a)

    v_a = shared_conv5(v_a)
    v_a = BatchNormalization(axis=channel_axis)(v_a)
    v_a = FeatureL2Norm()(v_a)

    # feature extraction for s_a
    auxiliary_input = Input(shape=input_shape, name='aux_input')

    v_b = shared_conv1(auxiliary_input)
    v_b = Activation('relu')(v_b)
    v_b = BatchNormalization(axis=channel_axis)(v_b)
    v_b = SpectralPooling((26, 26, 26), (22, 22, 22))(v_b)

    v_b = shared_conv2(v_b)
    v_b = Activation('relu')(v_b)
    v_b = BatchNormalization(axis=channel_axis)(v_b)
    v_b = SpectralPooling((18, 18, 18), (15, 15, 15))(v_b)

    v_b = shared_conv3(v_b)
    v_b = Activation('relu')(v_b)
    v_b = BatchNormalization(axis=channel_axis)(v_b)
    v_b = SpectralPooling((12, 12, 12), (10, 10, 10))(v_b)

    v_b = shared_conv4(v_b)
    v_b = Activation('relu')(v_b)
    v_b = BatchNormalization(axis=channel_axis)(v_b)
    v_b = SpectralPooling((8, 8, 8), (7, 7, 7))(v_b)

    v_b = shared_conv5(v_b)
    v_b = BatchNormalization(axis=channel_axis)(v_b)
    v_b = FeatureL2Norm()(v_b)

    # correlation layer
    c_ab = FeatureCorrelation()([v_a, v_b])
    c_ab = FeatureL2Norm()(c_ab)

    # correlation layer
    c_ba = FeatureCorrelation()([v_b, v_a])
    c_ba = FeatureL2Norm()(c_ba)

    c_ab = Conv3D(1024, (3, 3, 3))(c_ab)
    c_ab = BatchNormalization(axis=channel_axis)(c_ab)
    c_ab = Activation('relu')(c_ab)

    c_ab = Conv3D(1024, (3, 3, 3))(c_ab)
    c_ab = BatchNormalization(axis=channel_axis)(c_ab)
    c_ab = Activation('relu')(c_ab)

    c_ab = Flatten()(c_ab)

    c_ba = FeatureL2Norm()(c_ba)
    c_ba = Conv3D(1024, (3, 3, 3))(c_ba)
    c_ba = BatchNormalization(axis=channel_axis)(c_ba)
    c_ba = Activation('relu')(c_ba)

    c_ba = Conv3D(1024, (3, 3, 3))(c_ba)
    c_ba = BatchNormalization(axis=channel_axis)(c_ba)
    c_ba = Activation('relu')(c_ba)

    c_ba = Flatten()(c_ba)

    c = Concatenate()([c_ab, c_ba])

    c = Dense(2000)(c)
    c = Dense(2000)(c)

    weights = get_initial_weights(2000)

    # estimated 3D rigid body transformation parameters
    c = Dense(6, weights=weights)(c)
    c = Activation('sigmoid')(c)

    mask_1 = Input(shape=input_shape, name='mask_1')
    mask_2 = Input(shape=input_shape, name='mask_2')

    x, mask1, mask2 = RigidTransformation3DImputation(
        (img_shape[0], img_shape[1],
         img_shape[2]))([main_input, auxiliary_input, mask_1, mask_2, c])

    model = Model(inputs=[main_input, auxiliary_input, mask_1, mask_2],
                  outputs=x)

    adam = Adam()
    model.compile(loss=correlation_coefficient_loss, optimizer=adam)

    return model
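
GUM depends on several custom pieces (SpectralPooling, FeatureL2Norm, FeatureCorrelation, RigidTransformation3DImputation, get_initial_weights, correlation_coefficient_loss) that are not shown here. Assuming those are importable and that the imputation layer returns a volume of the input shape, a usage sketch with random data might look like this (illustrative only):

import numpy as np

model = GUM(img_shape=[32, 32, 32])
s_b = np.random.rand(2, 32, 32, 32, 1).astype('float32')   # main_input
s_a = np.random.rand(2, 32, 32, 32, 1).astype('float32')   # aux_input
m_1 = np.ones((2, 32, 32, 32, 1), dtype='float32')         # mask_1
m_2 = np.ones((2, 32, 32, 32, 1), dtype='float32')         # mask_2
target = s_a                                                # assumed target volume
model.fit([s_b, s_a, m_1, m_2], target, epochs=1, batch_size=2)
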
Example #7
def VGG_16():
    model = Sequential()
    model.add(
        Conv3D(64, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               data_format='channels_last',
               input_shape=(255, 255, 3, 4)))
    model.add(
        Conv3D(64, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2'))
    model.add(
        MaxPooling3D(pool_size=(2, 2, 2),
                     strides=(2, 2, 2),
                     name='block1_pool',
                     padding='same'))
    model.add(
        Conv3D(128, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1'))
    model.add(
        Conv3D(128, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2'))

    model.add(
        MaxPooling3D(
            (2, 2, 2),
            strides=(2, 2, 2),  # padding='same',
            name='block2_pool'))

    model.add(
        Conv3D(256, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1'))
    model.add(
        Conv3D(256, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2'))
    model.add(
        Conv3D(256, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3'))
    model.add(
        MaxPooling3D((2, 2, 2),
                     strides=(2, 2, 2),
                     padding='same',
                     name='block3_pool'))
    model.add(
        Conv3D(512, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1'))
    model.add(
        Conv3D(512, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2'))
    model.add(
        Conv3D(512, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3'))
    model.add(
        MaxPooling3D((2, 2, 2),
                     strides=(2, 2, 2),
                     padding='same',
                     name='block4_pool'))
    model.add(
        Conv3D(512, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1'))
    model.add(
        Conv3D(512, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2'))

    model.add(
        Conv3D(512, (3, 3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3'))
    model.add(
        MaxPooling3D((2, 2, 2),
                     strides=(2, 2, 2),
                     padding='same',
                     name='block5_pool'))

    model.add(Flatten(name='flatten'))
    model.add(Dense(4096, activation='relu', name='fc1'))

    model.add(Dense(4096, activation='relu', name='fc2'))

    model.add(Dense(2, activation='sigmoid', name='predictions'))
    print(model.summary())

    return model
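
A possible compile step for the model above (the optimizer and loss are assumptions; binary_crossentropy matches the two sigmoid outputs, but the right choice depends on how the labels are encoded):

model = VGG_16()
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
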
Example #8
x_train_noisy = train_X + noise_factor * np.random.normal(
    loc=0.0, scale=1.0, size=train_X.shape)
x_valid_noisy = valid_X + noise_factor * np.random.normal(
    loc=0.0, scale=1.0, size=valid_X.shape)
x_test_noisy = test_data + noise_factor * np.random.normal(
    loc=0.0, scale=1.0, size=test_data.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_valid_noisy = np.clip(x_valid_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)

############
# Encoding #
############

# Conv1 #
x = Conv3D(filters=16,
           kernel_size=(3, 3, 3),
           activation='relu',
           padding='same')(input_img)
x = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(x)

# Conv2 #
x = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu',
           padding='same')(x)
x = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(x)

# Conv 3 #
x = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu',
           padding='same')(x)
encoded = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(x)

# Note:
# padding is a hyper-parameter: either 'valid' or 'same'.
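
The snippet above shows only the encoding half. A mirrored decoder under the same assumptions (input_img defined earlier, channels-last volumes whose sides are divisible by 8, single-channel data; UpSampling3D and Model imported as in the other examples) might look like this sketch:

############
# Decoding #
############

# Hypothetical decoder mirroring the encoder above (not part of the original snippet).
x = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu',
           padding='same')(encoded)
x = UpSampling3D(size=(2, 2, 2))(x)

x = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu',
           padding='same')(x)
x = UpSampling3D(size=(2, 2, 2))(x)

x = Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu',
           padding='same')(x)
x = UpSampling3D(size=(2, 2, 2))(x)

decoded = Conv3D(filters=1, kernel_size=(3, 3, 3), activation='sigmoid',
                 padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
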
Example #9
# Model and Training
Xtrain = Xtrain.reshape(-1, windowSize, windowSize, K, 1)
print(Xtrain.shape)

ytrain = np_utils.to_categorical(ytrain)
print(ytrain.shape)

S = windowSize
L = K
output_units = 9 if (dataset == 'PU' or dataset == 'PC') else 16

## input layer
input_layer = Input((S, S, L, 1))

## convolutional layers
conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 7),
                     activation='relu')(input_layer)
conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 5),
                     activation='relu')(conv_layer1)
conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3),
                     activation='relu')(conv_layer2)
print(conv_layer3.shape)
conv3d_shape = conv_layer3.shape
conv_layer3 = Reshape((conv3d_shape[1], conv3d_shape[2],
                       conv3d_shape[3] * conv3d_shape[4]))(conv_layer3)
conv_layer4 = Conv2D(filters=64, kernel_size=(3, 3),
                     activation='relu')(conv_layer3)

flatten_layer = Flatten()(conv_layer4)

## fully connected layers
dense_layer1 = Dense(units=256, activation='relu')(flatten_layer)
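
The snippet stops at the first fully connected layer. A hedged sketch of how the head might be completed and trained (dropout rates, layer sizes and optimizer are assumptions; Dropout and Model are assumed to be imported as in the other examples):

dense_layer1 = Dropout(0.4)(dense_layer1)
dense_layer2 = Dense(units=128, activation='relu')(dense_layer1)
dense_layer2 = Dropout(0.4)(dense_layer2)
output_layer = Dense(units=output_units, activation='softmax')(dense_layer2)

model = Model(inputs=input_layer, outputs=output_layer)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(Xtrain, ytrain, batch_size=256, epochs=10)
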
Example #10
def isensee2017_model_3d(input_shape=(1, 128, 128, 128),
                         n_base_filters=16,
                         depth=5,
                         dropout_rate=0.3,
                         n_segmentation_levels=3,
                         n_labels=1,
                         optimizer=Adam,
                         initial_learning_rate=5e-4,
                         loss_function=dice_coefficient_loss,
                         activation_name="sigmoid",
                         **kargs):
    """
    This function builds a model proposed by Isensee et al. for the BRATS 2017 competition:
    https://www.cbica.upenn.edu/sbia/Spyridon.Bakas/MICCAI_BraTS/MICCAI_BraTS_2017_proceedings_shortPapers.pdf

    This network is highly similar to the model proposed by Kayalibay et al. "CNN-based Segmentation of Medical
    Imaging Data", 2017: https://arxiv.org/pdf/1701.03056.pdf


    :param input_shape:
    :param n_base_filters:
    :param depth:
    :param dropout_rate:
    :param n_segmentation_levels:
    :param n_labels:
    :param optimizer:
    :param initial_learning_rate:
    :param loss_function:
    :param activation_name:
    :return:
    """
    inputs = Input(input_shape)

    current_layer = inputs
    level_output_layers = list()
    level_filters = list()
    for level_number in range(depth):
        n_level_filters = (2**level_number) * n_base_filters
        level_filters.append(n_level_filters)

        if current_layer is inputs:
            in_conv = create_convolution_block(current_layer, n_level_filters)
        else:
            in_conv = create_convolution_block(current_layer,
                                               n_level_filters,
                                               strides=(2, 2, 2))

        context_output_layer = create_context_module(in_conv,
                                                     n_level_filters,
                                                     dropout_rate=dropout_rate)

        summation_layer = Add()([in_conv, context_output_layer])
        level_output_layers.append(summation_layer)
        current_layer = summation_layer

    segmentation_layers = list()
    for level_number in range(depth - 2, -1, -1):
        up_sampling = create_up_sampling_module(current_layer,
                                                level_filters[level_number])
        concatenation_layer = concatenate(
            [level_output_layers[level_number], up_sampling], axis=1)
        localization_output = create_localization_module(
            concatenation_layer, level_filters[level_number])
        current_layer = localization_output
        if level_number < n_segmentation_levels:
            segmentation_layers.insert(
                0,
                Conv3D(n_labels, (1, 1, 1))(current_layer))

    output_layer = None
    for level_number in reversed(range(n_segmentation_levels)):
        segmentation_layer = segmentation_layers[level_number]
        if output_layer is None:
            output_layer = segmentation_layer
        else:
            output_layer = Add()([output_layer, segmentation_layer])

        if level_number > 0:
            output_layer = UpSampling3D(size=(2, 2, 2))(output_layer)

    activation_block = Activation(activation_name)(output_layer)

    metrics = ['binary_accuracy', vod_coefficient]
    if loss_function != dice_coefficient_loss:
        metrics += [dice_coefficient]

    model = Model(inputs=inputs,
                  outputs=activation_block,
                  name='isensee2017_3d_Model_' + str(np.random.random()))
    model.compile(optimizer=optimizer(lr=initial_learning_rate),
                  loss=loss_function,
                  metrics=metrics)
    return model
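
The concatenate(axis=1) calls above imply channels-first data. A usage sketch under that assumption (create_convolution_block, create_context_module, create_up_sampling_module, create_localization_module and the loss/metric helpers are assumed to be defined alongside this function):

model = isensee2017_model_3d(input_shape=(1, 128, 128, 128),
                             n_base_filters=16,
                             depth=5,
                             n_labels=1)
model.summary()
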
Example #11
    def generate_model(self,
                       input_shape,
                       dropout=False,
                       batchnormalization=False,
                       load_weight_path=None):
        inputs = Input(shape=input_shape, name="input_1")
        x = inputs
        constraint = None
        if dropout:
            x = Dropout(rate=0.1)(x)
            constraint = maxnorm(4)
        x = AveragePooling3D(pool_size=(2, 1, 1),
                             strides=(2, 1, 1),
                             padding="same")(x)
        x = Conv3D(64, (3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   padding='same',
                   kernel_constraint=constraint,
                   name='conv1')(x)
        if batchnormalization:
            x = BatchNormalization()(x)
        x = MaxPooling3D(pool_size=(1, 2, 2),
                         strides=(1, 2, 2),
                         padding='valid',
                         name='pool1')(x)
        if dropout:
            x = Dropout(rate=0.25)(x)

        # 2nd layer group
        x = Conv3D(128, (3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   padding='same',
                   kernel_constraint=constraint,
                   name='conv2')(x)
        if batchnormalization:
            x = BatchNormalization()(x)
        x = MaxPooling3D(pool_size=(2, 2, 2),
                         strides=(2, 2, 2),
                         padding='valid',
                         name='pool2')(x)
        if dropout:
            x = Dropout(rate=0.25)(x)

        # 3rd layer group
        x = Conv3D(256, (3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   padding='same',
                   kernel_constraint=constraint,
                   name='conv3a')(x)
        if batchnormalization:
            x = BatchNormalization()(x)
        x = Conv3D(256, (3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   padding='same',
                   kernel_constraint=constraint,
                   name='conv3b')(x)
        if batchnormalization:
            x = BatchNormalization()(x)
        x = MaxPooling3D(pool_size=(2, 2, 2),
                         strides=(2, 2, 2),
                         padding='valid',
                         name='pool3')(x)
        if dropout:
            x = Dropout(rate=0.5)(x)

        # 4th layer group
        x = Conv3D(512, (3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   padding='same',
                   kernel_constraint=constraint,
                   name='conv4a')(x)
        if batchnormalization:
            x = BatchNormalization()(x)
        x = Conv3D(512, (3, 3, 3),
                   strides=(1, 1, 1),
                   activation='relu',
                   padding='same',
                   kernel_constraint=constraint,
                   name='conv4b')(x)
        if batchnormalization:
            x = BatchNormalization()(x)
        x = MaxPooling3D(pool_size=(2, 2, 2),
                         strides=(2, 2, 2),
                         padding='valid',
                         name='pool4')(x)
        if dropout:
            x = Dropout(rate=0.5)(x)

        last64 = Conv3D(64, (2, 2, 2), activation="relu", name="last_64")(x)
        out_class = Conv3D(1, (1, 1, 1),
                           activation="sigmoid",
                           kernel_constraint=constraint,
                           name="out_class_last")(last64)
        out_class = Flatten(name="out_class")(out_class)

        out_malignancy = Conv3D(1, (1, 1, 1),
                                activation=None,
                                kernel_constraint=constraint,
                                name="out_malignancy_last")(last64)
        out_malignancy = Flatten(name="out_malignancy")(out_malignancy)

        model = Model(inputs=inputs, outputs=[out_class, out_malignancy])
        if load_weight_path is not None:
            model.load_weights(load_weight_path, by_name=False)

        MOMENTUM = 0.9
        NESTEROV = True
        if dropout:
            MOMENTUM = 0.95
            NESTEROV = False

        model.compile(optimizer=SGD(lr=self.LEARN_RATE,
                                    momentum=MOMENTUM,
                                    nesterov=NESTEROV),
                      loss={
                          "out_class": "binary_crossentropy",
                          "out_malignancy": mean_absolute_error
                      },
                      metrics={
                          "out_class": [binary_accuracy, binary_crossentropy],
                          "out_malignancy": mean_absolute_error
                      })

        self.model_summary(model)
        self.model = model
Example #12
def detection_unet(filters, kernel_size, weights, learning_rate):

    # Input
    main_input = Input(shape=(None, None, None, 1))

    # 64 x 64 x 80
    step_down_1 = Conv3D(filters,
                         kernel_size=kernel_size,
                         strides=(1, 1, 1),
                         padding="same")(main_input)
    step_down_1 = BatchNormalization(momentum=0.1)(step_down_1)
    step_down_1 = Activation("relu")(step_down_1)
    step_down_1 = Conv3D(filters,
                         kernel_size=kernel_size,
                         strides=(1, 1, 1),
                         padding="same")(step_down_1)
    step_down_1 = BatchNormalization(momentum=0.1)(step_down_1)
    step_down_1 = Activation("relu")(step_down_1)

    # 32 x 32 x 40
    step_down_2 = MaxPooling3D(pool_size=(2, 2, 2),
                               strides=(2, 2, 2))(step_down_1)
    step_down_2 = Conv3D(2 * filters,
                         kernel_size=kernel_size,
                         strides=(1, 1, 1),
                         padding="same")(step_down_2)
    step_down_2 = BatchNormalization(momentum=0.1)(step_down_2)
    step_down_2 = Activation("relu")(step_down_2)
    step_down_2 = Conv3D(2 * filters,
                         kernel_size=kernel_size,
                         strides=(1, 1, 1),
                         padding="same")(step_down_2)
    step_down_2 = BatchNormalization(momentum=0.1)(step_down_2)
    step_down_2 = Activation("relu")(step_down_2)

    # 16 x 16 x 20
    step_down_3 = MaxPooling3D(pool_size=(2, 2, 2),
                               strides=(2, 2, 2))(step_down_2)
    step_down_3 = Conv3D(4 * filters,
                         kernel_size=kernel_size,
                         strides=(1, 1, 1),
                         padding="same")(step_down_3)
    step_down_3 = BatchNormalization(momentum=0.1)(step_down_3)
    step_down_3 = Activation("relu")(step_down_3)
    step_down_3 = Conv3D(4 * filters,
                         kernel_size=kernel_size,
                         strides=(1, 1, 1),
                         padding="same")(step_down_3)
    step_down_3 = BatchNormalization(momentum=0.1)(step_down_3)
    step_down_3 = Activation("relu")(step_down_3)

    # 8 x 8 x 10
    step_down_4 = MaxPooling3D(pool_size=(2, 2, 2),
                               strides=(2, 2, 2))(step_down_3)
    step_down_4 = Conv3D(8 * filters,
                         kernel_size=kernel_size,
                         strides=(1, 1, 1),
                         padding="same")(step_down_4)
    step_down_4 = BatchNormalization(momentum=0.1)(step_down_4)
    step_down_4 = Activation("relu")(step_down_4)
    step_down_4 = Conv3D(8 * filters,
                         kernel_size=kernel_size,
                         strides=(1, 1, 1),
                         padding="same")(step_down_4)
    step_down_4 = BatchNormalization(momentum=0.1)(step_down_4)
    step_down_4 = Activation("relu")(step_down_4)

    # 4 x 4 x 5
    floor = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(step_down_4)
    floor = Conv3D(16 * filters,
                   kernel_size=kernel_size,
                   strides=(1, 1, 1),
                   padding="same")(floor)
    floor = BatchNormalization(momentum=0.1)(floor)
    floor = Activation("relu")(floor)
    floor = Conv3D(16 * filters,
                   kernel_size=kernel_size,
                   strides=(1, 1, 1),
                   padding="same")(floor)
    floor = BatchNormalization(momentum=0.1)(floor)
    floor = Activation("relu")(floor)

    # 8 x 8 x 10
    step_up_4 = UpSampling3D(size=(2, 2, 2))(floor)
    step_up_4 = concatenate([step_down_4, step_up_4], axis=-1)
    step_up_4 = Conv3D(8 * filters,
                       kernel_size=kernel_size,
                       strides=(1, 1, 1),
                       padding="same")(step_up_4)
    step_up_4 = BatchNormalization(momentum=0.1)(step_up_4)
    step_up_4 = Activation("relu")(step_up_4)
    step_up_4 = Conv3D(8 * filters,
                       kernel_size=kernel_size,
                       strides=(1, 1, 1),
                       padding="same")(step_up_4)
    step_up_4 = BatchNormalization(momentum=0.1)(step_up_4)
    step_up_4 = Activation("relu")(step_up_4)

    # 16 x 16 x 20
    step_up_3 = UpSampling3D(size=(2, 2, 2))(step_up_4)
    step_up_3 = concatenate([step_down_3, step_up_3], axis=-1)
    step_up_3 = Conv3D(4 * filters,
                       kernel_size=kernel_size,
                       strides=(1, 1, 1),
                       padding="same")(step_up_3)
    step_up_3 = BatchNormalization(momentum=0.1)(step_up_3)
    step_up_3 = Activation("relu")(step_up_3)
    step_up_3 = Conv3D(4 * filters,
                       kernel_size=kernel_size,
                       strides=(1, 1, 1),
                       padding="same")(step_up_3)
    step_up_3 = BatchNormalization(momentum=0.1)(step_up_3)
    step_up_3 = Activation("relu")(step_up_3)

    # 32 x 32 x 40
    step_up_2 = UpSampling3D(size=(2, 2, 2))(step_up_3)
    step_up_2 = concatenate([step_down_2, step_up_2], axis=-1)
    step_up_2 = Conv3D(2 * filters,
                       kernel_size=kernel_size,
                       strides=(1, 1, 1),
                       padding="same")(step_up_2)
    step_up_2 = BatchNormalization(momentum=0.1)(step_up_2)
    step_up_2 = Activation("relu")(step_up_2)
    step_up_2 = Conv3D(2 * filters,
                       kernel_size=kernel_size,
                       strides=(1, 1, 1),
                       padding="same")(step_up_2)
    step_up_2 = BatchNormalization(momentum=0.1)(step_up_2)
    step_up_2 = Activation("relu")(step_up_2)

    # 64 x 64 x 80
    step_up_1 = UpSampling3D(size=(2, 2, 2))(step_up_2)
    step_up_1 = concatenate([step_down_1, step_up_1], axis=-1)
    step_up_1 = Conv3D(filters,
                       kernel_size=kernel_size,
                       strides=(1, 1, 1),
                       padding="same")(step_up_1)
    step_up_1 = BatchNormalization(momentum=0.1)(step_up_1)
    step_up_1 = Activation("relu")(step_up_1)
    step_up_1 = Conv3D(filters,
                       kernel_size=kernel_size,
                       strides=(1, 1, 1),
                       padding="same")(step_up_1)
    step_up_1 = BatchNormalization(momentum=0.1)(step_up_1)
    step_up_1 = Activation("relu")(step_up_1)

    main_output = Conv3D(2,
                         kernel_size=kernel_size,
                         strides=(1, 1, 1),
                         padding="same",
                         activation='softmax')(step_up_1)

    model = Model(inputs=main_input, outputs=main_output)

    # define optimizer
    adam = optimizers.Adam(lr=learning_rate,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=None,
                           decay=1e-6)

    # define loss function
    loss_function = weighted_categorical_crossentropy(weights)

    # define metrics
    dsc = dice_coef_label(label=1)
    recall_background = km.binary_recall(label=0)
    recall_vertebrae = km.binary_recall(label=1)
    cat_accuracy = metrics.categorical_accuracy

    model.compile(
        optimizer=adam,
        loss=loss_function,
        metrics=[dsc, recall_background, recall_vertebrae, cat_accuracy])

    return model
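
An illustrative call; the class weights and learning rate are assumptions, and weighted_categorical_crossentropy, dice_coef_label and km.binary_recall must be importable for the compile step inside the function to work:

model = detection_unet(filters=16,
                       kernel_size=(3, 3, 3),
                       weights=[0.1, 0.9],    # assumed background/vertebrae class weights
                       learning_rate=1e-4)
model.summary()
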
Example #13
def create_denseunet_model_3d(input_image_size,
                              number_of_outputs=1,
                              number_of_layers_per_dense_block=(6, 12, 36, 24),
                              growth_rate=48,
                              initial_number_of_filters=96,
                              reduction_rate=0.0,
                              depth=7,
                              dropout_rate=0.0,
                              weight_decay=1e-4,
                              mode='classification'
                             ):
    """
    3-D implementation of the dense U-net deep learning architecture.

    Creates a keras model of the dense U-net deep learning architecture for
    image segmentation

    X. Li, H. Chen, X. Qi, Q. Dou, C.-W. Fu, P.-A. Heng. H-DenseUNet: Hybrid
    Densely Connected UNet for Liver and Tumor Segmentation from CT Volumes

    available here:

            https://arxiv.org/pdf/1709.07330.pdf

    with the author's implementation available at:

            https://github.com/xmengli999/H-DenseUNet

    Arguments
    ---------
    input_image_size : tuple of length 4
        Used for specifying the input tensor shape.  The
        shape (or dimension) of that tensor is the image dimensions followed by
        the number of channels (e.g., red, green, and blue).  The batch size
        (i.e., number of training images) is not specified a priori.

    number_of_outputs : integer
        Meaning depends on the mode.  For 'classification' this is the number of
        segmentation labels.  For 'regression' this is the number of outputs.

    number_of_layers_per_dense_block : tuple
        Number of layers in each dense block.

    growth_rate : integer
        Number of filters to add for each dense block layer (default = 48).

    initial_number_of_filters : integer
        Number of filters at the beginning (default = 96).

    reduction_rate : scalar
        Reduction factor of transition blocks.

    depth :  integer
        Number of layers---must be equal to 3 * N + 4 where N is an integer
        (default = 7).

    dropout_rate : scalar
        Float between 0 and 1 to use between dense layers.

    weight_decay :  scalar
        Weighting parameter for L2 regularization of the kernel weights of the
        convolution layers (default = 1e-4).

    Returns
    -------
    Keras model
        A 3-D Keras model defining the network.

    Example
    -------
    >>> model = create_denseunet_model_3d((128, 128, 128, 1))
    >>> model.summary()
    """

    concatenation_axis=1
    if K.image_data_format() == 'channels_last':
        concatenation_axis=-1

    def convolution_factory_3d(model, number_of_filters,
                               kernel_size=(3, 3, 3),
                               dropout_rate=0.0, weight_decay=1e-4):

        # Bottleneck layer

        model = BatchNormalization(axis=concatenation_axis)(model)
        model = Scale(axis=concatenation_axis)(model)
        model = Activation('relu')(model)
        model = Conv3D(filters=(number_of_filters * 4),
                       kernel_size=(1, 1, 1),
                       use_bias=False)(model)

        if dropout_rate > 0.0:
            model = Dropout(rate=dropout_rate)(model)

        # Convolution layer

        model = BatchNormalization(axis=concatenation_axis,
                                   epsilon=1.1e-5)(model)
        model = Scale(axis=concatenation_axis)(model)
        model = Activation(activation='relu')(model)
        model = ZeroPadding3D(padding=(1, 1, 1))(model)
        model = Conv3D(filters=number_of_filters,
                       kernel_size=kernel_size,
                       use_bias=False)(model)

        if dropout_rate > 0.0:
            model = Dropout(rate=dropout_rate)(model)

        return(model)

    def transition_3d(model, number_of_filters, compression_rate=1.0,
                      dropout_rate=0.0, weight_decay=1e-4):

        model = BatchNormalization(axis=concatenation_axis,
                                   gamma_regularizer=regularizers.l2(weight_decay),
                                   beta_regularizer=regularizers.l2(weight_decay))(model)
        model = Scale(axis=concatenation_axis)(model)
        model = Activation(activation='relu')(model)
        model = Conv3D(filters=int(number_of_filters * compression_rate),
                       kernel_size=(1, 1, 1),
                       use_bias=False)(model)

        if dropout_rate > 0.0:
            model = Dropout(rate=dropout_rate)(model)

        model = AveragePooling3D(pool_size=(2, 2, 2),
                                 strides=(2, 2, 2))(model)
        return(model)

    def create_dense_blocks_3d(model, number_of_filters, depth, growth_rate,
                               dropout_rate=0.0, weight_decay=1e-4):

        dense_block_layers = [model]
        for i in range(depth):
            model = convolution_factory_3d(model, number_of_filters=growth_rate,
                                           kernel_size=(3, 3, 3), dropout_rate=dropout_rate,
                                           weight_decay=weight_decay)
            dense_block_layers.append(model)
            model = Concatenate(axis=concatenation_axis)(dense_block_layers)
            number_of_filters += growth_rate

        return(model, number_of_filters)


    if ((depth - 4) % 3) != 0:
        raise ValueError('Depth must be equal to 3*N+4 where N is an integer.')

    number_of_layers = int((depth - 4) / 3)
    number_of_dense_blocks = len(number_of_layers_per_dense_block)

    inputs = Input(shape = input_image_size)

    box_layers = []
    box_count = 1

    # Initial convolution

    outputs = ZeroPadding3D(padding=(3, 3, 3))(inputs)
    outputs = Conv3D(filters=initial_number_of_filters,
                     kernel_size=(7, 7, 7),
                     strides=(2, 2, 2),
                     use_bias=False)(outputs)
    outputs = BatchNormalization(epsilon=1.1e-5,
                                 axis=concatenation_axis)(outputs)
    outputs = Scale(axis=concatenation_axis)(outputs)
    outputs = Activation(activation='relu')(outputs)

    box_layers.append(outputs)
    box_count += 1

    outputs = ZeroPadding3D(padding=(1, 1, 1))(outputs)
    outputs = MaxPooling3D(pool_size=(3, 3, 3),
                           strides=(2, 2, 2))(outputs)

    # Add dense blocks

    nFilters = initial_number_of_filters

    for i in range(number_of_dense_blocks - 1):
        outputs, number_of_filters = \
           create_dense_blocks_3d(outputs, number_of_filters=nFilters,
                                  depth=number_of_layers_per_dense_block[i],
                                  growth_rate=growth_rate, dropout_rate=dropout_rate,
                                  weight_decay=weight_decay)
        box_layers.append(outputs)
        box_count += 1

        outputs = transition_3d(outputs, number_of_filters=number_of_filters,
                                compression_rate=(1.0 - reduction_rate),
                                dropout_rate=dropout_rate, weight_decay=weight_decay)
        nFilters = int(number_of_filters * (1 - reduction_rate))


    outputs, nFilters = \
       create_dense_blocks_3d(outputs, number_of_filters=nFilters,
                              depth=number_of_layers_per_dense_block[number_of_dense_blocks - 1],
                              growth_rate=growth_rate, dropout_rate=dropout_rate,
                              weight_decay=weight_decay)

    outputs = BatchNormalization(epsilon=1.1e-5,
                                 axis=concatenation_axis)(outputs)
    outputs = Scale(axis=concatenation_axis)(outputs)
    outputs = Activation(activation='relu')(outputs)

    box_layers.append(outputs)
    box_count -= 1

    local_number_of_filters = (K.int_shape(box_layers[box_count]))[-1]
    local_layer = Conv3D(filters=local_number_of_filters,
                         kernel_size=(1, 1, 1),
                         padding='same',
                         kernel_initializer='normal')(box_layers[box_count - 1])
    box_count -= 1

    for i in range(number_of_dense_blocks - 1):
        upsampling_layer = UpSampling3D(size=(2, 2, 2))(outputs)
        outputs = Add()([local_layer, upsampling_layer])

        local_layer = box_layers[box_count - 1]
        box_count -= 1

        local_number_of_filters = (K.int_shape(box_layers[box_count]))[-1]
        outputs = Conv3D(filters=local_number_of_filters,
                         kernel_size=(3, 3, 3),
                         padding='same',
                         kernel_initializer='normal')(outputs)

        if i == (number_of_dense_blocks - 2):
            outputs = Dropout(rate=0.3)(outputs)

        outputs = BatchNormalization(epsilon=1.1e-5,
                                     axis=concatenation_axis)(outputs)
        outputs = Activation(activation='relu')(outputs)

    convActivation = ''

    if mode == 'classification':
        if number_of_outputs == 2:
            convActivation = 'sigmoid'
        else:
            convActivation = 'softmax'
    elif mode == 'regression':
        convActivation = 'linear'
    else:
        raise ValueError('mode must be either `classification` or `regression`.')

    outputs = Conv3D(filters=number_of_outputs,
                     kernel_size=(1, 1, 1),
                     activation=convActivation,
                     kernel_initializer='normal')(outputs)

    denseunet_model = Model(inputs=inputs, outputs=outputs)

    return denseunet_model
Example #14
0
def __create_fcn_dense_net(nb_classes,
                           img_input,
                           include_top,
                           nb_dense_block=5,
                           growth_rate=12,
                           reduction=0.0,
                           dropout_rate=None,
                           weight_decay=1e-4,
                           nb_layers_per_block=4,
                           nb_upsampling_conv=128,
                           upsampling_type='deconv',
                           init_conv_filters=48,
                           input_shape=None,
                           activation='softmax',
                           early_transition=False,
                           transition_pooling='max',
                           initial_kernel_size=(3, 3, 3)):
    ''' Build the DenseNet-FCN model

    # Arguments
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns, height) or (rows, columns, height, channels)
        include_top: flag to include the final Dense layer
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        reduction: reduction factor of transition blocks. Note : reduction value
            is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        nb_upsampling_conv: number of convolutional layers in upsampling via subpixel
            convolution
        upsampling_type: Can be one of 'upsampling', 'deconv' and 'subpixel'. Defines
            type of upsampling algorithm used.
        input_shape: Only used for shape inference in fully convolutional networks.
        activation: Type of activation at the top layer. Can be one of 'softmax' or
            'sigmoid'. Note that if sigmoid is used, classes must be 1.
        early_transition: Start with an extra initial transition down and end with an
            extra transition up to reduce the network size.
        transition_pooling: 'max' for max pooling (default), 'avg' for average pooling,
            None for no pooling. Please note that this default differs from the DenseNet
            paper in accordance with the DenseNetFCN paper.
        initial_kernel_size: The first Conv3D kernel might vary in size based on the
            application, this parameter makes it configurable.

    # Returns
        a keras tensor

    # Raises
        ValueError: in case of invalid argument for `reduction`,
            `nb_dense_block` or `nb_upsampling_conv`.
    '''
    with K.name_scope('DenseNetFCN'):
        concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

        if concat_axis == 1:  # channels_first dim ordering
            _, rows, cols, height = input_shape
        else:
            rows, cols, height, _ = input_shape

        if reduction != 0.0:
            if not (reduction <= 1.0 and reduction > 0.0):
                raise ValueError(
                    '`reduction` value must lie between 0.0 and 1.0')

        # check if upsampling_conv has the minimum number of filters; the minimum
        # is set to 12, as at least 3 color channels are needed for correct upsampling
        if not (nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0):
            raise ValueError(
                'Parameter `nb_upsampling_conv` number of channels must '
                'be a positive number divisible by 4 and greater than 12')

        # layers in each dense block
        if type(nb_layers_per_block) is list or type(
                nb_layers_per_block) is tuple:
            nb_layers = list(nb_layers_per_block)  # Convert tuple to list

            if len(nb_layers) != (nb_dense_block + 1):
                raise ValueError(
                    'If `nb_layers_per_block` is a list, its length must be '
                    '(`nb_dense_block` + 1)')

            bottleneck_nb_layers = nb_layers[-1]
            rev_layers = nb_layers[::-1]
            nb_layers.extend(rev_layers[1:])
        else:
            bottleneck_nb_layers = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)

        # compute compression factor
        compression = 1.0 - reduction

        # Initial convolution
        x = Conv3D(init_conv_filters,
                   initial_kernel_size,
                   kernel_initializer='he_normal',
                   padding='same',
                   name='initial_Conv3D',
                   use_bias=False,
                   kernel_regularizer=l2(weight_decay))(img_input)
        x = BatchNormalization(axis=concat_axis,
                               epsilon=1.1e-5,
                               name='initial_bn')(x)
        x = Activation('relu')(x)

        nb_filter = init_conv_filters

        skip_list = []

        if early_transition:
            x = __transition_block(x,
                                   nb_filter,
                                   compression=compression,
                                   weight_decay=weight_decay,
                                   block_prefix='tr_early',
                                   transition_pooling=transition_pooling)

        # Add dense blocks and transition down block
        for block_idx in range(nb_dense_block):
            x, nb_filter = __dense_block(x,
                                         nb_layers[block_idx],
                                         nb_filter,
                                         growth_rate,
                                         dropout_rate=dropout_rate,
                                         weight_decay=weight_decay,
                                         block_prefix='dense_%i' % block_idx)

            # Skip connection
            skip_list.append(x)

            # add transition_block
            x = __transition_block(x,
                                   nb_filter,
                                   compression=compression,
                                   weight_decay=weight_decay,
                                   block_prefix='tr_%i' % block_idx,
                                   transition_pooling=transition_pooling)

            # this is calculated inside transition_down_block
            nb_filter = int(nb_filter * compression)

        # The last dense_block does not have a transition_down_block
        # return the concatenated feature maps without the concatenation of the input
        block_prefix = 'dense_%i' % nb_dense_block
        _, nb_filter, concat_list = __dense_block(x,
                                                  bottleneck_nb_layers,
                                                  nb_filter,
                                                  growth_rate,
                                                  dropout_rate=dropout_rate,
                                                  weight_decay=weight_decay,
                                                  return_concat_list=True,
                                                  block_prefix=block_prefix)

        skip_list = skip_list[::-1]  # reverse the skip list

        # Add dense blocks and transition up block
        for block_idx in range(nb_dense_block):
            n_filters_keep = growth_rate * nb_layers[nb_dense_block +
                                                     block_idx]

            # upsampling block must upsample only the feature maps (concat_list[1:]),
            # not the concatenation of the input with the feature maps (concat_list[0]).
            l = concatenate(concat_list[1:], axis=concat_axis)

            t = __transition_up_block(l,
                                      nb_filters=n_filters_keep,
                                      type=upsampling_type,
                                      weight_decay=weight_decay,
                                      block_prefix='tr_up_%i' % block_idx)

            # concatenate the skip connection with the transition block
            x = concatenate([t, skip_list[block_idx]], axis=concat_axis)

            # Don't allow the feature map size to grow in upsampling dense blocks
            block_layer_index = nb_dense_block + 1 + block_idx
            block_prefix = 'dense_%i' % (block_layer_index)
            x_up, nb_filter, concat_list = __dense_block(
                x,
                nb_layers[block_layer_index],
                nb_filter=growth_rate,
                growth_rate=growth_rate,
                dropout_rate=dropout_rate,
                weight_decay=weight_decay,
                return_concat_list=True,
                grow_nb_filters=False,
                block_prefix=block_prefix)

        if early_transition:
            x_up = __transition_up_block(x_up,
                                         nb_filters=nb_filter,
                                         type=upsampling_type,
                                         weight_decay=weight_decay,
                                         block_prefix='tr_up_early')
        if include_top:
            x = Conv3D(nb_classes, (1, 1, 1),
                       activation='linear',
                       padding='same',
                       use_bias=False)(x_up)

            if K.image_data_format() == 'channels_first':
                channel, row, col, height = input_shape
            else:
                row, col, height, channel = input_shape

            x = Reshape((row * col * height, nb_classes))(x)
            x = Activation(activation)(x)
            x = Reshape((row, col, height, nb_classes))(x)
        else:
            x = x_up

        return x
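# A minimal usage sketch for the builder above (illustrative, with assumed
# context): it presumes the Keras `Input`/`Model` objects and the helper blocks
# referenced inside the function (__dense_block, __transition_block,
# __transition_up_block) are importable from this module. Shapes and
# hyper-parameters are placeholders only.
demo_shape = (32, 32, 32, 1)  # rows, cols, height, channels (channels_last)
demo_input = Input(shape=demo_shape)
demo_output = __create_fcn_dense_net(nb_classes=2,
                                     img_input=demo_input,
                                     include_top=True,
                                     nb_dense_block=3,
                                     growth_rate=8,
                                     input_shape=demo_shape)
demo_fcn_model = Model(demo_input, demo_output, name='fcn_densenet_3d_demo')
demo_fcn_model.summary()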
Example #15
0
ReadFilesInList(allLinesValidate, CSV_Path, FileListValidate)

#-------------------------------------------------------------------------------
# make a OneNNData to get array sizes for the NN
#-------------------------------------------------------------------------------
data = OneNNData()
inShape = data.InData.shape
outShape = data.OutData.shape

#-------------------------------------------------------------------------------
# make the Keras Functional API model.
#-------------------------------------------------------------------------------
input = Input(shape=data.InData.shape)
L01a = Conv3D(20,
              5,
              activation='relu',
              padding='same',
              kernel_initializer='he_normal')(input)
L01b = Conv3D(20,
              5,
              activation='relu',
              padding='same',
              kernel_initializer='he_normal')(L01a)
D02 = MaxPooling3D(pool_size=(2, 2, 2))(L01b)
L08a = Conv3D(20,
              7,
              activation='relu',
              padding='same',
              kernel_initializer='he_normal')(D02)
L08b = Conv3D(20,
              7,
Example #16
0
def unet_model_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001, deconvolution=False,
                  depth=4, n_base_filters=32, include_label_wise_dice_coefficients=False, metrics=dice_coefficient,
                  batch_normalization=False, activation_name="sigmoid"):
    """
    Builds the 3D UNet Keras model.
    :param metrics: List metrics to be calculated during model training (default is dice coefficient).
    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
    coefficient for each label as metric.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
    to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_channels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution (deconvolution) instead of up-sampling. This
    increases the amount of memory required during training.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling
    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters*(2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters*(2**layer_depth)*2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling
    for layer_depth in range(depth-2, -1, -1):
        up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,
                                            n_filters=current_layer._keras_shape[1])(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(n_labels)]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model
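# A minimal usage sketch, assuming a channels-first backend (the axis=1
# concatenations and `_keras_shape[1]` lookups above expect it, so `K` here is
# the Keras backend module assumed to be imported alongside the layers) and that
# the helpers referenced by the builder (create_convolution_block,
# get_up_convolution, dice_coefficient, dice_coefficient_loss) live in this
# module. The 64-voxel patch edge is divisible by pool_size**depth = 2**4.
K.set_image_data_format('channels_first')
demo_unet = unet_model_3d(input_shape=(1, 64, 64, 64), n_labels=1, depth=4,
                          n_base_filters=32, batch_normalization=True)
demo_unet.summary()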
Example #17
0
def make_models(input_shape=(40, 64, 64, 1),
                latent_dim=256,
                low_res_shape=(2, 2, 2, 128),
                dropout=0.2):
    encoder = Sequential([
        Conv3D(16,
               kernel_size=3,
               activation='relu',
               padding="same",
               input_shape=input_shape),
        BatchNormalization(),
        Conv3D(32, kernel_size=3, activation='relu', padding="same",
               strides=2),
        BatchNormalization(),
        Conv3D(32, kernel_size=3, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3D(64, kernel_size=3, activation='relu', padding="same",
               strides=2),
        BatchNormalization(),
        Conv3D(64, kernel_size=3, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3D(
            128, kernel_size=3, activation='relu', padding="same", strides=2),
        BatchNormalization(),
        Conv3D(
            128, kernel_size=3, activation='relu', padding="same", strides=2),
        BatchNormalization(),
        Conv3D(latent_dim,
               kernel_size=3,
               padding="same",
               strides=2,
               activation='relu'),
        Flatten(),
        Dropout(dropout),
        Dense(latent_dim),
    ],
                         name="encoder")

    decoder = Sequential([
        Dense(np.prod(low_res_shape), input_shape=(latent_dim, )),
        Dropout(dropout),
        Reshape(low_res_shape),
        Conv3DTranspose(
            128, kernel_size=3, strides=2, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3D(128, kernel_size=3, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3DTranspose(
            128, kernel_size=3, strides=2, activation='relu', padding="same"),
        Lambda(function=crop_5_8_8),
        BatchNormalization(),
        Conv3D(64, kernel_size=3, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3DTranspose(
            64, kernel_size=3, strides=2, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3D(32, kernel_size=3, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3DTranspose(
            32, kernel_size=3, strides=2, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3D(16, kernel_size=3, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3DTranspose(
            16, kernel_size=3, strides=2, activation='relu', padding="same"),
        BatchNormalization(),
        Conv3D(1, kernel_size=3, activation=None, padding="same"),
    ],
                         name="decoder")
    autoencoder = Sequential([encoder, decoder], name="autoencoder")
    return encoder, decoder, autoencoder
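# A short usage sketch: build the encoder/decoder/autoencoder triple with the
# default 40x64x64 single-channel input and run one reconstruction step on
# random stand-in data. Assumes numpy is imported as `np` (it is already used
# inside make_models) and that the crop_5_8_8 helper wrapped by the decoder's
# Lambda layer is defined in this module.
encoder, decoder, autoencoder = make_models()
autoencoder.compile(optimizer='adam', loss='mse')
demo_volumes = np.random.rand(4, 40, 64, 64, 1).astype('float32')
autoencoder.fit(demo_volumes, demo_volumes, epochs=1, batch_size=2)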
Example #18
0
from generator import DataGenerator

from masked_loss import masked_loss_factory

weights_dir_hash = os.environ.get('WEIGHTS_DIR', './')
# Create network architecture.
conv_reg_w = 0.01
chans = ('aslmean', 'aslstd')
filter_pix = 3
filter_size = (filter_pix, filter_pix, filter_pix)

x = Input(shape=(None, None, None, 1))
aslstd = Input(shape=(None, None, None, 1))

y = Conv3D(64, filter_size, padding='same', activation='relu')(x)

#y = Dropout(0.2)(y)
y = BatchNormalization(momentum=0.8)(y)
y = Conv3D(64, filter_size, padding='same', activation='relu')(y)

#y = Dropout(0.2)(y)
y = BatchNormalization(momentum=0.8)(y)
y = Conv3D(1, filter_size, padding='same')(y)

y = keras.layers.add([y, x])

y2 = Conv3D(64, filter_size, padding='same', activation='relu')(aslstd)

#y2 = Dropout(0.2)(y2)
y2 = BatchNormalization(momentum=0.8)(y2)
Example #19
0
def get_model_3d(kwargs):
    base_filters = kwargs['base_filters']
    gpus = kwargs['numgpu']
    loss = kwargs['loss']
    numchannel = int(len(kwargs['modalities']))
    inputs = Input((None, None, None, int(numchannel)))
    if kwargs['model'] == 'inception':
        conv1 = Conv3D(base_filters * 8, (3, 3, 3), activation='relu', padding='same', strides=(1, 1, 1))(inputs)
        conv2 = Conv3D(base_filters * 8, (3, 3, 3), activation='relu', padding='same', strides=(1, 1, 1))(conv1)
        inception1 = Inception3d(conv2, base_filters)
        inception2 = Inception3d(inception1, base_filters)
        inception3 = Inception3d(inception2, base_filters)
        convconcat1 = Conv3D(base_filters * 4, (3, 3, 3), activation='relu', padding='same', strides=(1, 1, 1))(inception3)
        final = Conv3D(base_filters * 4, (3, 3, 3), activation='relu', padding='same', strides=(1, 1, 1))(convconcat1)
    elif kwargs['model'] == 'unet':
        final = Unet3D(inputs, base_filters)
    elif kwargs['model'] == 'vnet':
        final = Vnet3D(inputs, base_filters)
    elif kwargs['model'] == 'fpn' or kwargs['model'] == 'panopticfpn':
        reg = 0.0001
        f1, f2, f3, f4, _ = FPN3D(inputs, base_filters, reg)
    elif kwargs['model'] == 'densenet':
        final = DenseNet3D(inputs,base_filters)
    else:
        sys.exit('Model must be inception/unet/vnet/fpn/panopticfpn/densenet.')

    if kwargs['model'] != 'fpn' and kwargs['model'] != 'panopticfpn':
        if loss == 'bce' or loss == 'dice' or loss == 'focal':
            final = Conv3D(1, (3, 3, 3), activation='sigmoid', padding='same', strides=(1, 1, 1))(final)
        else:
            final = Conv3D(1, (3, 3, 3), activation='relu', padding='same', strides=(1, 1, 1))(final)
        model = Model(inputs=inputs, outputs=final, name='some_unique_name')
    else:
        if kwargs['model'] == 'panopticfpn':
            if loss == 'bce' or loss == 'dice' or loss == 'focal':
                # Generate the semantic segmentation branch of panoptic FPN on top of feature extraction backbone
                # Upsampling stages for F4
                # U1
                f4 = BatchNormalization(axis=-1)(f4)
                f4 = Activation('relu')(f4)
                f4 = UpSampling3D(size=(2, 2, 2), name='F4_U1')(f4)
                # U2
                f4 = Conv3D(base_filters*4, (3, 3, 3), padding='same', strides=(1, 1, 1), kernel_regularizer=l2(reg))(f4)
                f4 = BatchNormalization(axis=-1)(f4)
                f4 = Activation('relu')(f4)
                f4 = UpSampling3D(size=(2, 2, 2), name='F4_U2')(f4)
                # U3
                f4 = Conv3D(base_filters*4, (3, 3, 3), padding='same', strides=(1, 1, 1), kernel_regularizer=l2(reg))(f4)
                f4 = BatchNormalization(axis=-1)(f4)
                f4 = Activation('relu')(f4)
                f4 = UpSampling3D(size=(2, 2, 2), name='F4_U3')(f4)

                # Prepare
                f4 = Conv3D(base_filters*4, (3, 3, 3), padding='same', strides=(1, 1, 1), kernel_regularizer=l2(reg))(f4)
                f4 = BatchNormalization(axis=-1)(f4)
                f4 = Activation('relu')(f4)

                # Upsampling stages for F3
                # U1
                f3 = BatchNormalization(axis=-1)(f3)
                f3 = Activation('relu')(f3)
                f3 = UpSampling3D(size=(2, 2, 2), name='F3_U1')(f3)
                # U2
                f3 = Conv3D(base_filters*4, (3, 3, 3), padding='same', strides=(1, 1, 1), kernel_regularizer=l2(reg))(f3)
                f3 = BatchNormalization(axis=-1)(f3)
                f3 = Activation('relu')(f3)
                f3 = UpSampling3D(size=(2, 2, 2), name='F3_U2')(f3)
                # Prepare
                f3 = Conv3D(base_filters*4, (3, 3, 3), padding='same', strides=(1, 1, 1), kernel_regularizer=l2(reg))(f3)
                f3 = BatchNormalization(axis=-1)(f3)
                f3 = Activation('relu')(f3)

                # Upsampling stages for F2
                # U1
                f2 = BatchNormalization(axis=-1)(f2)
                f2 = Activation('relu')(f2)
                f2 = UpSampling3D(size=(2, 2, 2), name='F2_U1')(f2)
                # Prepare
                f2 = Conv3D(base_filters*4, (3, 3, 3), padding='same', strides=(1, 1, 1), kernel_regularizer=l2(reg))(f2)
                f2 = BatchNormalization(axis=-1)(f2)
                f2 = Activation('relu')(f2)

                # Prepare F1
                f1 = BatchNormalization(axis=-1)(f1)
                f1 = Activation('relu')(f1)

                f3 = Add()([f4, f3])
                f2 = Add()([f3, f2])
                f1 = Add()([f2, f1])

                f1 = Conv3D(base_filters*4, (3, 3, 3),  padding='same', strides=(1,1,1), kernel_regularizer=l2(reg))(f1)
                f1 = BatchNormalization(axis=-1)(f1)
                f1 = Activation('relu')(f1)
                final = Conv3D(1, (3, 3, 3), activation='sigmoid', padding='same', strides=(1, 1, 1), name='Level1')(f1)
            else:
                sys.exit('Loss function for Panoptic FPN must be BCE, Dice, or Focal.')

        elif kwargs['model'] == 'fpn':
            if loss == 'bce' or loss == 'dice' or loss == 'focal':
                f1 = Conv3D(1, (3, 3, 3), activation='sigmoid', padding='same', strides=(1, 1, 1), name='Level1')(f1)
                f2 = Conv3D(1, (3, 3, 3), activation='sigmoid', padding='same', strides=(1, 1, 1), name='Level2')(f2)
                f3 = Conv3D(1, (3, 3, 3), activation='sigmoid', padding='same', strides=(1, 1, 1), name='Level3')(f3)
                f4 = Conv3D(1, (3, 3, 3), activation='sigmoid', padding='same', strides=(1, 1, 1), name='Level4')(f4)
            else:
                f1 = Conv3D(1, (3, 3, 3), activation='relu', padding='same', strides=(1, 1, 1))(f1)
                f2 = Conv3D(1, (3, 3, 3), activation='relu', padding='same', strides=(1, 1, 1))(f2)
                f3 = Conv3D(1, (3, 3, 3), activation='relu', padding='same', strides=(1, 1, 1))(f3)
                f4 = Conv3D(1, (3, 3, 3), activation='relu', padding='same', strides=(1, 1, 1))(f4)

            # collect the four pyramid levels so `final` is defined for the FPN branch
            final = [f1, f2, f3, f4]

        model = Model(inputs=inputs, outputs=final, name='some_unique_name')
    #print(model.summary())
    return model
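# A brief usage sketch, assuming the backbone builders the function dispatches to
# (Inception3d, Unet3D, Vnet3D, FPN3D, DenseNet3D) are importable from this
# module. The dict keys mirror the kwargs the function reads; values are
# illustrative only.
demo_kwargs = {
    'base_filters': 8,
    'numgpu': 1,
    'loss': 'dice',
    'modalities': ['t1', 'flair'],  # two input channels
    'model': 'unet',
}
demo_seg_model = get_model_3d(demo_kwargs)
demo_seg_model.summary()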
Example #20
0
def C3D(T, img_h, img_w, C):
    inputs = Input((T, img_h, img_w, C))

    conv1 = Conv3D(32,
                   kernel_size=(2, 3, 3),
                   padding='same',
                   activation="relu")(inputs)

    #conv1 = Conv3D(16, kernel_size=(3, 3, 3), padding='same',activation="relu")(conv1)
    pool1 = MaxPooling3D(pool_size=(1, 2, 2))(conv1)

    conv2 = Conv3D(64,
                   kernel_size=(2, 3, 3),
                   padding='same',
                   activation="relu")(pool1)
    #conv2 = Conv3D(32, kernel_size=(3, 3, 3), padding='same',activation="relu")(conv2)
    pool2 = MaxPooling3D(pool_size=(1, 2, 2))(conv2)
    print(pool2.shape)

    conv3 = Conv3D(128,
                   kernel_size=(2, 3, 3),
                   padding='same',
                   activation="relu")(pool2)
    conv3 = Conv3D(128,
                   kernel_size=(2, 3, 3),
                   padding='same',
                   activation="relu")(conv3)
    pool3 = MaxPooling3D(pool_size=(1, 2, 2))(conv3)

    conv4 = Conv3D(256,
                   kernel_size=(2, 3, 3),
                   padding='same',
                   activation="relu")(pool3)
    conv4 = Conv3D(256,
                   kernel_size=(2, 3, 3),
                   padding='same',
                   activation="relu")(conv4)
    pool4 = MaxPooling3D(pool_size=(1, 2, 2))(conv4)

    conv5 = Conv3D(256,
                   kernel_size=(2, 3, 3),
                   padding='same',
                   activation="relu")(pool4)
    print(conv5.shape)
    conv5 = Conv3D(256,
                   kernel_size=(2, 3, 3),
                   padding='same',
                   activation="relu")(conv5)

    pool5 = MaxPooling3D(pool_size=(2, 2, 2))(conv5)

    feature = Flatten()(pool5)
    # feature=Dropout(0.5)(feature)
    feature = Dense(1024, activation='relu')(feature)
    # feature = Dropout(0.5)(feature)
    feature = Dense(512, activation='relu')(feature)
    # feature = Dropout(0.5)(feature)
    feature = Dense(1, activation='sigmoid')(feature)
    model = Model(inputs=inputs, outputs=feature)
    adam = keras.optimizers.Adam(lr=0.0001)
    model.summary()
    parallel_model = multi_gpu_model(model, gpus=2)

    model.compile(optimizer=adam,
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])
    #model.compile(optimizer=adam, loss=[focal_loss(alpha=4)], metrics=['binary_accuracy'])
    #model.compile(optimizer=adam, loss=[combo_dice], metrics=['binary_accuracy'])
    return model
Example #21
0
#
# Where $i$ is the current depth.
#
# So at depth $i=0$:
# $$filters_{0} = 32 \times (2^{0}) = 32$$
#
# ### Layer 0
# There are two convolutional layers for each depth

# Run the next cell to create the first 3D convolution

# In[3]:

# Define a Conv3D tensor with 32 filters
down_depth_0_layer_0 = Conv3D(filters=32,
                              kernel_size=(3, 3, 3),
                              padding='same',
                              strides=(1, 1, 1))(input_layer)
down_depth_0_layer_0

# Notice that with 32 filters, the result you get above is a tensor with 32 channels.
#
# Run the next cell to add a relu activation to the first convolutional layer

# In[4]:

# Add a relu activation to layer 0 of depth 0
down_depth_0_layer_0 = Activation('relu')(down_depth_0_layer_0)
down_depth_0_layer_0

# ### Depth 0, Layer 1
# For layer 1 of depth 0, the formula for calculating the number of filters is:
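# $$filters_{0,1} = 32 \times (2^{0}) \times 2 = 64$$
#
# (the extra factor of 2 mirrors the second convolution block in the 3-D U-Net
# example above, where layer 2 uses n_base_filters*(2**layer_depth)*2 filters;
# treat the cell below as a sketch that reuses the naming convention of the
# layers already created)

# Define a Conv3D tensor with 64 filters and add a relu activation
down_depth_0_layer_1 = Conv3D(filters=64,
                              kernel_size=(3, 3, 3),
                              padding='same',
                              strides=(1, 1, 1))(down_depth_0_layer_0)
down_depth_0_layer_1 = Activation('relu')(down_depth_0_layer_1)
down_depth_0_layer_1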
def create_dense_convolution_block_separate(input_layer,
                                            n_filters,
                                            batch_normalization=False,
                                            kernel=(3, 3, 3),
                                            activation=None,
                                            padding='same',
                                            strides=(1, 1, 1),
                                            instance_normalization=False):
    """

    :param strides:
    :param input_layer:
    :param n_filters:
    :param batch_normalization:
    :param kernel:
    :param activation: Keras activation layer to use. (default is 'relu')
    :param padding:
    :return:
    """
    # try:
    #     from keras_contrib.layers.normalization import InstanceNormalization
    # except ImportError:
    #     raise ImportError("Install keras_contrib in order to use instance normalization."
    #                       "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git")

    strides_x = (strides[0], 1, 1)
    strides_y = (1, strides[1], 1)
    strides_z = (1, 1, strides[2])
    print(kernel[0], kernel[1], kernel[2])
    conv_x = Conv3D(n_filters, (kernel[0], 1, 1),
                    padding=padding,
                    strides=strides_x)(input_layer)
    if batch_normalization:
        conv_x = BatchNormalization(axis=1)(conv_x)
    elif instance_normalization:
        conv_x = InstanceNormalization(axis=1)(conv_x)
    if activation is None:
        conv_x = Activation('relu')(conv_x)
    else:
        conv_x = activation()(conv_x)
    concat1 = concatenate([conv_x, input_layer], axis=1)
    conv_y = Conv3D(n_filters, (1, kernel[1], 1),
                    padding=padding,
                    strides=strides_y)(concat1)
    if batch_normalization:
        conv_y = BatchNormalization(axis=1)(conv_y)
    elif instance_normalization:
        conv_y = InstanceNormalization(axis=1)(conv_y)
    if activation is None:
        conv_y = Activation('relu')(conv_y)
    else:
        conv_y = activation()(conv_y)
    concat2 = concatenate([concat1, conv_y], axis=1)
    conv_z = Conv3D(n_filters, (1, 1, kernel[2]),
                    padding=padding,
                    strides=strides_z)(concat2)
    if batch_normalization:
        conv_z = BatchNormalization(axis=1)(conv_z)
    elif instance_normalization:
        conv_z = InstanceNormalization(axis=1)(conv_z)
    if activation is None:
        conv_z = Activation('relu')(conv_z)
    else:
        conv_z = activation()(conv_z)
    return conv_z
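# A minimal usage sketch. The axis=1 BatchNormalization and concatenate calls
# above imply a channels-first layout, so the sketch sets it explicitly; `K` is
# assumed to be the Keras backend module and `Input` the usual Keras input
# layer, both imported elsewhere in this module. Sizes are illustrative.
K.set_image_data_format('channels_first')
demo_block_in = Input(shape=(1, 32, 32, 32))
demo_block_out = create_dense_convolution_block_separate(demo_block_in,
                                                         n_filters=8,
                                                         batch_normalization=True)
print(K.int_shape(demo_block_out))  # expected: (None, 8, 32, 32, 32)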
Example #23
0
def build_sh_patch_resnet_fracvol():

    # Patch size is hard-coded in the network
    input_dims = (3, 3, 3, 45)
    inputs = Input(shape=input_dims)

    # First Convolution
    x1 = Conv3D(filters=45, kernel_size=10, strides=(1, 1, 1),
                padding='same')(inputs)

    # Functional Blocks: split the 45 SH coefficients by order (1 + 5 + 9 + 13 + 17)
    sh0 = Lambda(lambda x: x[:, :, :, :, 0:1])(x1)
    sh2 = Lambda(lambda x: x[:, :, :, :, 1:6])(x1)
    sh4 = Lambda(lambda x: x[:, :, :, :, 6:15])(x1)
    sh6 = Lambda(lambda x: x[:, :, :, :, 15:28])(x1)
    sh8 = Lambda(lambda x: x[:, :, :, :, 28:45])(x1)

    sh0_c1 = Conv3D(filters=1,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh0)
    sh2_c1 = Conv3D(filters=5,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh2)
    sh4_c1 = Conv3D(filters=9,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh4)
    sh6_c1 = Conv3D(filters=13,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh6)
    sh8_c1 = Conv3D(filters=17,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh8)

    sh0_c2 = Conv3D(filters=1,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh0_c1)
    sh2_c2 = Conv3D(filters=5,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh2_c1)
    sh4_c2 = Conv3D(filters=9,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh4_c1)
    sh6_c2 = Conv3D(filters=13,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh6_c1)
    sh8_c2 = Conv3D(filters=17,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same',
                    activation='relu')(sh8_c1)

    sh0_c3 = Conv3D(filters=1,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same')(sh0_c2)
    sh2_c3 = Conv3D(filters=5,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same')(sh2_c2)
    sh4_c3 = Conv3D(filters=9,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same')(sh4_c2)
    sh6_c3 = Conv3D(filters=13,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same')(sh6_c2)
    sh8_c3 = Conv3D(filters=17,
                    kernel_size=10,
                    strides=(1, 1, 1),
                    padding='same')(sh8_c2)

    combined = concatenate([sh0_c3, sh2_c3, sh4_c3, sh6_c3, sh8_c3])
    x2 = Conv3D(filters=45, kernel_size=10, strides=(1, 1, 1),
                padding='same')(combined)

    # Complete Residual Block
    res_add = Add()([x1, x2])

    x3 = Conv3D(filters=45,
                kernel_size=10,
                strides=(1, 1, 1),
                padding='same',
                activation='relu')(res_add)
    x4 = Conv3D(filters=45, kernel_size=10, strides=(1, 1, 1),
                padding='same')(x3)

    x5 = Flatten()(x4)
    x6 = Dense(45)(x5)

    # Extract Fractional Volume Output
    f_out = Dense(3, activation='linear')(x5)

    total_out = concatenate([x6, f_out])

    # Model define inputs and outputs from network structure
    model = Model(inputs=inputs, outputs=total_out)

    opt_func = RMSprop(lr=0.0001)
    model.compile(loss='mse',
                  optimizer=opt_func,
                  metrics=[calc_acc, frac_loss, sh_loss])
    print(model.summary())
    return model
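# A short usage sketch, assuming the custom metrics passed to compile()
# (calc_acc, frac_loss, sh_loss) are defined in this module and numpy is
# imported as `np`. The network takes 3x3x3 patches of 45 spherical-harmonic
# coefficients and predicts 45 SH values plus 3 fractional volumes (48 targets).
demo_sh_model = build_sh_patch_resnet_fracvol()
demo_patches = np.random.rand(2, 3, 3, 3, 45).astype('float32')
demo_targets = np.random.rand(2, 48).astype('float32')
demo_sh_model.fit(demo_patches, demo_targets, epochs=1, batch_size=2)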
Example #24
0
def multiinput_resnet_lk():


    # 4 input1:a =======================================================================================================
    inputs_1 = Input(shape=(280, 280, 16, 1), name='path1_input1')
    # 256*256*128
    print("path1_input shape:", inputs_1.shape)  # (?, 140, 140, 16, 64)
    out1 = Conv3D(64, 7, strides=(2, 2, 1), padding = 'same', kernel_initializer='he_normal', use_bias = False, name = 'path1_conv1')(inputs_1)
    print("path1_conv0 shape:", out1.shape)#(?, 140, 140, 16, 64)
    out1 = BatchNormalization(axis = -1, epsilon = 1e-6, name = 'path1_bn1')(out1)
    out1 = LeakyReLU(alpha=0.2)(out1)#=====================
    out1 = MaxPooling3D((3, 3, 3), strides=(2, 2, 1), padding = 'same')(out1)
    print("path1_pooling1 shape:", out1.shape)#(?, 70, 70, 16, 64)

    out1 = conv_block(out1, [64, 64, 256], name = 'path1_L1_block1')
    print("path1_conv1 shape:", out1.shape)
    out1 = identity_block(out1, [64, 64, 256], name = 'path1_L1_block2')
    out1 = identity_block(out1, [64, 64, 256], name = 'path1_L1_block3')

    # 4 input2:v =======================================================================================================
    inputs_2 = Input(shape=(280, 280, 16, 1), name='path2_input2')
    # 256*256*128
    print("path2_input shape:", inputs_2.shape)  # (?, 140, 140, 16, 64)
    out2 = Conv3D(64, 7, strides=(2, 2, 1), padding = 'same', kernel_initializer='he_normal', use_bias = False, name = 'path2_conv1')(inputs_2)
    print("path2_conv0 shape:", out1.shape)#(?, 140, 140, 16, 64)
    out2 = BatchNormalization(axis = -1, epsilon = 1e-6, name = 'path2_bn1')(out2)
    out2 = LeakyReLU(alpha=0.2)(out2)  # =====================
    out2 = MaxPooling3D((3, 3, 3), strides=(2, 2, 1), padding = 'same')(out2)
    print("path2_pooling1 shape:", out1.shape)#(?, 70, 70, 16, 64)

    out2 = conv_block(out2, [64, 64, 256], name = 'path2_L1_block1')
    print("path2_conv1 shape:", out2.shape)
    out2 = identity_block(out2, [64, 64, 256], name = 'path2_L1_block2')
    out2 = identity_block(out2, [64, 64, 256], name = 'path2_L1_block3')


    #main path:concatenate 'out1' and 'out2' into 'out' ================================================================
    out = concatenate([out1, out2], axis=-1)
    print("concatenate shape:", out.shape)



    out = conv_block(out, [128, 128, 512], name = 'L2_block1')
    print("conv2 shape:", out.shape)
    out = identity_block(out, [128, 128, 512], name = 'L2_block2')
    out = identity_block(out, [128, 128, 512], name = 'L2_block3')
    out = identity_block(out, [128, 128, 512], name = 'L2_block4')


    out = conv_block(out, [256, 256, 1024], name = 'L3_block1')
    print("conv3 shape:", out.shape)
    out = identity_block(out, [256, 256, 1024], name = 'L3_block2')
    out = identity_block(out, [256, 256, 1024], name = 'L3_block3')
    out = identity_block(out, [256, 256, 1024], name = 'L3_block4')
    out = identity_block(out, [256, 256, 1024], name = 'L3_block5')
    out = identity_block(out, [256, 256, 1024], name = 'L3_block6')

    out = conv_block(out, [512, 512, 2048], name = 'L4_block1')
    print("conv4 shape:", out.shape)
    out = identity_block(out, [512, 512, 2048], name = 'L4_block2')
    out = identity_block(out, [512, 512, 2048], name = 'L4_block3')

    out = GlobalAveragePooling3D(data_format = 'channels_last')(out)
    print("Gpooling shape:", out.shape)
    out_drop = Dropout(rate=0.3)(out)
    out = Dense(1, name = 'fc1')(out_drop)
    print("out shape:", out.shape)
    output = Activation(activation = 'sigmoid')(out)

    model = Model(inputs=[inputs_1, inputs_2], outputs=output)


    print('im multi_input_ClassNet_lk')
    return model
def myConv3D(input_tensor, filters, kernel_size):
    conv1 = Conv3D(filters, kernel_size, padding='same')(input_tensor)
    bn = BatchNormalization()(conv1)
    out = Activation('relu')(bn)
    return out
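# A minimal usage sketch for the conv-BN-relu helper above; layer classes
# (Input, MaxPooling3D, Model) are assumed to be imported as in the other
# examples, and the shapes are illustrative.
demo_vol = Input(shape=(16, 16, 16, 1))
demo_feat = myConv3D(demo_vol, filters=8, kernel_size=(3, 3, 3))
demo_feat = MaxPooling3D(pool_size=(2, 2, 2))(demo_feat)
demo_stem = Model(inputs=demo_vol, outputs=demo_feat)
demo_stem.summary()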
Example #26
0
def resnet(classes=2, use_bias_flag=True):
    '''
    :param use_bias_flag: whether to use bias terms, in both the convolutional and the fully connected layers
    :param classes: number of classes
    :return: the ResNet model
    '''

    inputs = Input(shape=(280, 280, 16, 1), name='input1')
    # 256*256*128
    print("input shape:", inputs.shape)  # (?, 140, 140, 16, 64)
    out = Conv3D(64, 7, strides=(2, 2, 1), padding='same', kernel_initializer='he_normal', use_bias=use_bias_flag, name='conv1')(inputs)
    print("conv0 shape:", out.shape)#(?, 140, 140, 16, 64)
    out = BatchNormalization(axis=-1, epsilon=1e-6, name='bn1')(out)
    out = Activation('relu')(out)
    out = MaxPooling3D((3, 3, 3), strides=(2, 2, 1), padding='same')(out)
    print("pooling1 shape:", out.shape)#(?, 70, 70, 16, 64)

    out = conv_block(out, [64, 64, 256], name='L1_block1', use_bias_flag=use_bias_flag)
    print("conv1 shape:", out.shape)
    out = identity_block(out, [64, 64, 256], name='L1_block2', use_bias_flag=use_bias_flag)
    out = identity_block(out, [64, 64, 256], name='L1_block3', use_bias_flag=use_bias_flag)

    out = conv_block(out, [128, 128, 512], name='L2_block1', use_bias_flag=use_bias_flag)
    print("conv2 shape:", out.shape)
    out = identity_block(out, [128, 128, 512], name='L2_block2', use_bias_flag=use_bias_flag)
    out = identity_block(out, [128, 128, 512], name='L2_block3', use_bias_flag=use_bias_flag)
    out = identity_block(out, [128, 128, 512], name='L2_block4', use_bias_flag=use_bias_flag)


    out = conv_block(out, [256, 256, 1024], name = 'L3_block1', use_bias_flag=use_bias_flag)
    print("conv3 shape:", out.shape)
    out = identity_block(out, [256, 256, 1024], name='L3_block2', use_bias_flag=use_bias_flag)
    out = identity_block(out, [256, 256, 1024], name='L3_block3', use_bias_flag=use_bias_flag)
    out = identity_block(out, [256, 256, 1024], name='L3_block4', use_bias_flag=use_bias_flag)
    out = identity_block(out, [256, 256, 1024], name='L3_block5', use_bias_flag=use_bias_flag)
    out = identity_block(out, [256, 256, 1024], name='L3_block6', use_bias_flag=use_bias_flag)

    out = conv_block(out, [512, 512, 2048], name='L4_block1', use_bias_flag=use_bias_flag)
    print("conv4 shape:", out.shape)
    out = identity_block(out, [512, 512, 2048], name='L4_block2', use_bias_flag=use_bias_flag)
    out = identity_block(out, [512, 512, 2048], name='L4_block3', use_bias_flag=use_bias_flag)

    out = GlobalAveragePooling3D(data_format='channels_last')(out)
    print("Gpooling shape:", out.shape)
    out_drop = Dropout(rate=0.3)(out)


    if classes == 1:
        output = Dense(classes, activation='sigmoid', use_bias=use_bias_flag, name='fc1')(out_drop)
        print("predictions1 shape:", output.shape, 'activition:sigmoid')
    else:
        output = Dense(classes, activation='softmax', use_bias=use_bias_flag, name='fc1')(out_drop)
        print("predictions2 shape:", output.shape, 'activition:softmax')


    #out = Dense(classes, name='fc1', use_bias=use_bias_flag)(out_drop)
    #print("out shape:", out.shape)
    #output = Activation(activation='sigmoid')(out)

    model = Model(inputs=inputs, outputs=output)

    return model
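# A brief usage sketch, assuming the conv_block / identity_block helpers used by
# the builder are importable from this module. The input volume is fixed by the
# builder to 280x280x16 single-channel scans.
demo_resnet = resnet(classes=2, use_bias_flag=True)
demo_resnet.compile(optimizer='adam', loss='categorical_crossentropy',
                    metrics=['accuracy'])
demo_resnet.summary()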
Example #27
0
#for i in range(x_test.shape[0]):
#    x_test[i] = add_rgb_dimension(x_test[i])

# 3rd step: convert to 1 + 4D space (1st argument represents number of rows in the dataset)
#xtrain = x_train.reshape(.shape[0], 16, 16, 16, 3)
#xtest = x_test.reshape(xtest.shape[0], 16, 16, 16, 3)
#
### convert target variable into one-hot
#y_train = keras.utils.to_categorical(y_train, 10)
#y_test = keras.utils.to_categorical(y_test, 10)

## input layer
input_layer = Input((500, 130, 130, 4))

## convolutional layers
conv_layer1 = Conv3D(filters=64, kernel_size=(3, 3, 4),
                     activation='relu')(input_layer)
conv_layer2 = Conv3D(filters=128, kernel_size=(3, 3, 4),
                     activation='relu')(conv_layer1)
conv_layer3 = Conv3D(filters=256, kernel_size=(3, 3, 4),
                     activation='relu')(conv_layer2)

## add max pooling to obtain the most informative features
pooling_layer1 = MaxPool3D(pool_size=(2, 2, 4))(conv_layer3)

conv_layer3 = Conv3D(filters=512, kernel_size=(3, 3, 4),
                     activation='relu')(pooling_layer1)
conv_layer4 = Conv3D(filters=1024, kernel_size=(3, 3, 4),
                     activation='relu')(conv_layer3)
conv_layer5 = Conv3D(filters=2048, kernel_size=(3, 3, 4),
                     activation='relu')(conv_layer4)
pooling_layer2 = MaxPool3D(pool_size=(2, 2, 4))(conv_layer5)
Example #28
0
# Pre-processing
train_set = train_set.astype('float32')
train_set -= np.mean(train_set)
train_set /= np.max(train_set)

# Split the data
X_train_new1, X_test_new, y_train_new1, y_test_new = train_test_split(
    train_set, Y_train, test_size=0.2, random_state=4)
X_train_new, X_val_new, y_train_new, y_val_new = train_test_split(
    X_train_new1, y_train_new1, test_size=0.25, random_state=5)

# Define model
inputs = Input(shape=(1, img_rows, img_cols, img_depth))

conv1 = Conv3D(8, (5, 5, 5), activation='relu', padding='same')(inputs)
conv1 = BatchNormalization(axis=1)(conv1)
conv1 = Conv3D(16, (5, 5, 5), activation='relu', padding='same')(conv1)
conv1 = BatchNormalization(axis=1)(conv1)
pool1 = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(conv1)

conv2 = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(pool1)
conv2 = BatchNormalization(axis=1)(conv2)
conv2 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv2)
conv2 = BatchNormalization(axis=1)(conv2)
pool2 = MaxPooling3D(pool_size=(2, 2, 2), padding='same')(conv2)

conv3 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(pool2)
conv3 = BatchNormalization(axis=1)(conv3)
conv3 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv3)
conv3 = BatchNormalization(axis=1)(conv3)
Example #29
0
def get_model(Shape,
              weight_decay=0.00005,
              kernel_initializer='glorot_uniform',
              reg='l2'):
    # Returns a Keras regression CNN model. Based on:
    # Cole, James H., et al. "Predicting brain age with deep learning from
    # raw imaging data results in a reliable and heritable biomarker."
    # NeuroImage 163 (2017): 115-124.
    # Good ref on CNN regression: https://www.pyimagesearch.com/2019/01/28/keras-regression-and-cnns/
    # ___________________________________________________________________

    if reg == 'l1':
        reg_func = l1
    else:
        reg_func = l2

    input_layer = Input(shape=Shape)
    x = Conv3D(8, (3, 3, 3),
               padding='valid',
               kernel_initializer=kernel_initializer,
               name='conv1',
               kernel_regularizer=reg_func(weight_decay))(input_layer)
    x = Activation('relu')(x)
    x = Conv3D(8, (3, 3, 3),
               padding='valid',
               kernel_initializer=kernel_initializer,
               name='conv2',
               kernel_regularizer=reg_func(weight_decay))(x)
    x = BatchNormalization(momentum=0.99)(x)
    x = Activation('relu')(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2))(x)

    x = Conv3D(16, (3, 3, 3),
               padding='valid',
               kernel_initializer=kernel_initializer,
               name='conv3',
               kernel_regularizer=reg_func(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv3D(16, (3, 3, 3),
               padding='valid',
               kernel_initializer=kernel_initializer,
               name='conv4',
               kernel_regularizer=reg_func(weight_decay))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2))(x)

    x = Conv3D(32, (3, 3, 3),
               padding='valid',
               kernel_initializer=kernel_initializer,
               name='conv5',
               kernel_regularizer=reg_func(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv3D(32, (3, 3, 3),
               padding='valid',
               kernel_initializer=kernel_initializer,
               name='conv6',
               kernel_regularizer=reg_func(weight_decay))(x)
    x = Activation('relu')(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2))(x)

    x = Flatten()(x)
    # x = Dense(4096, activation='relu')(x)
    # x = Dropout(0.3)(x)
    #     x = Dense(1024, activation='relu')(x)
    x = Dense(1024, activation='relu')(x)

    x = Dropout(0.3)(x)
    predictions = Dense(1, name='fcdense', kernel_initializer='he_normal')(x)

    return Model(inputs=input_layer, outputs=predictions)
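# A minimal usage sketch for the regression CNN above. The 91x109x91 shape
# corresponds to a typical 2 mm MNI-space brain volume and is illustrative only.
demo_brainage_cnn = get_model(Shape=(91, 109, 91, 1), weight_decay=0.00005, reg='l2')
demo_brainage_cnn.compile(optimizer='adam', loss='mse', metrics=['mae'])
demo_brainage_cnn.summary()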
Example #30
0
def __transition_up_block(ip,
                          nb_filters,
                          type='deconv',
                          weight_decay=1E-4,
                          block_prefix=None):
    '''Adds an upsampling block. Upsampling operation relies on the type parameter.

    # Arguments
        ip: input keras tensor
        nb_filters: integer, the dimensionality of the output space
            (i.e. the number output of filters in the convolution)
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
        block_prefix: str, for block unique naming

    # Input shape
        5D tensor with shape:
        `(samples, channels, rows, cols, slices)` if data_format='channels_first'
        or 5D tensor with shape:
        `(samples, rows, cols, slices, channels)` if data_format='channels_last'.

    # Output shape
        5D tensor with shape:
        `(samples, nb_filter, rows * 2, cols * 2, slices * 2)` if data_format='channels_first'
        or 5D tensor with shape:
        `(samples, rows * 2, cols * 2, slices * 2, nb_filter)` if data_format='channels_last'.

    # Returns
        a keras tensor
    '''
    with K.name_scope('TransitionUp'):

        if type == 'upsampling':
            x = UpSampling3D(
                name=name_or_none(block_prefix, '_upsampling'))(ip)
        elif type == 'subpixel':
            x = Conv3D(nb_filters, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=l2(weight_decay),
                       use_bias=False,
                       kernel_initializer='he_normal',
                       name=name_or_none(block_prefix, '_Conv3D'))(ip)
            # x = SubPixelUpscaling(scale_factor=2,
            #                       name=name_or_none(block_prefix, '_subpixel'))(x)
            # NOTE: with the upscaling layer commented out, this branch does not
            # change the spatial resolution; it only applies two convolutions.
            x = Conv3D(nb_filters, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_regularizer=l2(weight_decay),
                       use_bias=False,
                       kernel_initializer='he_normal',
                       name=name_or_none(block_prefix, '_Conv3D_2'))(x)
        else:
            x = Conv3DTranspose(nb_filters, (3, 3, 3),
                                activation='relu',
                                padding='same',
                                strides=(2, 2, 2),
                                kernel_initializer='he_normal',
                                kernel_regularizer=l2(weight_decay),
                                name=name_or_none(block_prefix,
                                                  '_Conv3DT'))(ip)
        return x
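# A minimal usage sketch: upsample an 8x8x8 feature map to 16x16x16 with the
# default transposed-convolution path. Assumes Input/Model and the name_or_none
# helper referenced above are importable from this module.
demo_ip = Input(shape=(8, 8, 8, 64))
demo_up = __transition_up_block(demo_ip, nb_filters=32, type='deconv',
                                block_prefix='demo')
demo_up_model = Model(demo_ip, demo_up)
demo_up_model.summary()  # final feature map: (None, 16, 16, 16, 32)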