def vgg_3d_v1(input_shape=(38, 38, 6, 1), n_filters=32, kernel_size=(3, 3, 3)):
    # No normalization, padding first to 40x40x8, same padding, max pooling
    model = Sequential()

    model.add(layers.ZeroPadding3D(padding=(1, 1, 1),
                                   input_shape=input_shape))  # 40x40x8

    model.add(
        layers.Conv3D(filters=n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(
        layers.Conv3D(filters=n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(layers.MaxPool3D(pool_size=(2, 2, 2)))  # 20x20x4

    model.add(
        layers.Conv3D(filters=2 * n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(
        layers.Conv3D(filters=2 * n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(
        layers.Conv3D(filters=2 * n_filters,
                      kernel_size=kernel_size,
                      padding='same',
                      activation='relu'))
    model.add(layers.MaxPool3D(pool_size=(2, 2, 2)))  # 10x10x2

    model.add(layers.Flatten())
    model.add(layers.Dense(units=4 * n_filters, activation='relu'))
    model.add(layers.Dropout(0.5))

    model.add(layers.Dense(units=4 * n_filters, activation='relu'))
    model.add(layers.Dropout(0.5))

    model.add(layers.Dense(units=1, activation='sigmoid'))
    return model
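# Usage sketch (added for illustration, not part of the original snippet):
# assumes `layers` and `Sequential` are the Keras objects the function relies
# on, e.g. `from tensorflow.keras import layers, Sequential`.
model = vgg_3d_v1(input_shape=(38, 38, 6, 1), n_filters=32)
model.compile(optimizer='adam',
              loss='binary_crossentropy',  # single sigmoid output
              metrics=['accuracy'])
model.summary()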
Example #2
def ResNet(stack_fn,
           preact,
           use_bias,
           model_name='resnet',
           include_top=True,
           input_tensor=None,
           input_shape=None,
           pooling=None,
           classes=1000,
           **kwargs):
    """Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.
    # Arguments
        stack_fn: a function that returns output tensor for the
            stacked residual blocks.
        preact: whether to use pre-activation or not
            (True for ResNetV2, False for ResNet and ResNeXt).
        use_bias: whether to use biases for convolutional layers or not
            (True for ResNet and ResNetV2, False for ResNeXt).
        model_name: string, model name.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple of the model input
            (without the batch dimension, including the channel axis);
            required when `input_tensor` is not provided.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 5D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of an invalid input shape.
    """


    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 4 if backend.image_data_format() == 'channels_last' else 1  # channel axis of 5D activations

    x = layers.ZeroPadding3D(padding=3, name='conv1_pad')(img_input)
    x = layers.Conv3D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)

    if preact is False:
        x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                      name='conv1_bn')(x)
        x = layers.Activation('relu', name='conv1_relu')(x)

    x = layers.ZeroPadding3D(padding=1, name='pool1_pad')(x)
    x = layers.MaxPooling3D(3, strides=2, name='pool1_pool')(x)

    x = stack_fn(x)

    if preact is True:
        x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                      name='post_bn')(x)
        x = layers.Activation('relu', name='post_relu')(x)

    if include_top:
        x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='probs')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling3D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name=model_name)

    return model
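# Illustrative sketch (an assumption, not from the original source): `stack_fn`
# is called with the tensor produced by the stem and must return the tensor of
# the stacked residual stages. A trivial stand-in shows the calling convention;
# `layers` and `models` are assumed to be the Keras submodules used above.
def demo_stack_fn(x):
    # Real variants chain residual stages here; one conv keeps the sketch short.
    return layers.Conv3D(128, 3, padding='same', activation='relu',
                         name='demo_stack_conv')(x)

demo_model = ResNet(demo_stack_fn,
                    preact=False,
                    use_bias=True,
                    model_name='resnet3d_demo',
                    input_shape=(32, 64, 64, 3),
                    classes=10)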
Example #3
def VGG16(sideLength):
    '''
    VGG16 3D CNN Implementation
    '''
    # try decreasing strides
    # compare with and without batch norm

    inputs = keras.Input(shape=(sideLength, sideLength, sideLength, 1))
    
    x = layers.ZeroPadding3D(padding=(1,1,1))(inputs)
    x = layers.Conv3D(64, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(64, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling3D((2,2,2), strides=(2,2,2))(x)
    
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(128, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(128, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling3D((2,2,2), strides=(2,2,2))(x)
    
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(256, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(256, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(256, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling3D((2,2,2), strides=(2,2,2))(x)
    
    '''
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(512, 3, 1, activation='relu')(x)
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(512, 3, 1, activation='relu')(x)
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(512, 3, 1, activation='relu')(x)
    x = layers.MaxPooling3D((2,2,2), strides=(2,2,2))(x)
    '''
    
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(512, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(512, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding3D((1,1,1))(x)
    x = layers.Conv3D(512, 3, 1)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling3D((2,2,2), strides=(2,2,2))(x)
    
    x = layers.Flatten()(x)
    x = layers.Dense(4096, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(4096, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    output = layers.Dense(1, activation='sigmoid')(x)
    
    model = keras.Model(inputs, output, name='VGG16')
    return model
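# Usage sketch (illustrative, not from the original source): a cube side such
# as 64 keeps the spatial sizes even through the four 2x2x2 poolings; `keras`
# and `layers` are assumed to be imported as in the original module.
model = VGG16(64)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])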
Example #4
    def f(x):
        y = layers.Conv3D(filters, (1, 1, 1),
                          strides=stride,
                          use_bias=False,
                          name="res{}{}_branch2a".format(
                              stage_char, block_char),
                          **parameters)(x)

        y = BatchNormalizationFreeze(axis=axis,
                                     epsilon=1e-5,
                                     freeze=freeze_bn,
                                     name="bn{}{}_branch2a".format(
                                         stage_char, block_char))(y)

        y = layers.Activation("relu",
                              name="res{}{}_branch2a_relu".format(
                                  stage_char, block_char))(y)

        y = layers.ZeroPadding3D(padding=1,
                                 name="padding{}{}_branch2b".format(
                                     stage_char, block_char))(y)

        y = layers.Conv3D(filters,
                          kernel_size,
                          use_bias=False,
                          name="res{}{}_branch2b".format(
                              stage_char, block_char),
                          **parameters)(y)

        y = BatchNormalizationFreeze(axis=axis,
                                     epsilon=1e-5,
                                     freeze=freeze_bn,
                                     name="bn{}{}_branch2b".format(
                                         stage_char, block_char))(y)

        y = layers.Activation("relu",
                              name="res{}{}_branch2b_relu".format(
                                  stage_char, block_char))(y)

        y = layers.Conv3D(filters * 4, (1, 1, 1),
                          use_bias=False,
                          name="res{}{}_branch2c".format(
                              stage_char, block_char),
                          **parameters)(y)

        y = BatchNormalizationFreeze(axis=axis,
                                     epsilon=1e-5,
                                     freeze=freeze_bn,
                                     name="bn{}{}_branch2c".format(
                                         stage_char, block_char))(y)

        if block == 0:
            shortcut = layers.Conv3D(filters * 4, (1, 1, 1),
                                     strides=stride,
                                     use_bias=False,
                                     name="res{}{}_branch1".format(
                                         stage_char, block_char),
                                     **parameters)(x)

            shortcut = BatchNormalizationFreeze(axis=axis,
                                                epsilon=1e-5,
                                                freeze=freeze_bn,
                                                name="bn{}{}_branch1".format(
                                                    stage_char,
                                                    block_char))(shortcut)
        else:
            shortcut = x

        y = layers.Add(name="res{}{}".format(stage_char, block_char))(
            [y, shortcut])

        y = layers.Activation("relu",
                              name="res{}{}_relu".format(
                                  stage_char, block_char))(y)

        return y
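# Sketch of a plausible enclosing builder for the closure above (an assumption;
# the outer function is not included in this snippet). It binds the free names
# used inside `f` -- filters, kernel_size, stride, block, stage_char,
# block_char, axis, freeze_bn, parameters -- while BatchNormalizationFreeze is
# assumed to be importable at module level.
def demo_bottleneck_3d(filters, stage=0, block=0, kernel_size=(1, 3, 3),
                       stride=None, freeze_bn=False, **parameters):
    if stride is None:
        # the first block of a stage (except stage 0) downsamples
        stride = 1 if block != 0 or stage == 0 else 2
    axis = -1  # channel axis of channels-last 5D tensors
    stage_char = str(stage + 2)
    block_char = chr(ord('a') + block)

    def f(x):
        # body identical to the snippet above
        ...

    return f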
Example #5
def Vox_discriminator(input_shape, n_filters=64, kernel_size=4, norm="batch"):
    """Instantiate Discriminator.

    Adapted from https://arxiv.org/abs/2003.13653

    Parameters
    ----------
    input_shape: list or tuple of four ints, the shape of the input data. Omit
        the batch dimension and include the number of channels.
    n_filters: int, number of filters. Default is 64.
    kernel_size: int, size of the kernel of the conv layers. Default is 4.
    norm: str, whether to use batch or instance normalization.

    Returns
    ----------
    Model object.

    """

    inputs = layers.Input(input_shape, name="input_image")
    targets = layers.Input(input_shape, name="target_image")
    Nfilter_start = n_filters
    depth = 3

    def encoder_step(inputs, n_filters, kernel_size, norm="instance"):
        x = layers.Conv3D(
            n_filters,
            kernel_size,
            strides=2,
            kernel_initializer="he_normal",
            padding="same",
        )(inputs)
        if norm == "instance":
            x = InstanceNormalization()(x)
        if norm == "batch":
            x = layers.BatchNormalization()(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(0.2)(x)

        return x

    x = layers.Concatenate()([inputs, targets])

    for d in range(depth):
        if d == 0:
            x = encoder_step(
                x, Nfilter_start * np.power(2, d), kernel_size, norm="None"
            )
        else:
            x = encoder_step(x, Nfilter_start * np.power(2, d), kernel_size, norm=norm)

    x = layers.ZeroPadding3D()(x)
    x = layers.Conv3D(
        Nfilter_start * (2**depth),
        kernel_size,
        strides=1,
        padding="valid",
        kernel_initializer="he_normal",
    )(x)
    if norm == "instance":
        x = InstanceNormalization()(x)
    if norm == "batch":
        x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU()(x)

    x = layers.ZeroPadding3D()(x)
    last = layers.Conv3D(
        1,
        kernel_size,
        strides=1,
        padding="valid",
        kernel_initializer="he_normal",
        name="output_discriminator",
    )(x)

    return Model(inputs=[targets, inputs], outputs=last, name="Discriminator")
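# Usage sketch (illustrative): assumes `layers`, `Model`, `np` (numpy) and, for
# norm="instance", an InstanceNormalization layer are importable as in the
# original module.
disc = Vox_discriminator(input_shape=(64, 64, 64, 1), n_filters=64,
                         kernel_size=4, norm="batch")
disc.summary()  # outputs a volume of per-patch real/fake scores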
Example #6
def make_base_cnn_3d(image_shape=(20, 100, 100), name='base_cnn', nlayers=18):
    """Make a CNN network for a single image.

    Args:
        image_shape: tuple of ints
            Shape of input images in pixels
        name: string
            Name for model
        nlayers: int
            Number of layers in the model: 8, 18 or 34

    Returns:
        Keras model
    """
    if nlayers not in [8, 18, 34]:
        raise ValueError('nlayers must be 8, 18 or 34.')

    img_input = layers.Input(shape=image_shape + (1, ))  # Channels last.
    x = layers.ZeroPadding3D(padding=(1, 3, 3), name='conv1_pad')(img_input)

    x = layers.Conv3D(64, (3, 7, 7),
                      strides=(2, 2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      name='conv1')(x)

    x = layers.BatchNormalization(name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding3D(padding=(1, 1, 1), name='pool1_pad')(x)
    x = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2))(x)

    if nlayers == 8:
        x = layers.ZeroPadding3D(padding=(1, 3, 3), name='conv2_pad')(x)

        x = layers.Conv3D(64, (3, 7, 7),
                          strides=(2, 2, 2),
                          padding='valid',
                          kernel_initializer='he_normal',
                          name='conv2')(x)

        x = conv_block_3d(x, (3, 3, 3), [64, 64],
                          stage=2,
                          block='a',
                          strides=(2, 2, 2))
        x = identity_block_3d(x, (3, 3, 3), [64, 64], stage=2, block='b')

        x = conv_block_3d(x, (3, 3, 3), [128, 128],
                          stage=3,
                          block='a',
                          strides=(2, 1, 1))
        x = identity_block_3d(x, (3, 3, 3), [128, 128], stage=3, block='b')

        x = conv_block_3d(x, (3, 3, 3), [128, 128],
                          stage=4,
                          block='a',
                          strides=(1, 1, 1))
        x = identity_block_3d(x, (3, 3, 3), [128, 128], stage=4, block='b')

    if nlayers == 18:

        x = conv_block_3d(x, (3, 3, 3), [64, 64],
                          stage=2,
                          block='a',
                          strides=(2, 1, 1))
        x = identity_block_3d(x, (3, 3, 3), [64, 64], stage=2, block='b')

        x = conv_block_3d(x, (3, 3, 3), [128, 128],
                          stage=3,
                          block='a',
                          strides=(2, 2, 2))
        x = identity_block_3d(x, (3, 3, 3), [128, 128], stage=3, block='b')

        x = conv_block_3d(x, (3, 3, 3), [256, 256],
                          stage=4,
                          block='a',
                          strides=(2, 2, 2))
        x = identity_block_3d(x, (3, 3, 3), [256, 256], stage=4, block='b')

        x = conv_block_3d(x, (3, 3, 3), [512, 512],
                          stage=5,
                          block='a',
                          strides=(1, 1, 1))
        x = identity_block_3d(x, (3, 3, 3), [512, 512], stage=5, block='b')

    if nlayers == 34:

        x = conv_block_3d(x, (3, 3, 3), [64, 64],
                          stage=2,
                          block='a',
                          strides=(2, 1, 1))
        x = identity_block_3d(x, (3, 3, 3), [64, 64], stage=2, block='b')
        x = identity_block_3d(x, (3, 3, 3), [64, 64], stage=2, block='c')

        x = conv_block_3d(x, (3, 3, 3), [128, 128],
                          stage=3,
                          block='a',
                          strides=(2, 2, 2))
        x = identity_block_3d(x, (3, 3, 3), [128, 128], stage=3, block='b')
        x = identity_block_3d(x, (3, 3, 3), [128, 128], stage=3, block='c')
        x = identity_block_3d(x, (3, 3, 3), [128, 128], stage=3, block='d')

        x = conv_block_3d(x, (3, 3, 3), [256, 256],
                          stage=4,
                          block='a',
                          strides=(2, 2, 2))
        x = identity_block_3d(x, (3, 3, 3), [256, 256], stage=4, block='b')
        x = identity_block_3d(x, (3, 3, 3), [256, 256], stage=4, block='c')
        x = identity_block_3d(x, (3, 3, 3), [256, 256], stage=4, block='d')
        x = identity_block_3d(x, (3, 3, 3), [256, 256], stage=4, block='e')
        x = identity_block_3d(x, (3, 3, 3), [256, 256], stage=4, block='f')

        x = conv_block_3d(x, (3, 3, 3), [512, 512],
                          stage=5,
                          block='a',
                          strides=(1, 1, 1))
        x = identity_block_3d(x, (3, 3, 3), [512, 512], stage=5, block='b')
        x = identity_block_3d(x, (3, 3, 3), [512, 512], stage=5, block='c')

    return Model(img_input, x, name=name)
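# Usage sketch (illustrative): conv_block_3d and identity_block_3d are helper
# functions defined elsewhere in the original module, so this only shows the
# intended call.
base_cnn = make_base_cnn_3d(image_shape=(20, 100, 100), name='base_cnn',
                            nlayers=18)
base_cnn.summary()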
Example #7
def resnet50_3d(inputs,
                filter_ratio=1,
                n=2,
                include_fc_layer=False,
                logits=True,
                kernal1=(1, 1, 1),
                kernal3=(1, 3, 3),
                kernal7=(1, 7, 7),
                num_layers=None):
    """

    :param inputs: Keras Input object with desire shape
    :type inputs:
    :param filter_ratio:
    :type filter_ratio:
    :param n: # of categories
    :type n: integer
    :param include_fc_layer:
    :type include_fc_layer:
    :return:
    :rtype:
    """
    # --- Define kwargs dictionary
    kwargs1 = {
        'kernel_size': kernal1,
        'padding': 'valid',
    }
    kwargs3 = {
        'kernel_size': kernal3,
        'padding': 'same',
    }
    kwargs7 = {
        'kernel_size': kernal7,
        'padding': 'valid',
    }
    # --- Define block components
    conv1 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs1)(x)
    conv3 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs3)(x)
    relu = lambda x: layers.LeakyReLU()(x)
    conv7 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs7)(x)
    max_pool = lambda x, pool_size, strides: layers.MaxPooling3D(
        pool_size=pool_size, strides=strides, padding='valid')(x)
    norm = lambda x: layers.BatchNormalization()(x)
    add = lambda x, y: layers.Add()([x, y])
    zeropad = lambda x, padding: layers.ZeroPadding3D(padding=padding)(x)
    # --- Residual blocks
    # conv blocks
    conv_1 = lambda filters, x, strides: relu(
        norm(conv1(x, filters, strides=strides)))
    conv_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    conv_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    conv_sc = lambda filters, x, strides: norm(
        conv1(x, filters, strides=strides))
    conv_block = lambda filters1, filters2, x, strides: relu(
        add(conv_sc(filters2, x, strides),
            conv_3(filters2, conv_2(filters1, conv_1(filters1, x, strides)))))
    # identity blocks
    identity_1 = lambda filters, x: relu(norm(conv1(x, filters, strides=1)))
    identity_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    identity_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    identity_block = lambda filters1, filters2, x: relu(
        add(
            identity_3(filters2, identity_2(filters1, identity_1(filters1, x))
                       ), x))
    # --- ResNet-50 backbone
    # stage 1 c2 1/4
    res1 = max_pool(zeropad(
        relu(
            norm(
                conv7(zeropad(inputs, (0, 3, 3)),
                      int(64 * filter_ratio),
                      strides=(1, 2, 2)))), (0, 1, 1)), (1, 3, 3),
                    strides=(1, 2, 2))
    # stage 2 c2 1/4
    res2 = layers.Lambda(lambda x: x, name='c2-output')(identity_block(
        int(64 * filter_ratio), int(256 * filter_ratio),
        identity_block(
            int(64 * filter_ratio), int(256 * filter_ratio),
            conv_block(int(64 * filter_ratio),
                       int(256 * filter_ratio),
                       res1,
                       strides=1))))
    # stage 3 c3 1/8
    res3 = layers.Lambda(lambda x: x, name='c3-output')(identity_block(
        int(128 * filter_ratio), int(512 * filter_ratio),
        identity_block(
            int(128 * filter_ratio), int(512 * filter_ratio),
            identity_block(
                int(128 * filter_ratio), int(512 * filter_ratio),
                conv_block(int(128 * filter_ratio),
                           int(512 * filter_ratio),
                           res2,
                           strides=(1, 2, 2))))))
    # stage 4 c4 1/16
    res4 = layers.Lambda(lambda x: x, name='c4-output')(identity_block(
        int(256 * filter_ratio), int(1024 * filter_ratio),
        identity_block(
            int(256 * filter_ratio), int(1024 * filter_ratio),
            identity_block(
                int(256 * filter_ratio), int(1024 * filter_ratio),
                identity_block(
                    int(256 * filter_ratio), int(1024 * filter_ratio),
                    identity_block(
                        int(256 * filter_ratio), int(1024 * filter_ratio),
                        conv_block(int(256 * filter_ratio),
                                   int(1024 * filter_ratio),
                                   res3,
                                   strides=(1, 2, 2))))))))
    # stage 5 c5 1/32
    res5 = layers.Lambda(lambda x: x, name='c5-output')(identity_block(
        int(512 * filter_ratio), int(2048 * filter_ratio),
        identity_block(
            int(512 * filter_ratio), int(2048 * filter_ratio),
            conv_block(int(512 * filter_ratio),
                       int(2048 * filter_ratio),
                       res4,
                       strides=(1, 2, 2)))))
    if num_layers:
        avg_pool = layers.GlobalAveragePooling3D()(
            [res1, res2, res3, res4, res5][num_layers - 1])
    else:
        avg_pool = layers.GlobalAveragePooling3D()(res5)
    flatten = layers.Flatten()(avg_pool)
    if logits:
        logits = layers.Dense(n)(flatten)
    else:
        logits = layers.Dense(n, activation='softmax')(flatten)
    if include_fc_layer:
        model = Model(inputs=inputs, outputs=logits)
    else:
        model = Model(inputs=inputs, outputs=res5)
    return model
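# Usage sketch (illustrative): `layers` and `Model` are assumed to come from
# Keras as in the original module; the single-slice depth matches the
# (1, k, k) kernels used above.
inputs = layers.Input(shape=(1, 224, 224, 1))
model = resnet50_3d(inputs, filter_ratio=0.5, n=2,
                    include_fc_layer=True, logits=True)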
Example #8
def retinanet_resnet(inputs, K, A):
    """Retinanet with resnet backbone. Classification and regression networks share weights across feature pyramid
     layers"""
    # --- Define kwargs dictionary
    kwargs1 = {
        'kernel_size': (1, 1, 1),
        'padding': 'valid',
    }
    kwargs3 = {
        'kernel_size': (1, 3, 3),
        'padding': 'same',
    }
    kwargs7 = {
        'kernel_size': (1, 7, 7),
        'padding': 'valid',
    }
    # --- Define block components
    conv1 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs1)(x)
    conv3 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs3)(x)
    relu = lambda x: layers.LeakyReLU()(x)
    conv7 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs7)(x)
    max_pool = lambda x, pool_size, strides: layers.MaxPooling3D(
        pool_size=pool_size, strides=strides, padding='valid')(x)
    norm = lambda x: layers.BatchNormalization()(x)
    add = lambda x, y: layers.Add()([x, y])
    zeropad = lambda x, padding: layers.ZeroPadding3D(padding=padding)(x)
    upsamp2x = lambda x: layers.UpSampling3D(size=(1, 2, 2))(x)
    # --- Define stride-1, stride-2 blocks
    # conv1 = lambda filters, x : relu(conv(x, filters, strides=1))
    # conv2 = lambda filters, x : relu(conv(x, filters, strides=(2, 2)))
    # --- Residual blocks
    # conv blocks
    conv_1 = lambda filters, x, strides: relu(
        norm(conv1(x, filters, strides=strides)))
    conv_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    conv_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    conv_sc = lambda filters, x, strides: norm(
        conv1(x, filters, strides=strides))
    conv_block = lambda filters1, filters2, x, strides: relu(
        add(conv_3(filters2, conv_2(filters1, conv_1(filters1, x, strides))),
            conv_sc(filters2, x, strides)))
    # identity blocks
    identity_1 = lambda filters, x: relu(norm(conv1(x, filters, strides=1)))
    identity_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    identity_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    identity_block = lambda filters1, filters2, x: relu(
        add(
            identity_3(filters2, identity_2(filters1, identity_1(filters1, x))
                       ), x))
    # --- feature pyramid blocks
    fp_block = lambda x, y: add(upsamp2x(x), conv1(y, 256, strides=1))
    # --- classification head
    class_subnet = classification_head(K, A)
    # --- regression head
    box_subnet = regression_head(A)
    # --- ResNet-50 backbone
    # stage 1 c2 1/4
    res1 = max_pool(zeropad(
        relu(
            norm(
                conv7(zeropad(inputs['dat'], (0, 3, 3)), 64,
                      strides=(1, 2, 2)))), (0, 1, 1)), (1, 3, 3),
                    strides=(1, 2, 2))
    # stage 2 c2 1/4
    res2 = identity_block(
        64, 256, identity_block(64, 256, conv_block(64, 256, res1, strides=1)))
    # stage 3 c3 1/8
    res3 = identity_block(
        128, 512,
        identity_block(
            128, 512,
            identity_block(128, 512,
                           conv_block(128, 512, res2, strides=(1, 2, 2)))))
    # stage 4 c4 1/16
    res4 = identity_block(
        256, 1024,
        identity_block(
            256, 1024,
            identity_block(
                256, 1024,
                identity_block(
                    256, 1024,
                    identity_block(
                        256, 1024,
                        conv_block(256, 1024, res3, strides=(1, 2, 2)))))))
    # stage 5 c5 1/32
    res5 = identity_block(
        512, 2048,
        identity_block(512, 2048, conv_block(512,
                                             2048,
                                             res4,
                                             strides=(1, 2, 2))))
    # --- Feature Pyramid Network architecture
    # p5 1/32
    fp5 = conv1(res5, 256, strides=1)
    # p4 1/16
    fp4 = fp_block(fp5, res4)
    p4 = conv3(fp4, 256, strides=1)
    # p3 1/8
    fp3 = fp_block(fp4, res3)
    p3 = conv3(fp3, 256, strides=1)
    # p6 1/4
    # p6 = conv3(fp5, 256, strides=(2, 2))
    # p7 1/2
    # p7 = conv3(relu(p6), 256, strides=(2, 2))
    feature_pyramid = [p3, p4, fp5]
    # lambda layer that allows multiple outputs from a shared model to have specific names
    # layers.Lambda(lambda x:x, name=name)()
    # --- Class subnet
    class_outputs = [class_subnet(features) for features in feature_pyramid]
    # --- Box subnet
    box_outputs = [box_subnet(features) for features in feature_pyramid]
    # --- put class and box outputs in dictionary
    logits = {
        'cls-c3': layers.Lambda(lambda x: x, name='cls-c3')(class_outputs[0]),
        'reg-c3': layers.Lambda(lambda x: x, name='reg-c3')(box_outputs[0]),
        'cls-c4': layers.Lambda(lambda x: x, name='cls-c4')(class_outputs[1]),
        'reg-c4': layers.Lambda(lambda x: x, name='reg-c4')(box_outputs[1]),
        'cls-c5': layers.Lambda(lambda x: x, name='cls-c5')(class_outputs[2]),
        'reg-c5': layers.Lambda(lambda x: x, name='reg-c5')(box_outputs[2])
    }

    model = Model(inputs=inputs, outputs=logits)
    return model
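# Usage sketch (illustrative): classification_head and regression_head are
# defined elsewhere in the original module; K is the number of classes and A
# the number of anchors per location, and the input is passed as a dict under
# the key 'dat' as expected by the function above.
inputs = {'dat': layers.Input(shape=(1, 256, 256, 1), name='dat')}
retinanet = retinanet_resnet(inputs, K=2, A=9)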
Example #9
def mj_genNetHeadChannel3Dself64Inflated(winlen=10):
    the_input_shape = (winlen, 64, 64, 3)

    headBranch = Sequential(name="headBranchSelf64")

    headBranch.add(
        layers.ZeroPadding3D(padding=(0, 1, 1),
                             input_shape=the_input_shape,
                             name="zp3d_1"))
    headBranch.add(
        layers.Conv3D(32, (3, 4, 4),
                      strides=(1, 2, 2),
                      padding='valid',
                      data_format='channels_last',
                      activation='linear',
                      name="hconv3d_1"))
    headBranch.add(layers.LeakyReLU(alpha=0.2, name="lkrelu_1"))

    headBranch.add(layers.ZeroPadding3D(padding=(0, 1, 1), name="zp3d_2"))
    headBranch.add(
        layers.Conv3D(64, (3, 4, 4),
                      strides=(1, 2, 2),
                      padding='valid',
                      data_format='channels_last',
                      activation='linear',
                      name="hconv3d_2"))
    headBranch.add(
        layers.BatchNormalization(momentum=0.99, epsilon=1e-05, name='bn_1'))
    headBranch.add(layers.LeakyReLU(alpha=0.2, name="lkrelu_2"))

    if winlen >= 10:
        headBranch.add(layers.ZeroPadding3D(padding=(0, 1, 1), name="zp3d_3"))
        headBranch.add(
            layers.Conv3D(128, (3, 4, 4),
                          strides=(1, 2, 2),
                          padding='valid',
                          data_format='channels_last',
                          activation='linear',
                          name="hconv3d_3"))
        headBranch.add(
            layers.BatchNormalization(momentum=0.99,
                                      epsilon=1e-05,
                                      name='bn_2'))
        headBranch.add(layers.LeakyReLU(alpha=0.2, name="lkrelu_3"))

        headBranch.add(layers.ZeroPadding3D(padding=(0, 1, 1), name="zp3d_4"))
        headBranch.add(
            layers.Conv3D(256, (3, 4, 4),
                          strides=(1, 2, 2),
                          padding='valid',
                          data_format='channels_last',
                          activation='linear',
                          name="hconv3d_4"))
        headBranch.add(
            layers.BatchNormalization(momentum=0.99,
                                      epsilon=1e-05,
                                      name='bn_3'))
        headBranch.add(layers.LeakyReLU(alpha=0.2, name="lkrelu_4"))

        headBranch.add(
            layers.Conv3D(256, (2, 4, 4),
                          strides=(1, 2, 2),
                          padding='valid',
                          data_format='channels_last',
                          activation='linear',
                          name="embedding"))
    else:
        headBranch.add(layers.ZeroPadding3D(padding=(0, 1, 1), name="zp3d_3"))
        headBranch.add(
            layers.Conv3D(128, (1, 4, 4),
                          strides=(1, 2, 2),
                          padding='valid',
                          data_format='channels_last',
                          activation='linear',
                          name="hconv3d_3"))
        headBranch.add(
            layers.BatchNormalization(momentum=0.99,
                                      epsilon=1e-05,
                                      name='bn_2'))
        headBranch.add(layers.LeakyReLU(alpha=0.2, name="lkrelu_3"))

        headBranch.add(layers.ZeroPadding3D(padding=(0, 1, 1), name="zp3d_4"))
        headBranch.add(
            layers.Conv3D(256, (1, 4, 4),
                          strides=(1, 2, 2),
                          padding='valid',
                          data_format='channels_last',
                          activation='linear',
                          name="hconv3d_4"))
        headBranch.add(
            layers.BatchNormalization(momentum=0.99,
                                      epsilon=1e-05,
                                      name='bn_3'))
        headBranch.add(layers.LeakyReLU(alpha=0.2, name="lkrelu_4"))

        headBranch.add(
            layers.Conv3D(256, (1, 4, 4),
                          strides=(1, 2, 2),
                          padding='valid',
                          data_format='channels_last',
                          activation='linear',
                          name="embedding"))

    return headBranch
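# Usage sketch (illustrative): builds the head branch for 10-frame clips of
# 64x64 RGB crops; the final "embedding" convolution reduces each clip to a
# (1, 1, 1, 256) volume.
head = mj_genNetHeadChannel3Dself64Inflated(winlen=10)
head.summary()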