Example #1
def conv2_block(input_layer: "Layer",
                k: int = 1,
                dropout: float = 0.0) -> "Layer":
    """
    :param input_layer:
    :param k:
    :param dropout:
    :return:
    """

    layer = BatchNormalization(momentum=0.1,
                               epsilon=1e-5,
                               gamma_initializer='uniform')(input_layer)

    layer = Activation('relu')(layer)

    layer = create_cnn_layer(layer,
                             32 * k,
                             dropout=dropout,
                             kernel_init='he_normal',
                             kernel_regularizer=l2(weight_decay))

    layer = create_cnn_layer(layer,
                             32 * k,
                             kernel_init='he_normal',
                             kernel_regularizer=l2(weight_decay))

    layer_after_addition = Add()([input_layer, layer])
    return layer_after_addition
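
The create_cnn_layer helper and the weight_decay constant used in these examples are defined elsewhere in the project and are not shown. The following is a minimal sketch of what such a helper could look like, inferred only from the call sites; the tf.keras API, the argument defaults, the padding, and the ordering of batch normalization, activation and dropout are all assumptions.

from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Dropout

weight_decay = 5e-4  # assumed value; the real constant is defined elsewhere


def create_cnn_layer(input_layer,
                     num_filter,
                     kernel_size=(3, 3),
                     strides=(1, 1),
                     dropout=0.0,
                     kernel_init='glorot_uniform',
                     kernel_regularizer=None,
                     batch_normalization=True,
                     activation=True):
    """Hypothetical conv block: Conv2D with optional BN, ReLU and dropout."""
    layer = Conv2D(num_filter,
                   kernel_size,
                   strides=strides,
                   padding='same',
                   kernel_initializer=kernel_init,
                   kernel_regularizer=kernel_regularizer)(input_layer)
    if batch_normalization:
        layer = BatchNormalization()(layer)
    if activation:
        layer = Activation('relu')(layer)
    if dropout > 0.0:
        layer = Dropout(dropout)(layer)
    return layer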
Example #2
def conv1(input_layer: "Layer", filters: int) -> "Layer":
    """
    :param input_layer:
    :param filters:
    :return:
    """

    layer = create_cnn_layer(input_layer, filters, kernel_size=1)
    return layer
Example #3
def conv3_downsample(input_layer: "Layer", filters: int) -> "Layer":
    """
    Convolution block built with create_cnn_layer's default kernel size and
    strides; despite the name, no explicit stride is passed here.

    :param input_layer: layer the convolution is appended to
    :param filters: number of filters
    :return: output layer of the convolution
    """

    layer = create_cnn_layer(input_layer, filters)
    return layer
Example #4
def initial_conv(input_layer: "Layer") -> "Layer":
    """
    :return:
    """

    layer = create_cnn_layer(input_layer=input_layer,
                             num_filter=16,
                             kernel_init='he_normal',
                             kernel_regularizer=l2(weight_decay))
    return layer
Example #5
from typing import Tuple, Union


def expand_conv(input_layer: "Layer",
                base: int,
                k: int,
                strides: Union[int, Tuple[int, int]] = (1, 1)) -> "Layer":
    """
    Expansion block: two convolutional layers with base * k filters on the
    main path and a strided 1x1 projection of the input on the skip path,
    merged with an Add layer so the channel count (and, via strides, the
    spatial resolution) can change.

    :param input_layer: layer the block is appended to
    :param base: base number of filters, scaled by the width multiplier k
    :param k: width multiplier for the number of filters
    :param strides: strides of the convolutions (values > 1 downsample)
    :return: output layer after the residual addition
    """

    layer = create_cnn_layer(input_layer,
                             base * k,
                             strides=strides,
                             kernel_init='he_normal',
                             kernel_regularizer=l2(weight_decay))

    layer = create_cnn_layer(layer,
                             base * k,
                             strides=strides,
                             kernel_init='he_normal',
                             kernel_regularizer=l2(weight_decay),
                             batch_normalization=False,
                             activation=False)

    skip = create_cnn_layer(input_layer,
                            base * k,
                            kernel_size=(1, 1),
                            strides=strides,
                            kernel_init='he_normal',
                            kernel_regularizer=l2(weight_decay))

    layer_after_addition = Add()([layer, skip])

    return layer_after_addition
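
As a usage illustration, the blocks above can be chained in the Keras functional style. The sketch below assembles initial_conv, expand_conv and conv2_block into a small WideResNet-style classifier; the input shape, width multiplier, block count and the classification head are illustrative assumptions, not part of the original code.

from tensorflow.keras.layers import (Activation, BatchNormalization, Dense,
                                     GlobalAveragePooling2D, Input)
from tensorflow.keras.models import Model

k = 2  # width multiplier (illustrative)

inputs = Input(shape=(32, 32, 3))             # e.g. CIFAR-sized images (assumption)
x = initial_conv(inputs)                      # 16 filters
x = expand_conv(x, base=32, k=k)              # project to 32 * k channels
x = conv2_block(x, k=k, dropout=0.3)          # residual block at 32 * k channels
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
outputs = Dense(10, activation='softmax')(x)  # 10 classes (assumption)

model = Model(inputs, outputs)
model.summary()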
Example #6
def _pre_res_blocks(input_layer: "Layer") -> "Layer":
    """
    Convolution applied before the residual blocks: 64 filters with a kernel size of 3.

    :param input_layer: layer the convolution is appended to
    :return: output layer of the convolution
    """
    layer = create_cnn_layer(input_layer, 64, 3)
    return layer