Example #1
def _reduce_conv(x: tf.Tensor,
                 filters: int,
                 input_stride: int,
                 is_training: bool,
                 name: Optional[str]) -> tf.Tensor:
    bn_name, conv_name = None, None
    if name is not None:
        conv_name = prepare_block_operation_name(name, '1x1_reduce', 'relu')
        bn_name = prepare_block_operation_name(name, '1x1_reduce', 'bn')
    if input_stride > 1:
        stride = input_stride, input_stride
        conv = downsample_conv2d(
            x=x,
            num_filters=filters,
            kernel_size=(1, 1),
            strides=stride,
            name=conv_name)
    else:
        conv = bottleneck_conv2d(
            x=x,
            num_filters=filters,
            name=conv_name)
    return tf.layers.batch_normalization(
        inputs=conv,
        training=is_training,
        name=bn_name)
Example #2
def residual_conv_encoder(x: tf.Tensor,
                          output_filters: Tuple[int, int, int],
                          input_stride: int = 1,
                          dilation_rate: int = 1,
                          project_input: bool = True,
                          use_relu_at_output: bool = True,
                          name: Optional[str] = None,
                          is_training: bool = True) -> tf.Tensor:
    increased = _reduce_conv_increase_block(
        x=x,
        output_filters=output_filters,
        input_stride=input_stride,
        dilation_rate=dilation_rate,
        is_training=is_training,
        name=name
    )
    if project_input:
        projection_conv = _projection_conv(
            x=x,
            filters=output_filters[-1],
            stride=input_stride,
            name=name,
            is_training=is_training)
        residual_sum = tf.add(projection_conv, increased, name=name)
    else:
        residual_sum = tf.add(x, increased, name=name)
    if not use_relu_at_output:
        return residual_sum
    if name is not None:
        name = prepare_block_operation_name(name, 'relu')
    return tf.nn.relu(residual_sum, name=name)
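A minimal usage sketch (not part of the source): the placeholder shape, filter counts, and block name below are illustrative assumptions about how residual_conv_encoder might be called inside a TF 1.x graph.

# Hypothetical call; input shape, filter counts and block name are assumptions.
inputs = tf.placeholder(tf.float32, shape=[None, 64, 64, 256])
encoded = residual_conv_encoder(
    x=inputs,
    output_filters=(64, 64, 256),
    input_stride=1,
    dilation_rate=2,
    project_input=True,
    name='conv3_1',
    is_training=True)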
Example #3
def _blending_layer(x: tf.Tensor,
                    output_filters: int,
                    kernel: int,
                    use_separable_conv: bool,
                    activation: Optional[str],
                    name: Optional[str],
                    output_is_last_op_in_block: bool) -> tf.Tensor:
    blending_name = None
    if name is not None:
        blending_postfix = 'out' if output_is_last_op_in_block else 'blend'
        blending_name = prepare_block_operation_name(name, blending_postfix)
    kernel = kernel, kernel
    if use_separable_conv:
        return separable_conv2d(
            x=x,
            num_filters=output_filters,
            kernel_size=kernel,
            activation=activation,
            name=blending_name)
    else:
        return dim_hold_conv2d(
            x=x,
            num_filters=output_filters,
            kernel_size=kernel,
            activation=activation,
            name=blending_name)
Example #4
def _atrous_pyramid_head(x: tf.Tensor,
                         output_filters: int,
                         kernel: int,
                         dilation_rate: int,
                         use_separable_conv: bool,
                         activation: Optional[str],
                         name: Optional[str],
                         head_id: int) -> tf.Tensor:
    if name is not None:
        name = prepare_block_operation_name(
            name,
            'pyramid',
            f'{kernel}x{kernel}_conv_{head_id}')
    kernel = kernel, kernel
    dilation_rate = dilation_rate, dilation_rate
    if use_separable_conv:
        return atrous_separable_conv2d(
            x=x,
            num_filters=output_filters,
            kernel_size=kernel,
            dilation_rate=dilation_rate,
            activation=activation,
            name=name
        )
    else:
        return atrous_conv2d(
            x=x,
            num_filters=output_filters,
            kernel_size=kernel,
            dilation_rate=dilation_rate,
            activation=activation,
            name=name
        )
Example #5
def _increase_conv(x: tf.Tensor,
                   filters: int,
                   is_training: bool,
                   name: Optional[str]) -> tf.Tensor:
    bn_name, conv_name = None, None
    if name is not None:
        conv_name = prepare_block_operation_name(name, '1x1_increase')
        bn_name = prepare_block_operation_name(name, '1x1_increase', 'bn')
    conv = bottleneck_conv2d(
        x=x,
        num_filters=filters,
        activation=None,
        name=conv_name)
    return tf.layers.batch_normalization(
        inputs=conv,
        training=is_training,
        name=bn_name)
Example #6
def atrous_pyramid_encoder(x: tf.Tensor,
                           output_filters: int,
                           pyramid_heads_dilation_rate: List[int],
                           pyramid_heads_kernels: Union[int, List[int]] = 3,
                           use_separable_conv_in_pyramid: bool = False,
                           pyramid_heads_activations: Optional[str] = 'relu',
                           input_filters_after_reduction: Optional[int] = 64,
                           separate_reduction_head: bool = True,
                           use_separable_conv_on_input: bool = False,
                           reduction_activation: Optional[str] = 'relu',
                           use_residual_connection: bool = True,
                           fusion_method: FusionMethod = FusionMethod.SUM,
                           fusion_blending_kernel: Optional[int] = 3,
                           use_separable_conv_while_fusion: bool = False,
                           fusion_activation: Optional[str] = 'relu',
                           name: Optional[str] = None) -> tf.Tensor:
    _validate_atrous_encoder_input(
        x=x,
        pyramid_heads_dilation_rate=pyramid_heads_dilation_rate,
        pyramid_heads_kernels=pyramid_heads_kernels,
        use_residual_connection=use_residual_connection,
        input_filters_after_reduction=input_filters_after_reduction,
        output_filters=output_filters)
    heads_number = len(pyramid_heads_dilation_rate)
    inputs = _atrous_encoder_input(
        x=x,
        heads_number=heads_number,
        input_filters_after_reduction=input_filters_after_reduction,
        separate_reduction_head=separate_reduction_head,
        reduction_activation=reduction_activation,
        use_separable_conv_on_input=use_separable_conv_on_input,
        name=name)
    pyramid_output = _atrous_pyramid(
        inputs=inputs,
        output_filters=output_filters,
        heads_dilation_rate=pyramid_heads_dilation_rate,
        heads_kernels=pyramid_heads_kernels,
        use_separable_conv_in_pyramid=use_separable_conv_in_pyramid,
        pyramid_heads_activations=pyramid_heads_activations,
        name=name)
    output_is_last_op_in_block = not use_residual_connection
    out = _output_fusion(
        pyramid_output=pyramid_output,
        output_filters=output_filters,
        fusion_method=fusion_method,
        fusion_blending_kernel=fusion_blending_kernel,
        use_separable_conv_while_fusion=use_separable_conv_while_fusion,
        fusion_activation=fusion_activation,
        name=name,
        output_is_last_op_in_block=output_is_last_op_in_block
    )
    if use_residual_connection:
        if name is not None:
            name = prepare_block_operation_name(name, 'out')
        out = tf.math.add(out, x, name=name)
    return out
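A minimal usage sketch (not part of the source): the feature-map shape, dilation rates, and name are illustrative assumptions about how atrous_pyramid_encoder might be called with the default residual connection and SUM fusion.

# Hypothetical call; feature-map shape, dilation rates and name are assumptions.
features = tf.placeholder(tf.float32, shape=[None, 32, 32, 256])
pyramid_out = atrous_pyramid_encoder(
    x=features,
    output_filters=256,
    pyramid_heads_dilation_rate=[6, 12, 18],
    pyramid_heads_kernels=3,
    input_filters_after_reduction=64,
    fusion_method=FusionMethod.SUM,
    name='atrous_pyramid')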
Example #7
def _internal_conv3x3(x: tf.Tensor,
                      filters: int,
                      dilation_rate: int,
                      is_training: bool,
                      name: Optional[str]) -> tf.Tensor:
    bn_name, conv_name = None, None
    if name is not None:
        conv_name = prepare_block_operation_name(name, '3x3', 'relu')
        bn_name = prepare_block_operation_name(name, '3x3', 'bn')
    dilation_rate = dilation_rate, dilation_rate
    conv = atrous_conv2d(
        x=x,
        num_filters=filters,
        kernel_size=(3, 3),
        dilation_rate=dilation_rate,
        name=conv_name)
    return tf.layers.batch_normalization(
        inputs=conv,
        training=is_training,
        name=bn_name)
Example #8
def _projection_conv(x: tf.Tensor,
                     filters: int,
                     stride: int,
                     name: Optional[str],
                     is_training: bool = True) -> tf.Tensor:
    bn_name, conv_name = None, None
    if name is not None:
        conv_name = prepare_block_operation_name(name, '1x1_proj')
        bn_name = prepare_block_operation_name(name, '1x1_proj', 'bn')
    stride = stride, stride
    projection = bottleneck_conv2d(
        x=x,
        num_filters=filters,
        strides=stride,
        activation=None,
        name=conv_name)
    return tf.layers.batch_normalization(
        inputs=projection,
        training=is_training,
        name=bn_name)
Example #9
def _pyramid_pooling_head(x: tf.Tensor, window_shape: Tuple[int, int],
                          strides: Tuple[int, int], output_size: Size,
                          name: Optional[str], head_id: int) -> tf.Tensor:
    pool_op_name = None
    if name is not None:
        pool_op_name = prepare_block_operation_name(name, f'pool_{head_id}')
    pool_op = avg_pool2d(x=x,
                         window_shape=window_shape,
                         strides=strides,
                         name=pool_op_name)
    height, width = output_size
    return resize_bilinear(x=pool_op, height=height, width=width)
Example #10
def _fusion_op(pyramid_output: List[tf.Tensor],
               fusion_method: FusionMethod,
               fusion_blending_kernel: Optional[int],
               name: Optional[str],
               output_is_last_op_in_block: bool) -> tf.Tensor:
    fusion_name = None
    if name is not None:
        concat_is_final_op = (output_is_last_op_in_block
                              and fusion_blending_kernel is None)
        fusion_name_postfix = 'out' if concat_is_final_op else 'fusion'
        fusion_name = prepare_block_operation_name(name, fusion_name_postfix)
    if fusion_method is FusionMethod.SUM:
        fused = tf.math.add_n(pyramid_output, name=fusion_name)
    else:
        fused = tf.concat(pyramid_output, axis=-1, name=fusion_name)
    return fused
Example #11
def _atrous_encoder_input_head(x: tf.Tensor,
                               filters: int,
                               activation: Optional[str],
                               use_separable_conv: bool,
                               name: Optional[str],
                               head_id: Optional[str]) -> tf.Tensor:
    if name is not None:
        name = prepare_block_operation_name(
            name,
            'input_head',
            f'reduction_1x1_conv_{head_id}')
    if use_separable_conv:
        return separable_bottleneck_conv2d(
            x,
            filters,
            activation=activation,
            name=name)
    else:
        return bottleneck_conv2d(
            x,
            filters,
            activation=activation,
            name=name)
Example #12
def _pyramid_pooling_fusion(inputs: List[tf.Tensor],
                            name: Optional[str]) -> tf.Tensor:
    fusion_op_name = None
    if name is not None:
        fusion_op_name = prepare_block_operation_name(name, 'sum')
    return tf.add_n(inputs=inputs, name=fusion_op_name)