Example 1
import tensorflow as tf
import tensorflow.contrib.slim as slim

# Note: se_block, sese_block, subsample_1d and conv1d_same are helper
# functions defined elsewhere in this repo.

def identity_bottleneck_1d(inputs, depth, depth_bottleneck, stride, v, rate=1, outputs_collections=None,
                           scope=None, use_bounded_activations=False, attention_module=None):
    """
    Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth、depth_bottleneck、stride三个参数是前面blocks类中的args
    depth: 一个block中的某个unit中(第三个conv)输出的feature-map的个数
    depth_bottleneck:  一个block中的某个unit中(前面两个conv)输出的feature-map个数
    stride: 是short_cut路径对于para_inputs/pre_act(经过bn层的para_inputs)的subsample_2d的步长 -- (是否经过bn层主要看输入输出通道数是否一致)
            以及unit中conv-2的步长
    rate: An integer, rate for atrous convolution.
    outputs_collections: 是收集end_points的collection
    scope: 是这个unit的名称。

    todo what is quantized inference??? what is bounded activations???  => must make it clear before any runs
    use_bounded_activations: Whether or not to use bounded activations. Bounded
        activations better lend themselves to quantized inference.
    attention_module: SE-blocks or SESE-blocks
    """
    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Shortcut path: optionally apply the attention block first. The
        # attention selection is the same whether or not the channel count
        # changes, so it is done once here.
        if attention_module == 'se_block':
            shortcut = se_block(inputs, name='se_block',
                                ratio=2 if inputs.get_shape()[-1] <= 8 else 8)
        elif attention_module == 'sese_block':
            # TODO: ratio still to be defined.
            shortcut = sese_block(input_feature=inputs, name='sese_block', v=v,
                                  ratio=2 if inputs.get_shape()[-1] <= 8 else 8)
        else:
            shortcut = inputs
        # If the channel count already matches, just subsample; otherwise
        # project to `depth` channels with a strided 1x1 conv.
        if depth == depth_in:
            shortcut = subsample_1d(shortcut, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(shortcut, depth, [1, 1], stride=stride,
                                   activation_fn=tf.nn.relu6 if use_bounded_activations else None,
                                   scope='shortcut')
        # Residual path: 1x1 reduce, 3-tap conv1d_same, 1x1 expand.
        residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = conv1d_same(residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, activation_fn=None, scope='conv3')
        # Output activation.
        if use_bounded_activations:
            # Clip the residual to [-6, 6] (a band-pass-like bound) so the
            # values entering the addition stay within the relu6 range,
            # which suits quantized inference.
            residual = tf.clip_by_value(residual, -6.0, 6.0)
            output = tf.nn.relu6(shortcut + residual)
        else:
            output = tf.nn.relu(shortcut + residual)

        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
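For reference, here is a minimal usage sketch, not from the original repo. It assumes TF 1.x with contrib slim, and supplies stand-in versions of subsample_1d and conv1d_same modeled on slim's resnet_utils, treating the height axis as the 1-D axis; the repo's real helpers may differ.

import tensorflow as tf
import tensorflow.contrib.slim as slim

def subsample_1d(inputs, factor, scope=None):
    # Identity when factor == 1, otherwise a 1x1 max-pool with the given stride.
    if factor == 1:
        return inputs
    return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)

def conv1d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
    # 'SAME'-style convolution along the height (1-D) axis, padding explicitly
    # when stride > 1, mirroring resnet_utils.conv2d_same.
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, [kernel_size, 1], stride=1,
                           rate=rate, padding='SAME', scope=scope)
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [0, 0], [0, 0]])
    return slim.conv2d(inputs, num_outputs, [kernel_size, 1], stride=stride,
                       rate=rate, padding='VALID', scope=scope)

# A dummy 1-D signal laid out as [batch, length, 1, channels] (an assumption).
x = tf.placeholder(tf.float32, [None, 1024, 1, 64])
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm):
    y = identity_bottleneck_1d(x, depth=256, depth_bottleneck=64, stride=2,
                               v=None)  # v is unused when attention_module is None
print(y)  # shape [?, 512, 1, 256]: length halved, channels projected to depth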
Example 2
def pre_bottleneck_1d(inputs,
                      depth,
                      depth_bottleneck,
                      stride,
                      v,
                      rate=1,
                      outputs_collections=None,
                      scope=None,
                      attention_module=None):
    """
    Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth、depth_bottleneck、stride三个参数是前面blocks类中的args
    depth: 一个block中的某个unit中(第三个conv)输出的feature-map的个数
    depth_bottleneck:  一个block中的某个unit中(前面两个conv)输出的feature-map个数
    stride: 是short_cut路径对于para_inputs/pre_act(经过bn层的para_inputs)的subsample_2d的步长 -- (是否经过bn层主要看输入输出通道数是否一致)
            以及unit中conv-2的步长
    rate: An integer, rate for atrous convolution.
    outputs_collections: 是收集end_points的collection
    scope: 是这个unit的名称
    attention_module: SE-blocks or SESE-blocks
    """
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Apply the optional attention block on the residual path.
        if attention_module == 'se_block':
            residual = se_block(inputs,
                                name='se_block',
                                ratio=2 if inputs.get_shape()[-1] <= 8 else 8)
        elif attention_module == 'sese_block':
            # TODO: ratio still to be defined.
            residual = sese_block(
                input_feature=inputs,
                name='sese_block',
                v=v,
                ratio=2 if inputs.get_shape()[-1] <= 8 else 8)
        else:
            # No other attention block is implemented.
            residual = inputs
        # Pre-activation (batch norm + ReLU). Note that preact only feeds the
        # shortcut projection below; the residual path starts from the
        # (optionally attention-weighted) raw inputs.
        preact = slim.batch_norm(inputs,
                                 activation_fn=tf.nn.relu,
                                 scope='preact')
        # Shortcut path: plain subsample when the channel count matches,
        # otherwise a strided 1x1 projection of the pre-activated inputs.
        if depth == depth_in:
            shortcut = subsample_1d(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(preact,
                                   depth, [1, 1],
                                   stride=stride,
                                   normalizer_fn=None,
                                   activation_fn=None,
                                   scope='shortcut')
        # Residual path: 1x1 reduce, 3-tap conv1d_same, 1x1 expand.
        residual = slim.conv2d(residual,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')
        residual = conv1d_same(residual,
                               depth_bottleneck,
                               3,
                               stride,
                               rate=rate,
                               scope='conv2')
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               normalizer_fn=None,
                               activation_fn=None,
                               scope='conv3')
        # Merge the two paths; the pre-activation variant applies no
        # activation after the addition.
        output = shortcut + residual

        return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                                output)
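And a similar minimal sketch for the pre-activation unit, again not from the original repo, re-using the stand-in subsample_1d / conv1d_same helpers from the sketch after Example 1; the shapes and scope names are illustrative assumptions.

import tensorflow as tf
import tensorflow.contrib.slim as slim

x = tf.placeholder(tf.float32, [None, 1024, 1, 64])
with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm):
    # First unit: channels change (64 -> 256), so the shortcut is a strided
    # 1x1 projection of preact, and the length is halved.
    y = pre_bottleneck_1d(x, depth=256, depth_bottleneck=64, stride=2, v=None,
                          scope='unit_1')
    # Second unit: depth == depth_in, so the shortcut is a plain subsample
    # (here an identity, since stride is 1).
    y = pre_bottleneck_1d(y, depth=256, depth_bottleneck=64, stride=1, v=None,
                          scope='unit_2')
print(y)  # shape [?, 512, 1, 256]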