Example #1
def identity_bottleneck_1d(inputs, depth, depth_bottleneck, stride, v, rate=1, outputs_collections=None,
                           scope=None, use_bounded_activations=False, attention_module=None):
    """
    Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth, depth_bottleneck and stride come from the args of the preceding blocks list.
    depth: number of feature maps output by the third conv of this unit in a block.
    depth_bottleneck: number of feature maps output by the first two convs of this unit in a block.
    stride: stride used on the shortcut path to subsample the inputs / pre_act (the inputs after the
            BN layer; whether the BN path is taken depends on whether the input and output channel
            counts match), and also the stride of conv-2 in the unit.
    rate: An integer, rate for atrous convolution.
    outputs_collections: collection used to gather the end_points.
    scope: name of this unit.

    TODO: clarify what quantized inference and bounded activations mean before running.
    use_bounded_activations: Whether or not to use bounded activations. Bounded
        activations better lend themselves to quantized inference.
    attention_module: SE-blocks or SESE-blocks
    """
    with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # Shortcut path: optionally apply the SE/SESE attention block first.
        if attention_module == 'se_block':
            shortcut = se_block(inputs, name='se_block', ratio=2 if inputs.get_shape()[-1] <= 8 else 8)
        elif attention_module == 'sese_block':
            # TODO: ratio still to be defined.
            shortcut = sese_block(input_feature=inputs, name='sese_block', v=v,
                                  ratio=2 if inputs.get_shape()[-1] <= 8 else 8)
        else:
            shortcut = inputs
        # If the channel count already matches, subsample only; otherwise project
        # to `depth` channels with a strided 1x1 convolution.
        if depth == depth_in:
            shortcut = subsample_1d(shortcut, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(shortcut, depth, [1, 1], stride=stride,
                                   activation_fn=tf.nn.relu6 if use_bounded_activations else None,
                                   scope='shortcut')
        # convs
        residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = conv1d_same(residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, activation_fn=None, scope='conv3')
        # post-relu
        if use_bounded_activations:
            # Use clip_by_value to simulate bandpass activation.
            # TODO: clarify why a bandpass (clipped) activation is used here.
            residual = tf.clip_by_value(residual, -6.0, 6.0)
            output = tf.nn.relu6(shortcut + residual)
        else:
            output = tf.nn.relu(shortcut + residual)

        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
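
The following is a minimal usage sketch for identity_bottleneck_1d, not taken from the original repository: it assumes the helpers above (se_block, subsample_1d, conv1d_same) are available in the same module, that 1-D signals are fed as rank-4 tensors of shape [batch, 1, length, channels], and that slim.conv2d is wrapped in a resnet-style arg_scope; all shapes and unit sizes are illustrative.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Hypothetical batch of 1-D signals: length 256, 32 channels, height fixed at 1.
inputs = tf.placeholder(tf.float32, [None, 1, 256, 32])

with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm):
    with slim.arg_scope([slim.batch_norm], is_training=True):
        # Unit that keeps 32 channels: identity (subsampled) shortcut.
        net = identity_bottleneck_1d(inputs, depth=32, depth_bottleneck=8,
                                     stride=1, v=None, scope='unit_1',
                                     attention_module='se_block')
        # Unit that projects 32 -> 64 channels with stride 2: 1x1-conv shortcut.
        net = identity_bottleneck_1d(net, depth=64, depth_bottleneck=16,
                                     stride=2, v=None, scope='unit_2')
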
Example #2
def resnet_v1_1d(inputs, blocks, num_classes=None, is_training=True, global_pool=True, output_stride=None,
                 include_root_block=True, spatial_squeeze=True, store_non_strided_activations=False,
                 reuse=None, scope=None, s=None):
    """
    Args:
    s: switch selecting the bottleneck unit function (default None): 0 -> standard_bottleneck_1d,
        1 -> pre_bottleneck_1d, anything else -> identity_bottleneck_1d.
    output_stride: If None, then the output will be computed at the nominal
        network stride. If output_stride is not None, it specifies the requested
        ratio of input to output spatial resolution.
    store_non_strided_activations: If True, we compute non-strided (undecimated)
        activations at the last unit of each block and store them in the
        `outputs_collections` before subsampling them. This gives us access to
        higher resolution intermediate activations which are useful in some
        dense prediction problems, but increases the computation and memory cost
        at the last unit of each block by 4x.
    reuse: whether or not the network and its variables should be reused. To be
        able to reuse, 'scope' must be given.
    scope: Optional variable_scope.
    """
    with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, standard_bottleneck_1d if s == 0 else pre_bottleneck_1d if s == 1
                            else identity_bottleneck_1d, stack_blocks_dense_1d], outputs_collections=end_points_collection):
            # If is_training is None, skip the batch_norm arg_scope entirely (NoOpScope).
            with (slim.arg_scope([slim.batch_norm], is_training=is_training) if is_training is not None else NoOpScope()):
                net = inputs
                if include_root_block:
                    if output_stride is not None:
                        if output_stride % 4 != 0:
                            raise ValueError('The output_stride needs to be a multiple of 4.')
                        output_stride /= 4
                    net = conv1d_same(net, num_outputs=8, kernel_size=4, stride=2, scope='conv1')
                    net = slim.max_pool2d(net, kernel_size=[1, 3], stride=[1, 2], scope='pool1')
                # TODO: stack_blocks_dense_1d unpacks the args dicts from `blocks` into unit parameters.
                net = stack_blocks_dense_1d(net, blocks, output_stride, store_non_strided_activations)
                # convert to dict 'end_points'
                end_points = slim.utils.convert_collection_to_dict(end_points_collection)

                if global_pool:  # global average pooling
                    net = tf.reduce_mean(net, [1, 2], name='pool5', keepdims=True)
                    end_points['global_pool'] = net
                if num_classes:
                    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='logits')
                    end_points[sc.name + '/logits'] = net
                    if spatial_squeeze:
                        # TODO: verify the squeeze axes before running.
                        net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
                        end_points[sc.name + '/spatial_squeeze'] = net
                    end_points['predictions'] = slim.softmax(net, scope='predictions')
                return net, end_points
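
Below is a hedged call sketch for resnet_v1_1d, not from the original repository. It assumes a Block container in the style of slim's resnet_utils.Block (scope, unit_fn, args) and assumes stack_blocks_dense_1d unpacks each args dict (including 'v') as keyword arguments of the unit function; the block sizes and num_classes are illustrative.

import collections
import tensorflow as tf
import tensorflow.contrib.slim as slim

# Assumed Block container, mirroring slim's resnet_utils.Block.
Block = collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])

inputs = tf.placeholder(tf.float32, [None, 1, 1024, 1])

# Two illustrative blocks; with s=None, resnet_v1_1d scopes identity_bottleneck_1d.
blocks = [
    Block('block1', identity_bottleneck_1d,
          [{'depth': 32, 'depth_bottleneck': 8, 'stride': 1, 'v': None}] * 2 +
          [{'depth': 32, 'depth_bottleneck': 8, 'stride': 2, 'v': None}]),
    Block('block2', identity_bottleneck_1d,
          [{'depth': 64, 'depth_bottleneck': 16, 'stride': 1, 'v': None}] * 2 +
          [{'depth': 64, 'depth_bottleneck': 16, 'stride': 2, 'v': None}]),
]

net, end_points = resnet_v1_1d(inputs, blocks, num_classes=5,
                               is_training=True, global_pool=True,
                               scope='resnet_v1_1d_sketch')
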
Example #3
def pre_bottleneck_1d(inputs,
                      depth,
                      depth_bottleneck,
                      stride,
                      v,
                      rate=1,
                      outputs_collections=None,
                      scope=None,
                      attention_module=None):
    """
    Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth, depth_bottleneck and stride come from the args of the preceding blocks list.
    depth: number of feature maps output by the third conv of this unit in a block.
    depth_bottleneck: number of feature maps output by the first two convs of this unit in a block.
    stride: stride used on the shortcut path to subsample the inputs / pre_act (the inputs after the
            BN layer; whether the BN path is taken depends on whether the input and output channel
            counts match), and also the stride of conv-2 in the unit.
    rate: An integer, rate for atrous convolution.
    outputs_collections: collection used to gather the end_points.
    scope: name of this unit.
    attention_module: SE-blocks or SESE-blocks
    """
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # add se
        if attention_module == 'se_block':
            residual = se_block(inputs,
                                name='se_block',
                                ratio=2 if inputs.get_shape()[-1] <= 8 else 8)
        # add sese
        elif attention_module == 'sese_block':
            # TODO: ratio still to be defined.
            residual = sese_block(
                input_feature=inputs,
                name='sese_block',
                v=v,
                ratio=2 if inputs.get_shape()[-1] <= 8 else 8)
        # no other block implemented
        else:
            residual = inputs
        # pre-activation
        preact = slim.batch_norm(inputs,
                                 activation_fn=tf.nn.relu,
                                 scope='preact')
        # shortcut fine-tune
        if depth == depth_in:
            shortcut = subsample_1d(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(preact,
                                   depth, [1, 1],
                                   stride=stride,
                                   normalizer_fn=None,
                                   activation_fn=None,
                                   scope='shortcut')
        # convs
        residual = slim.conv2d(residual,
                               depth_bottleneck, [1, 1],
                               stride=1,
                               scope='conv1')
        residual = conv1d_same(residual,
                               depth_bottleneck,
                               3,
                               stride,
                               rate=rate,
                               scope='conv2')
        residual = slim.conv2d(residual,
                               depth, [1, 1],
                               stride=1,
                               normalizer_fn=None,
                               activation_fn=None,
                               scope='conv3')
        # junction: add the shortcut and residual paths
        output = shortcut + residual

        return slim.utils.collect_named_outputs(outputs_collections, sc.name,
                                                output)
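
A minimal call sketch for pre_bottleneck_1d, again not from the original repository: it assumes a resnet-style arg_scope in which slim.conv2d defaults to batch norm plus ReLU (the unit itself disables both on conv3 and the shortcut); shapes and sizes are illustrative.

import tensorflow as tf
import tensorflow.contrib.slim as slim

inputs = tf.placeholder(tf.float32, [None, 1, 128, 16])

with slim.arg_scope([slim.conv2d],
                    normalizer_fn=slim.batch_norm,
                    activation_fn=tf.nn.relu):
    with slim.arg_scope([slim.batch_norm], is_training=True):
        # Channel count changes (16 -> 32), so the shortcut is a strided 1x1 conv
        # applied to the pre-activated input.
        net = pre_bottleneck_1d(inputs, depth=32, depth_bottleneck=8,
                                stride=2, v=None, scope='unit_1')
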
Example #4
def resnet_v2_1d(inputs,
                 blocks,
                 num_classes=None,
                 is_training=True,
                 global_pool=True,
                 output_stride=None,
                 include_root_block=True,
                 spatial_squeeze=True,
                 reuse=None,
                 scope=None,
                 s=None):
    """
    1-D implementation of resnet_v2; for more detail see tf/slim/.../nets/resnet_v2_discarded.py
    """
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # TODO: verify that slim.conv2d belongs in this arg_scope.
        with slim.arg_scope([
                slim.conv2d, standard_bottleneck_1d if s == 0 else
                pre_bottleneck_1d if s == 1 else identity_bottleneck_1d,
                stack_blocks_dense_1d
        ],
                            outputs_collections=end_points_collection):
            with slim.arg_scope(
                [slim.batch_norm],
                    is_training=is_training):  # set the training state for batch_norm separately
                net = inputs
                if include_root_block:
                    if output_stride is not None:
                        if output_stride % 4 != 0:
                            raise ValueError(
                                'The output_stride needs to be a multiple of 4.'
                            )
                        output_stride /= 4
                    # We do not include batch normalization or activation functions in
                    # conv1 because the first ResNet unit will perform these. Cf.
                    # Appendix of [2].
                    with slim.arg_scope([slim.conv2d],
                                        activation_fn=None,
                                        normalizer_fn=None):
                        net = conv1d_same(net,
                                          num_outputs=8,
                                          kernel_size=4,
                                          stride=2,
                                          scope='conv1')
                    net = slim.max_pool2d(net, [1, 3],
                                          stride=[1, 2],
                                          scope='pool1')
                net = stack_blocks_dense_1d(net, blocks, output_stride)
                # This is needed because the pre-activation variant does not have batch
                # normalization or activation functions in the residual unit output. See
                # Appendix of [2].
                net = slim.batch_norm(net,
                                      activation_fn=tf.nn.relu,
                                      scope='postnorm')
                # Convert end_points_collection into a dictionary of end_points.
                end_points = slim.utils.convert_collection_to_dict(
                    end_points_collection)

                if global_pool:
                    # Global average pooling.
                    net = tf.reduce_mean(
                        net, [1, 2], name='pool5',
                        keepdims=True)  # (batch, 1, width, channels) -> (batch, 1, 1, channels)
                    end_points['global_pool'] = net
                if num_classes:
                    net = slim.conv2d(net,
                                      num_classes, [1, 1],
                                      activation_fn=None,
                                      normalizer_fn=None,
                                      scope='logits')
                    end_points[sc.name + '/logits'] = net
                    # TODO: verify the squeeze below before running.
                    if spatial_squeeze:
                        net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
                        end_points[sc.name + '/spatial_squeeze'] = net
                    end_points['predictions'] = slim.softmax(
                        net, scope='predictions')
                return net, end_points
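
Finally, a hedged call sketch for resnet_v2_1d, not from the original repository, using the same assumed Block container as in the sketch after Example #2; s=1 selects pre_bottleneck_1d, matching the pre-activation units in the blocks, and all sizes are illustrative.

import collections
import tensorflow as tf
import tensorflow.contrib.slim as slim

# Same assumed Block container as in the earlier sketch.
Block = collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])

inputs = tf.placeholder(tf.float32, [None, 1, 1024, 1])

# One illustrative block built on the pre-activation unit.
blocks = [
    Block('block1', pre_bottleneck_1d,
          [{'depth': 32, 'depth_bottleneck': 8, 'stride': 1, 'v': None}] * 2 +
          [{'depth': 32, 'depth_bottleneck': 8, 'stride': 2, 'v': None}]),
]

net, end_points = resnet_v2_1d(inputs, blocks, num_classes=5,
                               is_training=True, global_pool=True,
                               spatial_squeeze=True, s=1,
                               scope='resnet_v2_1d_sketch')
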