def block(inputs, depth, stride, rate=1, outputs_collections=None, scope=None):
    """Pre-activation residual unit (ResNet v2 style).

    Batch-normalizes and activates the input first, then sums a shortcut
    branch with a residual branch built from two 3x3 convolutions.

    Args:
        inputs: 4-D input tensor (rank checked via `last_dimension`).
        depth: Number of output channels of the unit.
        stride: Spatial stride applied to both branches.
        rate: Dilation rate for the first residual convolution.
        outputs_collections: Optional collection to register the output in.
        scope: Optional variable-scope name (defaults to 'block_v2').

    Returns:
        The unit's output tensor, also recorded in `outputs_collections`
        under the resolved scope name.
    """
    with tf.variable_scope(scope, 'block_v2', [inputs]) as sc:
        in_depth = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)

        # Pre-activation: BN + leaky ReLU applied before any convolution.
        preact = slim.batch_norm(
            inputs, activation_fn=tf.nn.leaky_relu, scope='preact')

        # Shortcut branch: 1x1 projection of the pre-activated input when
        # the channel count changes; otherwise subsample the raw input.
        if depth != in_depth:
            shortcut = slim.conv2d(
                preact, depth, [1, 1],
                stride=stride,
                normalizer_fn=None,
                activation_fn=None,
                scope='shortcut')
        else:
            shortcut = net_insight_lucky_utils.subsample(
                inputs, stride, 'shortcut')

        # Residual branch: strided 'same'-padded 3x3 conv, then a plain 3x3
        # conv with no norm/activation (the next unit pre-activates instead).
        residual = net_insight_lucky_utils.conv2d_same(
            preact, depth, 3, stride, rate=rate, scope='conv1')
        residual = slim.conv2d(
            residual, depth, [3, 3],
            stride=1,
            normalizer_fn=None,
            activation_fn=None,
            scope='conv2')

        return slim.utils.collect_named_outputs(
            outputs_collections, sc.name, shortcut + residual)
def resnet_v2_m(inputs,
                blocks,
                num_classes=None,
                is_training=True,
                return_raw=True,
                global_pool=True,
                output_stride=None,
                include_root_block=True,
                spatial_squeeze=True,
                reuse=None,
                scope=None):
    """Assemble a ResNet-v2-style network from a list of block definitions.

    Args:
        inputs: 4-D input tensor.
        blocks: Block descriptors consumed by
            `net_insight_lucky_utils.stack_blocks_dense`.
        num_classes: If set, append a 1x1 conv classifier head.
        is_training: Passed to batch norm.
        return_raw: If True, return right after the stacked blocks, before
            post-normalization / pooling / logits.
        global_pool: If True, spatially average-pool the features.
        output_stride: Desired ratio of input to output spatial resolution;
            must be a multiple of 4 when the root block is used.
        include_root_block: If True, prepend the root 3x3 conv.
        spatial_squeeze: If True, squeeze the logits' spatial dims.
        reuse: Variable-scope reuse flag.
        scope: Optional variable-scope name (defaults to 'resnet_v2').

    Returns:
        A `(net, end_points)` tuple.

    Raises:
        ValueError: If `output_stride` is not a multiple of 4 when the root
            block is included.
    """
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([
                slim.conv2d, bottleneck,
                net_insight_lucky_utils.stack_blocks_dense
        ],
                            outputs_collections=end_points_collection):
            with slim.arg_scope([slim.batch_norm], is_training=is_training):
                net = inputs
                if include_root_block:
                    if output_stride is not None:
                        if output_stride % 4 != 0:
                            raise ValueError(
                                'The output_stride needs to be a multiple of 4.'
                            )
                        # The root block consumes a factor-4 stride. Use floor
                        # division: `/=` would turn the int into a float in
                        # Python 3, and the remaining stride budget must stay
                        # an integer for the stride bookkeeping downstream.
                        output_stride //= 4
                    # Root conv carries no norm/activation; the first unit
                    # pre-activates instead (ResNet v2 convention).
                    with slim.arg_scope([slim.conv2d],
                                        activation_fn=None,
                                        normalizer_fn=None):
                        net = net_insight_lucky_utils.conv2d_same(
                            net, 64, 3, stride=1, scope='conv1')
                net = net_insight_lucky_utils.stack_blocks_dense(
                    net, blocks, output_stride)
                end_points = slim.utils.convert_collection_to_dict(
                    end_points_collection)
                if return_raw:
                    # Early out: raw features before postnorm/pool/logits.
                    return net, end_points
                # Final BN+ReLU because the last unit's output is un-activated.
                net = slim.batch_norm(net,
                                      activation_fn=tf.nn.relu,
                                      scope='postnorm')
                end_points[sc.name + '/postnorm'] = net

                if global_pool:
                    # Global average pooling; keep_dims retained for TF1 compat.
                    net = tf.reduce_mean(net, [1, 2],
                                         name='pool5',
                                         keep_dims=True)
                    end_points['global_pool'] = net

                if num_classes:
                    # 1x1 conv classifier head (no norm/activation on logits).
                    net = slim.conv2d(net,
                                      num_classes, [1, 1],
                                      activation_fn=None,
                                      normalizer_fn=None,
                                      scope='logits')
                    end_points[sc.name + '/logits'] = net
                    if spatial_squeeze:
                        net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
                        end_points[sc.name + '/spatial_squeeze'] = net
                    end_points['predictions'] = slim.softmax(
                        net, scope='predictions')
                return net, end_points