Example 1
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(FastPyramidPooling, self).__init__(**kwargs)
        down_sizes = [1, 2, 3, 6]
        mid_channels = in_channels // 4

        with self.name_scope():
            self.branches = Concurrent()
            self.branches.add(Identity())
            for down_size in down_sizes:
                self.branches.add(
                    PoolingBranch(in_channels=in_channels,
                                  out_channels=mid_channels,
                                  in_size=in_size,
                                  down_size=down_size,
                                  bn_use_global_stats=bn_use_global_stats,
                                  bn_cudnn_off=bn_cudnn_off))
            self.conv = conv1x1_block(in_channels=(in_channels * 2),
                                      out_channels=out_channels,
                                      bn_use_global_stats=bn_use_global_stats,
                                      bn_cudnn_off=bn_cudnn_off)
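The Identity() branch forwards the input feature map unchanged, while each of the four PoolingBranch instances emits in_channels // 4 channels, so the concatenation carries 2 * in_channels channels into the final conv1x1_block. A minimal sketch of that channel arithmetic using only stock Gluon blocks (the plain 1x1 convolutions below are stand-ins for PoolingBranch, so this is an illustration rather than the original module):

import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.contrib.nn import HybridConcurrent, Identity

in_channels = 64
branches = HybridConcurrent(axis=1)   # concatenate branch outputs along the channel axis
branches.add(Identity())              # pass the input through unchanged
for _ in range(4):                    # four branches of in_channels // 4 channels each
    branches.add(nn.Conv2D(in_channels // 4, kernel_size=1))
branches.initialize()

x = mx.nd.random.uniform(shape=(1, in_channels, 32, 32))
print(branches(x).shape)              # (1, 128, 32, 32): 64 identity + 4 * 16 = 2 * in_channels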
Example 2
def _make_dense_layer(growth_rate, bn_size, dropout, dilation):
    new_features = nn.HybridSequential(prefix='')
    if bn_size == 0:
        # no bottleneck
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(growth_rate,
                              kernel_size=3,
                              padding=dilation,
                              dilation=dilation))
        if dropout:
            new_features.add(nn.Dropout(dropout))
    else:
        # bottleneck design
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(bn_size * growth_rate, kernel_size=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(growth_rate, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)

    return out
Example 3
def _make_dense_layer(bits, bits_a, growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    if bn_size == 0:
        # no bottleneck
        new_features.add(nn.QActivation(bits=bits_a))
        new_features.add(
            nn.QConv2D(growth_rate, bits=bits, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
    else:
        # bottleneck design
        new_features.add(nn.BatchNorm())
        new_features.add(nn.QActivation(bits=bits_a))
        new_features.add(
            nn.QConv2D(bn_size * growth_rate, bits=bits, kernel_size=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
        new_features.add(nn.QActivation(bits=bits_a))
        new_features.add(
            nn.QConv2D(growth_rate, bits=bits, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)

    return out
Example 4
    def _add_dense_block(self, dilation):
        new_features = nn.HybridSequential(prefix='')

        def _add_conv_block(layer):
            new_features.add(nn.BatchNorm())
            new_features.add(layer)
            if self.dropout:
                new_features.add(nn.Dropout(self.dropout))

        if self.bn_size == 0:
            # no bottleneck
            _add_conv_block(
                nn.activated_conv(self.growth_rate,
                                  kernel_size=3,
                                  padding=dilation,
                                  dilation=dilation))
        else:
            # bottleneck design
            _add_conv_block(
                nn.activated_conv(self.bn_size * self.growth_rate,
                                  kernel_size=1))
            _add_conv_block(
                nn.activated_conv(self.growth_rate, kernel_size=3, padding=1))

        self.num_features += self.growth_rate

        dense_block = HybridConcurrent(axis=1, prefix='')
        dense_block.add(Identity())
        dense_block.add(new_features)
        self.current_stage.add(dense_block)
Example 5
 def __init__(self,
              in_feats,
              out_feats,
              num_heads,
              feat_drop=0.,
              attn_drop=0.,
              negative_slope=0.2,
              residual=False,
              activation=None,
              allow_zero_in_degree=False):
     super(GATConv, self).__init__()
     self._num_heads = num_heads
     self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
     self._in_feats = in_feats
     self._out_feats = out_feats
     self._allow_zero_in_degree = allow_zero_in_degree
     with self.name_scope():
         if isinstance(in_feats, tuple):
             self.fc_src = nn.Dense(out_feats * num_heads,
                                    use_bias=False,
                                    weight_initializer=mx.init.Xavier(
                                        magnitude=math.sqrt(2.0)),
                                    in_units=self._in_src_feats)
             self.fc_dst = nn.Dense(out_feats * num_heads,
                                    use_bias=False,
                                    weight_initializer=mx.init.Xavier(
                                        magnitude=math.sqrt(2.0)),
                                    in_units=self._in_dst_feats)
         else:
             self.fc = nn.Dense(out_feats * num_heads,
                                use_bias=False,
                                weight_initializer=mx.init.Xavier(
                                    magnitude=math.sqrt(2.0)),
                                in_units=in_feats)
         self.attn_l = self.params.get(
             'attn_l',
             shape=(1, num_heads, out_feats),
             init=mx.init.Xavier(magnitude=math.sqrt(2.0)))
         self.attn_r = self.params.get(
             'attn_r',
             shape=(1, num_heads, out_feats),
             init=mx.init.Xavier(magnitude=math.sqrt(2.0)))
         self.feat_drop = nn.Dropout(feat_drop)
         self.attn_drop = nn.Dropout(attn_drop)
         self.leaky_relu = nn.LeakyReLU(negative_slope)
         if residual:
             if in_feats != out_feats:
                 self.res_fc = nn.Dense(out_feats * num_heads,
                                        use_bias=False,
                                        weight_initializer=mx.init.Xavier(
                                            magnitude=math.sqrt(2.0)),
                                        in_units=in_feats)
             else:
                 self.res_fc = Identity()
         else:
             self.res_fc = None
         self.activation = activation
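When residual=True and in_feats equals out_feats, the residual projection is simply Identity(). A small usage sketch, assuming DGL is running on its MXNet backend (DGLBACKEND=mxnet) and using a toy three-node graph:

import dgl
import mxnet as mx
from dgl.nn.mxnet import GATConv

g = dgl.graph(([0, 1, 2], [1, 2, 0]))   # 3-node cycle, so every node has in-degree 1
conv = GATConv(in_feats=4, out_feats=4, num_heads=2, residual=True)
conv.initialize()
feat = mx.nd.random.uniform(shape=(3, 4))
out = conv(g, feat)
print(out.shape)                        # (3, 2, 4): (nodes, num_heads, out_feats)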
Example 6
    def __init__(self,
                 in_feats,
                 out_feats,
                 dim,
                 n_kernels,
                 aggregator_type='sum',
                 residual=False,
                 bias=True,
                 allow_zero_in_degree=False):
        super(GMMConv, self).__init__()

        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._dim = dim
        self._n_kernels = n_kernels
        self._allow_zero_in_degree = allow_zero_in_degree
        if aggregator_type == 'sum':
            self._reducer = fn.sum
        elif aggregator_type == 'mean':
            self._reducer = fn.mean
        elif aggregator_type == 'max':
            self._reducer = fn.max
        else:
            raise KeyError(
                "Aggregator type {} not recognized.".format(aggregator_type))

        with self.name_scope():
            self.mu = self.params.get('mu',
                                      shape=(n_kernels, dim),
                                      init=mx.init.Normal(0.1))
            self.inv_sigma = self.params.get('inv_sigma',
                                             shape=(n_kernels, dim),
                                             init=mx.init.Constant(1))
            self.fc = nn.Dense(
                n_kernels * out_feats,
                in_units=self._in_src_feats,
                use_bias=False,
                weight_initializer=mx.init.Xavier(magnitude=math.sqrt(2.0)))
            if residual:
                if self._in_dst_feats != out_feats:
                    self.res_fc = nn.Dense(out_feats,
                                           in_units=self._in_dst_feats,
                                           use_bias=False)
                else:
                    self.res_fc = Identity()
            else:
                self.res_fc = None

            if bias:
                self.bias = self.params.get('bias',
                                            shape=(out_feats, ),
                                            init=mx.init.Zero())
            else:
                self.bias = None
Example 7
    def __init__(self, in_channels, out_channels, in_size):
        super(FastPyramidPooling, self).__init__()
        down_sizes = [1, 2, 3, 6]
        mid_channels = in_channels // 4

        with self.name_scope():
            self.branches = Concurrent()
            self.branches.add(Identity())
            for down_size in down_sizes:
                self.branches.add(
                    PoolingBranch(in_channels=in_channels,
                                  out_channels=mid_channels,
                                  in_size=in_size,
                                  down_size=down_size))
            self.conv = conv1x1_block(in_channels=(in_channels * 2),
                                      out_channels=out_channels)
Example 8
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm(use_global_stats=True))
    new_features.add(nn.Activation('relu'))
    new_features.add(nn.Conv2D(bn_size*growth_rate,
                               kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm(use_global_stats=True))
    new_features.add(nn.Activation('relu'))
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3,
                               padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))
    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)
    return out
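Because HybridConcurrent stacks the Identity() branch and the new features along axis 1, each dense layer grows the channel count by exactly growth_rate. A quick hedged check, reusing the _make_dense_layer defined above together with the imports it assumes (mxnet.gluon.nn plus HybridConcurrent/Identity from mxnet.gluon.contrib.nn):

import mxnet as mx

layer = _make_dense_layer(growth_rate=32, bn_size=4, dropout=0.0)
layer.initialize()
x = mx.nd.random.uniform(shape=(1, 64, 28, 28))
print(layer(x).shape)   # expected (1, 64 + 32, 28, 28): input channels plus growth_rate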
Example 9
    def __init__(self, in_channels, upscale_out_size, **kwargs):
        super(PyramidPooling, self).__init__(**kwargs)
        pool_out_sizes = [1, 2, 3, 6]
        assert (len(pool_out_sizes) == 4)
        assert (in_channels % 4 == 0)
        mid_channels = in_channels // 4

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Identity())
            for pool_out_size in pool_out_sizes:
                self.branches.add(
                    PyramidPoolingBranch(in_channels=in_channels,
                                         out_channels=mid_channels,
                                         pool_out_size=pool_out_size,
                                         upscale_out_size=upscale_out_size))
Example 10
    def __init__(self, in_channels, out_channels, strides, width, scale,
                 bn_use_global_stats, **kwargs):
        super(Res2NetUnit, self).__init__(**kwargs)
        self.scale = scale
        downsample = (strides != 1)
        self.resize_identity = (in_channels != out_channels) or downsample
        mid_channels = width * scale
        brn_channels = width

        with self.name_scope():
            self.reduce_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.branches = HierarchicalConcurrent(axis=1,
                                                   multi_input=True,
                                                   prefix='')
            if downsample:
                self.branches.add(
                    conv1x1(in_channels=brn_channels,
                            out_channels=brn_channels,
                            strides=strides))
            else:
                self.branches.add(Identity())
            for i in range(scale - 1):
                self.branches.add(
                    conv3x3(in_channels=brn_channels,
                            out_channels=brn_channels,
                            strides=strides))
            self.merge_conv = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None,
                activate=False)
            self.preactiv = PreActivation(in_channels=out_channels)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None,
                    activate=False)
            self.activ = nn.Activation("relu")
Example 11
    def __init__(self, in_channels_list, out_channels_list, num_modules,
                 num_branches, num_subblocks, bn_use_global_stats, **kwargs):
        super(HRStage, self).__init__(**kwargs)
        self.branches = num_branches
        self.in_channels_list = out_channels_list
        in_branches = len(in_channels_list)
        out_branches = len(out_channels_list)

        with self.name_scope():
            self.transition = nn.HybridSequential(prefix="")
            for i in range(out_branches):
                if i < in_branches:
                    if out_channels_list[i] != in_channels_list[i]:
                        self.transition.add(
                            conv3x3_block(
                                in_channels=in_channels_list[i],
                                out_channels=out_channels_list[i],
                                strides=1,
                                bn_use_global_stats=bn_use_global_stats))
                    else:
                        self.transition.add(Identity())
                else:
                    conv3x3_seq = nn.HybridSequential(
                        prefix="conv3x3_seq{}_".format(i + 1))
                    for j in range(i + 1 - in_branches):
                        in_channels_i = in_channels_list[-1]
                        out_channels_i = (out_channels_list[i]
                                          if j == i - in_branches
                                          else in_channels_i)
                        conv3x3_seq.add(
                            conv3x3_block(
                                in_channels=in_channels_i,
                                out_channels=out_channels_i,
                                strides=2,
                                bn_use_global_stats=bn_use_global_stats))
                    self.transition.add(conv3x3_seq)

            self.layers = DualPathSequential(prefix="")
            for i in range(num_modules):
                self.layers.add(
                    HRBlock(in_channels_list=self.in_channels_list,
                            out_channels_list=out_channels_list,
                            num_branches=num_branches,
                            num_subblocks=num_subblocks,
                            bn_use_global_stats=bn_use_global_stats))
                self.in_channels_list = self.layers[-1].in_channels_list
Example 12
def darts_skip_connection(channels, strides):
    """
    DARTS specific skip connection layer.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    """
    assert (channels > 0)
    if strides == 1:
        return Identity()
    else:
        assert (strides == 2)
        return DartsReduceBranch(in_channels=channels,
                                 out_channels=channels,
                                 strides=strides)
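For strides == 1 the skip connection is literally a no-op; only the stride-2 case needs the DartsReduceBranch defined elsewhere in the project. A short sketch of the stride-1 path (Identity() is parameter-free, so no initialization is needed):

import mxnet as mx

skip = darts_skip_connection(channels=64, strides=1)
x = mx.nd.random.uniform(shape=(1, 64, 32, 32))
y = skip(x)
print((y.asnumpy() == x.asnumpy()).all())   # True: the input passes through unchanged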
Example 13
 def __init__(self,
              growth_rate,
              dilation,
              bn_size,
              dropout,
              replace_by_skip_connection=False,
              **kwargs):
     super().__init__(**kwargs)
     self.growth_rate = growth_rate
     self.dilation = dilation
     self.bn_size = bn_size
     self.dropout = dropout
     new_feature_computation = nn.HybridSequential(prefix='')
     self.replace_by_skip_connection = replace_by_skip_connection
     if self.replace_by_skip_connection:
         self._add_conv_block(
             new_feature_computation,
             nn.activated_conv(self.growth_rate, kernel_size=1, padding=0))
     else:
         if self.bn_size == 0:
             # no bottleneck
             self._add_conv_block(
                 new_feature_computation,
                 nn.activated_conv(self.growth_rate,
                                   kernel_size=3,
                                   padding=dilation,
                                   dilation=dilation))
         else:
             # bottleneck design
             self._add_conv_block(
                 new_feature_computation,
                 nn.activated_conv(self.bn_size * self.growth_rate,
                                   kernel_size=1))
             self._add_conv_block(
                 new_feature_computation,
                 nn.activated_conv(self.growth_rate,
                                   kernel_size=3,
                                   padding=1))
     dense_block = HybridConcurrent(axis=1, prefix='')
     dense_block.add(Identity())
     dense_block.add(new_feature_computation)
     self.dense_block = dense_block
Example 14
def _make_dense_layer(growth_rate, bn_size, dropout, norm_layer, norm_kwargs):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(
        norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
    new_features.add(nn.Activation('relu'))
    new_features.add(
        nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(
        norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
    new_features.add(nn.Activation('relu'))
    new_features.add(
        nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)

    return out
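This variant takes the normalization layer as an argument, so it can be exercised directly with the stock nn.BatchNorm. A hedged usage sketch (same imports as above: mxnet.gluon.nn and HybridConcurrent/Identity from mxnet.gluon.contrib.nn):

import mxnet as mx
from mxnet.gluon import nn

layer = _make_dense_layer(growth_rate=16, bn_size=4, dropout=0.1,
                          norm_layer=nn.BatchNorm, norm_kwargs=None)
layer.initialize()
x = mx.nd.random.uniform(shape=(2, 48, 14, 14))
print(layer(x).shape)   # expected (2, 48 + 16, 14, 14)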
Example 15
    def __init__(self,
                 in_feats,
                 out_feats,
                 edge_func,
                 aggregator_type,
                 residual=False,
                 bias=True):
        super(NNConv, self).__init__()
        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        if aggregator_type == 'sum':
            self.reducer = fn.sum
        elif aggregator_type == 'mean':
            self.reducer = fn.mean
        elif aggregator_type == 'max':
            self.reducer = fn.max
        else:
            raise KeyError(
                'Aggregator type {} not recognized: '.format(aggregator_type))
        self._aggre_type = aggregator_type

        with self.name_scope():
            self.edge_nn = edge_func
            if residual:
                if self._in_dst_feats != out_feats:
                    self.res_fc = nn.Dense(out_feats,
                                           in_units=self._in_dst_feats,
                                           use_bias=False,
                                           weight_initializer=mx.init.Xavier())
                else:
                    self.res_fc = Identity()
            else:
                self.res_fc = None

            if bias:
                self.bias = self.params.get('bias',
                                            shape=(out_feats, ),
                                            init=mx.init.Zero())
            else:
                self.bias = None
Example 16
    def __init__(self, bn_size, growth_rate, dropout, **kwargs):
        super(DenseLayer, self).__init__(**kwargs)

        self.layer = nn.HybridSequential()
        self.out = HybridConcurrent(axis=1)
        with self.name_scope():
            self.layer.add(nn.BatchNorm())
            self.layer.add(nn.Activation('relu'))
            self.layer.add(
                nn.Conv2D(bn_size * growth_rate, kernel_size=1,
                          use_bias=False))
            self.layer.add(nn.BatchNorm())
            self.layer.add(nn.Activation('relu'))
            self.layer.add(
                nn.Conv2D(growth_rate,
                          kernel_size=3,
                          padding=1,
                          use_bias=False))
            if dropout:
                self.layer.add(nn.Dropout(dropout))
            self.out.add(Identity())
            self.out.add(self.layer)
Example 17
def test_identity():
    model = Identity()
    x = mx.nd.random.uniform(shape=(128, 33, 64))
    assert_almost_equal(model(x), x)
Example 18
    def __init__(self,
                 direct_channels,
                 skip_channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(FishNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        depth = len(direct_channels[0])
        down1_channels = direct_channels[0]
        up_channels = direct_channels[1]
        down2_channels = direct_channels[2]
        skip1_channels = skip_channels[0]
        skip2_channels = skip_channels[1]

        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            self.features.add(
                SEInitBlock(in_channels=in_channels,
                            out_channels=init_block_channels,
                            bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels

            down1_seq = nn.HybridSequential(prefix='')
            skip1_seq = nn.HybridSequential(prefix='')
            for i in range(depth + 1):
                skip1_channels_list = skip1_channels[i]
                if i < depth:
                    skip1_seq.add(
                        SkipUnit(in_channels=in_channels,
                                 out_channels_list=skip1_channels_list,
                                 bn_use_global_stats=bn_use_global_stats))
                    down1_channels_list = down1_channels[i]
                    down1_seq.add(
                        DownUnit(in_channels=in_channels,
                                 out_channels_list=down1_channels_list,
                                 bn_use_global_stats=bn_use_global_stats))
                    in_channels = down1_channels_list[-1]
                else:
                    skip1_seq.add(
                        SkipAttUnit(in_channels=in_channels,
                                    out_channels_list=skip1_channels_list,
                                    bn_use_global_stats=bn_use_global_stats))
                    in_channels = skip1_channels_list[-1]

            up_seq = nn.HybridSequential(prefix='')
            skip2_seq = nn.HybridSequential(prefix='')
            for i in range(depth + 1):
                skip2_channels_list = skip2_channels[i]
                if i > 0:
                    in_channels += skip1_channels[depth - i][-1]
                if i < depth:
                    skip2_seq.add(
                        SkipUnit(in_channels=in_channels,
                                 out_channels_list=skip2_channels_list,
                                 bn_use_global_stats=bn_use_global_stats))
                    up_channels_list = up_channels[i]
                    dilation = 2**i
                    up_seq.add(
                        UpUnit(in_channels=in_channels,
                               out_channels_list=up_channels_list,
                               dilation=dilation,
                               bn_use_global_stats=bn_use_global_stats))
                    in_channels = up_channels_list[-1]
                else:
                    skip2_seq.add(Identity())

            down2_seq = nn.HybridSequential(prefix='')
            for i in range(depth):
                down2_channels_list = down2_channels[i]
                down2_seq.add(
                    DownUnit(in_channels=in_channels,
                             out_channels_list=down2_channels_list,
                             bn_use_global_stats=bn_use_global_stats))
                in_channels = (down2_channels_list[-1] +
                               skip2_channels[depth - 1 - i][-1])

            self.features.add(
                SesquialteralHourglass(down1_seq=down1_seq,
                                       skip1_seq=skip1_seq,
                                       up_seq=up_seq,
                                       skip2_seq=skip2_seq,
                                       down2_seq=down2_seq))
            self.features.add(
                FishFinalBlock(in_channels=in_channels,
                               bn_use_global_stats=bn_use_global_stats))
            in_channels = in_channels // 2
            self.features.add(nn.AvgPool2D(pool_size=7, strides=1))

            self.output = nn.HybridSequential(prefix='')
            self.output.add(
                conv1x1(in_channels=in_channels,
                        out_channels=classes,
                        use_bias=True))
            self.output.add(nn.Flatten())
Example 19
def test_identity():
    model = Identity()
    x = mx.nd.random.uniform(shape=(128, 33, 64))
    mx.test_utils.assert_almost_equal(model(x).asnumpy(),
                                      x.asnumpy())
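Both tests rely on Identity() being parameter-free, so it can be called without initialize() and returns its input for tensors of any shape. A minimal hedged variant:

import mxnet as mx
from mxnet.gluon.contrib.nn import Identity

model = Identity()
for shape in [(3,), (2, 5), (4, 3, 8, 8)]:
    x = mx.nd.random.uniform(shape=shape)
    assert (model(x).asnumpy() == x.asnumpy()).all()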
Example 20
def _make_residual(cell_net):
    out = HybridConcurrent(axis=1, prefix='')
    # add the original channels back in, so the final out_channel = in_channel + net.out_channel
    out.add(Identity())
    out.add(cell_net)
    return out
Example 21
    def __init__(self, in_channels_list, out_channels_list, num_branches,
                 num_subblocks, bn_use_global_stats, **kwargs):
        super(HRBlock, self).__init__(**kwargs)
        self.in_channels_list = in_channels_list
        self.num_branches = num_branches

        with self.name_scope():
            self.branches = nn.HybridSequential(prefix="")
            for i in range(num_branches):
                layers = nn.HybridSequential(prefix="branch{}_".format(i + 1))
                in_channels_i = self.in_channels_list[i]
                out_channels_i = out_channels_list[i]
                for j in range(num_subblocks[i]):
                    layers.add(
                        ResUnit(in_channels=in_channels_i,
                                out_channels=out_channels_i,
                                strides=1,
                                bottleneck=False,
                                bn_use_global_stats=bn_use_global_stats))
                    in_channels_i = out_channels_i
                self.in_channels_list[i] = out_channels_i
                self.branches.add(layers)

            if num_branches > 1:
                self.fuse_layers = nn.HybridSequential(prefix="")
                for i in range(num_branches):
                    fuse_layer = nn.HybridSequential(
                        prefix="fuselayer{}_".format(i + 1))
                    with fuse_layer.name_scope():
                        for j in range(num_branches):
                            if j > i:
                                fuse_layer.add(
                                    UpSamplingBlock(
                                        in_channels=in_channels_list[j],
                                        out_channels=in_channels_list[i],
                                        bn_use_global_stats=bn_use_global_stats,
                                        scale_factor=2**(j - i)))
                            elif j == i:
                                fuse_layer.add(Identity())
                            else:
                                conv3x3_seq = nn.HybridSequential(
                                    prefix="conv3x3seq{}_".format(j + 1))
                                with conv3x3_seq.name_scope():
                                    for k in range(i - j):
                                        if k == i - j - 1:
                                            conv3x3_seq.add(
                                                conv3x3_block(
                                                    in_channels=in_channels_list[j],
                                                    out_channels=in_channels_list[i],
                                                    strides=2,
                                                    activation=None,
                                                    bn_use_global_stats=bn_use_global_stats))
                                        else:
                                            conv3x3_seq.add(
                                                conv3x3_block(
                                                    in_channels=in_channels_list[j],
                                                    out_channels=in_channels_list[j],
                                                    strides=2,
                                                    bn_use_global_stats=bn_use_global_stats))
                                fuse_layer.add(conv3x3_seq)
                    self.fuse_layers.add(fuse_layer)
                self.activ = nn.Activation("relu")