Example #1
class PyramidPooling(HybridBlock):
    """
    Pyramid Pooling module.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 **kwargs):
        super(PyramidPooling, self).__init__(**kwargs)
        pool_out_sizes = [1, 2, 3, 6]
        assert (len(pool_out_sizes) == 4)
        assert (in_channels % 4 == 0)
        mid_channels = in_channels // 4

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(Identity())
            for pool_out_size in pool_out_sizes:
                self.branches.add(PyramidPoolingBranch(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    pool_out_size=pool_out_size,
                    upscale_out_size=upscale_out_size))

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
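A minimal usage sketch for the block above (illustrative, not part of the original source; it assumes mxnet plus the module's own imports such as HybridConcurrent, Identity and PyramidPoolingBranch are available). HybridConcurrent(axis=1) feeds the same input to every child and concatenates the outputs along the channel axis, so the identity branch keeps the 512 input channels and each of the four pooling branches adds 128 more.

import mxnet as mx

block = PyramidPooling(in_channels=512, upscale_out_size=(60, 60))  # hypothetical sizes
block.initialize()
y = block(mx.nd.zeros((1, 512, 60, 60)))
print(y.shape)  # expected: (1, 1024, 60, 60)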
Example #2
class _InceptionResnetC(_InceptionResnetBlock):  # Too many arguments (13/5)
    def __init__(
        self,
        name,
        in_ch,
        ch_0_0=192,
        ch_1_0=128,
        ch_1_1=224,
        ch_1_2=256,
        ch=2144,
        bn_mom=0.9,
        act_type="relu",
        res_scale_fac=0.2,
        use_se=True,
        shortcut=True,
    ):
        """
        Definition of the InceptionResnetC block

        :param name: name prefix for all blocks
        :param ch_0_0: Number of channels for 1st conv operation in branch 0
        :param ch_1_0: Number of channels for 1st conv operation in branch 1
        :param ch_1_1: Number of channels for 2nd conv operation in branch 1
        :param ch_1_2: Number of channels for 3rd conv operation in branch 1
        :param ch: Number of channels for conv operation after concatenating branches (no act is applied here)
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param res_scale_fac: Constant multiply scalar which is applied to the residual activations maps
        """
        super(_InceptionResnetC, self).__init__(name, ch, res_scale_fac, act_type, bn_mom, use_se, shortcut)
        self.res_scale_fac = res_scale_fac
        self.block_name = name
        self.body = HybridSequential(prefix="")
        self.branches = HybridConcurrent(axis=1, prefix="")  # entry point for all branches
        # branch 0 of block type C
        self.b_0 = HybridSequential()
        self.b_0.add(Conv2D(channels=ch_0_0, kernel_size=(1, 1), prefix="%s_0_conv0" % name, in_channels=in_ch))
        self.b_0.add(get_act(act_type, prefix="%s_0_%s0" % (name, act_type)))
        # branch 1 of block type C
        self.b_1 = HybridSequential()
        self.b_1.add(Conv2D(channels=ch_1_0, kernel_size=(1, 1), prefix="%s_1_conv0" % name, in_channels=in_ch))
        self.b_1.add(get_act(act_type, prefix="%s_2_%s0" % (name, act_type)))
        self.b_1.add(
            Conv2D(channels=ch_1_1, kernel_size=(1, 3), padding=(0, 1), prefix="%s_1_conv1" % name, in_channels=ch_1_0)
        )
        self.b_1.add(get_act(act_type, prefix="%s_2_%s1" % (name, act_type)))
        self.b_1.add(
            Conv2D(channels=ch_1_2, kernel_size=(3, 1), padding=(1, 0), prefix="%s_1_conv2" % name, in_channels=ch_1_1)
        )
        self.b_1.add(get_act(act_type, prefix="%s_1_%s2" % (name, act_type)))
        # concatenate all branches and add them to the body
        self.branches.add(self.b_0)
        self.branches.add(self.b_1)
        self.body.add(self.branches)
        # apply a single CNN layer without activation function
        self.body.add(
            Conv2D(
                channels=ch, kernel_size=(1, 1), prefix="%s_conv0" % name, in_channels=ch_0_0 + ch_1_2, use_bias=False
            )
        )
Example #3
class DepthWiseASPP(nn.HybridBlock):
    """
    Depth-wise ASPP block.
    """

    def __init__(self, channels, atrous_rates, in_channels, norm_layer=nn.BatchNorm,
                 norm_kwargs=None, activation='relu'):
        super(DepthWiseASPP, self).__init__()
        with self.name_scope():
            self.concurrent = HybridConcurrent(axis=1)
            self.concurrent.add(ConvBlock(channels, 1, in_channels=in_channels,
                                          norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                                          activation=activation))
            for rate in atrous_rates:
                self.concurrent.add(ConvBlock(channels, 3, 1, padding=rate, dilation=rate,
                                              groups=in_channels, in_channels=in_channels,
                                              norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                                              activation=activation))
            self.conv1x1 = ConvBlock(channels, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                                     activation=activation)

    def hybrid_forward(self, F, x, *args, **kwargs):
        x = self.concurrent(x)
        x = self.conv1x1(x)
        return x
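Channel bookkeeping for the block above, assuming ConvBlock emits exactly the requested number of output channels: the concurrent container concatenates one 1x1 branch and len(atrous_rates) dilated depth-wise branches along axis 1, so conv1x1 receives channels * (1 + len(atrous_rates)) channels and projects them back down to channels.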
Example #4
class DenseLayer(gluon.HybridBlock):
    def __init__(self, bn_size, growth_rate, dropout, **kwargs):
        super(DenseLayer, self).__init__(**kwargs)

        self.layer = nn.HybridSequential()
        self.out = HybridConcurrent(axis=1)
        with self.name_scope():
            self.layer.add(nn.BatchNorm())
            self.layer.add(nn.Activation('relu'))
            self.layer.add(
                nn.Conv2D(bn_size * growth_rate, kernel_size=1,
                          use_bias=False))
            self.layer.add(nn.BatchNorm())
            self.layer.add(nn.Activation('relu'))
            self.layer.add(
                nn.Conv2D(growth_rate,
                          kernel_size=3,
                          padding=1,
                          use_bias=False))
            if dropout:
                self.layer.add(nn.Dropout(dropout))
            self.out.add(Identity())
            self.out.add(self.layer)

    def hybrid_forward(self, F, x):
        return self.out(x)
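The Identity() + HybridConcurrent(axis=1) pairing above is the DenseNet concatenation pattern: the input passes through unchanged and is concatenated with the newly computed features, so every call grows the channel count by growth_rate. A quick shape check (illustrative only; it assumes the snippet's own imports, i.e. mxnet.gluon's nn and the contrib HybridConcurrent/Identity, are in place):

import mxnet as mx

layer = DenseLayer(bn_size=4, growth_rate=32, dropout=0.0)
layer.initialize()
y = layer(mx.nd.zeros((2, 64, 56, 56)))
print(y.shape)  # expected: (2, 96, 56, 56), i.e. 64 input channels + 32 new ones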
Example #5
class TwoWayCBlock(HybridBlock):
    """
    PolyNet type Inception-C block.

    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self, bn_use_global_stats, **kwargs):
        super(TwoWayCBlock, self).__init__(**kwargs)
        in_channels = 2048

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(192, 224, 256),
                              kernel_size_list=(1, (1, 3), (3, 1)),
                              strides_list=(1, 1, 1),
                              padding_list=(0, (0, 1), (1, 0)),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=192,
                              bn_use_global_stats=bn_use_global_stats))
            self.conv = conv1x1_block(in_channels=448,
                                      out_channels=in_channels,
                                      bn_use_global_stats=bn_use_global_stats,
                                      activate=False)

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        x = self.conv(x)
        return x
Example #6
class PolyBlock4a(HybridBlock):
    """
    PolyNet type Mixed-4a block.

    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self, bn_use_global_stats, **kwargs):
        super(PolyBlock4a, self).__init__(**kwargs)
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(
                ConvSeqBranch(in_channels=160,
                              out_channels_list=(64, 96),
                              kernel_size_list=(1, 3),
                              strides_list=(1, 1),
                              padding_list=(0, 0),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=160,
                              out_channels_list=(64, 64, 64, 96),
                              kernel_size_list=(1, (7, 1), (1, 7), 3),
                              strides_list=(1, 1, 1, 1),
                              padding_list=(0, (3, 0), (0, 3), 0),
                              bn_use_global_stats=bn_use_global_stats))

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
Example #7
def _make_dense_layer(bits, bits_a, growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    if bn_size == 0:
        # no bottleneck
        new_features.add(nn.QActivation(bits=bits_a))
        new_features.add(
            nn.QConv2D(growth_rate, bits=bits, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
    else:
        # bottleneck design
        new_features.add(nn.BatchNorm())
        new_features.add(nn.QActivation(bits=bits_a))
        new_features.add(
            nn.QConv2D(bn_size * growth_rate, bits=bits, kernel_size=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
        new_features.add(nn.QActivation(bits=bits_a))
        new_features.add(
            nn.QConv2D(growth_rate, bits=bits, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)

    return out
Example #8
    def _add_dense_block(self, dilation):
        new_features = nn.HybridSequential(prefix='')

        def _add_conv_block(layer):
            new_features.add(nn.BatchNorm())
            new_features.add(layer)
            if self.dropout:
                new_features.add(nn.Dropout(self.dropout))

        if self.bn_size == 0:
            # no bottleneck
            _add_conv_block(
                nn.activated_conv(self.growth_rate,
                                  kernel_size=3,
                                  padding=dilation,
                                  dilation=dilation))
        else:
            # bottleneck design
            _add_conv_block(
                nn.activated_conv(self.bn_size * self.growth_rate,
                                  kernel_size=1))
            _add_conv_block(
                nn.activated_conv(self.growth_rate, kernel_size=3, padding=1))

        self.num_features += self.growth_rate

        dense_block = HybridConcurrent(axis=1, prefix='')
        dense_block.add(Identity())
        dense_block.add(new_features)
        self.current_stage.add(dense_block)
Example #9
def _make_dense_layer(growth_rate, bn_size, dropout, dilation):
    new_features = nn.HybridSequential(prefix='')
    if bn_size == 0:
        # no bottleneck
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(growth_rate,
                              kernel_size=3,
                              padding=dilation,
                              dilation=dilation))
        if dropout:
            new_features.add(nn.Dropout(dropout))
    else:
        # bottleneck design
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(bn_size * growth_rate, kernel_size=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(growth_rate, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)

    return out
Example #10
class InceptBlock5a(HybridBlock):
    """
    InceptionV4 type Mixed-5a block.

    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptBlock5a, self).__init__(**kwargs)
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv3x3Branch(
                in_channels=192,
                out_channels=192,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
Example #11
def _make_fire(squeeze_channels, expand1x1_channels, expand3x3_channels):
    out = nn.HybridSequential(prefix='')
    out.add(_make_fire_conv(squeeze_channels, 1))

    paths = HybridConcurrent(axis=1, prefix='')
    paths.add(_make_fire_conv(expand1x1_channels, 1))
    paths.add(_make_fire_conv(expand3x3_channels, 3, 1))
    out.add(paths)

    return out
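_make_fire_conv is not shown in this snippet; in the Gluon SqueezeNet model it is essentially a convolution followed by a ReLU. A sketch of that helper, stated as an assumption rather than the exact upstream code:

def _make_fire_conv(channels, kernel_size, padding=0):
    out = nn.HybridSequential(prefix='')
    out.add(nn.Conv2D(channels, kernel_size, padding=padding))
    out.add(nn.Activation('relu'))
    return out

With such a helper, the fire module squeezes the input through a 1x1 convolution and then expands it through parallel 1x1 and 3x3 paths whose outputs HybridConcurrent concatenates along the channel axis.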
Example #12
def _make_D():
    block = HybridConcurrent(axis=1)

    block.add(_make_branch(None, (192, 1, None, None), (320, 3, 2, None)))

    block.add(
        _make_branch(None, (192, 1, None, None), (192, (1, 5), None, (0, 2)),
                     (192, (5, 1), None, (2, 0)), (192, 3, 2, None)))

    return block
Example #13
class InceptionBUnit(HybridBlock):
    """
    InceptionResNetV1 type Inception-B unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        self.scale = 0.10

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=out_channels_list[0],
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[1:4],
                kernel_size_list=(1, (1, 7), (7, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 3), (3, 0)),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            conv_in_channels = out_channels_list[0] + out_channels_list[3]
            self.conv = conv1x1(
                in_channels=conv_in_channels,
                out_channels=in_channels,
                use_bias=True)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        x = self.activ(x)
        return x
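The forward pass above is the Inception-ResNet residual pattern: the two branch outputs are concatenated (out_channels_list[0] + out_channels_list[3] channels), projected back to in_channels by the 1x1 convolution, scaled by 0.1 and added to the identity before the final ReLU. Keeping the projection output equal to in_channels is what makes the element-wise addition with the input valid.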
Example #14
class _InceptionResnetB(_InceptionResnetBlock):

    def __init__(self, name, in_ch,
                 ch_0_0=192,
                 ch_1_0=128, ch_1_1=160, ch_1_2=192,
                 ch=1152,
                 bn_mom=0.9, act_type='relu', res_scale_fac=0.2, use_se=True, shortcut=True):
        """
        Definition of the InceptionResnetB block

        :param name: name prefix for all blocks
        :param ch_0_0: Number of channels for 1st conv operation in branch 0
        :param ch_1_0: Number of channels for 1st conv operation in branch 1
        :param ch_1_1: Number of channels for 2nd conv operation in branch 1
        :param ch_1_2: Number of channels for 3rd conv operation in branch 1
        :param ch: Number of channels for conv operation after concatenating branches (no act is applied here)
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param res_scale_fac: Constant multiply scalar which is applied to the residual activations maps
        """
        super(_InceptionResnetB, self).__init__(name, ch, res_scale_fac, act_type, bn_mom, use_se, shortcut)

        self.body = HybridSequential(prefix='')

        # entry point for all branches
        self.branches = HybridConcurrent(axis=1, prefix='')

        # branch 0 of block type B
        self.b_0 = HybridSequential()
        self.b_0.add(Conv2D(channels=ch_0_0, kernel_size=(1, 1), prefix='%s_0_conv0' % name, in_channels=in_ch))
        self.b_0.add(get_act(act_type, prefix='%s_0_%s0' % (name, act_type)))

        # branch 1 of block type B
        self.b_1 = HybridSequential()
        self.b_1.add(Conv2D(channels=ch_1_0, kernel_size=(1, 1), prefix='%s_1_conv0' % name, in_channels=in_ch))
        self.b_1.add(get_act(act_type, prefix='%s_2_%s0' % (name, act_type)))
        # self.b_1.add(Conv2D(channels=ch_1_1, kernel_size=(1, 7), padding=(0, 3), prefix='%s_1_conv1' % name, in_channels=ch_1_0))
        self.b_1.add(
            Conv2D(channels=ch_1_1, kernel_size=(1, 5), padding=(0, 2), prefix='%s_1_conv1' % name, in_channels=ch_1_0))
        self.b_1.add(get_act(act_type, prefix='%s_2_%s1' % (name, act_type)))
        # self.b_1.add(Conv2D(channels=ch_1_2, kernel_size=(7, 1), padding=(3, 0), prefix='%s_1_conv2' % name, in_channels=ch_1_1))
        self.b_1.add(
            Conv2D(channels=ch_1_2, kernel_size=(5, 1), padding=(2, 0), prefix='%s_1_conv2' % name, in_channels=ch_1_1))
        self.b_1.add(get_act(act_type, prefix='%s_1_%s2' % (name, act_type)))

        # concatenate all branches and add them to the body
        self.branches.add(self.b_0)
        self.branches.add(self.b_1)
        self.body.add(self.branches)

        # apply a single CNN layer without activation function
        self.body.add(Conv2D(channels=ch, kernel_size=(1, 1), prefix='%s_conv0' % name, in_channels=ch_0_0 + ch_1_2,
                             use_bias=False))
Example #15
def _make_AB(pool_features, prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, (64, 1, None, None)))
        if 32 == pool_features:
            out.add(_make_branch(None, (64, 1, None, None), (64, 3, None, 1)))
        elif 64 == pool_features:
            out.add(_make_branch(None, (64, 1, None, None), (96, 3, None, 1)))
        out.add(
            _make_branch(None, (64, 1, None, None), (96, 3, None, 1),
                         (96, 3, None, 1)))
        out.add(_make_branch('avg', (pool_features, 1, None, None)))
    return out
Example #16
class InceptionCUnit(HybridBlock):
    """
    InceptionResNetV2 type Inception-C unit.

    Parameters:
    ----------
    scale : float, default 0.2
        Scale value for residual branch.
    activate : bool, default True
        Whether to apply the activation after the residual addition.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 scale=0.2,
                 activate=True,
                 bn_use_global_stats=False,
                 **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        self.activate = activate
        self.scale = scale
        in_channels = 2080

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=192,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(192, 224, 256),
                kernel_size_list=(1, (1, 3), (3, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 1), (1, 0)),
                bn_use_global_stats=bn_use_global_stats))
            self.conv = conv1x1(
                in_channels=448,
                out_channels=in_channels,
                use_bias=True)
            if self.activate:
                self.activ = nn.Activation('relu')

    def hybrid_forward(self, F, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        if self.activate:
            x = self.activ(x)
        return x
Example #17
class StemBlock(HybridBlock):
    """
    PeleeNet stem block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(StemBlock, self).__init__(**kwargs)
        mid1_channels = out_channels // 2
        mid2_channels = out_channels * 2

        with self.name_scope():
            self.first_conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                strides=2)

            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(PeleeBranch1(
                in_channels=out_channels,
                out_channels=out_channels,
                mid_channels=mid1_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(nn.MaxPool2D(
                pool_size=2,
                strides=2,
                padding=0))

            self.last_conv = conv1x1_block(
                in_channels=mid2_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        x = self.first_conv(x)
        x = self.branches(x)
        x = self.last_conv(x)
        return x
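Channel arithmetic for the stem above: first_conv produces out_channels, both branches (PeleeBranch1 and the stride-2 max pool) keep out_channels, so the concatenation yields mid2_channels = 2 * out_channels, which last_conv projects back to out_channels at one quarter of the input resolution (two stride-2 stages).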
Example #18
def _make_C(in_channels, channels_7x7, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(
            _make_branch(None, norm_layer, norm_kwargs,
                         (in_channels, 192, 1, None, None)))
        out.add(
            _make_branch(None, norm_layer, norm_kwargs,
                         (in_channels, channels_7x7, 1, None, None),
                         (channels_7x7, channels_7x7, (7, 1, 7), None, (3, 0, 3)),
                         (channels_7x7, 192, (7, 7, 1), None, (3, 3, 0))))
        out.add(
            _make_branch(None, norm_layer, norm_kwargs,
                         (in_channels, channels_7x7, 1, None, None),
                         (channels_7x7, channels_7x7, (7, 7, 1), None, (3, 3, 0)),
                         (channels_7x7, channels_7x7, (7, 1, 7), None, (3, 0, 3)),
                         (channels_7x7, channels_7x7, (7, 7, 1), None, (3, 3, 0)),
                         (channels_7x7, 192, (7, 1, 7), None, (3, 0, 3))))
        out.add(
            _make_branch('avg', norm_layer, norm_kwargs,
                         (in_channels, 192, 1, None, None)))
    return out
Example #19
class _RiseBlockB(_InceptionResnetBlock):

    def __init__(self, name, in_ch, ch, res_scale_fac, act_type, bn_mom, use_se, shortcut, pool_type):
        super(_RiseBlockB, self).__init__(name, ch, res_scale_fac, act_type, bn_mom, use_se, shortcut)

        self.body = HybridSequential(prefix='')

        # entry point for all branches
        self.branches = HybridConcurrent(axis=1, prefix='')

        ch_0_0 = 32
        ch_0_1 = 96
        ch_0_2 = 96

        ch_1_0 = 32
        ch_1_1 = 96
        ch_1_2 = 96

        ch_2_0 = 192

        with self.name_scope():
            # branch 0
            self.b_0 = HybridSequential()
            self.b_0.add(get_pool(pool_type, pool_size=(2, 2), strides=(2, 2)))
            self.b_0.add(Conv2D(channels=ch_0_0, kernel_size=(1, 1), in_channels=in_ch))
            self.b_0.add(get_act(act_type))
            self.b_0.add(
                Conv2D(channels=ch_0_1, kernel_size=(3, 1), padding=(0, 1), in_channels=ch_0_0, use_bias=False))
            self.b_0.add(
                Conv2D(channels=ch_0_2, kernel_size=(1, 3), padding=(1, 0), in_channels=ch_0_1, use_bias=False))
            self.b_0.add(_UpsampleBlock('upsample0', scale=2))

            # branch 1
            self.b_1 = HybridSequential()
            self.b_1.add(Conv2D(channels=ch_1_0, kernel_size=(1, 1), in_channels=in_ch))
            self.b_1.add(get_act(act_type))
            self.b_1.add(
                Conv2D(channels=ch_1_1, kernel_size=(3, 1), padding=(0, 1), in_channels=ch_1_0, use_bias=False))
            self.b_1.add(
                Conv2D(channels=ch_1_2, kernel_size=(1, 3), padding=(1, 0), in_channels=ch_1_1, use_bias=False))

            # branch 2
            self.b_2 = HybridSequential()
            self.b_2.add(Conv2D(channels=ch_2_0, kernel_size=(1, 1), in_channels=in_ch, use_bias=False))

            # concatenate all branches and add them to the body
            self.branches.add(self.b_0)
            self.branches.add(self.b_1)
            self.branches.add(self.b_2)
            self.body.add(self.branches)
Example #20
class InceptionBUnit(HybridBlock):
    """
    InceptionV3 type Inception-B unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of output channels in the 7x7 branches.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self, in_channels, out_channels, mid_channels, bn_epsilon,
                 bn_use_global_stats, **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        assert (in_channels == 768)
        assert (out_channels == 768)

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=192,
                              bn_epsilon=bn_epsilon,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(mid_channels, mid_channels,
                                                 192),
                              kernel_size_list=(1, (1, 7), (7, 1)),
                              strides_list=(1, 1, 1),
                              padding_list=(0, (0, 3), (3, 0)),
                              bn_epsilon=bn_epsilon,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(mid_channels, mid_channels,
                                                 mid_channels, mid_channels,
                                                 192),
                              kernel_size_list=(1, (7, 1), (1, 7), (7, 1),
                                                (1, 7)),
                              strides_list=(1, 1, 1, 1, 1),
                              padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
                              bn_epsilon=bn_epsilon,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                AvgPoolBranch(in_channels=in_channels,
                              out_channels=192,
                              bn_epsilon=bn_epsilon,
                              bn_use_global_stats=bn_use_global_stats))

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
Example #21
class _MPUnit(nn.HybridBlock):
    def __init__(self,
                 channels,
                 atrous_rates,
                 in_channels,
                 norm_layer=nn.BatchNorm,
                 norm_kwargs=None,
                 activation='prelu',
                 light=False,
                 **kwargs):
        super(_MPUnit, self).__init__()
        with self.name_scope():
            self.concurrent = HybridConcurrent(
                axis=1) if not light else HybridConcurrentSum()
            for rate in atrous_rates:
                self.concurrent.add(
                    ConvModule2d(channels,
                                 3,
                                 1,
                                 padding=rate,
                                 dilation=rate,
                                 groups=in_channels,
                                 in_channels=in_channels,
                                 norm_layer=norm_layer,
                                 norm_kwargs=norm_kwargs,
                                 activation=activation))
            if not light:
                self.concurrent.add(
                    ConvModule2d(channels,
                                 1,
                                 in_channels=in_channels,
                                 norm_layer=norm_layer,
                                 norm_kwargs=norm_kwargs,
                                 activation=activation))
                self.conv1x1 = ConvModule2d(channels,
                                            1,
                                            norm_layer=norm_layer,
                                            norm_kwargs=norm_kwargs,
                                            activation=activation)
            else:
                self.conv1x1 = None

    def hybrid_forward(self, F, x, *args, **kwargs):
        x = self.concurrent(x)
        if self.conv1x1:
            x = self.conv1x1(x)
        return x
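HybridConcurrentSum is not defined in this snippet; from its use in the 'light' configuration it evidently acts like HybridConcurrent but sums the branch outputs element-wise instead of concatenating them, which is why the trailing 1x1 projection is skipped in that case. A minimal sketch of such a container, under that assumption:

class HybridConcurrentSum(HybridConcurrent):
    """Feeds the same input to every child block and sums the outputs element-wise."""
    def hybrid_forward(self, F, x):
        outputs = [block(x) for block in self._children.values()]
        return F.add_n(*outputs)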
Example #22
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm(use_global_stats=True))
    new_features.add(nn.Activation('relu'))
    new_features.add(nn.Conv2D(bn_size*growth_rate,
                               kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm(use_global_stats=True))
    new_features.add(nn.Activation('relu'))
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3,
                               padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))
    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)
    return out
Example #23
def _make_dense_layer(growth_rate, bn_size, dropout, norm_layer, norm_kwargs):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
    new_features.add(nn.Activation('relu'))
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
    new_features.add(nn.Activation('relu'))
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)

    return out
Example #24
def _make_D():
    out = HybridConcurrent(axis=1, prefix='')
    with out.name_scope():
        out.add(_make_basic_conv(channels=352, kernel_size=1, strides=1))
        out.add(_make_branch(None, (192, 1, None, None), (320, 3, None, 1)))
        out.add(
            _make_branch(None, (160, 1, None, None), (224, 3, None, 1),
                         (224, 3, None, 1)))
        out.add(_make_branch('avg', (128, 1, None, None)))
    return out
Example #25
def _make_A(pool_features):
    block = HybridConcurrent(axis=1)

    block.add(_make_branch(None, (64, 1, None, None)))
    block.add(_make_branch(None, (48, 1, None, None), (64, 5, None, 2)))
    block.add(
        _make_branch(None, (64, 1, None, None), (96, 3, None, 1),
                     (96, 3, None, 1)))
    block.add(_make_branch('avg', (pool_features, 1, None, None)))

    return block
Example #26
class InceptionCUnit(HybridBlock):
    """
    InceptionV4 type Inception-C unit.

    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        in_channels = 1536

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=256,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeq3x3Branch(
                in_channels=in_channels,
                out_channels=256,
                mid_channels_list=(384,),
                kernel_size_list=(1,),
                strides_list=(1,),
                padding_list=(0,),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeq3x3Branch(
                in_channels=in_channels,
                out_channels=256,
                mid_channels_list=(384, 448, 512),
                kernel_size_list=(1, (3, 1), (1, 3)),
                strides_list=(1, 1, 1),
                padding_list=(0, (1, 0), (0, 1)),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(AvgPoolBranch(
                in_channels=in_channels,
                out_channels=256,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                count_include_pad=False))

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
Example #27
class ReductionBUnit(HybridBlock):
    """
    InceptionResNetV1 type Reduction-B unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[0:2],
                kernel_size_list=(1, 3),
                strides_list=(1, 2),
                padding_list=(0, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[2:4],
                kernel_size_list=(1, 3),
                strides_list=(1, 2),
                padding_list=(0, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[4:7],
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 2),
                padding_list=(0, 1, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
Example #28
    def __init__(self,
                 growth_rate,
                 dilation,
                 bn_size,
                 dropout,
                 replace_by_skip_connection=False,
                 **kwargs):
        super().__init__(**kwargs)
        self.growth_rate = growth_rate
        self.dilation = dilation
        self.bn_size = bn_size
        self.dropout = dropout
        new_feature_computation = nn.HybridSequential(prefix='')
        self.replace_by_skip_connection = replace_by_skip_connection
        if self.replace_by_skip_connection:
            self._add_conv_block(
                new_feature_computation,
                nn.activated_conv(self.growth_rate, kernel_size=1, padding=0))
        else:
            if self.bn_size == 0:
                # no bottleneck
                self._add_conv_block(
                    new_feature_computation,
                    nn.activated_conv(self.growth_rate,
                                      kernel_size=3,
                                      padding=dilation,
                                      dilation=dilation))
            else:
                # bottleneck design
                self._add_conv_block(
                    new_feature_computation,
                    nn.activated_conv(self.bn_size * self.growth_rate,
                                      kernel_size=1))
                self._add_conv_block(
                    new_feature_computation,
                    nn.activated_conv(self.growth_rate,
                                      kernel_size=3,
                                      padding=1))
        dense_block = HybridConcurrent(axis=1, prefix='')
        dense_block.add(Identity())
        dense_block.add(new_feature_computation)
        self.dense_block = dense_block
Example #29
class InceptionBUnit(HybridBlock):
    """
    InceptionResNetV2 type Inception-B unit.

    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        self.scale = 0.10
        in_channels = 1088

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=192,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(128, 160, 192),
                kernel_size_list=(1, (1, 7), (7, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 3), (3, 0)),
                bn_use_global_stats=bn_use_global_stats))
            self.conv = conv1x1(
                in_channels=384,
                out_channels=in_channels,
                use_bias=True)
            self.activ = nn.Activation('relu')

    def hybrid_forward(self, F, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        x = self.activ(x)
        return x
Example #30
def _make_dense_layer(growth_rate, bn_size, dropout, norm_layer, norm_kwargs):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(
        norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
    new_features.add(nn.Activation('relu'))
    new_features.add(
        nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(
        norm_layer(**({} if norm_kwargs is None else norm_kwargs)))
    new_features.add(nn.Activation('relu'))
    new_features.add(
        nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)

    return out
Example #31
class ReductionAUnit(HybridBlock):
    """
    InceptionResNetV2 type Reduction-A unit.

    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self, bn_use_global_stats, **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        in_channels = 320

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(384, ),
                              kernel_size_list=(3, ),
                              strides_list=(2, ),
                              padding_list=(0, ),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(256, 256, 384),
                              kernel_size_list=(1, 3, 3),
                              strides_list=(1, 1, 2),
                              padding_list=(0, 1, 0),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
Example #32
def _make_A(pool_features, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (64, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (48, 1, None, None),
                             (64, 5, None, 2)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, None, 1)))
        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (pool_features, 1, None, None)))
    return out
Example #33
def _make_C(channels_7x7, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (192, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0))))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (192, (1, 7), None, (0, 3))))
        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (192, 1, None, None)))
    return out
Example #34
def _make_B(prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (384, 3, 2, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, 2, None)))
        out.add(_make_branch('max', norm_layer, norm_kwargs))
    return out
Example #35
def _make_D(prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (192, 1, None, None),
                             (320, 3, 2, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (192, 1, None, None),
                             (192, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0)),
                             (192, 3, 2, None)))
        out.add(_make_branch('max', norm_layer, norm_kwargs))
    return out
Example #36
def test_concurrent():
    model = HybridConcurrent(axis=1)
    model.add(nn.Dense(128, activation='tanh', in_units=10))
    model.add(nn.Dense(64, activation='tanh', in_units=10))
    model.add(nn.Dense(32, in_units=10))
    model2 = Concurrent(axis=1)
    model2.add(nn.Dense(128, activation='tanh', in_units=10))
    model2.add(nn.Dense(64, activation='tanh', in_units=10))
    model2.add(nn.Dense(32, in_units=10))

    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7

    # ndarray
    model.initialize(mx.init.Xavier(magnitude=2.24))
    model2.initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 10)))
    x2 = model2(mx.nd.zeros((32, 10)))
    assert x.shape == (32, 224)
    assert x2.shape == (32, 224)
    x.wait_to_read()
    x2.wait_to_read()
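The shape assertions follow from the concatenation semantics: the three Dense layers map the (32, 10) input to 128, 64 and 32 units respectively, and HybridConcurrent/Concurrent stack these along axis 1, giving 128 + 64 + 32 = 224 features per sample. The seven symbol arguments are the data variable plus a weight and a bias for each of the three layers.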
Example #37
def _make_E(prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (320, 1, None, None)))

        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None, norm_layer, norm_kwargs,
                                    (384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                          (384, (1, 3), None, (0, 1))))
        branch_3x3_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                          (384, (3, 1), None, (1, 0))))
        branch_3x3.add(branch_3x3_split)

        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(_make_branch(None, norm_layer, norm_kwargs,
                                       (448, 1, None, None),
                                       (384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                             (384, (1, 3), None, (0, 1))))
        branch_3x3dbl_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                             (384, (3, 1), None, (1, 0))))

        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (192, 1, None, None)))
    return out
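Note the nested structure in _make_E: each of the two 3x3 branches ends in its own inner HybridConcurrent that runs the (1, 3) and (3, 1) convolutions in parallel on the same 384-channel input and concatenates the results, reproducing the split-branch topology of the final InceptionV3 stages.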