Example 1
    def __init__(self, bn_eps, scale=0.2, activate=True):
        super(InceptionCUnit, self).__init__()
        self.activate = activate
        self.scale = scale
        in_channels = 1792

        self.branches = Concurrent()
        self.branches.add_module(
            "branch1",
            Conv1x1Branch(in_channels=in_channels,
                          out_channels=192,
                          bn_eps=bn_eps))
        self.branches.add_module(
            "branch2",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(192, 192, 192),
                          kernel_size_list=(1, (1, 3), (3, 1)),
                          strides_list=(1, 1, 1),
                          padding_list=(0, (0, 1), (1, 0)),
                          bn_eps=bn_eps))
        self.conv = conv1x1(in_channels=384,
                            out_channels=in_channels,
                            bias=True)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)
Example 2
    def __init__(self, in_channels, out_channels, kernel_sizes, scale_factors,
                 use_residual, bn_eps):
        super(ESPBlock, self).__init__()
        self.use_residual = use_residual
        groups = len(kernel_sizes)

        mid_channels = int(out_channels / groups)
        res_channels = out_channels - groups * mid_channels

        self.conv = conv1x1(in_channels=in_channels,
                            out_channels=mid_channels,
                            groups=groups)

        self.c_shuffle = ChannelShuffle(channels=mid_channels, groups=groups)

        self.branches = Concurrent()
        for i in range(groups):
            out_channels_i = (mid_channels +
                              res_channels) if i == 0 else mid_channels
            self.branches.add_module(
                "branch{}".format(i + 1),
                SBBlock(in_channels=mid_channels,
                        out_channels=out_channels_i,
                        kernel_size=kernel_sizes[i],
                        scale_factor=scale_factors[i],
                        bn_eps=bn_eps))

        self.preactiv = PreActivation(in_channels=out_channels, bn_eps=bn_eps)
Example 3
    def __init__(self, bn_eps):
        super(InceptionAUnit, self).__init__()
        self.scale = 0.17
        in_channels = 256

        self.branches = Concurrent()
        self.branches.add_module(
            "branch1",
            Conv1x1Branch(in_channels=in_channels,
                          out_channels=32,
                          bn_eps=bn_eps))
        self.branches.add_module(
            "branch2",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(32, 32),
                          kernel_size_list=(1, 3),
                          strides_list=(1, 1),
                          padding_list=(0, 1),
                          bn_eps=bn_eps))
        self.branches.add_module(
            "branch3",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(32, 32, 32),
                          kernel_size_list=(1, 3, 3),
                          strides_list=(1, 1, 1),
                          padding_list=(0, 1, 1),
                          bn_eps=bn_eps))
        self.conv = conv1x1(in_channels=96,
                            out_channels=in_channels,
                            bias=True)
        self.activ = nn.ReLU(inplace=True)
Example 4
    def __init__(self, bn_eps):
        super(ReductionBUnit, self).__init__()
        in_channels = 896

        self.branches = Concurrent()
        self.branches.add_module(
            "branch1",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(256, 384),
                          kernel_size_list=(1, 3),
                          strides_list=(1, 2),
                          padding_list=(0, 0),
                          bn_eps=bn_eps))
        self.branches.add_module(
            "branch2",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(256, 256),
                          kernel_size_list=(1, 3),
                          strides_list=(1, 2),
                          padding_list=(0, 0),
                          bn_eps=bn_eps))
        self.branches.add_module(
            "branch3",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(256, 256, 256),
                          kernel_size_list=(1, 3, 3),
                          strides_list=(1, 1, 2),
                          padding_list=(0, 1, 0),
                          bn_eps=bn_eps))
        self.branches.add_module("branch4", MaxPoolBranch())
Example 5
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(FastPyramidPooling, self).__init__(**kwargs)
        down_sizes = [1, 2, 3, 6]
        mid_channels = in_channels // 4

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(Identity(name="branch1"))
        for i, down_size in enumerate(down_sizes):
            self.branches.add(PoolingBranch(
                in_channels=in_channels,
                out_channels=mid_channels,
                in_size=in_size,
                down_size=down_size,
                data_format=data_format,
                name="branch{}".format(i + 2)))
        self.conv = conv1x1_block(
            in_channels=(in_channels * 2),
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
Example 6
class FastPyramidPooling(HybridBlock):
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size):
        super(FastPyramidPooling, self).__init__()
        down_sizes = [1, 2, 3, 6]
        mid_channels = in_channels // 4

        with self.name_scope():
            self.branches = Concurrent()
            self.branches.add(Identity())
            for down_size in down_sizes:
                self.branches.add(PoolingBranch(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    in_size=in_size,
                    down_size=down_size))
            self.conv = conv1x1_block(
                in_channels=(in_channels * 2),
                out_channels=out_channels)

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        x = self.conv(x)
        return x
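
To see the block end to end, here is a minimal Gluon usage sketch; it is illustrative only and assumes FastPyramidPooling and its helpers (Concurrent, Identity, PoolingBranch, conv1x1_block) are importable from the surrounding module, with arbitrary channel counts and input size.

import mxnet as mx

# Illustrative only: FastPyramidPooling is assumed to be in scope.
block = FastPyramidPooling(in_channels=128, out_channels=128, in_size=(32, 32))
block.initialize()
x = mx.nd.random.normal(shape=(1, 128, 32, 32))   # NCHW dummy feature map
y = block(x)    # identity + 4 pooled branches -> concat -> 1x1 conv
print(y.shape)  # (1, 128, 32, 32): 128 + 4 * 32 = 256 branch channels reduced to 128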
Example 7
    def __init__(self, in_channels, out_channels, in_size):
        super(FastPyramidPooling, self).__init__()
        down_sizes = [1, 2, 3, 6]
        mid_channels = in_channels // 4

        with self.name_scope():
            self.branches = Concurrent()
            self.branches.add(Identity())
            for down_size in down_sizes:
                self.branches.add(
                    PoolingBranch(in_channels=in_channels,
                                  out_channels=mid_channels,
                                  in_size=in_size,
                                  down_size=down_size))
            self.conv = conv1x1_block(in_channels=(in_channels * 2),
                                      out_channels=out_channels)
Example 8
class InceptionCUnit(nn.Module):
    """
    InceptionResNetV1 type Inception-C unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    scale : float, default 0.2
        Scale value for residual branch.
    activate : bool, default True
        Whether to activate the convolution block.
    """
    def __init__(self, bn_eps, scale=0.2, activate=True):
        super(InceptionCUnit, self).__init__()
        self.activate = activate
        self.scale = scale
        in_channels = 1792

        self.branches = Concurrent()
        self.branches.add_module(
            "branch1",
            Conv1x1Branch(in_channels=in_channels,
                          out_channels=192,
                          bn_eps=bn_eps))
        self.branches.add_module(
            "branch2",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(192, 192, 192),
                          kernel_size_list=(1, (1, 3), (3, 1)),
                          strides_list=(1, 1, 1),
                          padding_list=(0, (0, 1), (1, 0)),
                          bn_eps=bn_eps))
        self.conv = conv1x1(in_channels=384,
                            out_channels=in_channels,
                            bias=True)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        if self.activate:
            x = self.activ(x)
        return x
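
A quick shape check makes the residual behaviour of this unit concrete. The following is a minimal usage sketch, not code from the repository: it assumes InceptionCUnit and its helper blocks (Concurrent, Conv1x1Branch, ConvSeqBranch, conv1x1) are importable from the surrounding module, and the 8x8 spatial size is arbitrary.

import torch

# Illustrative only: InceptionCUnit is assumed to be in scope (e.g. imported
# from the module that defines the helpers above).
unit = InceptionCUnit(bn_eps=1e-5)   # in_channels is fixed at 1792
x = torch.randn(1, 1792, 8, 8)       # NCHW dummy feature map
y = unit(x)                          # branches -> 1x1 conv -> scale * out + x
assert y.shape == x.shape            # the residual unit preserves the shape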
Example 9
class FastPyramidPooling(nn.Layer):
    """
    Fast-SCNN specific fast pyramid pooling block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of 2 int or None
        Spatial size of input image.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(FastPyramidPooling, self).__init__(**kwargs)
        down_sizes = [1, 2, 3, 6]
        mid_channels = in_channels // 4

        self.branches = Concurrent(
            data_format=data_format,
            name="branches")
        self.branches.add(Identity(name="branch1"))
        for i, down_size in enumerate(down_sizes):
            self.branches.add(PoolingBranch(
                in_channels=in_channels,
                out_channels=mid_channels,
                in_size=in_size,
                down_size=down_size,
                data_format=data_format,
                name="branch{}".format(i + 2)))
        self.conv = conv1x1_block(
            in_channels=(in_channels * 2),
            out_channels=out_channels,
            data_format=data_format,
            name="conv")

    def call(self, x, training=None):
        x = self.branches(x, training=training)
        x = self.conv(x, training=training)
        return x
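
The same block in its TensorFlow form can be exercised analogously; this is an illustrative sketch assuming FastPyramidPooling and its helpers are importable from the surrounding module, with channels_last data and arbitrary sizes.

import tensorflow as tf

# Illustrative only: FastPyramidPooling is assumed to be in scope.
block = FastPyramidPooling(in_channels=128, out_channels=128, in_size=(32, 32))
x = tf.random.normal((1, 32, 32, 128))   # NHWC, matching channels_last
y = block(x, training=False)             # identity + 4 pooled branches -> concat -> 1x1 conv
print(y.shape)                           # (1, 32, 32, 128)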
Example 10
class InceptionBUnit(nn.Module):
    """
    InceptionResNetV1 type Inception-B unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, bn_eps):
        super(InceptionBUnit, self).__init__()
        self.scale = 0.10
        in_channels = 896

        self.branches = Concurrent()
        self.branches.add_module(
            "branch1",
            Conv1x1Branch(in_channels=in_channels,
                          out_channels=128,
                          bn_eps=bn_eps))
        self.branches.add_module(
            "branch2",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(128, 128, 128),
                          kernel_size_list=(1, (1, 7), (7, 1)),
                          strides_list=(1, 1, 1),
                          padding_list=(0, (0, 3), (3, 0)),
                          bn_eps=bn_eps))
        self.conv = conv1x1(in_channels=256,
                            out_channels=in_channels,
                            bias=True)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        x = self.activ(x)
        return x
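
As with the Inception-C unit, the channel bookkeeping is what makes the 1x1 projection work: the two branches emit 128 channels each, so the concatenated 256 channels feed the conv1x1 that restores 896. A minimal, illustrative usage sketch (InceptionBUnit assumed importable; 17x17 is an arbitrary spatial size):

import torch

# Illustrative only: InceptionBUnit is assumed to be in scope.
unit = InceptionBUnit(bn_eps=1e-5)   # in_channels is fixed at 896
x = torch.randn(1, 896, 17, 17)
y = unit(x)                          # x + 0.10 * conv(branches(x)), then ReLU
assert y.shape == x.shape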
Example 11
class ReductionAUnit(nn.Module):
    """
    InceptionResNetV1 type Reduction-A unit.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, bn_eps):
        super(ReductionAUnit, self).__init__()
        in_channels = 256

        self.branches = Concurrent()
        self.branches.add_module(
            "branch1",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(384, ),
                          kernel_size_list=(3, ),
                          strides_list=(2, ),
                          padding_list=(0, ),
                          bn_eps=bn_eps))
        self.branches.add_module(
            "branch2",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(192, 192, 256),
                          kernel_size_list=(1, 3, 3),
                          strides_list=(1, 1, 2),
                          padding_list=(0, 1, 0),
                          bn_eps=bn_eps))
        self.branches.add_module("branch3", MaxPoolBranch())

    def forward(self, x):
        x = self.branches(x)
        return x
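
The reduction unit, in contrast, changes both the channel count and the spatial size: the stride-2 branches produce 384 + 256 channels, and the max-pool branch passes the 256 input channels through, giving 896 outputs, which matches the in_channels of the Inception-B unit above. An illustrative usage sketch (ReductionAUnit assumed importable; the input size is arbitrary and the exact output resolution depends on MaxPoolBranch):

import torch

# Illustrative only: ReductionAUnit is assumed to be in scope.
unit = ReductionAUnit(bn_eps=1e-5)   # in_channels is fixed at 256
x = torch.randn(1, 256, 35, 35)
y = unit(x)                          # 384 + 256 + 256 = 896 output channels
print(y.shape)                       # spatial size roughly halved, e.g. (1, 896, 17, 17)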
Example 12
    def __init__(self, bn_eps):
        super(InceptionBUnit, self).__init__()
        self.scale = 0.10
        in_channels = 896

        self.branches = Concurrent()
        self.branches.add_module(
            "branch1",
            Conv1x1Branch(in_channels=in_channels,
                          out_channels=128,
                          bn_eps=bn_eps))
        self.branches.add_module(
            "branch2",
            ConvSeqBranch(in_channels=in_channels,
                          out_channels_list=(128, 128, 128),
                          kernel_size_list=(1, (1, 7), (7, 1)),
                          strides_list=(1, 1, 1),
                          padding_list=(0, (0, 3), (3, 0)),
                          bn_eps=bn_eps))
        self.conv = conv1x1(in_channels=256,
                            out_channels=in_channels,
                            bias=True)
        self.activ = nn.ReLU(inplace=True)
Example 13
class ESPBlock(nn.Module):
    """
    ESP block, which is based on the following principle: Reduce -> Split -> Transform -> Merge.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_sizes : list of int
        Convolution window sizes for the branches.
    scale_factors : list of int
        Scale factors for the branches.
    use_residual : bool
        Whether to use residual connection.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, in_channels, out_channels, kernel_sizes, scale_factors,
                 use_residual, bn_eps):
        super(ESPBlock, self).__init__()
        self.use_residual = use_residual
        groups = len(kernel_sizes)

        mid_channels = int(out_channels / groups)
        res_channels = out_channels - groups * mid_channels

        self.conv = conv1x1(in_channels=in_channels,
                            out_channels=mid_channels,
                            groups=groups)

        self.c_shuffle = ChannelShuffle(channels=mid_channels, groups=groups)

        self.branches = Concurrent()
        for i in range(groups):
            out_channels_i = (mid_channels +
                              res_channels) if i == 0 else mid_channels
            self.branches.add_module(
                "branch{}".format(i + 1),
                SBBlock(in_channels=mid_channels,
                        out_channels=out_channels_i,
                        kernel_size=kernel_sizes[i],
                        scale_factor=scale_factors[i],
                        bn_eps=bn_eps))

        self.preactiv = PreActivation(in_channels=out_channels, bn_eps=bn_eps)

    def forward(self, x):
        if self.use_residual:
            identity = x

        x = self.conv(x)
        x = self.c_shuffle(x)
        x = self.branches(x)

        if self.use_residual:
            x = identity + x

        x = self.preactiv(x)
        return x
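
For the ESP block, the split arithmetic determines the branch widths: with groups = len(kernel_sizes), each branch gets out_channels // groups channels and the first branch also absorbs the remainder, so the concatenation sums back to out_channels. An illustrative usage sketch (ESPBlock and its helpers, SBBlock, ChannelShuffle, PreActivation and conv1x1, assumed importable; channel counts, kernel sizes and scale factors are arbitrary):

import torch

# Illustrative only: ESPBlock is assumed to be in scope.
block = ESPBlock(in_channels=48, out_channels=48,
                 kernel_sizes=[3, 5], scale_factors=[1, 2],
                 use_residual=True, bn_eps=1e-3)
x = torch.randn(1, 48, 56, 56)
y = block(x)                 # reduce (grouped 1x1) -> shuffle -> parallel SBBlocks -> merge
assert y.shape == x.shape    # with use_residual and in == out channels, shape is preserved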