Example 1
def _make_E(in_channels, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 320, 1, None, None)))

        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None, norm_layer, norm_kwargs,
                                    (in_channels, 384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                          (384, 384, (3, 1, 3), None, (1, 0, 1))))
        branch_3x3_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                          (384, 384, (3, 3, 1), None, (1, 1, 0))))
        branch_3x3.add(branch_3x3_split)

        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(_make_branch(None, norm_layer, norm_kwargs,
                                       (in_channels, 448, 1, None, None),
                                       (448, 384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                             (384, 384, (3, 1, 3), None, (1, 0, 1))))
        branch_3x3dbl_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                             (384, 384, (3, 3, 1), None, (1, 1, 0))))

        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (in_channels, 192, 1, None, None)))
    return out
Example 2
def _make_C(in_channels, channels_7x7, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 192, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, channels_7x7, 1, None, None),
                             (channels_7x7, channels_7x7, (7, 1, 7), None, (3, 0, 3)),
                             (channels_7x7, 192, (7, 7, 1), None, (3, 3, 0))))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, channels_7x7, 1, None, None),
                             (channels_7x7, channels_7x7, (7, 7, 1), None, (3, 3, 0)),
                             (channels_7x7, channels_7x7, (7, 1, 7), None, (3, 0, 3)),
                             (channels_7x7, channels_7x7, (7, 7, 1), None, (3, 3, 0)),
                             (channels_7x7, 192, (7, 1, 7), None, (3, 0, 3))))
        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (in_channels, 192, 1, None, None)))
    return out
Example 3
def _make_E(prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (320, 1, None, None)))

        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None, norm_layer, norm_kwargs,
                                    (384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                          (384, (1, 3), None, (0, 1))))
        branch_3x3_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                          (384, (3, 1), None, (1, 0))))
        branch_3x3.add(branch_3x3_split)

        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(_make_branch(None, norm_layer, norm_kwargs,
                                       (448, 1, None, None),
                                       (384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                             (384, (1, 3), None, (0, 1))))
        branch_3x3dbl_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                             (384, (3, 1), None, (1, 0))))

        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (192, 1, None, None)))
    return out
def _make_E(prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, (320, 1, None, None)))

        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None, (384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3_split.add(_make_branch(None, (384, (1, 3), None, (0, 1))))
        branch_3x3_split.add(_make_branch(None, (384, (3, 1), None, (1, 0))))
        branch_3x3.add(branch_3x3_split)

        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(
            _make_branch(None, (448, 1, None, None), (384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (1, 3), None, (0, 1))))
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (3, 1), None, (1, 0))))

        out.add(_make_branch('avg', (192, 1, None, None)))
    return out
def _make_B(prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, (384, 3, 2, None)))
        out.add(
            _make_branch(None, (64, 1, None, None), (96, 3, None, 1),
                         (96, 3, 2, None)))
        out.add(_make_branch('max'))
    return out
def _make_D(prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, (192, 1, None, None), (320, 3, 2, None)))
        out.add(
            _make_branch(None, (192, 1, None, None),
                         (192, (1, 7), None, (0, 3)),
                         (192, (7, 1), None, (3, 0)), (192, 3, 2, None)))
        out.add(_make_branch('max'))
    return out
def _make_A(pool_features, prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, (64, 1, None, None)))
        out.add(_make_branch(None, (48, 1, None, None), (64, 5, None, 2)))
        out.add(
            _make_branch(None, (64, 1, None, None), (96, 3, None, 1),
                         (96, 3, None, 1)))
        out.add(_make_branch('avg', (pool_features, 1, None, None)))
    return out
def _make_D():
    out = HybridConcurrent(axis=1, prefix='')
    with out.name_scope():
        out.add(_make_basic_conv(channels=352, kernel_size=1, strides=1))
        out.add(_make_branch(None, (192, 1, None, None), (320, 3, None, 1)))
        out.add(
            _make_branch(None, (160, 1, None, None), (224, 3, None, 1),
                         (224, 3, None, 1)))
        out.add(_make_branch('avg', (128, 1, None, None)))
    return out
Example 9
def _make_B(prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (384, 3, 2, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, 2, None)))
        out.add(_make_branch('max', norm_layer, norm_kwargs))
    return out
Example 10
def _make_B(prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (288, 384, 3, 2, (1, 0, 0))))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (288, 64, 1, None, None),
                             (64, 96, 3, None, 1),
                             (96, 96, 3, 2, (1, 0, 0))))
        out.add(_make_branch('max', norm_layer, norm_kwargs))
    return out
Example 11
def _make_D(prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (192, 1, None, None),
                             (320, 3, 2, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (192, 1, None, None),
                             (192, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0)),
                             (192, 3, 2, None)))
        out.add(_make_branch('max', norm_layer, norm_kwargs))
    return out
Example 12
def _make_D(prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (768, 192, 1, None, None),
                             (192, 320, 3, 2, (1, 0, 0))))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (768, 192, 1, None, None),
                             (192, 192, (7, 1, 7), None, (3, 0, 3)),
                             (192, 192, (7, 7, 1), None, (3, 3, 0)),
                             (192, 192, 3, 2, (1, 0, 0))))
        out.add(_make_branch('max', norm_layer, norm_kwargs))
    return out
def _make_Mixed_4a(in_channels, channels, pool_features, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, channels, int(channels*3), 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, channels, int(channels*1.5), 1, None, None),
                             (int(channels*1.5), int(channels*3.25), 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, channels, int(channels/4), 1, None, None),
                             (int(channels/4), int(channels*0.75), 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, channels, pool_features, 1, None, None)))
    return out
Example 14
def _make_Mixed_4b(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 160, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 112, 1, None, None),
                             (112, 224, 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 24, 1, None, None),
                             (24, 64, 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
Example 15
def _make_Mixed_4a(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 192, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 96, 1, None, None),
                             (96, 208, 3, None, 1)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 16, 1, None, None),
                             (16, 48, 3, None, 1)))
        out.add(_make_branch('max', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
Example 16
def _make_mixed_5b(prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        # branch 0
        out.add(_make_branch(None, (256, 1, None, None)))
        # branch 1
        out.add(_make_branch(None, (160, 1, None, None), (320, 3, None, 1)))
        # branch 2
        out.add(_make_branch(None, (32, 1, None, None), (128, 3, None, 1)))
        # branch 3
        out.add(_make_branch('max', (128, 1, None, None)))
    return out
Example 17
def _make_A(pool_features, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (64, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (48, 1, None, None),
                             (64, 5, None, 2)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, None, 1)))
        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (pool_features, 1, None, None)))
    return out
Example 18
def _make_mixed_3c(prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    # axis=1 is still the channel axis for concatenation
    with out.name_scope():
        # branch 0
        out.add(_make_branch(None, (128, 1, None, None)))
        # branch 1
        out.add(_make_branch(None, (128, 1, None, None), (192, 3, None, 1)))

        # branch 2
        out.add(_make_branch(None, (32, 1, None, None), (96, 3, None, 1)))

        # branch 3
        out.add(_make_branch('max', (64, 1, None, None)))
    return out
Example 19
def _make_A(in_channels, pool_features, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 64, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 48, 1, None, None),
                             (48, 64, 5, None, 2)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 64, 1, None, None),
                             (64, 96, 3, None, 1),
                             (96, 96, 3, None, 1)))
        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (in_channels, pool_features, 1, None, None)))
    return out
Example 20
def _make_mixed_4b(prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        # branch 0
        out.add(_make_branch(None, (192, 1, None, None)))
        # branch 1
        out.add(_make_branch(None, (96, 1, None, None), (208, 3, None, 1)))
        # branch 2
        out.add(_make_branch(None, (16, 1, None, None), (48, 3, None, 1)))
        # branch 3
        out.add(_make_branch('max', (64, 1, None, None)))
    return out
def _make_C(channels_7x7, prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, (192, 1, None, None)))
        out.add(
            _make_branch(None, (channels_7x7, 1, None, None),
                         (channels_7x7, (1, 7), None, (0, 3)),
                         (192, (7, 1), None, (3, 0))))
        out.add(
            _make_branch(None, (channels_7x7, 1, None, None),
                         (channels_7x7, (7, 1), None, (3, 0)),
                         (channels_7x7, (1, 7), None, (0, 3)),
                         (channels_7x7, (7, 1), None, (3, 0)),
                         (192, (1, 7), None, (0, 3))))
        out.add(_make_branch('avg', (192, 1, None, None)))
    return out
Example 22
def _make_mixed_3b(prefix):

    out = HybridConcurrent(axis=1, prefix=prefix)

    with out.name_scope():
        # branch 0
        out.add(_make_branch(None, (64, 1, None, None)))
        # (1, 64, 35, 107, 107)

        # branch 1
        out.add(_make_branch(None, (96, 1, None, None), (128, 3, None, 1)))
        # branch 2
        out.add(_make_branch(None, (16, 1, None, None), (32, 3, None, 1)))
        # branch 3
        out.add(_make_branch('max', (32, 1, None, None)))

    return out
Example 23
def _make_C(channels_7x7, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (192, 1, None, None)))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0))))
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (192, (1, 7), None, (0, 3))))
        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (192, 1, None, None)))
    return out
def _make_C(prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_2D("2D_"))
        out.add(_make_3D("3D_"))
    return out
Example 25
class ESPBlock(HybridBlock):
    """
    ESP block, which is based on the following principle: Reduce -> Split -> Transform -> Merge.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_sizes : list of int
        Convolution window size for branches.
    scale_factors : list of int
        Scale factor for branches.
    use_residual : bool
        Whether to use residual connection.
    in_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_epsilon : float
        Small float added to the variance in batch normalization.
    """
    def __init__(self, in_channels, out_channels, kernel_sizes, scale_factors,
                 use_residual, in_size, bn_epsilon, **kwargs):
        super(ESPBlock, self).__init__(**kwargs)
        self.use_residual = use_residual
        groups = len(kernel_sizes)

        # Split the output channels evenly across the branches; the first branch
        # also takes the remainder so the concatenated output has out_channels channels.
        mid_channels = int(out_channels / groups)
        res_channels = out_channels - groups * mid_channels

        with self.name_scope():
            self.conv = conv1x1(in_channels=in_channels,
                                out_channels=mid_channels,
                                groups=groups)

            self.c_shuffle = ChannelShuffle(channels=mid_channels,
                                            groups=groups)

            self.branches = HybridConcurrent(axis=1, prefix="")
            with self.branches.name_scope():
                for i in range(groups):
                    out_channels_i = (mid_channels +
                                      res_channels) if i == 0 else mid_channels
                    self.branches.add(
                        SBBlock(in_channels=mid_channels,
                                out_channels=out_channels_i,
                                kernel_size=kernel_sizes[i],
                                scale_factor=scale_factors[i],
                                size=in_size,
                                bn_epsilon=bn_epsilon))

            self.preactiv = PreActivation(in_channels=out_channels,
                                          bn_epsilon=bn_epsilon)

    def hybrid_forward(self, F, x):
        if self.use_residual:
            identity = x

        x = self.conv(x)
        x = self.c_shuffle(x)
        x = self.branches(x)

        if self.use_residual:
            x = identity + x

        x = self.preactiv(x)
        return x
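A minimal usage sketch for ESPBlock, assuming its helper layers (conv1x1, ChannelShuffle, SBBlock, PreActivation) are available from the defining module; the module name espnetv2 and all hyperparameter values below are illustrative assumptions, not taken from the source. With out_channels equal to in_channels and use_residual=True, the block should preserve the input shape.

import mxnet as mx

from espnetv2 import ESPBlock  # hypothetical import path for the class above

# groups = 4 branches; 32 output channels split evenly into 8 per branch.
block = ESPBlock(in_channels=32, out_channels=32,
                 kernel_sizes=[3, 5, 7, 9], scale_factors=[1, 2, 4, 8],
                 use_residual=True, in_size=(56, 56), bn_epsilon=1e-5)
block.initialize()

x = mx.nd.random.uniform(shape=(1, 32, 56, 56))  # NCHW input matching in_size
y = block(x)
print(y.shape)  # expected: (1, 32, 56, 56)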