Example #1
def _make_E(in_channels, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, norm_layer, norm_kwargs,
                             (in_channels, 320, 1, None, None)))

        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None, norm_layer, norm_kwargs,
                                    (in_channels, 384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                          (384, 384, (3, 1, 3), None, (1, 0, 1))))
        branch_3x3_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                          (384, 384, (3, 3, 1), None, (1, 1, 0))))
        branch_3x3.add(branch_3x3_split)

        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(_make_branch(None, norm_layer, norm_kwargs,
                                       (in_channels, 448, 1, None, None),
                                       (448, 384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                             (384, 384, (3, 1, 3), None, (1, 0, 1))))
        branch_3x3dbl_split.add(_make_branch(None, norm_layer, norm_kwargs,
                                             (384, 384, (3, 3, 1), None, (1, 1, 0))))

        out.add(_make_branch('avg', norm_layer, norm_kwargs,
                             (in_channels, 192, 1, None, None)))
    return out
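Example #1 comes from an inflated-3D (I3D) Inception v3 variant: each 5-tuple is (in_channels, channels, kernel_size, strides, padding) for a Conv3D, which is why the kernels and paddings are 3-element (temporal, height, width) tuples such as (3, 1, 3). The `_make_branch` helper is not shown; a minimal sketch of what these 3D snippets assume, modeled on GluonCV's i3d_inceptionv3.py (details may differ per repo):

from mxnet.gluon import nn

def _make_basic_conv(norm_layer, norm_kwargs, **kwargs):
    # Conv3D + normalization + ReLU; kwargs carries in_channels,
    # channels, kernel_size, strides and padding from the settings tuple.
    out = nn.HybridSequential(prefix='')
    out.add(nn.Conv3D(use_bias=False, **kwargs))
    out.add(norm_layer(epsilon=0.001,
                       **({} if norm_kwargs is None else norm_kwargs)))
    out.add(nn.Activation('relu'))
    return out

def _make_branch(use_pool, norm_layer, norm_kwargs, *conv_settings):
    out = nn.HybridSequential(prefix='')
    if use_pool == 'avg':
        out.add(nn.AvgPool3D(pool_size=3, strides=1, padding=1))
    elif use_pool == 'max':
        out.add(nn.MaxPool3D(pool_size=3, strides=2))
    setting_names = ['in_channels', 'channels', 'kernel_size',
                     'strides', 'padding']
    for setting in conv_settings:
        kwargs = {}
        for i, value in enumerate(setting):
            if value is not None:
                kwargs[setting_names[i]] = value
        out.add(_make_basic_conv(norm_layer, norm_kwargs, **kwargs))
    return out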
Example #2
def _make_E(prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, (320, 1, None, None)))

        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None, (384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3_split.add(_make_branch(None, (384, (1, 3), None, (0, 1))))
        branch_3x3_split.add(_make_branch(None, (384, (3, 1), None, (1, 0))))
        branch_3x3.add(branch_3x3_split)

        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(
            _make_branch(None, (448, 1, None, None), (384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (1, 3), None, (0, 1))))
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (3, 1), None, (1, 0))))

        out.add(_make_branch('avg', (192, 1, None, None)))
    return out
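Example #2 is the plain 2D version of the same block: each 4-tuple is (channels, kernel_size, strides, padding), and in_channels is inferred by Gluon. A minimal sketch of the matching `_make_branch` helper, modeled on the Gluon model zoo's inception.py (not shown in the snippets):

from mxnet.gluon import nn

def _make_basic_conv(**kwargs):
    # Conv2D + BatchNorm + ReLU building block.
    out = nn.HybridSequential(prefix='')
    out.add(nn.Conv2D(use_bias=False, **kwargs))
    out.add(nn.BatchNorm(epsilon=0.001))
    out.add(nn.Activation('relu'))
    return out

def _make_branch(use_pool, *conv_settings):
    out = nn.HybridSequential(prefix='')
    if use_pool == 'avg':
        out.add(nn.AvgPool2D(pool_size=3, strides=1, padding=1))
    elif use_pool == 'max':
        out.add(nn.MaxPool2D(pool_size=3, strides=2))
    setting_names = ['channels', 'kernel_size', 'strides', 'padding']
    for setting in conv_settings:
        kwargs = {}
        for i, value in enumerate(setting):
            if value is not None:
                kwargs[setting_names[i]] = value
        out.add(_make_basic_conv(**kwargs))
    return out

HybridConcurrent(axis=1) then feeds the same input to every branch and concatenates the results along the channel axis, so this `_make_E` emits 320 + 768 + 768 + 192 = 2048 channels.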
Example #3
    def __init__(self, bn_use_global_stats, **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        in_channels = 1536

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=256,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeq3x3Branch(in_channels=in_channels,
                                 out_channels=256,
                                 mid_channels_list=(384, ),
                                 kernel_size_list=(1, ),
                                 strides_list=(1, ),
                                 padding_list=(0, ),
                                 bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeq3x3Branch(in_channels=in_channels,
                                 out_channels=256,
                                 mid_channels_list=(384, 448, 512),
                                 kernel_size_list=(1, (3, 1), (1, 3)),
                                 strides_list=(1, 1, 1),
                                 padding_list=(0, (1, 0), (0, 1)),
                                 bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                AvgPoolBranch(in_channels=in_channels,
                              out_channels=256,
                              bn_use_global_stats=bn_use_global_stats))
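The channel arithmetic here only works out if `ConvSeq3x3Branch` ends in two parallel asymmetric convolutions, each emitting out_channels, so such a branch contributes 2 * out_channels: 256 + 2*256 + 2*256 + 256 = 1536, matching in_channels. A hedged reconstruction of that branch (the real class is defined elsewhere in the repo):

from mxnet.gluon import nn

class ConvSeq3x3Branch(nn.HybridBlock):
    # Sketch: a conv sequence, then parallel 1x3 and 3x1 convs whose
    # outputs are concatenated -> the branch emits 2 * out_channels.
    def __init__(self, in_channels, out_channels, mid_channels_list,
                 kernel_size_list, strides_list, padding_list,
                 bn_use_global_stats=False, **kwargs):
        super(ConvSeq3x3Branch, self).__init__(**kwargs)
        with self.name_scope():
            self.conv_list = nn.HybridSequential(prefix='')
            for mid_channels, kernel_size, strides, padding in zip(
                    mid_channels_list, kernel_size_list, strides_list,
                    padding_list):
                self.conv_list.add(nn.Conv2D(mid_channels, kernel_size,
                                             strides, padding,
                                             use_bias=False))
                self.conv_list.add(nn.BatchNorm(
                    use_global_stats=bn_use_global_stats))
                self.conv_list.add(nn.Activation('relu'))
            self.conv1x3 = nn.Conv2D(out_channels, (1, 3), padding=(0, 1))
            self.conv3x1 = nn.Conv2D(out_channels, (3, 1), padding=(1, 0))

    def hybrid_forward(self, F, x):
        x = self.conv_list(x)
        return F.concat(self.conv1x3(x), self.conv3x1(x), dim=1)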
Example #4
def _make_C(in_channels, channels_7x7, prefix, norm_layer, norm_kwargs):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(
            _make_branch(None, norm_layer, norm_kwargs,
                         (in_channels, 192, 1, None, None)))
        out.add(
            _make_branch(None, norm_layer, norm_kwargs,
                         (in_channels, channels_7x7, 1, None, None),
                         (channels_7x7, channels_7x7, (7, 1, 7), None, (3, 0, 3)),
                         (channels_7x7, 192, (7, 7, 1), None, (3, 3, 0))))
        out.add(
            _make_branch(None, norm_layer, norm_kwargs,
                         (in_channels, channels_7x7, 1, None, None),
                         (channels_7x7, channels_7x7, (7, 7, 1), None, (3, 3, 0)),
                         (channels_7x7, channels_7x7, (7, 1, 7), None, (3, 0, 3)),
                         (channels_7x7, channels_7x7, (7, 7, 1), None, (3, 3, 0)),
                         (channels_7x7, 192, (7, 1, 7), None, (3, 0, 3))))
        out.add(
            _make_branch('avg', norm_layer, norm_kwargs,
                         (in_channels, 192, 1, None, None)))
    return out
Example #5
    def __init__(self, bn_use_global_stats, **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        in_channels = 1024

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=384,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(192, 224, 256),
                              kernel_size_list=(1, (1, 7), (7, 1)),
                              strides_list=(1, 1, 1),
                              padding_list=(0, (0, 3), (3, 0)),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(192, 192, 224, 224, 256),
                              kernel_size_list=(1, (7, 1), (1, 7), (7, 1),
                                                (1, 7)),
                              strides_list=(1, 1, 1, 1, 1),
                              padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                AvgPoolBranch(in_channels=in_channels,
                              out_channels=128,
                              bn_use_global_stats=bn_use_global_stats))
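`ConvSeqBranch` appears in most of these snippets; it is simply a chain of conv+BN+ReLU blocks driven by parallel lists. Here the four branches concatenate to 384 + 256 + 256 + 128 = 1024 channels, matching in_channels. A minimal, hedged sketch (the actual class lives elsewhere in the repo):

from mxnet.gluon import nn

class ConvSeqBranch(nn.HybridBlock):
    # Sketch: sequential conv+BN+ReLU blocks, one per list entry.
    def __init__(self, in_channels, out_channels_list, kernel_size_list,
                 strides_list, padding_list, bn_use_global_stats=False,
                 **kwargs):
        super(ConvSeqBranch, self).__init__(**kwargs)
        assert len(out_channels_list) == len(kernel_size_list)
        assert len(out_channels_list) == len(strides_list)
        assert len(out_channels_list) == len(padding_list)
        with self.name_scope():
            self.conv_list = nn.HybridSequential(prefix='')
            for out_channels, kernel_size, strides, padding in zip(
                    out_channels_list, kernel_size_list, strides_list,
                    padding_list):
                self.conv_list.add(nn.Conv2D(out_channels, kernel_size,
                                             strides, padding,
                                             use_bias=False))
                self.conv_list.add(nn.BatchNorm(
                    use_global_stats=bn_use_global_stats))
                self.conv_list.add(nn.Activation('relu'))

    def hybrid_forward(self, F, x):
        return self.conv_list(x)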
Example #6
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 **kwargs):
        super(AtrousSpatialPyramidPooling, self).__init__(**kwargs)
        atrous_rates = [12, 24, 36]
        assert (in_channels % 8 == 0)
        mid_channels = in_channels // 8
        project_in_channels = 5 * mid_channels

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels))
            for atrous_rate in atrous_rates:
                self.branches.add(conv3x3_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    padding=atrous_rate,
                    dilation=atrous_rate))
            self.branches.add(ASPPAvgBranch(
                in_channels=in_channels,
                out_channels=mid_channels,
                upscale_out_size=upscale_out_size))
            self.conv = conv1x1_block(
                in_channels=project_in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.5)
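The constructor wires up five parallel branches (one 1x1, three dilated 3x3, one pooled), whose concatenation has 5 * mid_channels channels before the 1x1 projection. The forward pass is not shown; it presumably just chains the pieces, roughly:

    def hybrid_forward(self, F, x):
        x = self.branches(x)   # concat of the five branches: 5 * mid_channels
        x = self.conv(x)       # 1x1 projection back to mid_channels
        x = self.dropout(x)
        return x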
Example #7
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        in_channels = 384

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(384,),
                kernel_size_list=(3,),
                strides_list=(2,),
                padding_list=(0,),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(192, 224, 256),
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 2),
                padding_list=(0, 1, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())
Example #8
    def __init__(self, in_channels, out_channels, bn_use_global_stats,
                 **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        assert (out_channels == 2048)

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=320,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeq3x3Branch(in_channels=in_channels,
                                 out_channels_list=(384, ),
                                 kernel_size_list=(1, ),
                                 strides_list=(1, ),
                                 padding_list=(0, ),
                                 bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeq3x3Branch(in_channels=in_channels,
                                 out_channels_list=(448, 384),
                                 kernel_size_list=(1, 3),
                                 strides_list=(1, 1),
                                 padding_list=(0, 1),
                                 bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                AvgPoolBranch(in_channels=in_channels,
                              out_channels=192,
                              bn_use_global_stats=bn_use_global_stats))
Example #9
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        self.scale = 0.10
        in_channels = 1088

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=192,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(128, 160, 192),
                kernel_size_list=(1, (1, 7), (7, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 3), (3, 0)),
                bn_use_global_stats=bn_use_global_stats))
            self.conv = conv1x1(
                in_channels=384,
                out_channels=in_channels,
                use_bias=True)
            self.activ = nn.Activation('relu')
Example #10
def _make_dense_layer(bits, bits_a, growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    if bn_size == 0:
        # no bottleneck
        new_features.add(nn.QActivation(bits=bits_a))
        new_features.add(
            nn.QConv2D(growth_rate, bits=bits, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
    else:
        # bottleneck design
        new_features.add(nn.BatchNorm())
        new_features.add(nn.QActivation(bits=bits_a))
        new_features.add(
            nn.QConv2D(bn_size * growth_rate, bits=bits, kernel_size=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
        new_features.add(nn.QActivation(bits=bits_a))
        new_features.add(
            nn.QConv2D(growth_rate, bits=bits, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)

    return out
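The Identity() + HybridConcurrent pattern at the end is how these DenseNet-style layers express the dense skip: the block returns concat([x, new_features(x)], axis=1), growing the channel count by growth_rate. A toy, runnable illustration with ordinary layers (the quantized QConv2D/QActivation above come from a BMXNet-style extension and are not standard Gluon):

import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.contrib.nn import HybridConcurrent, Identity

growth_rate = 4
new_features = nn.HybridSequential(prefix='')
new_features.add(nn.BatchNorm())
new_features.add(nn.Activation('relu'))
new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1))

layer = HybridConcurrent(axis=1, prefix='')
layer.add(Identity())      # passes the input through unchanged
layer.add(new_features)    # computes the new feature maps
layer.initialize()

x = mx.nd.random.uniform(shape=(1, 8, 16, 16))
print(layer(x).shape)  # (1, 12, 16, 16): 8 passed-through + 4 new channels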
Example #11
    def _add_dense_block(self, dilation):
        new_features = nn.HybridSequential(prefix='')

        def _add_conv_block(layer):
            new_features.add(nn.BatchNorm())
            new_features.add(layer)
            if self.dropout:
                new_features.add(nn.Dropout(self.dropout))

        if self.bn_size == 0:
            # no bottleneck
            _add_conv_block(
                nn.activated_conv(self.growth_rate,
                                  kernel_size=3,
                                  padding=dilation,
                                  dilation=dilation))
        else:
            # bottleneck design
            _add_conv_block(
                nn.activated_conv(self.bn_size * self.growth_rate,
                                  kernel_size=1))
            _add_conv_block(
                nn.activated_conv(self.growth_rate, kernel_size=3, padding=1))

        self.num_features += self.growth_rate

        dense_block = HybridConcurrent(axis=1, prefix='')
        dense_block.add(Identity())
        dense_block.add(new_features)
        self.current_stage.add(dense_block)
Example #12
def _make_dense_layer(growth_rate, bn_size, dropout, dilation):
    new_features = nn.HybridSequential(prefix='')
    if bn_size == 0:
        # no bottleneck
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(growth_rate,
                              kernel_size=3,
                              padding=dilation,
                              dilation=dilation))
        if dropout:
            new_features.add(nn.Dropout(dropout))
    else:
        # bottleneck design
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(bn_size * growth_rate, kernel_size=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(growth_rate, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
    out.add(Identity())
    out.add(new_features)

    return out
Example #13
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        assert (in_channels == 288)
        assert (out_channels == 768)

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(384,),
                kernel_size_list=(3,),
                strides_list=(2,),
                padding_list=(0,),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(64, 96, 96),
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 2),
                padding_list=(0, 1, 0),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())
Example #14
    def __init__(self, in_channels, out_channels, bn_epsilon,
                 bn_use_global_stats, **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        assert (in_channels == 768)
        assert (out_channels == 1280)

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(192, 320),
                              kernel_size_list=(1, 3),
                              strides_list=(1, 2),
                              padding_list=(0, 0),
                              bn_epsilon=bn_epsilon,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(192, 192, 192, 192),
                              kernel_size_list=(1, (1, 7), (7, 1), 3),
                              strides_list=(1, 1, 1, 2),
                              padding_list=(0, (0, 3), (3, 0), 0),
                              bn_epsilon=bn_epsilon,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())
Example #15
    def __init__(self, in_channels, out_channels, kernel_sizes, scale_factors,
                 use_residual, in_size, bn_epsilon, **kwargs):
        super(ESPBlock, self).__init__(**kwargs)
        self.use_residual = use_residual
        groups = len(kernel_sizes)

        mid_channels = int(out_channels / groups)
        res_channels = out_channels - groups * mid_channels

        with self.name_scope():
            self.conv = conv1x1(in_channels=in_channels,
                                out_channels=mid_channels,
                                groups=groups)

            self.c_shuffle = ChannelShuffle(channels=mid_channels,
                                            groups=groups)

            self.branches = HybridConcurrent(axis=1, prefix="")
            with self.branches.name_scope():
                for i in range(groups):
                    out_channels_i = (mid_channels +
                                      res_channels) if i == 0 else mid_channels
                    self.branches.add(
                        SBBlock(in_channels=mid_channels,
                                out_channels=out_channels_i,
                                kernel_size=kernel_sizes[i],
                                scale_factor=scale_factors[i],
                                size=in_size,
                                bn_epsilon=bn_epsilon))

            self.preactiv = PreActivation(in_channels=out_channels,
                                          bn_epsilon=bn_epsilon)
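The channel bookkeeping here guarantees that the concatenation over `groups` branches is exactly out_channels: when out_channels is not divisible by groups, the remainder (res_channels) is given to branch 0. For example:

groups = 3
out_channels = 64
mid_channels = out_channels // groups                  # 21
res_channels = out_channels - groups * mid_channels    # 1
branch_channels = ([mid_channels + res_channels]
                   + [mid_channels] * (groups - 1))
assert sum(branch_channels) == out_channels            # 22 + 21 + 21 == 64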
Example #16
    def __init__(self, in_channels, out_channels, mid_channels,
                 bn_use_global_stats, **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        assert (in_channels == 768)
        assert (out_channels == 768)

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=192,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(mid_channels, mid_channels,
                                                 192),
                              kernel_size_list=(1, (1, 7), (7, 1)),
                              strides_list=(1, 1, 1),
                              padding_list=(0, (0, 3), (3, 0)),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(mid_channels, mid_channels,
                                                 mid_channels, mid_channels,
                                                 192),
                              kernel_size_list=(1, (7, 1), (1, 7), (7, 1),
                                                (1, 7)),
                              strides_list=(1, 1, 1, 1, 1),
                              padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                AvgPoolBranch(in_channels=in_channels,
                              out_channels=192,
                              bn_use_global_stats=bn_use_global_stats))
Example #17
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[0:2],
                kernel_size_list=(1, 3),
                strides_list=(1, 2),
                padding_list=(0, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[2:4],
                kernel_size_list=(1, 3),
                strides_list=(1, 2),
                padding_list=(0, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[4:7],
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 2),
                padding_list=(0, 1, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())
Example #18
    def __init__(self, in_channels, mid1_channels_list, mid2_channels_list,
                 bn_use_global_stats, avg_pool, **kwargs):
        super(InceptionBlock, self).__init__(**kwargs)
        assert (len(mid1_channels_list) == 2)
        assert (len(mid2_channels_list) == 4)

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                conv1x1_block(in_channels=in_channels,
                              out_channels=mid2_channels_list[0],
                              use_bias=True,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                Inception3x3Branch(in_channels=in_channels,
                                   out_channels=mid2_channels_list[1],
                                   mid_channels=mid1_channels_list[0],
                                   bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                InceptionDouble3x3Branch(
                    in_channels=in_channels,
                    out_channels=mid2_channels_list[2],
                    mid_channels=mid1_channels_list[1],
                    bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                InceptionPoolBranch(in_channels=in_channels,
                                    out_channels=mid2_channels_list[3],
                                    bn_use_global_stats=bn_use_global_stats,
                                    avg_pool=avg_pool))
Example #19
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        self.scale = 0.10

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=out_channels_list[0],
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[1:4],
                kernel_size_list=(1, (1, 7), (7, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 3), (3, 0)),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            conv_in_channels = out_channels_list[0] + out_channels_list[3]
            self.conv = conv1x1(
                in_channels=conv_in_channels,
                out_channels=in_channels,
                use_bias=True)
            self.activ = nn.Activation("relu")
Example #20
    def __init__(self, in_channels, out_channels, bn_use_global_stats,
                 **kwargs):
        super(StemBlock, self).__init__(**kwargs)
        mid1_channels = out_channels // 2
        mid2_channels = out_channels * 2

        with self.name_scope():
            self.first_conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                strides=2)

            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(
                PeleeBranch1(in_channels=out_channels,
                             out_channels=out_channels,
                             mid_channels=mid1_channels,
                             strides=2,
                             bn_use_global_stats=bn_use_global_stats))
            self.branches.add(nn.MaxPool2D(pool_size=2, strides=2, padding=0))

            self.last_conv = conv1x1_block(
                in_channels=mid2_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
Example #21
    def __init__(self, in_channels, out_channels, bn_use_global_stats,
                 **kwargs):
        super(InceptionAUnit, self).__init__(**kwargs)
        assert (out_channels > 224)
        pool_out_channels = out_channels - 224

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=64,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(48, 64),
                              kernel_size_list=(1, 5),
                              strides_list=(1, 1),
                              padding_list=(0, 2),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(64, 96, 96),
                              kernel_size_list=(1, 3, 3),
                              strides_list=(1, 1, 1),
                              padding_list=(0, 1, 1),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                AvgPoolBranch(in_channels=in_channels,
                              out_channels=pool_out_channels,
                              bn_use_global_stats=bn_use_global_stats))
Example #22
    def __init__(self, bn_use_global_stats, **kwargs):
        super(InceptBlock5b, self).__init__(**kwargs)
        in_channels = 192

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=96,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(48, 64),
                              kernel_size_list=(1, 5),
                              strides_list=(1, 1),
                              padding_list=(0, 2),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(64, 96, 96),
                              kernel_size_list=(1, 3, 3),
                              strides_list=(1, 1, 1),
                              padding_list=(0, 1, 1),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                AvgPoolBranch(in_channels=in_channels,
                              out_channels=64,
                              bn_use_global_stats=bn_use_global_stats))
Example #23
    def __init__(self, in_channels, mid1_channels_list, mid2_channels_list,
                 use_bias, use_bn, bn_use_global_stats, **kwargs):
        super(ReductionBlock, self).__init__(**kwargs)
        assert (len(mid1_channels_list) == 2)
        assert (len(mid2_channels_list) == 4)

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                Inception3x3Branch(in_channels=in_channels,
                                   out_channels=mid2_channels_list[1],
                                   mid_channels=mid1_channels_list[0],
                                   strides=2,
                                   use_bias=use_bias,
                                   use_bn=use_bn,
                                   bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                InceptionDouble3x3Branch(
                    in_channels=in_channels,
                    out_channels=mid2_channels_list[2],
                    mid_channels=mid1_channels_list[1],
                    strides=2,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                nn.MaxPool2D(pool_size=3, strides=2, padding=0,
                             ceil_mode=True))
Example #24
    def __init__(self, bn_use_global_stats, **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        in_channels = 1088

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(256, 384),
                              kernel_size_list=(1, 3),
                              strides_list=(1, 2),
                              padding_list=(0, 0),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(256, 288),
                              kernel_size_list=(1, 3),
                              strides_list=(1, 2),
                              padding_list=(0, 0),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(256, 288, 320),
                              kernel_size_list=(1, 3, 3),
                              strides_list=(1, 1, 2),
                              padding_list=(0, 1, 0),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())
Example #25
    def __init__(self,
                 scale=0.2,
                 activate=True,
                 bn_use_global_stats=False,
                 **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        self.activate = activate
        self.scale = scale
        in_channels = 2080

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=192,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(192, 224, 256),
                              kernel_size_list=(1, (1, 3), (3, 1)),
                              strides_list=(1, 1, 1),
                              padding_list=(0, (0, 1), (1, 0)),
                              bn_use_global_stats=bn_use_global_stats))
            self.conv = conv1x1(in_channels=448,
                                out_channels=in_channels,
                                use_bias=True)
            if self.activate:
                self.activ = nn.Activation("relu")
Example #26
    def __init__(self, bn_use_global_stats, **kwargs):
        super(InceptionAUnit, self).__init__(**kwargs)
        self.scale = 0.17
        in_channels = 320

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=32,
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(32, 32),
                              kernel_size_list=(1, 3),
                              strides_list=(1, 1),
                              padding_list=(0, 1),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(32, 48, 64),
                              kernel_size_list=(1, 3, 3),
                              strides_list=(1, 1, 1),
                              padding_list=(0, 1, 1),
                              bn_use_global_stats=bn_use_global_stats))
            self.conv = conv1x1(in_channels=128,
                                out_channels=in_channels,
                                use_bias=True)
            self.activ = nn.Activation("relu")
Example #27
    def __init__(self, bn_use_global_stats, **kwargs):
        super(TwoWayABlock, self).__init__(**kwargs)
        in_channels = 384

        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(32, 48, 64),
                              kernel_size_list=(1, 3, 3),
                              strides_list=(1, 1, 1),
                              padding_list=(0, 1, 1),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                ConvSeqBranch(in_channels=in_channels,
                              out_channels_list=(32, 32),
                              kernel_size_list=(1, 3),
                              strides_list=(1, 1),
                              padding_list=(0, 1),
                              bn_use_global_stats=bn_use_global_stats))
            self.branches.add(
                Conv1x1Branch(in_channels=in_channels,
                              out_channels=32,
                              bn_use_global_stats=bn_use_global_stats))
            self.conv = conv1x1_block(in_channels=128,
                                      out_channels=in_channels,
                                      bn_use_global_stats=bn_use_global_stats,
                                      activate=False)
Example #28
    def __init__(
        self,
        name,
        in_ch,
        ch_0_0=192,
        ch_1_0=128,
        ch_1_1=224,
        ch_1_2=256,
        ch=2144,
        bn_mom=0.9,
        act_type="relu",
        res_scale_fac=0.2,
        use_se=True,
        shortcut=True,
    ):
        """
        Definition of the InceptionResnetC block

        :param name: name prefix for all blocks
        :param in_ch: Number of input channels
        :param ch_0_0: Number of channels for 1st conv operation in branch 0
        :param ch_1_0: Number of channels for 1st conv operation in branch 1
        :param ch_1_1: Number of channels for 2nd conv operation in branch 1
        :param ch_1_2: Number of channels for 3rd conv operation in branch 1
        :param ch: Number of channels for conv operation after concatenating branches (no act is applied here)
        :param bn_mom: Batch normalization momentum parameter
        :param act_type: Activation type to use
        :param res_scale_fac: Constant multiply scalar which is applied to the residual activation maps
        :param use_se: Whether to use a squeeze-and-excitation module
        :param shortcut: Whether to add the residual shortcut connection
        """
        super(_InceptionResnetC, self).__init__(name, ch, res_scale_fac, act_type, bn_mom, use_se, shortcut)
        self.res_scale_fac = res_scale_fac
        self.block_name = name
        self.body = HybridSequential(prefix="")
        self.branches = HybridConcurrent(axis=1, prefix="")  # entry point for all branches
        # branch 0 of block type C
        self.b_0 = HybridSequential()
        self.b_0.add(Conv2D(channels=ch_0_0, kernel_size=(1, 1), prefix="%s_0_conv0" % name, in_channels=in_ch))
        self.b_0.add(get_act(act_type, prefix="%s_0_%s0" % (name, act_type)))
        # branch 1 of block type C
        self.b_1 = HybridSequential()
        self.b_1.add(Conv2D(channels=ch_1_0, kernel_size=(1, 1), prefix="%s_1_conv0" % name, in_channels=in_ch))
        self.b_1.add(get_act(act_type, prefix="%s_1_%s0" % (name, act_type)))
        self.b_1.add(
            Conv2D(channels=ch_1_1, kernel_size=(1, 3), padding=(0, 1), prefix="%s_1_conv1" % name, in_channels=ch_1_0)
        )
        self.b_1.add(get_act(act_type, prefix="%s_1_%s1" % (name, act_type)))
        self.b_1.add(
            Conv2D(channels=ch_1_2, kernel_size=(3, 1), padding=(1, 0), prefix="%s_1_conv2" % name, in_channels=ch_1_1)
        )
        self.b_1.add(get_act(act_type, prefix="%s_1_%s2" % (name, act_type)))
        # concatenate all branches and add them to the body
        self.branches.add(self.b_0)
        self.branches.add(self.b_1)
        self.body.add(self.branches)
        # apply a single CNN layer without activation function
        self.body.add(
            Conv2D(
                channels=ch, kernel_size=(1, 1), prefix="%s_conv0" % name, in_channels=ch_0_0 + ch_1_2, use_bias=False
            )
        )
Example #29
    def __init__(self, bn_use_global_stats, **kwargs):
        super(PolyBlock5a, self).__init__(**kwargs)
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix='')
            self.branches.add(MaxPoolBranch())
            self.branches.add(
                Conv3x3Branch(in_channels=192,
                              out_channels=192,
                              bn_use_global_stats=bn_use_global_stats))
Example #30
def _make_B(prefix):
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None, (384, 3, 2, None)))
        out.add(
            _make_branch(None, (64, 1, None, None), (96, 3, None, 1),
                         (96, 3, 2, None)))
        out.add(_make_branch('max'))
    return out
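This `_make_B` is the Inception v3 grid-reduction block: every branch halves the spatial size (stride-2 convs, stride-2 max pool), so the channel concatenation stays spatially aligned. A hypothetical shape check, reusing the `_make_branch` sketch from Example #2 and Inception v3's actual 288-channel, 35x35 input at this stage:

import mxnet as mx

block = _make_B(prefix='')   # relies on the _make_branch sketch above
block.initialize()
x = mx.nd.random.uniform(shape=(1, 288, 35, 35))
print(block(x).shape)  # (1, 768, 17, 17): 384 + 96 + 288 channels, 35 -> 17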