Example #1
    def _init(self):
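        # main path: two 3x3 binary (activated) convolutions, each followed by BatchNorm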
        self.body.add(
            nn.activated_conv(self.channels,
                              kernel_size=3,
                              stride=self.stride,
                              padding=1,
                              in_channels=self.in_channels))
        self.body.add(nn.BatchNorm())
        self.body.add(
            nn.activated_conv(self.channels,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              in_channels=self.channels))
        self.body.add(nn.BatchNorm())

        if self.downsample is not None:
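            # shortcut path: 1x1 binary (activated) convolution with the same stride, followed by BatchNorm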
            self.downsample.add(
                nn.activated_conv(self.channels,
                                  kernel_size=1,
                                  stride=self.stride,
                                  padding=0,
                                  in_channels=self.in_channels,
                                  prefix="sc_qconv_"))
            self.downsample.add(nn.BatchNorm())
Example #2
    def _init(self):
        # main path: a 3x3 binary (activated) convolution followed by BatchNorm
        self.body.add(
            nn.activated_conv(self.slice_width,
                              kernel_size=3,
                              stride=self.stride,
                              padding=1,
                              in_channels=self.in_channels))
        self.body.add(nn.BatchNorm())
        if self.downsample is not None:
            conv_stride = self.stride
            if self.use_pooling:
                # downsample spatially via average pooling instead of a strided convolution
                conv_stride = 1
                self.downsample.add(
                    nn.AvgPool2D(pool_size=2, strides=2, padding=0))
            if self.use_fp:
                # full-precision 1x1 convolution on the shortcut
                self.downsample.add(
                    nn.Conv2D(self.channels,
                              kernel_size=1,
                              strides=conv_stride,
                              use_bias=False,
                              groups=self.num_groups,
                              in_channels=self.in_channels,
                              prefix="sc_conv_"))
            else:
                # binary (activated) 1x1 convolution on the shortcut
                self.downsample.add(
                    nn.activated_conv(self.channels,
                                      kernel_size=1,
                                      stride=conv_stride,
                                      padding=0,
                                      in_channels=self.in_channels,
                                      prefix="sc_qconv_"))
            self.downsample.add(nn.BatchNorm())
Example #3
    def _add_dense_block(self, dilation):
        new_features = nn.HybridSequential(prefix='')

        def _add_conv_block(layer):
            new_features.add(nn.BatchNorm())
            new_features.add(layer)
            if self.dropout:
                new_features.add(nn.Dropout(self.dropout))

        if self.bn_size == 0:
            # no bottleneck
            _add_conv_block(
                nn.activated_conv(self.growth_rate,
                                  kernel_size=3,
                                  padding=dilation,
                                  dilation=dilation))
        else:
            # bottleneck design
            _add_conv_block(
                nn.activated_conv(self.bn_size * self.growth_rate,
                                  kernel_size=1))
            _add_conv_block(
                nn.activated_conv(self.growth_rate, kernel_size=3, padding=1))

        self.num_features += self.growth_rate

        dense_block = HybridConcurrent(axis=1, prefix='')
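        # Identity() passes the block input through; axis=1 concatenates it with the new features channel-wise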
        dense_block.add(Identity())
        dense_block.add(new_features)
        self.current_stage.add(dense_block)
Example #4
def _make_dense_layer(growth_rate, bn_size, dropout, dilation):
    new_features = nn.HybridSequential(prefix='')
    if bn_size == 0:
        # no bottleneck
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(growth_rate,
                              kernel_size=3,
                              padding=dilation,
                              dilation=dilation))
        if dropout:
            new_features.add(nn.Dropout(dropout))
    else:
        # bottleneck design
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(bn_size * growth_rate, kernel_size=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))
        new_features.add(nn.BatchNorm())
        new_features.add(
            nn.activated_conv(growth_rate, kernel_size=3, padding=1))
        if dropout:
            new_features.add(nn.Dropout(dropout))

    out = HybridConcurrent(axis=1, prefix='')
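    # Identity() passes the layer input through; axis=1 concatenates it with the new features channel-wise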
    out.add(Identity())
    out.add(new_features)

    return out
Example #5
    def _make_transition(self, transition_num):
        dilation = self.dilation[transition_num + 1]
        num_out_features = self.num_features // self.reduction_rates[transition_num]
        num_out_features = int(round(num_out_features / 32)) * 32
        logging.info("Features in transition {}: {} -> {}".format(
            transition_num + 1, self.num_features, num_out_features
        ))
        self.num_features = num_out_features
        
        transition = nn.HybridSequential(prefix='')
        with transition.name_scope():
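            # build the transition from the comma-separated layer spec, e.g. "bn,q_conv,pool"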
            for layer in self.downsample_struct.split(","):
                if layer == "bn":
                    transition.add(nn.BatchNorm())
                elif layer == "relu":
                    transition.add(nn.Activation("relu"))
                elif layer == "q_conv":
                    transition.add(nn.activated_conv(self.num_features, kernel_size=1))
                elif "fp_conv" in layer:
                    groups = 1
                    if ":" in layer:
                        groups = int(layer.split(":")[1])
                    transition.add(nn.Conv2D(self.num_features, kernel_size=1, groups=groups, use_bias=False))
                elif layer == "pool" and dilation == 1:
                    transition.add(nn.AvgPool2D(pool_size=2, strides=2))
                elif layer == "max_pool" and dilation == 1:
                    transition.add(nn.MaxPool2D(pool_size=2, strides=2))
                elif "cs" in layer:
                    groups = 16
                    if ":" in layer:
                        groups = int(layer.split(":")[1])
                    transition.add(ChannelShuffle(groups=groups))

        self.get_layer(transition_num + 1).add(transition)
Example #6
    def __init__(self,
                 growth_rate,
                 dilation,
                 bn_size,
                 dropout,
                 replace_by_skip_connection=False,
                 **kwargs):
        super().__init__(**kwargs)
        self.growth_rate = growth_rate
        self.dilation = dilation
        self.bn_size = bn_size
        self.dropout = dropout
        new_feature_computation = nn.HybridSequential(prefix='')
        self.replace_by_skip_connection = replace_by_skip_connection
        if self.replace_by_skip_connection:
            self._add_conv_block(
                new_feature_computation,
                nn.activated_conv(self.growth_rate, kernel_size=1, padding=0))
        else:
            if self.bn_size == 0:
                # no bottleneck
                self._add_conv_block(
                    new_feature_computation,
                    nn.activated_conv(self.growth_rate,
                                      kernel_size=3,
                                      padding=dilation,
                                      dilation=dilation))
            else:
                # bottleneck design
                self._add_conv_block(
                    new_feature_computation,
                    nn.activated_conv(self.bn_size * self.growth_rate,
                                      kernel_size=1))
                self._add_conv_block(
                    new_feature_computation,
                    nn.activated_conv(self.growth_rate,
                                      kernel_size=3,
                                      padding=1))
        dense_block = HybridConcurrent(axis=1, prefix='')
        dense_block.add(Identity())
        dense_block.add(new_feature_computation)
        self.dense_block = dense_block
Example #7
    def __init__(self, channels, in_channels, replace_by_skip_connection=False, dilation=1, **kwargs):
        super().__init__(**kwargs)
        self.replace_by_skip_connection = replace_by_skip_connection
        if not replace_by_skip_connection:
            self.body = nn.HybridSequential(prefix='')
            self.body.add(nn.BatchNorm())
            self.body.add(nn.activated_conv(channels=channels, kernel_size=3, stride=1,
                                            padding=dilation, in_channels=in_channels, dilation=dilation))

            self.use_sliced_addition = channels != in_channels
            if self.use_sliced_addition:
                assert channels < in_channels
                self.slices = [0, in_channels - channels, in_channels]
                self.slices_add_x = [False, True]
Example #8
    def __init__(self, channels, in_channels, dilation=1, **kwargs):
        super(ImprovementBlock, self).__init__(**kwargs)
        self.body = nn.HybridSequential(prefix='')
        self.body.add(nn.BatchNorm())
        self.body.add(
            nn.activated_conv(channels=channels,
                              kernel_size=3,
                              stride=1,
                              padding=dilation,
                              in_channels=in_channels,
                              dilation=dilation))

        self.use_sliced_addition = channels != in_channels
        if self.use_sliced_addition:
            assert channels < in_channels
            # e.g. in_channels=320, channels=64 -> slices = [0, 256, 320]
            self.slices = [0, in_channels - channels, in_channels]
            self.slices_add_x = [False, True]
Example #9
def _make_transition(num_output_features,
                     use_fp=False,
                     use_relu=False,
                     structure='bn,relu?,conv,pool',
                     dilation=1):
    out = nn.HybridSequential(prefix='')
    for layer in structure.split(","):
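        # "relu?" only adds an activation for full-precision transitions (both use_relu and use_fp set)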
        if layer == "bn":
            out.add(nn.BatchNorm())
        elif layer == "relu?" and use_relu and use_fp:
            out.add(nn.Activation("relu"))
        elif layer == "conv":
            if use_fp:
                out.add(
                    nn.Conv2D(num_output_features,
                              kernel_size=1,
                              use_bias=False))
            else:
                out.add(nn.activated_conv(num_output_features, kernel_size=1))
        elif layer == "pool" and dilation == 1:
            out.add(nn.AvgPool2D(pool_size=2, strides=2))
        elif layer == "max_pool" and dilation == 1:
            out.add(nn.MaxPool2D(pool_size=2, strides=2))
    return out
Example #10
def test_binary_layer_config_scaling():
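    # the active binary layer config determines which convolution class activated_conv creates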
    assert isinstance(nn.activated_conv(3), nn.BinaryConvolution)
    with nn.set_binary_layer_config(approximation="xnor"):
        assert isinstance(nn.activated_conv(3), nn.ScaledBinaryConv)
    assert isinstance(nn.activated_conv(3), nn.BinaryConvolution)
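
For reference, here is a minimal usage sketch that only recombines calls already shown above. It assumes the project's extended Gluon `nn` module (which provides `activated_conv` and `set_binary_layer_config`) is imported as `nn`; the channel sizes are placeholders:

with nn.set_binary_layer_config(approximation="xnor"):
    # inside this scope activated_conv creates nn.ScaledBinaryConv layers (see the test above)
    block = nn.HybridSequential(prefix='')
    block.add(nn.activated_conv(64, kernel_size=3, stride=1, padding=1, in_channels=64))
    block.add(nn.BatchNorm())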