Exemplo n.º 1
0
    def forward(self, x, out_channel=None):
        """Convolve ``x`` with a filter sliced to the active output width.

        Args:
            x: input feature map; its channel dim (dim 1) fixes in_channel.
            out_channel: number of output channels to slice the filter to;
                defaults to ``self.active_out_channel``.

        Returns:
            Result of a bias-free conv2d with "same" padding.
        """
        if out_channel is None:
            out_channel = self.active_out_channel
        in_channel = x.size(1)

        weight = self.get_active_filter(out_channel, in_channel).contiguous()
        # MyConv2d standardizes its weights before convolving; mirror that
        # for the sliced filter.
        if isinstance(self.conv, MyConv2d):
            weight = self.conv.weight_standardization(weight)

        pad = get_same_padding(self.kernel_size)
        return F.conv2d(x, weight, None, self.stride, pad, self.dilation, 1)
Exemplo n.º 2
0
    def forward(self, x, kernel_size=None, groups=None):
        """Grouped conv of ``x`` with the currently active kernel/groups.

        Args:
            x: input feature map.
            kernel_size: kernel size to slice the filter to; defaults to
                ``self.active_kernel_size``.
            groups: group count to use; defaults to ``self.active_groups``.

        Returns:
            Result of a bias-free conv2d with "same" padding.
        """
        kernel_size = (self.active_kernel_size
                       if kernel_size is None else kernel_size)
        groups = self.active_groups if groups is None else groups

        weight = self.get_active_filter(kernel_size, groups).contiguous()
        # MyConv2d standardizes its weights before convolving; mirror that
        # for the sliced filter.
        if isinstance(self.conv, MyConv2d):
            weight = self.conv.weight_standardization(weight)

        pad = get_same_padding(kernel_size)
        return F.conv2d(x, weight, None, self.stride, pad, self.dilation,
                        groups)
Exemplo n.º 3
0
    def weight_op(self):
        """Build the ordered module dict for this convolution layer.

        Returns:
            OrderedDict with a 'conv' entry (nn.Conv2d, bias per
            ``self.bias``) and, when ``self.has_shuffle`` and
            ``self.groups > 1``, a trailing 'shuffle' ShuffleLayer.
        """
        padding = get_same_padding(self.kernel_size)
        if isinstance(padding, int):
            padding *= self.dilation
        else:
            # Scale each side's "same" padding by the dilation. The original
            # code assigned padding[0]/padding[1] in place, which raises
            # TypeError when get_same_padding returns a tuple for tuple
            # kernel sizes; building a new tuple works for lists and tuples
            # alike, and nn.Conv2d accepts either.
            padding = tuple(p * self.dilation for p in padding)

        weight_dict = OrderedDict({
            'conv':
            nn.Conv2d(self.in_channels,
                      self.out_channels,
                      kernel_size=self.kernel_size,
                      stride=self.stride,
                      padding=padding,
                      dilation=self.dilation,
                      # clamp groups so it evenly divides in_channels
                      groups=min_divisible_value(self.in_channels,
                                                 self.groups),
                      bias=self.bias)
        })
        if self.has_shuffle and self.groups > 1:
            # Channel shuffle only matters for genuinely grouped convs.
            weight_dict['shuffle'] = ShuffleLayer(self.groups)

        return weight_dict
Exemplo n.º 4
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 expand_ratio=0.25,
                 mid_channels=None,
                 act_func='relu',
                 groups=1,
                 downsample_mode='avgpool_conv'):
        """Build a ResNet bottleneck block: 1x1 reduce -> KxK (grouped)
        conv -> 1x1 expand, plus a residual downsample path.

        Args:
            in_channels: channels of the block input.
            out_channels: channels of the block output.
            kernel_size: kernel size of the middle conv.
            stride: stride of the middle conv (and the downsample path).
            expand_ratio: bottleneck width as a fraction of out_channels,
                used only when mid_channels is None.
            mid_channels: explicit bottleneck width; overrides expand_ratio.
            act_func: activation name passed to build_activation.
            groups: group count of the middle conv.
            downsample_mode: 'conv' or 'avgpool_conv'; ignored when the
                shortcut can be an identity.

        Raises:
            NotImplementedError: for an unknown downsample_mode when a
                non-identity shortcut is required.
        """
        super(ResNetBottleneckBlock, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.groups = groups

        self.downsample_mode = downsample_mode

        # Bottleneck width: derived from out_channels unless given explicitly.
        if self.mid_channels is None:
            feature_dim = round(self.out_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        # Round to the network-wide channel divisibility constraint and
        # record the final width back on the instance.
        feature_dim = make_divisible(feature_dim, MyNetwork.CHANNEL_DIVISIBLE)
        self.mid_channels = feature_dim

        # build modules
        # 1x1 conv reducing in_channels -> feature_dim, with BN + activation.
        self.conv1 = nn.Sequential(
            OrderedDict([
                ('conv',
                 nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0,
                           bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))

        # KxK (possibly grouped, possibly strided) conv at bottleneck width.
        pad = get_same_padding(self.kernel_size)
        self.conv2 = nn.Sequential(
            OrderedDict([('conv',
                          nn.Conv2d(feature_dim,
                                    feature_dim,
                                    kernel_size,
                                    stride,
                                    pad,
                                    groups=groups,
                                    bias=False)),
                         ('bn', nn.BatchNorm2d(feature_dim)),
                         ('act', build_activation(self.act_func,
                                                  inplace=True))]))

        # 1x1 conv expanding feature_dim -> out_channels; no activation here
        # (the residual sum goes through final_act instead).
        self.conv3 = nn.Sequential(
            OrderedDict([
                ('conv',
                 nn.Conv2d(feature_dim, self.out_channels, 1, 1, 0,
                           bias=False)),
                ('bn', nn.BatchNorm2d(self.out_channels)),
            ]))

        # Shortcut path: identity when shapes already match, otherwise a
        # strided 1x1 conv or avgpool followed by a 1x1 conv.
        if stride == 1 and in_channels == out_channels:
            self.downsample = IdentityLayer(in_channels, out_channels)
        elif self.downsample_mode == 'conv':
            self.downsample = nn.Sequential(
                OrderedDict([
                    ('conv',
                     nn.Conv2d(in_channels,
                               out_channels,
                               1,
                               stride,
                               0,
                               bias=False)),
                    ('bn', nn.BatchNorm2d(out_channels)),
                ]))
        elif self.downsample_mode == 'avgpool_conv':
            self.downsample = nn.Sequential(
                OrderedDict([
                    ('avg_pool',
                     nn.AvgPool2d(kernel_size=stride,
                                  stride=stride,
                                  padding=0,
                                  ceil_mode=True)),
                    ('conv',
                     nn.Conv2d(in_channels, out_channels, 1, 1, 0,
                               bias=False)),
                    ('bn', nn.BatchNorm2d(out_channels)),
                ]))
        else:
            raise NotImplementedError

        # Activation applied after the residual addition.
        self.final_act = build_activation(self.act_func, inplace=True)
Exemplo n.º 5
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 expand_ratio=6,
                 mid_channels=None,
                 act_func='relu6',
                 use_se=False,
                 groups=None):
        """Build an MBConv (inverted residual) layer: optional 1x1 expand ->
        depthwise/grouped KxK conv (optional SE) -> 1x1 linear projection.

        Args:
            in_channels: channels of the layer input.
            out_channels: channels of the layer output.
            kernel_size: kernel size of the depthwise conv.
            stride: stride of the depthwise conv.
            expand_ratio: expansion factor over in_channels, used only when
                mid_channels is None; 1 disables the expansion conv.
            mid_channels: explicit expanded width; overrides expand_ratio.
            act_func: activation name passed to build_activation.
            use_se: append an SEModule after the depthwise conv when True.
            groups: group count of the middle conv; None means fully
                depthwise (groups == expanded width).
        """
        super(MBConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.use_se = use_se
        self.groups = groups

        # Expanded width: derived from in_channels unless given explicitly.
        if self.mid_channels is None:
            feature_dim = round(self.in_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        # Expansion ratio 1 means no expansion conv at all.
        if self.expand_ratio == 1:
            self.inverted_bottleneck = None
        else:
            self.inverted_bottleneck = nn.Sequential(
                OrderedDict([
                    ('conv',
                     nn.Conv2d(self.in_channels,
                               feature_dim,
                               1,
                               1,
                               0,
                               bias=False)),
                    ('bn', nn.BatchNorm2d(feature_dim)),
                    ('act', build_activation(self.act_func, inplace=True)),
                ]))

        pad = get_same_padding(self.kernel_size)
        # groups=None -> depthwise; otherwise clamp so it divides feature_dim.
        groups = feature_dim if self.groups is None else min_divisible_value(
            feature_dim, self.groups)
        depth_conv_modules = [('conv',
                               nn.Conv2d(feature_dim,
                                         feature_dim,
                                         kernel_size,
                                         stride,
                                         pad,
                                         groups=groups,
                                         bias=False)),
                              ('bn', nn.BatchNorm2d(feature_dim)),
                              ('act',
                               build_activation(self.act_func, inplace=True))]
        if self.use_se:
            # Squeeze-and-excitation inserted after the depthwise conv stack.
            depth_conv_modules.append(('se', SEModule(feature_dim)))
        self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))

        # Linear (no activation) 1x1 projection back to out_channels.
        self.point_linear = nn.Sequential(
            OrderedDict([
                ('conv',
                 nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
                ('bn', nn.BatchNorm2d(out_channels)),
            ]))