Example #1
File: mb_layers.py  Project: taoari/XNAS
    def weight_op(self):
        padding = get_same_padding(self.kernel_size)
        if isinstance(padding, int):
            padding *= self.dilation
        else:
            padding[0] *= self.dilation
            padding[1] *= self.dilation

        weight_dict = OrderedDict()
        weight_dict['depth_conv'] = nn.Conv2d(self.in_channels,
                                              self.in_channels,
                                              kernel_size=self.kernel_size,
                                              stride=self.stride,
                                              padding=padding,
                                              dilation=self.dilation,
                                              groups=self.in_channels,
                                              bias=False)
        weight_dict['point_conv'] = nn.Conv2d(self.in_channels,
                                              self.out_channels,
                                              kernel_size=1,
                                              groups=self.groups,
                                              bias=self.bias)
        if self.has_shuffle and self.groups > 1:
            weight_dict['shuffle'] = ShuffleLayer(self.groups)
        return weight_dict
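All six examples depend on the get_same_padding helper from the XNAS utilities. Its implementation is not shown on this page, but from how it is used here (multiplied by dilation in Example #1, indexed per side when the kernel size is a tuple), a minimal sketch might be:

    # Hypothetical sketch of get_same_padding; the real XNAS helper may differ.
    # For an odd kernel k at stride 1, padding of k // 2 preserves spatial size.
    def get_same_padding(kernel_size):
        if isinstance(kernel_size, tuple):
            # Return a list so callers can scale each side by dilation in place.
            return [kernel_size[0] // 2, kernel_size[1] // 2]
        assert kernel_size % 2 == 1, "same padding assumes an odd kernel size"
        return kernel_size // 2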
Example #2
    def forward(self, x, out_channel=None):
        assert out_channel is None, "out_channel must be None; set active_out_channel instead"
        if out_channel is None:
            out_channel = self.active_out_channel
        in_channel = x.size(1)
        if self.weight_sharing:
            filters = self.conv.weight[:out_channel, :in_channel, :, :].contiguous()
        else:
            assert in_channel in self.in_channel_list, "in_channel should be in in_channel_list"
            assert out_channel in self.out_channel_list, "out_channel should be in out_channel_list"
            filters = self.conv["{}_{}".format(int(in_channel),
                                               out_channel)].weight

        padding = get_same_padding(self.kernel_size)
        y = F.conv2d(x, filters, None, self.stride, padding, self.dilation, 1)
        return y
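In the weight-sharing branch above, one full-sized kernel is sliced down to the active shape; a sub-tensor of a conv weight is itself a valid smaller kernel. A standalone check with hypothetical sizes:

    import torch
    import torch.nn.functional as F

    full_weight = torch.randn(128, 64, 3, 3)   # shared 3x3 kernel, max 64 in / 128 out
    x = torch.randn(1, 32, 16, 16)             # only 32 input channels active
    filters = full_weight[:96, :32, :, :].contiguous()  # 96 active out channels
    y = F.conv2d(x, filters, None, 1, 1)       # padding=1 is 'same' for a 3x3 kernel
    print(y.shape)                             # torch.Size([1, 96, 16, 16])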
Example #3
    def weight_op(self):
        if self.stride == 1:
            # same padding if `stride == 1`
            padding = get_same_padding(self.kernel_size)
        else:
            padding = 0

        weight_dict = OrderedDict()
        if self.pool_type == 'avg':
            weight_dict['pool'] = nn.AvgPool2d(
                self.kernel_size, stride=self.stride, padding=padding, count_include_pad=False
            )
        elif self.pool_type == 'max':
            weight_dict['pool'] = nn.MaxPool2d(self.kernel_size, stride=self.stride, padding=padding)
        else:
            raise NotImplementedError
        return weight_dict
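Note count_include_pad=False on the average pool: with same padding, border windows overlap the zero padding, and counting those zeros in the divisor would bias border outputs toward zero. A quick illustration in plain PyTorch:

    import torch
    import torch.nn as nn

    x = torch.ones(1, 1, 4, 4)
    incl = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=True)(x)
    excl = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)(x)
    print(incl[0, 0, 0, 0].item())  # 0.444...: 4 ones / 9 positions, padding counted
    print(excl[0, 0, 0, 0].item())  # 1.0: only the 4 real pixels are averaged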
Example #4
 def forward(self, x, kernel=None):
     assert kernel is None, "kernel must be None; set active_kernel instead"
     _kernel = self.active_kernel if kernel is None else kernel
     _in_channel = x.size(1)
     if self.weight_sharing_mode == 0 or self.weight_sharing_mode == 1:
         filters = self.get_active_filter(_in_channel, _kernel).contiguous()
     elif self.weight_sharing_mode == 2:
         # Look up the stored conv by the resolved kernel size, not the (None) argument.
         filters = self.conv[str(_kernel)].weight[:_in_channel, :_in_channel, :, :]
     else:
         assert _kernel in self.kernel_size_list, "kernel should be in kernel_size_list"
         assert _in_channel in self.in_channel_list, "in_channel should be in in_channel_list"
         filters = self.conv["{}_{}".format(int(_kernel),
                                            int(_in_channel))].weight
     padding = get_same_padding(_kernel)
     y = F.conv2d(x, filters, None, self.stride, padding, self.dilation,
                  _in_channel)
     return y
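The final F.conv2d call passes _in_channel as the groups argument, which makes the convolution depthwise: each input channel is convolved with its own single-channel filter. A hypothetical standalone check:

    import torch
    import torch.nn.functional as F

    c = 8
    x = torch.randn(1, c, 16, 16)
    filters = torch.randn(c, 1, 3, 3)            # one 3x3 filter per channel
    y = F.conv2d(x, filters, None, 1, 1, 1, c)   # groups=c -> depthwise conv
    print(y.shape)                               # torch.Size([1, 8, 16, 16])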
Example #5
 def __init__(self, in_channel, expand_ratio, kernel_size, stride, act_func,
              se, out_channel):
     # expansion, 3x3 dwise, BN, Swish, SE, 1x1, BN, skip_connection
     super(MBConv, self).__init__()
     middle_channel = int(in_channel * expand_ratio)
     middle_channel = make_divisible(middle_channel, 8)
     if middle_channel != in_channel:
         self.expand = True
         self.inverted_bottleneck_conv = nn.Conv2d(in_channel,
                                                   middle_channel,
                                                   1,
                                                   stride=1,
                                                   padding=0,
                                                   bias=False)
         self.inverted_bottleneck_bn = nn.BatchNorm2d(middle_channel,
                                                      eps=cfg.BN.EPS,
                                                      momentum=cfg.BN.MOM)
         self.inverted_bottleneck_act = build_activation(act_func)
     else:
         self.expand = False
     self.depth_conv = nn.Conv2d(middle_channel,
                                 middle_channel,
                                 kernel_size,
                                 stride=stride,
                                 groups=middle_channel,
                                 padding=get_same_padding(kernel_size),
                                 bias=False)
     self.depth_bn = nn.BatchNorm2d(middle_channel,
                                    eps=cfg.BN.EPS,
                                    momentum=cfg.BN.MOM)
     self.depth_act = build_activation(act_func)
     if se > 0:
         self.depth_se = SEModule(middle_channel, se)
     self.point_linear_conv = nn.Conv2d(middle_channel,
                                        out_channel,
                                        1,
                                        stride=1,
                                        padding=0,
                                        bias=False)
     self.point_linear_bn = nn.BatchNorm2d(out_channel,
                                           eps=cfg.BN.EPS,
                                           momentum=cfg.BN.MOM)
     # Skip connection if in and out shapes are the same (MN-V2 style)
     self.has_skip = stride == 1 and in_channel == out_channel
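The constructor's comment spells out the block order (expansion, depthwise conv, BN, activation, SE, 1x1 projection, BN, skip). A forward pass consistent with the attributes defined above might look like this sketch; the actual XNAS method may differ in details:

    def forward(self, x):
        identity = x
        if self.expand:
            x = self.inverted_bottleneck_conv(x)   # 1x1 expansion
            x = self.inverted_bottleneck_act(self.inverted_bottleneck_bn(x))
        x = self.depth_act(self.depth_bn(self.depth_conv(x)))
        if hasattr(self, 'depth_se'):              # SE is only built when se > 0
            x = self.depth_se(x)
        x = self.point_linear_bn(self.point_linear_conv(x))
        if self.has_skip:
            x = x + identity                       # MN-V2 style residual
        return x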
Example #6
    def __init__(self, in_channels, out_channels,
                 kernel_size=3, stride=1, expand_ratio=6, mid_channels=None, act_func='relu6', use_se=False):
        super(MBInvertedConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.use_se = use_se

        if self.mid_channels is None:
            feature_dim = round(self.in_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        if self.expand_ratio == 1:
            self.inverted_bottleneck = None
        else:
            self.inverted_bottleneck = nn.Sequential(OrderedDict([
                ('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))

        pad = get_same_padding(self.kernel_size)
        depth_conv_modules = [
            ('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=feature_dim, bias=False)),
            ('bn', nn.BatchNorm2d(feature_dim)),
            ('act', build_activation(self.act_func, inplace=True))
        ]
        if self.use_se:
            depth_conv_modules.append(('se', SEModule(feature_dim, reduction=0.25)))
        self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))

        self.point_linear = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
            ('bn', nn.BatchNorm2d(out_channels)),
        ]))
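Given the three sequential sub-modules built above, a forward pass reduces to chaining them; a minimal sketch (the real method may carry extra bookkeeping):

    def forward(self, x):
        if self.inverted_bottleneck is not None:
            x = self.inverted_bottleneck(x)  # 1x1 expansion to feature_dim
        x = self.depth_conv(x)               # depthwise conv (+ optional SE)
        return self.point_linear(x)          # 1x1 projection to out_channels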