Example #1
0
    def weight_op(self):
        """Return an ordered name->module dict for a depthwise-separable conv.

        The dict holds a depthwise conv, a (grouped) pointwise conv, and an
        optional channel-shuffle module when grouping is active.
        """
        # Scale the 'same' padding by the dilation factor; get_same_padding may
        # return either a single int or a per-side pair.
        pad = get_same_padding(self.kernel_size)
        if isinstance(pad, int):
            pad = pad * self.dilation
        else:
            pad[0] = pad[0] * self.dilation
            pad[1] = pad[1] * self.dilation

        # Depthwise: one group per input channel, no bias (BN usually follows).
        depth_conv = nn.Conv2d(self.in_channels,
                               self.in_channels,
                               kernel_size=self.kernel_size,
                               stride=self.stride,
                               padding=pad,
                               dilation=self.dilation,
                               groups=self.in_channels,
                               bias=False)
        # Pointwise 1x1 conv mixes channels, optionally in groups.
        point_conv = nn.Conv2d(self.in_channels,
                               self.out_channels,
                               kernel_size=1,
                               groups=self.groups,
                               bias=self.bias)

        modules = OrderedDict([('depth_conv', depth_conv),
                               ('point_conv', point_conv)])
        if self.has_shuffle and self.groups > 1:
            modules['shuffle'] = ShuffleLayer(self.groups)
        return modules
Example #2
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 expand_ratio=6,
                 mid_channels=None,
                 act_func='relu6',
                 use_se=False,
                 groups=1):
        """Build a reduced MBConv layer: a single (grouped) expand conv
        followed by a 1x1 reduce conv — no separate leading 1x1 expansion.

        Args:
            in_channels: input channel count.
            out_channels: output channel count.
            kernel_size: spatial kernel size of the expand conv.
            stride: stride of the expand conv.
            expand_ratio: width multiplier used when ``mid_channels`` is None.
            mid_channels: explicit intermediate width; overrides ``expand_ratio``.
            act_func: activation name resolved by ``build_activation``.
            use_se: if True, append an SE module after the expand conv.
            groups: requested group count for the expand conv; ``None`` selects
                a depthwise conv (groups == intermediate width).
        """
        super(ReducedMBConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.use_se = use_se
        self.groups = groups

        # Intermediate width: an explicit mid_channels wins over expand_ratio.
        if self.mid_channels is None:
            feature_dim = round(self.in_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        pad = get_same_padding(self.kernel_size)
        # NOTE(review): depthwise only when groups is passed explicitly as
        # None (the default is 1); otherwise the request is clamped to a
        # divisor of feature_dim via min_divisible_value.
        groups = feature_dim if self.groups is None else min_divisible_value(
            feature_dim, self.groups)
        self.expand_conv = nn.Sequential(
            OrderedDict({
                'conv':
                nn.Conv2d(in_channels,
                          feature_dim,
                          kernel_size,
                          stride,
                          pad,
                          groups=groups,
                          bias=False),
                'bn':
                nn.BatchNorm2d(feature_dim),
                'act':
                build_activation(self.act_func, inplace=True),
            }))
        if self.use_se:
            self.expand_conv.add_module('se', SEModule(feature_dim))

        # 1x1 projection back to out_channels; conv + BN only, no activation.
        self.reduce_conv = nn.Sequential(
            OrderedDict({
                'conv':
                nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False),
                'bn':
                nn.BatchNorm2d(out_channels),
            }))
Example #3
0
    def forward(self, x, out_channel=None):
        """Convolve ``x`` with the weight slice matching the active widths.

        Falls back to ``self.active_out_channel`` when ``out_channel`` is not
        given; the input width is taken from ``x`` itself.
        """
        active_out = self.active_out_channel if out_channel is None else out_channel
        active_in = x.size(1)
        # Slice the full kernel down to the active sub-kernel.
        active_weight = self.conv.weight[:active_out, :active_in, :, :].contiguous()
        return F.conv2d(x, active_weight, None, self.stride,
                        get_same_padding(self.kernel_size), self.dilation, 1)
Example #4
0
	def forward(self, x, out_channel=None):
		"""Convolve ``x`` with the active filter slice, weight-standardized
		first when the underlying conv is a ``MyConv2d``."""
		if out_channel is None:
			out_channel = self.active_out_channel
		active_filter = self.get_active_filter(out_channel, x.size(1)).contiguous()
		# MyConv2d standardizes its weights before every convolution.
		if isinstance(self.conv, MyConv2d):
			active_filter = self.conv.weight_standardization(active_filter)
		return F.conv2d(x, active_filter, None, self.stride,
				get_same_padding(self.kernel_size), self.dilation, 1)
Example #5
0
    def forward(self, x, kernel_size=None):
        """Depthwise conv of ``x`` using the active (or given) kernel size."""
        ks = self.active_kernel_size if kernel_size is None else kernel_size
        channels = x.size(1)
        weight = self.get_active_filter(channels, ks).contiguous()
        # groups == channel count -> depthwise convolution.
        return F.conv2d(x, weight, None, self.stride,
                        get_same_padding(ks), self.dilation, channels)
Example #6
0
	def forward(self, x, kernel_size=None, groups=None):
		"""Grouped conv of ``x`` with the active (or given) kernel size and
		group count, weight-standardizing when the conv is a ``MyConv2d``."""
		ks = self.active_kernel_size if kernel_size is None else kernel_size
		g = self.active_groups if groups is None else groups
		weight = self.get_active_filter(ks, g).contiguous()
		# MyConv2d standardizes its weights before every convolution.
		if isinstance(self.conv, MyConv2d):
			weight = self.conv.weight_standardization(weight)
		return F.conv2d(x, weight, None, self.stride, get_same_padding(ks), self.dilation, g)
Example #7
0
    def weight_op(self):
        """Return an ordered dict holding the configured pooling module.

        Raises:
            NotImplementedError: if ``self.pool_type`` is neither 'avg' nor 'max'.
        """
        # 'same' padding only applies for stride 1; otherwise pad nothing.
        pad = get_same_padding(self.kernel_size) if self.stride == 1 else 0

        if self.pool_type == 'avg':
            # Exclude the zero padding from the average so borders stay unbiased.
            pool = nn.AvgPool2d(self.kernel_size, stride=self.stride,
                                padding=pad, count_include_pad=False)
        elif self.pool_type == 'max':
            pool = nn.MaxPool2d(self.kernel_size, stride=self.stride, padding=pad)
        else:
            raise NotImplementedError
        return OrderedDict([('pool', pool)])
Example #8
0
    def __init__(self, in_channels, out_channels,
                 kernel_size=3, stride=1, expand_ratio=6, mid_channels=None, act_func='relu6', use_se=False):
        """Build an MBConv (inverted-bottleneck) layer: an optional 1x1
        expansion, a depthwise conv (optionally followed by SE), and a 1x1
        projection with no trailing activation.

        Args:
            in_channels: input channel count.
            out_channels: output channel count.
            kernel_size: spatial kernel size of the depthwise conv.
            stride: stride of the depthwise conv.
            expand_ratio: expansion multiplier; 1 skips the expansion conv.
            mid_channels: explicit expanded width; overrides ``expand_ratio``.
            act_func: activation name resolved by ``build_activation``.
            use_se: if True, append an SE module after the depthwise conv.
        """
        super(MBInvertedConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.use_se = use_se

        # Expanded width: an explicit mid_channels wins over expand_ratio.
        if self.mid_channels is None:
            feature_dim = round(self.in_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        # expand_ratio == 1 means no expansion stage at all.
        if self.expand_ratio == 1:
            self.inverted_bottleneck = None
        else:
            self.inverted_bottleneck = nn.Sequential(OrderedDict([
                ('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))

        pad = get_same_padding(self.kernel_size)
        # Depthwise conv: groups == feature_dim, one filter per channel.
        depth_conv_modules = [
            ('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=feature_dim, bias=False)),
            ('bn', nn.BatchNorm2d(feature_dim)),
            ('act', build_activation(self.act_func, inplace=True))
        ]
        if self.use_se:
            depth_conv_modules.append(('se', SEModule(feature_dim)))
        self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))

        # 1x1 projection to out_channels; conv + BN only, no activation.
        self.point_linear = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
            ('bn', nn.BatchNorm2d(out_channels)),
        ]))
Example #9
0
    def weight_op(self):
        """Return an ordered dict with a (grouped, dilated) conv and an
        optional channel-shuffle module when grouping is active."""
        # Scale the 'same' padding by the dilation factor; get_same_padding may
        # return either a single int or a per-side pair.
        pad = get_same_padding(self.kernel_size)
        if isinstance(pad, int):
            pad = pad * self.dilation
        else:
            pad[0] = pad[0] * self.dilation
            pad[1] = pad[1] * self.dilation

        # Clamp the requested group count to a divisor of in_channels.
        conv = nn.Conv2d(self.in_channels,
                         self.out_channels,
                         kernel_size=self.kernel_size,
                         stride=self.stride,
                         padding=pad,
                         dilation=self.dilation,
                         groups=min_divisible_value(self.in_channels,
                                                    self.groups),
                         bias=self.bias)
        modules = OrderedDict([('conv', conv)])
        if self.has_shuffle and self.groups > 1:
            modules['shuffle'] = ShuffleLayer(self.groups)

        return modules
Example #10
0
    def __init__(self,
                 main_branch,
                 in_channels,
                 out_channels,
                 expand=1.0,
                 kernel_size=3,
                 act_func='relu',
                 n_groups=2,
                 downsample_ratio=2,
                 upsample_type='bilinear',
                 stride=1):
        """Pair ``main_branch`` with a lightweight residual branch
        (pool -> grouped conv -> 1x1 conv) whose final BN scale starts at zero.

        Args:
            main_branch: module the lite residual branch accompanies.
            in_channels: input channel count of the lite branch.
            out_channels: output channel count of the lite branch.
            expand: width multiplier for the intermediate channels.
            kernel_size: conv1 kernel size (forced to 1 when
                ``downsample_ratio`` is None).
            act_func: activation name resolved by ``build_activation``.
            n_groups: group count of conv1.
            downsample_ratio: average-pool factor; None selects global pooling.
            upsample_type: stored in the config dict only here — presumably
                consumed when upsampling the branch output elsewhere; confirm
                against the forward pass.
            stride: stride of conv1.
        """
        super(LiteResidualModule, self).__init__()

        self.main_branch = main_branch

        # Record the full branch configuration.
        self.lite_residual_config = {
            'in_channels': in_channels,
            'out_channels': out_channels,
            'expand': expand,
            'kernel_size': kernel_size,
            'act_func': act_func,
            'n_groups': n_groups,
            'downsample_ratio': downsample_ratio,
            'upsample_type': upsample_type,
            'stride': stride,
        }

        # Global pooling collapses the spatial dims, so a 1x1 kernel suffices.
        kernel_size = 1 if downsample_ratio is None else kernel_size

        padding = get_same_padding(kernel_size)
        if downsample_ratio is None:
            pooling = MyGlobalAvgPool2d()
        else:
            pooling = nn.AvgPool2d(downsample_ratio, downsample_ratio, 0)
        # Round the expanded width to the network-wide channel divisor.
        num_mid = make_divisible(int(in_channels * expand),
                                 divisor=MyNetwork.CHANNEL_DIVISIBLE)
        self.lite_residual = nn.Sequential(
            OrderedDict({
                'pooling':
                pooling,
                'conv1':
                nn.Conv2d(in_channels,
                          num_mid,
                          kernel_size,
                          stride,
                          padding,
                          groups=n_groups,
                          bias=False),
                'bn1':
                nn.BatchNorm2d(num_mid),
                'act':
                build_activation(act_func),
                'conv2':
                nn.Conv2d(num_mid, out_channels, 1, 1, 0, bias=False),
                'final_bn':
                nn.BatchNorm2d(out_channels),
            }))

        # initialize
        init_models(self.lite_residual)
        # Zero the final BN scale so the lite branch initially outputs zeros.
        self.lite_residual.final_bn.weight.data.zero_()
Example #11
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 expand_ratio=0.25,
                 mid_channels=None,
                 act_func='relu',
                 groups=1,
                 downsample_mode='avgpool_conv'):
        """Build a ResNet bottleneck block: 1x1 reduce, KxK (grouped) conv,
        1x1 expand, plus a shortcut (identity, strided conv, or avgpool+conv).

        Args:
            in_channels: input channel count.
            out_channels: output channel count.
            kernel_size: spatial kernel size of the middle conv.
            stride: stride of the middle conv (and of the shortcut).
            expand_ratio: bottleneck width as a fraction of ``out_channels``
                when ``mid_channels`` is None.
            mid_channels: explicit bottleneck width; overrides ``expand_ratio``.
            act_func: activation name resolved by ``build_activation``.
            groups: group count of the middle conv.
            downsample_mode: 'conv' for a strided 1x1 conv shortcut, or
                'avgpool_conv' for average-pool followed by a 1x1 conv.

        Raises:
            NotImplementedError: for an unknown ``downsample_mode`` when a
                non-identity shortcut is required.
        """
        super(ResNetBottleneckBlock, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.groups = groups

        self.downsample_mode = downsample_mode

        # Bottleneck width: an explicit mid_channels wins over expand_ratio.
        if self.mid_channels is None:
            feature_dim = round(self.out_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        # Round the width to the network-wide channel divisor and record it.
        feature_dim = make_divisible(feature_dim, MyNetwork.CHANNEL_DIVISIBLE)
        self.mid_channels = feature_dim

        # build modules
        self.conv1 = nn.Sequential(
            OrderedDict([
                ('conv',
                 nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0,
                           bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))

        pad = get_same_padding(self.kernel_size)
        self.conv2 = nn.Sequential(
            OrderedDict([('conv',
                          nn.Conv2d(feature_dim,
                                    feature_dim,
                                    kernel_size,
                                    stride,
                                    pad,
                                    groups=groups,
                                    bias=False)),
                         ('bn', nn.BatchNorm2d(feature_dim)),
                         ('act', build_activation(self.act_func,
                                                  inplace=True))]))

        # 1x1 expansion back to out_channels; conv + BN only, no activation.
        self.conv3 = nn.Sequential(
            OrderedDict([
                ('conv',
                 nn.Conv2d(feature_dim, self.out_channels, 1, 1, 0,
                           bias=False)),
                ('bn', nn.BatchNorm2d(self.out_channels)),
            ]))

        # Shortcut: identity when shapes match, otherwise downsample/project.
        if stride == 1 and in_channels == out_channels:
            self.downsample = IdentityLayer(in_channels, out_channels)
        elif self.downsample_mode == 'conv':
            self.downsample = nn.Sequential(
                OrderedDict([
                    ('conv',
                     nn.Conv2d(in_channels,
                               out_channels,
                               1,
                               stride,
                               0,
                               bias=False)),
                    ('bn', nn.BatchNorm2d(out_channels)),
                ]))
        elif self.downsample_mode == 'avgpool_conv':
            self.downsample = nn.Sequential(
                OrderedDict([
                    ('avg_pool',
                     nn.AvgPool2d(kernel_size=stride,
                                  stride=stride,
                                  padding=0,
                                  ceil_mode=True)),
                    ('conv',
                     nn.Conv2d(in_channels, out_channels, 1, 1, 0,
                               bias=False)),
                    ('bn', nn.BatchNorm2d(out_channels)),
                ]))
        else:
            raise NotImplementedError

        self.final_act = build_activation(self.act_func, inplace=True)