Example #1
    def __init__(self,
                 in_channel_list,
                 out_channel_list,
                 kernel_size_list=3,
                 expand_ratio_list=6,
                 stride=1,
                 act_func='relu6',
                 use_se=False):
        super(DynamicMBConvLayer, self).__init__()

        self.in_channel_list = in_channel_list
        self.out_channel_list = out_channel_list

        self.kernel_size_list = val2list(kernel_size_list)
        self.expand_ratio_list = val2list(expand_ratio_list)

        self.stride = stride
        self.act_func = act_func
        self.use_se = use_se

        # build modules
        max_middle_channel = make_divisible(
            round(max(self.in_channel_list) * max(self.expand_ratio_list)),
            MyNetwork.CHANNEL_DIVISIBLE)
        if max(self.expand_ratio_list) == 1:
            self.inverted_bottleneck = None
        else:
            self.inverted_bottleneck = nn.Sequential(
                OrderedDict([
                    ('conv',
                     DynamicConv2d(max(self.in_channel_list),
                                   max_middle_channel)),
                    ('bn', DynamicBatchNorm2d(max_middle_channel)),
                    ('act', build_activation(self.act_func)),
                ]))

        self.depth_conv = nn.Sequential(
            OrderedDict([('conv',
                          DynamicSeparableConv2d(max_middle_channel,
                                                 self.kernel_size_list,
                                                 self.stride)),
                         ('bn', DynamicBatchNorm2d(max_middle_channel)),
                         ('act', build_activation(self.act_func))]))
        if self.use_se:
            self.depth_conv.add_module('se', DynamicSE(max_middle_channel))

        self.point_linear = nn.Sequential(
            OrderedDict([
                ('conv',
                 DynamicConv2d(max_middle_channel,
                               max(self.out_channel_list))),
                ('bn', DynamicBatchNorm2d(max(self.out_channel_list))),
            ]))

        # Runtime-active configuration: defaults to the largest choices, so the
        # super-network weights built above cover every sub-network.
        self.active_kernel_size = max(self.kernel_size_list)
        self.active_expand_ratio = max(self.expand_ratio_list)
        self.active_out_channel = max(self.out_channel_list)
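
The constructor leans on two helpers. A minimal sketch of both, based on their standard definitions in MobileNet-style codebases (the exact bodies in this repository may differ slightly):

def val2list(val, repeat_time=1):
    # Wrap a scalar into a list so single values and lists are handled
    # uniformly (e.g., kernel_size_list=3 -> [3]).
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val for _ in range(repeat_time)]

def make_divisible(v, divisor, min_val=None):
    # Round v to the nearest multiple of divisor, never dropping below
    # 90% of the original value.
    if min_val is None:
        min_val = divisor
    new_v = max(min_val, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

# With in_channel_list=[24, 32], expand_ratio_list=[3, 4, 6], and a channel
# divisor of 8, the shared weights are allocated at the largest width:
# make_divisible(round(32 * 6), 8) == 192 middle channels.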
Example #2
    def __init__(self, in_channel_list, out_channel_list, expand_ratio_list=0.25,
                 kernel_size=3, stride=1, act_func='relu', downsample_mode='avgpool_conv'):
        super(DynamicResNetBottleneckBlock, self).__init__()

        self.in_channel_list = in_channel_list
        self.out_channel_list = out_channel_list
        self.expand_ratio_list = val2list(expand_ratio_list)

        self.kernel_size = kernel_size
        self.stride = stride
        self.act_func = act_func
        self.downsample_mode = downsample_mode

        # build modules
        max_middle_channel = make_divisible(
            round(max(self.out_channel_list) * max(self.expand_ratio_list)), MyNetwork.CHANNEL_DIVISIBLE)

        self.conv1 = nn.Sequential(OrderedDict([
            ('conv', DynamicConv2d(max(self.in_channel_list), max_middle_channel)),
            ('bn', DynamicBatchNorm2d(max_middle_channel)),
            ('act', build_activation(self.act_func, inplace=True)),
        ]))

        self.conv2 = nn.Sequential(OrderedDict([
            ('conv', DynamicConv2d(max_middle_channel, max_middle_channel, kernel_size, stride)),
            ('bn', DynamicBatchNorm2d(max_middle_channel)),
            ('act', build_activation(self.act_func, inplace=True))
        ]))

        self.conv3 = nn.Sequential(OrderedDict([
            ('conv', DynamicConv2d(max_middle_channel, max(self.out_channel_list))),
            ('bn', DynamicBatchNorm2d(max(self.out_channel_list))),
        ]))

        if self.stride == 1 and self.in_channel_list == self.out_channel_list:
            self.downsample = IdentityLayer(max(self.in_channel_list), max(self.out_channel_list))
        elif self.downsample_mode == 'conv':
            self.downsample = nn.Sequential(OrderedDict([
                ('conv', DynamicConv2d(max(self.in_channel_list), max(self.out_channel_list), stride=stride)),
                ('bn', DynamicBatchNorm2d(max(self.out_channel_list))),
            ]))
        elif self.downsample_mode == 'avgpool_conv':
            self.downsample = nn.Sequential(OrderedDict([
                ('avg_pool', nn.AvgPool2d(kernel_size=stride, stride=stride, padding=0, ceil_mode=True)),
                ('conv', DynamicConv2d(max(self.in_channel_list), max(self.out_channel_list))),
                ('bn', DynamicBatchNorm2d(max(self.out_channel_list))),
            ]))
        else:
            raise NotImplementedError

        self.final_act = build_activation(self.act_func, inplace=True)

        self.active_expand_ratio = max(self.expand_ratio_list)
        self.active_out_channel = max(self.out_channel_list)
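
A brief usage sketch, assuming the full class (including its forward method) is available from the once-for-all codebase; the import path, widths, and input shape are illustrative:

import torch
# Illustrative import; the actual module path depends on the repository layout.
# from ofa.imagenet_classification.elastic_nn.modules.dynamic_layers import (
#     DynamicResNetBottleneckBlock)

block = DynamicResNetBottleneckBlock(
    in_channel_list=[128, 160],
    out_channel_list=[256, 320],
    expand_ratio_list=[0.2, 0.25, 0.35],
    kernel_size=3,
    stride=2,
    downsample_mode='avgpool_conv',
)
# Select a sub-network configuration at runtime; the dynamic conv/bn modules
# are expected to slice their weights accordingly on the next forward pass.
block.active_expand_ratio = 0.25
block.active_out_channel = 256
out = block(torch.randn(2, 160, 28, 28))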
Example #3
    def __init__(self,
                 in_channel_list,
                 out_channel_list,
                 kernel_size=3,
                 stride=1,
                 dilation=1,
                 use_bn=True,
                 act_func='relu6'):
        super(DynamicConvLayer, self).__init__()

        self.in_channel_list = in_channel_list
        self.out_channel_list = out_channel_list
        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation
        self.use_bn = use_bn
        self.act_func = act_func

        self.conv = DynamicConv2d(
            max_in_channels=max(self.in_channel_list),
            max_out_channels=max(self.out_channel_list),
            kernel_size=self.kernel_size,
            stride=self.stride,
            dilation=self.dilation,
        )
        if self.use_bn:
            self.bn = DynamicBatchNorm2d(max(self.out_channel_list))
        self.act = build_activation(self.act_func)

        self.active_out_channel = max(self.out_channel_list)
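
A short usage sketch with hypothetical values; the forward method, not shown here, is expected to slice the conv/bn weights down to active_out_channel:

layer = DynamicConvLayer(in_channel_list=[24, 32],
                         out_channel_list=[48, 64],
                         kernel_size=3, stride=2)
layer.active_out_channel = 48  # sub-network width selected at runtime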
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 expand_ratio=6,
                 mid_channels=None,
                 act_func='relu6',
                 use_se=False,
                 groups=1):
        super(ReducedMBConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.use_se = use_se
        self.groups = groups

        if self.mid_channels is None:
            feature_dim = round(self.in_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        pad = get_same_padding(self.kernel_size)
        groups = feature_dim if self.groups is None else min_divisible_value(
            feature_dim, self.groups)
        self.expand_conv = nn.Sequential(
            OrderedDict([
                ('conv', nn.Conv2d(in_channels, feature_dim, kernel_size,
                                   stride, pad, groups=groups, bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))
        if self.use_se:
            self.expand_conv.add_module('se', SEModule(feature_dim))

        self.reduce_conv = nn.Sequential(
            OrderedDict([
                ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0,
                                   bias=False)),
                ('bn', nn.BatchNorm2d(out_channels)),
            ]))
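
The two helpers used here can be sketched as follows, based on their usual definitions in this family of codebases (simplified to the odd-integer kernel case; the repository's versions may handle more input types):

def get_same_padding(kernel_size):
    # 'Same' padding for an odd square kernel: pad = k // 2 keeps the
    # spatial size unchanged at stride 1.
    assert isinstance(kernel_size, int) and kernel_size % 2 == 1
    return kernel_size // 2

def min_divisible_value(n1, v1):
    # Largest value <= v1 that divides n1, so the grouped conv's group
    # count always divides the channel count.
    if v1 >= n1:
        return n1
    while n1 % v1 != 0:
        v1 -= 1
    return v1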
Example #5
    def __init__(self, in_channels, out_channels,
                 kernel_size=3, stride=1, expand_ratio=6, mid_channels=None, act_func='relu6', use_se=False):
        super(MBInvertedConvLayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.use_se = use_se

        if self.mid_channels is None:
            feature_dim = round(self.in_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        if self.expand_ratio == 1:
            self.inverted_bottleneck = None
        else:
            self.inverted_bottleneck = nn.Sequential(OrderedDict([
                ('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))

        pad = get_same_padding(self.kernel_size)
        depth_conv_modules = [
            ('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad, groups=feature_dim, bias=False)),
            ('bn', nn.BatchNorm2d(feature_dim)),
            ('act', build_activation(self.act_func, inplace=True))
        ]
        if self.use_se:
            depth_conv_modules.append(('se', SEModule(feature_dim)))
        self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))

        self.point_linear = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
            ('bn', nn.BatchNorm2d(out_channels)),
        ]))
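
The forward pass is omitted above; in the original repository it is the standard MBConv composition, roughly:

    def forward(self, x):
        if self.inverted_bottleneck is not None:
            x = self.inverted_bottleneck(x)  # 1x1 expansion
        x = self.depth_conv(x)               # depthwise conv (+ optional SE)
        return self.point_linear(x)          # 1x1 linear projection, no activation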
Example #6
    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 use_bn=False,
                 act_func=None,
                 dropout_rate=0,
                 ops_order='weight_bn_act'):
        super(LinearLayer, self).__init__()

        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias

        self.use_bn = use_bn
        self.act_func = act_func
        self.dropout_rate = dropout_rate
        self.ops_order = ops_order
        """ modules """
        modules = {}
        # batch norm
        if self.use_bn:
            if self.bn_before_weight:
                modules['bn'] = nn.BatchNorm1d(in_features)
            else:
                modules['bn'] = nn.BatchNorm1d(out_features)
        else:
            modules['bn'] = None
        # activation
        modules['act'] = build_activation(self.act_func,
                                          self.ops_list[0] != 'act')
        # dropout
        if self.dropout_rate > 0:
            modules['dropout'] = nn.Dropout(self.dropout_rate, inplace=True)
        else:
            modules['dropout'] = None
        # linear
        modules['weight'] = {
            'linear': nn.Linear(self.in_features, self.out_features, self.bias)
        }

        # add modules
        for op in self.ops_list:
            if modules[op] is None:
                continue
            elif op == 'weight':
                if modules['dropout'] is not None:
                    self.add_module('dropout', modules['dropout'])
                for key in modules['weight']:
                    self.add_module(key, modules['weight'][key])
            else:
                self.add_module(op, modules[op])
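
ops_list and bn_before_weight are not shown above; they are presumably properties along these lines (a sketch, not the verbatim source):

    @property
    def ops_list(self):
        # 'weight_bn_act' -> ['weight', 'bn', 'act']
        return self.ops_order.split('_')

    @property
    def bn_before_weight(self):
        # True if 'bn' appears before 'weight' in ops_order, which decides
        # whether BatchNorm1d sees in_features or out_features.
        for op in self.ops_list:
            if op == 'bn':
                return True
            elif op == 'weight':
                return False
        raise ValueError('Invalid ops_order: %s' % self.ops_order)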
Example #7
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bn=True,
                 act_func='relu',
                 dropout_rate=0,
                 ops_order='weight_bn_act'):
        super(My2DLayer, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.use_bn = use_bn
        self.act_func = act_func
        self.dropout_rate = dropout_rate
        self.ops_order = ops_order
        """ modules """
        modules = {}
        # batch norm
        if self.use_bn:
            if self.bn_before_weight:
                modules['bn'] = nn.BatchNorm2d(in_channels)
            else:
                modules['bn'] = nn.BatchNorm2d(out_channels)
        else:
            modules['bn'] = None
        # activation
        modules['act'] = build_activation(
            self.act_func, self.ops_list[0] != 'act' and self.use_bn)
        # dropout
        if self.dropout_rate > 0:
            modules['dropout'] = nn.Dropout2d(self.dropout_rate, inplace=True)
        else:
            modules['dropout'] = None
        # weight
        modules['weight'] = self.weight_op()

        # add modules
        for op in self.ops_list:
            if modules[op] is None:
                continue
            elif op == 'weight':
                # dropout before weight operation
                if modules['dropout'] is not None:
                    self.add_module('dropout', modules['dropout'])
                for key in modules['weight']:
                    self.add_module(key, modules['weight'][key])
            else:
                self.add_module(op, modules[op])
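
My2DLayer delegates the actual weight operation to subclasses via weight_op(). A hypothetical minimal subclass to show the contract (SimpleConvLayer and its fixed 3x3 kernel are illustrative, not from the source):

from collections import OrderedDict
import torch.nn as nn

class SimpleConvLayer(My2DLayer):
    def weight_op(self):
        # Returns the 'weight' entry that My2DLayer.__init__ iterates over
        # when assembling modules in ops_order.
        return OrderedDict([
            ('conv', nn.Conv2d(self.in_channels, self.out_channels, 3,
                               padding=1, bias=False)),
        ])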
Example #8
    def __init__(self,
                 main_branch,
                 in_channels,
                 out_channels,
                 expand=1.0,
                 kernel_size=3,
                 act_func='relu',
                 n_groups=2,
                 downsample_ratio=2,
                 upsample_type='bilinear',
                 stride=1):
        super(LiteResidualModule, self).__init__()

        self.main_branch = main_branch

        self.lite_residual_config = {
            'in_channels': in_channels,
            'out_channels': out_channels,
            'expand': expand,
            'kernel_size': kernel_size,
            'act_func': act_func,
            'n_groups': n_groups,
            'downsample_ratio': downsample_ratio,
            'upsample_type': upsample_type,
            'stride': stride,
        }

        kernel_size = 1 if downsample_ratio is None else kernel_size

        padding = get_same_padding(kernel_size)
        if downsample_ratio is None:
            pooling = MyGlobalAvgPool2d()
        else:
            pooling = nn.AvgPool2d(downsample_ratio, downsample_ratio, 0)
        num_mid = make_divisible(int(in_channels * expand),
                                 divisor=MyNetwork.CHANNEL_DIVISIBLE)
        self.lite_residual = nn.Sequential(
            OrderedDict([
                ('pooling', pooling),
                ('conv1', nn.Conv2d(in_channels, num_mid, kernel_size, stride,
                                    padding, groups=n_groups, bias=False)),
                ('bn1', nn.BatchNorm2d(num_mid)),
                ('act', build_activation(act_func)),
                ('conv2', nn.Conv2d(num_mid, out_channels, 1, 1, 0,
                                    bias=False)),
                ('final_bn', nn.BatchNorm2d(out_channels)),
            ]))

        # initialize
        init_models(self.lite_residual)
        self.lite_residual.final_bn.weight.data.zero_()
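
Zero-initializing final_bn makes the lite residual branch start as an identity, so training begins from the unmodified main branch. The forward pass (not shown here) presumably combines the two branches along these lines (a sketch; F.interpolate stands in for whatever upsampling call the repository uses):

import torch.nn.functional as F

    def forward(self, x):
        main_x = self.main_branch(x)
        lite_x = self.lite_residual(x)
        if self.lite_residual_config['downsample_ratio'] is not None:
            # Upsample the pooled residual back to the main branch's spatial size.
            lite_x = F.interpolate(lite_x, size=main_x.shape[2:],
                                   mode=self.lite_residual_config['upsample_type'])
        return main_x + lite_x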
Example #9
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 expand_ratio=0.25,
                 mid_channels=None,
                 act_func='relu',
                 groups=1,
                 downsample_mode='avgpool_conv'):
        super(ResNetBottleneckBlock, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernel_size = kernel_size
        self.stride = stride
        self.expand_ratio = expand_ratio
        self.mid_channels = mid_channels
        self.act_func = act_func
        self.groups = groups

        self.downsample_mode = downsample_mode

        if self.mid_channels is None:
            feature_dim = round(self.out_channels * self.expand_ratio)
        else:
            feature_dim = self.mid_channels

        feature_dim = make_divisible(feature_dim, MyNetwork.CHANNEL_DIVISIBLE)
        self.mid_channels = feature_dim

        # build modules
        self.conv1 = nn.Sequential(
            OrderedDict([
                ('conv',
                 nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0,
                           bias=False)),
                ('bn', nn.BatchNorm2d(feature_dim)),
                ('act', build_activation(self.act_func, inplace=True)),
            ]))

        pad = get_same_padding(self.kernel_size)
        self.conv2 = nn.Sequential(
            OrderedDict([('conv',
                          nn.Conv2d(feature_dim,
                                    feature_dim,
                                    kernel_size,
                                    stride,
                                    pad,
                                    groups=groups,
                                    bias=False)),
                         ('bn', nn.BatchNorm2d(feature_dim)),
                         ('act', build_activation(self.act_func,
                                                  inplace=True))]))

        self.conv3 = nn.Sequential(
            OrderedDict([
                ('conv',
                 nn.Conv2d(feature_dim, self.out_channels, 1, 1, 0,
                           bias=False)),
                ('bn', nn.BatchNorm2d(self.out_channels)),
            ]))

        if stride == 1 and in_channels == out_channels:
            self.downsample = IdentityLayer(in_channels, out_channels)
        elif self.downsample_mode == 'conv':
            self.downsample = nn.Sequential(
                OrderedDict([
                    ('conv',
                     nn.Conv2d(in_channels,
                               out_channels,
                               1,
                               stride,
                               0,
                               bias=False)),
                    ('bn', nn.BatchNorm2d(out_channels)),
                ]))
        elif self.downsample_mode == 'avgpool_conv':
            self.downsample = nn.Sequential(
                OrderedDict([
                    ('avg_pool',
                     nn.AvgPool2d(kernel_size=stride,
                                  stride=stride,
                                  padding=0,
                                  ceil_mode=True)),
                    ('conv',
                     nn.Conv2d(in_channels, out_channels, 1, 1, 0,
                               bias=False)),
                    ('bn', nn.BatchNorm2d(out_channels)),
                ]))
        else:
            raise NotImplementedError

        self.final_act = build_activation(self.act_func, inplace=True)
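
The constructor only builds the sub-modules; the forward pass (not shown) presumably follows the standard bottleneck residual pattern:

    def forward(self, x):
        residual = self.downsample(x)   # identity, strided 1x1 conv, or avgpool+conv
        x = self.conv1(x)               # 1x1 reduce
        x = self.conv2(x)               # kxk (possibly grouped, strided)
        x = self.conv3(x)               # 1x1 expand, no activation before the add
        return self.final_act(x + residual)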