def __init__(self, in_channel_list, out_channel_list, kernel_size_list=3, expand_ratio_list=6,
             stride=1, act_func='relu6', use_se=False):
    super(DynamicMBConvLayer, self).__init__()

    self.in_channel_list = in_channel_list
    self.out_channel_list = out_channel_list

    self.kernel_size_list = val2list(kernel_size_list, 1)
    self.expand_ratio_list = val2list(expand_ratio_list, 1)

    self.stride = stride
    self.act_func = act_func
    self.use_se = use_se

    # build modules at the largest configuration; smaller sub-networks reuse parts of these weights
    max_middle_channel = round(max(self.in_channel_list) * max(self.expand_ratio_list))
    if max(self.expand_ratio_list) == 1:
        # no expansion -> the 1x1 inverted bottleneck is not needed
        self.inverted_bottleneck = None
    else:
        self.inverted_bottleneck = nn.Sequential(OrderedDict([
            ('conv', DynamicPointConv2d(max(self.in_channel_list), max_middle_channel)),
            ('bn', DynamicBatchNorm2d(max_middle_channel)),
            ('act', build_activation(self.act_func, inplace=True)),
        ]))

    self.depth_conv = nn.Sequential(OrderedDict([
        ('conv', DynamicSeparableConv2d(max_middle_channel, self.kernel_size_list, self.stride)),
        ('bn', DynamicBatchNorm2d(max_middle_channel)),
        ('act', build_activation(self.act_func, inplace=True)),
    ]))
    if self.use_se:
        self.depth_conv.add_module('se', DynamicSE(max_middle_channel))

    self.point_linear = nn.Sequential(OrderedDict([
        ('conv', DynamicPointConv2d(max_middle_channel, max(self.out_channel_list))),
        ('bn', DynamicBatchNorm2d(max(self.out_channel_list))),
    ]))

    # default to the largest sub-network
    self.active_kernel_size = max(self.kernel_size_list)
    self.active_expand_ratio = max(self.expand_ratio_list)
    self.active_out_channel = max(self.out_channel_list)
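# Usage sketch (illustrative only; assumes this class and its dynamic helpers, e.g.
# DynamicSeparableConv2d, DynamicPointConv2d and val2list, are importable from this module;
# the channel/kernel/expand lists below are made up for the example):
#
#   block = DynamicMBConvLayer(
#       in_channel_list=[16, 24], out_channel_list=[24, 32],
#       kernel_size_list=[3, 5, 7], expand_ratio_list=[3, 4, 6],
#       stride=2, act_func='relu6', use_se=True)
#   block.active_kernel_size = 5    # select a smaller depthwise kernel than the built maximum
#   block.active_expand_ratio = 4   # select a narrower expansion for the current sub-network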
def __init__(self, in_channel_list, out_channel_list, kernel_size=3, stride=1, dilation=1,
             use_bn=True, act_func='relu6'):
    super(DynamicConvLayer, self).__init__()

    self.in_channel_list = in_channel_list
    self.out_channel_list = out_channel_list
    self.kernel_size = kernel_size
    self.stride = stride
    self.dilation = dilation
    self.use_bn = use_bn
    self.act_func = act_func

    self.conv = DynamicPointConv2d(
        max_in_channels=max(self.in_channel_list),
        max_out_channels=max(self.out_channel_list),
        kernel_size=self.kernel_size,
        stride=self.stride,
        dilation=self.dilation,
    )
    if self.use_bn:
        self.bn = DynamicBatchNorm2d(max(self.out_channel_list))
    self.act = build_activation(self.act_func, inplace=True)

    self.active_out_channel = max(self.out_channel_list)
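# Usage sketch (illustrative only; the channel lists are made up for the example):
#
#   stem = DynamicConvLayer(
#       in_channel_list=[3], out_channel_list=[16, 24, 32],
#       kernel_size=3, stride=2, use_bn=True, act_func='relu6')
#   stem.active_out_channel = 24    # pick an output width below the built maximum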
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, expand_ratio=6,
             mid_channels=None, act_func='relu6', use_se=False):
    super(MBInvertedConvLayer, self).__init__()

    self.in_channels = in_channels
    self.out_channels = out_channels
    self.kernel_size = kernel_size
    self.stride = stride
    self.expand_ratio = expand_ratio
    self.mid_channels = mid_channels
    self.act_func = act_func
    self.use_se = use_se

    # expanded (middle) width: either given explicitly or derived from the expand ratio
    if self.mid_channels is None:
        feature_dim = round(self.in_channels * self.expand_ratio)
    else:
        feature_dim = self.mid_channels

    if self.expand_ratio == 1:
        self.inverted_bottleneck = None
    else:
        self.inverted_bottleneck = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(self.in_channels, feature_dim, 1, 1, 0, bias=False)),
            ('bn', nn.BatchNorm2d(feature_dim)),
            ('act', build_activation(self.act_func, inplace=True)),
        ]))

    # depthwise convolution (groups == channels), optionally followed by squeeze-and-excitation
    pad = get_same_padding(self.kernel_size)
    depth_conv_modules = [
        ('conv', nn.Conv2d(feature_dim, feature_dim, kernel_size, stride, pad,
                           groups=feature_dim, bias=False)),
        ('bn', nn.BatchNorm2d(feature_dim)),
        ('act', build_activation(self.act_func, inplace=True)),
    ]
    if self.use_se:
        depth_conv_modules.append(('se', SEModule(feature_dim)))
    self.depth_conv = nn.Sequential(OrderedDict(depth_conv_modules))

    # linear 1x1 projection (no activation) back to the output width
    self.point_linear = nn.Sequential(OrderedDict([
        ('conv', nn.Conv2d(feature_dim, out_channels, 1, 1, 0, bias=False)),
        ('bn', nn.BatchNorm2d(out_channels)),
    ]))
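# Usage sketch (illustrative only): a static MobileNetV2-style inverted residual block,
# built once at a fixed size, in contrast to the dynamic variants above.
#
#   mb_conv = MBInvertedConvLayer(
#       in_channels=24, out_channels=40, kernel_size=5,
#       stride=2, expand_ratio=6, act_func='relu6', use_se=True)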
def __init__(self, in_features, out_features, bias=True, use_bn=False, act_func=None,
             dropout_rate=0, ops_order='weight_bn_act'):
    super(LinearLayer, self).__init__()

    self.in_features = in_features
    self.out_features = out_features
    self.bias = bias
    self.use_bn = use_bn
    self.act_func = act_func
    self.dropout_rate = dropout_rate
    self.ops_order = ops_order

    """ modules """
    modules = {}
    # batch norm
    if self.use_bn:
        if self.bn_before_weight:
            modules['bn'] = nn.BatchNorm1d(in_features)
        else:
            modules['bn'] = nn.BatchNorm1d(out_features)
    else:
        modules['bn'] = None
    # activation
    modules['act'] = build_activation(self.act_func, self.ops_list[0] != 'act')
    # dropout
    if self.dropout_rate > 0:
        modules['dropout'] = nn.Dropout(self.dropout_rate, inplace=True)
    else:
        modules['dropout'] = None
    # linear
    modules['weight'] = {
        'linear': nn.Linear(self.in_features, self.out_features, self.bias),
    }
    # add modules
    for op in self.ops_list:
        if modules[op] is None:
            continue
        elif op == 'weight':
            # dropout before weight operation
            if modules['dropout'] is not None:
                self.add_module('dropout', modules['dropout'])
            for key in modules['weight']:
                self.add_module(key, modules['weight'][key])
        else:
            self.add_module(op, modules[op])
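# Usage sketch (illustrative only; the feature sizes are made up). With the default
# ops_order='weight_bn_act', dropout (if any) is registered immediately before the linear weight.
#
#   classifier = LinearLayer(
#       in_features=1536, out_features=1000, bias=True,
#       use_bn=False, act_func=None, dropout_rate=0.1)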
def __init__(self, in_channels, out_channels, use_bn=True, act_func='relu', dropout_rate=0,
             ops_order='weight_bn_act'):
    super(My2DLayer, self).__init__()

    self.in_channels = in_channels
    self.out_channels = out_channels
    self.use_bn = use_bn
    self.act_func = act_func
    self.dropout_rate = dropout_rate
    self.ops_order = ops_order

    """ modules """
    modules = {}
    # batch norm
    if self.use_bn:
        if self.bn_before_weight:
            modules['bn'] = nn.BatchNorm2d(in_channels)
        else:
            modules['bn'] = nn.BatchNorm2d(out_channels)
    else:
        modules['bn'] = None
    # activation
    modules['act'] = build_activation(self.act_func, self.ops_list[0] != 'act')
    # dropout
    if self.dropout_rate > 0:
        modules['dropout'] = nn.Dropout2d(self.dropout_rate, inplace=True)
    else:
        modules['dropout'] = None
    # weight
    modules['weight'] = self.weight_op()
    # add modules
    for op in self.ops_list:
        if modules[op] is None:
            continue
        elif op == 'weight':
            # dropout before weight operation
            if modules['dropout'] is not None:
                self.add_module('dropout', modules['dropout'])
            for key in modules['weight']:
                self.add_module(key, modules['weight'][key])
        else:
            self.add_module(op, modules[op])
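# My2DLayer relies on weight_op(), which subclasses are expected to provide.
# Usage sketch (illustrative only; 'ConvLayer' stands for such a subclass and is not
# shown in this excerpt):
#
#   conv = ConvLayer(in_channels=32, out_channels=64,
#                    use_bn=True, act_func='relu', ops_order='weight_bn_act')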