def __init__(
    self,
    num_input_features,
    growth_rate,
    bn_size,
    drop_rate,
    expandsize,
):
    """Dense layer (BN -> ReLU -> 1x1 conv -> BN -> ReLU -> 3x3 conv) whose
    convolutions are sparse ``ExpanderConv2d`` modules.

    Submodules are registered via ``add_module`` in forward-execution order;
    the enclosing ``_DenseLayer`` presumably extends ``nn.Sequential`` so
    registration order is the execution order -- TODO confirm.

    Args:
        num_input_features: channels entering the layer.
        growth_rate: channels produced by the layer's final conv.
        bn_size: bottleneck multiplier; the 1x1 conv widens to
            ``bn_size * growth_rate`` channels.
        drop_rate: dropout probability, stored for use outside this ctor.
        expandsize: divisor controlling expander sparsity; each conv uses
            ``expandSize = in_channels // expandsize``.
    """
    super(_DenseLayer, self).__init__()
    # NOTE(review): dotted submodule names ('norm.1', ...) follow the old
    # torchvision DenseNet naming; dots in module names clash with
    # state_dict splitting in newer PyTorch -- confirm torch version.
    self.add_module('norm.1', nn.BatchNorm2d(num_input_features)),
    self.add_module('relu.1', nn.ReLU(inplace=True)),
    #print(num_input_features,bn_size ,growth_rate)
    # 1x1 bottleneck conv: num_input_features -> bn_size * growth_rate.
    self.add_module(
        'conv.1',
        ExpanderConv2d(num_input_features,
                       bn_size * growth_rate,
                       kernel_size=1,
                       stride=1,
                       expandSize=(num_input_features // expandsize))),
    self.add_module('norm.2', nn.BatchNorm2d(bn_size * growth_rate)),
    self.add_module('relu.2', nn.ReLU(inplace=True)),
    # 3x3 conv producing the layer's growth_rate output channels.
    self.add_module(
        'conv.2',
        ExpanderConv2d(bn_size * growth_rate,
                       growth_rate,
                       kernel_size=3,
                       stride=1,
                       padding=1,
                       expandSize=((bn_size * growth_rate) // expandsize))),
    # Stored only; dropout is presumably applied in forward -- TODO confirm.
    self.drop_rate = drop_rate
def __init__(self, in_planes, out_planes, dropRate=0.0, expandSize=2):
    """Pre-activation bottleneck block built on ExpanderConv2d.

    Layout: BN -> ReLU -> 1x1 conv (widen 4x) -> BN -> ReLU -> 3x3 conv.
    The dropout rate is stored for later use, not applied here.
    """
    super(BottleneckBlock, self).__init__()
    # The bottleneck widens to four times the output width internally.
    width = out_planes * 4
    self.bn1 = nn.BatchNorm2d(in_planes)
    self.relu = nn.ReLU(inplace=True)
    self.conv1 = ExpanderConv2d(
        in_planes, width,
        kernel_size=1, stride=1, padding=0,
        expandSize=in_planes // expandSize,
    )
    self.bn2 = nn.BatchNorm2d(width)
    self.conv2 = ExpanderConv2d(
        width, out_planes,
        kernel_size=3, stride=1, padding=1,
        expandSize=width // expandSize,
    )
    self.droprate = dropRate
def __init__(self, inplanes, planes, expandsize, stride=1, downsample=None):
    """ResNet-style bottleneck (1x1 -> 3x3 -> 1x1, 4x expansion) whose
    convolutions are sparse ExpanderConv2d modules.

    ``expandsize`` is a divisor: each conv uses
    ``expandSize = in_channels // expandsize``. ``downsample`` is an
    optional module applied to the shortcut; both it and ``stride`` are
    stored for use elsewhere.
    """
    super(Bottleneck, self).__init__()
    in_expand = inplanes // expandsize
    mid_expand = planes // expandsize
    self.conv1 = ExpanderConv2d(
        inplanes, planes, kernel_size=1, expandSize=in_expand)
    self.bn1 = nn.BatchNorm2d(planes)
    self.conv2 = ExpanderConv2d(
        planes, planes, kernel_size=3, stride=stride, padding=1,
        expandSize=mid_expand)
    self.bn2 = nn.BatchNorm2d(planes)
    self.conv3 = ExpanderConv2d(
        planes, planes * 4, kernel_size=1, expandSize=mid_expand)
    self.bn3 = nn.BatchNorm2d(planes * 4)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
def make_layers(cfg, expandcfg, batch_norm=False):
    """Build a VGG-style feature stack from a channel configuration.

    A fixed 3->64 stem (conv + BN + ReLU) is followed by one entry per item
    of ``cfg``: the string ``'M'`` inserts a 2x2 max-pool, an integer adds a
    3x3 conv to that many channels (plus optional BN, then ReLU).

    Args:
        cfg: list of output channel counts and/or ``'M'`` pool markers.
        expandcfg: per-entry expandSize; a sparse ExpanderConv2d is used
            only when it is strictly smaller than the current input width,
            otherwise a dense ``nn.Conv2d``.
        batch_norm: insert ``nn.BatchNorm2d`` after every conv when True.

    Returns:
        ``nn.Sequential`` containing the assembled layers.
    """
    modules = [
        nn.Conv2d(3, 64, kernel_size=3, padding=1),
        nn.BatchNorm2d(64),
        nn.ReLU(True),
    ]
    in_channels = 64
    for idx, entry in enumerate(cfg):
        if entry == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        # Sparse conv only when the requested expandSize actually shrinks
        # the connectivity relative to the input width.
        if expandcfg[idx] < in_channels:
            conv = ExpanderConv2d(in_channels, entry,
                                  expandSize=expandcfg[idx],
                                  kernel_size=3, padding=1)
        else:
            conv = nn.Conv2d(in_channels, entry, kernel_size=3, padding=1)
        modules.append(conv)
        if batch_norm:
            modules.append(nn.BatchNorm2d(entry))
        modules.append(nn.ReLU(inplace=True))
        in_channels = entry
    return nn.Sequential(*modules)
def conv3x3(in_planes, out_planes, sparsity, stride=1):
    """3x3 expander convolution with padding 1.

    ``sparsity`` is a percentage of ``in_planes`` used as the expander's
    ``expandSize``.
    """
    expand = int(in_planes * sparsity / 100)
    return ExpanderConv2d(in_planes, out_planes,
                          kernel_size=3, stride=stride, padding=1,
                          expandSize=expand)
def expconv1x1(in_planes, out_planes, sparsity, stride=1):
    """1x1 expander convolution (no padding).

    ``sparsity`` is a percentage of ``out_planes`` used as the expander's
    ``expandSize`` -- note it scales the output width, unlike the 3x3
    helper which scales the input width.
    """
    expand = int(out_planes * sparsity / 100)
    return ExpanderConv2d(in_planes, out_planes,
                          kernel_size=1, stride=stride, padding=0,
                          expandSize=expand)
def conv_dw(inp, oup, stride, gtype, groupsz=1, expandsz=1):
    """Depthwise-separable block: depthwise 3x3 conv then pointwise 1x1
    conv, each followed by BatchNorm + ReLU6.

    NOTE(review): this function reads ``self.wm`` (a width multiplier)
    without taking ``self`` -- it only works as a closure inside a method
    where ``self`` is in scope; confirm at the definition site.

    Args:
        inp, oup: nominal in/out channel counts. Both are rescaled by
            ``self.wm`` except at the stem (inp == 32 with gtype 'full',
            input kept fixed) and the head (inp == oup == 1024, output
            kept fixed).
        stride: stride of the depthwise conv.
        gtype: pointwise conv kind -- 'full' (dense), 'groups' (grouped by
            ``groupsz``), or 'expander' (ExpanderConv2d with
            ``expandSize = oup // groupsz``). 'expanderacc' at the
            128->128 block is downgraded to 'expander' with unscaled
            channel counts.
        groupsz: group count / expander divisor for the pointwise conv.
        expandsz: currently unused; kept for interface compatibility.

    Returns:
        ``nn.Sequential`` of the six layers.

    Raises:
        ValueError: if ``gtype`` is unrecognised. (Previously an unknown
        gtype fell through all branches and crashed with a NameError on
        the unbound ``layerdl``/``layer``.)
    """
    if inp == 32 and gtype == 'full':
        # Stem: keep the input width fixed, scale only the output.
        oup = int(oup * self.wm)
    elif oup == 1024 and inp == 1024:
        # Head: keep the output width fixed, scale only the input.
        inp = int(inp * self.wm)
    elif inp == 128 and oup == 128 and gtype == 'expanderacc':
        # Special-cased block: plain expander, channels left unscaled.
        gtype = 'expander'
    else:
        inp = int(inp * self.wm)
        oup = int(oup * self.wm)

    # The depthwise 3x3 conv is identical for every gtype; only the
    # pointwise 1x1 conv differs.
    layerdl = nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False)
    if gtype == 'full':
        layer = nn.Conv2d(inp, oup, 1, 1, 0, bias=False)
    elif gtype == 'groups':
        layer = nn.Conv2d(inp, oup, 1, 1, 0, groups=groupsz, bias=False)
    elif gtype == 'expander':
        layer = ExpanderConv2d(inp, oup, kernel_size=1, stride=1, padding=0,
                               expandSize=(oup // groupsz))
    else:
        raise ValueError("unknown gtype: %r" % (gtype,))

    return nn.Sequential(
        layerdl,
        nn.BatchNorm2d(inp),
        nn.ReLU6(inplace=True),
        layer,
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    )
def conv3x3(inplanes, outplanes, sparsity):
    """3x3 expander convolution with padding 1 (stride left at its
    default).

    ``sparsity`` is a percentage of ``inplanes`` used as ``expandSize``.
    """
    expand = int(inplanes * sparsity / 100)
    return ExpanderConv2d(inplanes, outplanes,
                          kernel_size=3, padding=1,
                          expandSize=expand)
def conv3x3(in_planes, out_planes, expandsize, stride=1):
    """3x3 expander convolution with padding 1.

    ``expandsize`` is a divisor: the expander uses
    ``expandSize = in_planes // expandsize``.
    """
    return ExpanderConv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        expandSize=in_planes // expandsize,
    )