def __init__(self, args, inplanes, planes, stride=1, downsample=None):
    """Build a ResNet bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand).

    Args:
        args: configuration object forwarded to the ReLuPCA activations.
        inplanes: number of channels entering the block.
        planes: bottleneck width; output has ``planes * self.expansion`` channels.
        stride: stride applied by the middle 3x3 convolution.
        downsample: optional module that matches the shortcut to the output
            shape (applied by the enclosing forward pass, not here).
    """
    super(Bottleneck, self).__init__()
    out_planes = planes * self.expansion

    # 1x1 channel reduction.
    self.conv1 = conv1x1(inplanes, planes)
    self.bn1 = nn.BatchNorm2d(planes)
    self.relu1 = ReLuPCA(args, planes)

    # 3x3 spatial convolution; carries the block's stride.
    self.conv2 = conv3x3(planes, planes, stride)
    self.bn2 = nn.BatchNorm2d(planes)
    self.relu2 = ReLuPCA(args, planes)

    # 1x1 channel expansion back to out_planes.
    self.conv3 = conv1x1(planes, out_planes)
    self.bn3 = nn.BatchNorm2d(out_planes)
    self.relu3 = ReLuPCA(args, out_planes)

    self.downsample = downsample
    self.stride = stride
def __init__(self, args, in_planes, planes, stride=1, downsample=None):
    """Build a ResNet basic block (two 3x3 convolutions).

    Args:
        args: configuration object forwarded to the ReLuPCA activations.
        in_planes: number of channels entering the block.
        planes: number of channels produced by both convolutions.
        stride: stride applied by the first 3x3 convolution.
        downsample: optional module that matches the shortcut to the output
            shape (applied by the enclosing forward pass, not here).
    """
    super(BasicBlock, self).__init__()

    # First 3x3 conv carries the block's stride.
    self.conv1 = conv3x3(in_planes, planes, stride)
    self.bn1 = nn.BatchNorm2d(planes)
    self.relu1 = ReLuPCA(args, planes)

    # Second 3x3 conv keeps the spatial size.
    self.conv2 = conv3x3(planes, planes)
    self.bn2 = nn.BatchNorm2d(planes)
    self.relu2 = ReLuPCA(args, planes)

    self.downsample = downsample
    self.stride = stride
def __init__(self, args, inp, oup, stride, expand_ratio):
    """Build a MobileNetV2-style inverted-residual block.

    Args:
        args: configuration object forwarded to the ReLuPCA activations.
        inp: number of input channels.
        oup: number of output channels.
        stride: depthwise-convolution stride; must be 1 or 2.
        expand_ratio: channel expansion factor for the hidden layer; when it
            is 1 the pointwise expansion stage is skipped.
    """
    super(InvertedResidual, self).__init__()
    self.stride = stride
    assert stride in [1, 2]

    hidden_dim = int(round(inp * expand_ratio))
    # Residual shortcut is usable only when spatial size and channels match.
    self.use_res_connect = self.stride == 1 and inp == oup

    stages = []
    if expand_ratio != 1:
        # pw: pointwise expansion to hidden_dim channels.
        stages += [
            nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden_dim),
            ReLuPCA(args, mxRelu6=True),  # stands in for nn.ReLU6(inplace=True)
        ]
    # dw: depthwise 3x3 convolution carrying the stride.
    stages += [
        nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
        nn.BatchNorm2d(hidden_dim),
        ReLuPCA(args, mxRelu6=True),  # stands in for nn.ReLU6(inplace=True)
        # pw-linear: pointwise projection, no activation.
        nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
    ]
    self.conv = nn.Sequential(*stages)
def __init__(self, block, layers, args, zero_init_residual=False):
    """Build a ResNet backbone for ImageNet (1000 classes).

    Args:
        block: residual block class (e.g. BasicBlock or Bottleneck); its
            ``expansion`` attribute sizes the final classifier input.
        layers: sequence of four ints — number of blocks in each stage.
        args: configuration object; ``args.model`` names the network and the
            object is forwarded to the ReLuPCA activation.
        zero_init_residual: accepted for interface compatibility; not used
            in this constructor.
    """
    super(ResNetImagenet, self).__init__()
    num_classes = 1000
    self.name = args.model
    self.inplanes = 64

    # ImageNet stem: 7x7/2 convolution, BN, activation, then 3x3/2 max-pool.
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    self.relu = ReLuPCA(args, 64)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    # Four residual stages; stages 2-4 halve the spatial resolution.
    self.layer1 = self._make_layer(args, block, 64, layers[0])
    self.layer2 = self._make_layer(args, block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(args, block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(args, block, 512, layers[3], stride=2)

    # Global pooling to 1x1 followed by the linear classifier.
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = nn.Linear(512 * block.expansion, num_classes)
def __init__(self, args, in_channels, out_channels, **kwargs):
    """Build a conv -> BN -> activation unit.

    Args:
        args: configuration object forwarded to the ReLuPCA activation.
        in_channels: number of input channels.
        out_channels: number of output channels.
        **kwargs: extra keyword arguments passed straight to ``nn.Conv2d``
            (kernel_size, stride, padding, ...).
    """
    super(BasicConv2d, self).__init__()
    # Bias is omitted because the following BatchNorm supplies a shift.
    self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
    self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
    self.relu = ReLuPCA(args)
def conv_bn(inp, oup, stride, args):
    """Return a 3x3 conv -> BN -> activation ``nn.Sequential``.

    Args:
        inp: number of input channels.
        oup: number of output channels.
        stride: convolution stride.
        args: configuration object forwarded to the ReLuPCA activation.
    """
    conv = nn.Conv2d(inp, oup, 3, stride, 1, bias=False)
    norm = nn.BatchNorm2d(oup)
    act = ReLuPCA(args, mxRelu6=True)  # stands in for nn.ReLU6(inplace=True)
    return nn.Sequential(conv, norm, act)