Example #1
    def __init__(self, args, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = ReLU(args, inplace=True)  # ReLuPCA(args, planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = ReLU(args, inplace=True)  # ReLuPCA(args, planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = ReLU(args, inplace=True)  # ReLuPCA(args, planes)
        self.downsample = downsample
        self.stride = stride
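The example shows only the constructor; how the pieces connect follows the standard ResNet bottleneck residual pattern. A minimal forward() sketch under that assumption (not taken from the source):

    def forward(self, x):
        # Not part of the original example: a minimal sketch assuming the
        # standard ResNet bottleneck residual pattern.
        identity = x

        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        # The downsample path projects the identity when stride or channel
        # width changes, so the addition below is shape-compatible.
        if self.downsample is not None:
            identity = self.downsample(x)

        return self.relu3(out + identity)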
Example #2
    def __init__(self, args, in_planes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()

        self.conv1 = conv3x3(args, in_planes, planes, stride)
        self.bn1 = BatchNorm2d(planes)
        self.relu1 = ReLU(args, inplace=True)  # ReLuPCA(args, planes)

        self.conv2 = conv3x3(args, planes, planes)
        self.bn2 = BatchNorm2d(planes)

        self.downsample = downsample
        self.relu2 = ReLU(args, inplace=True)  # ReLuPCA(args, planes)

        self.stride = stride
Example #3
    def __init__(self, depth, args):
        super(ResNetCifar, self).__init__()
        num_classes = args.nClasses
        assert (depth - 2) % 6 == 0, 'Depth should be 6n + 2'
        n = (depth - 2) // 6
        self.name = args.model
        self.depth = depth - 1
        self.dataset = args.dataset
        block = BasicBlock
        self.inplanes = 64
        fmaps = [64, 128, 256]  # CIFAR10

        self.conv = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(64)
        self.relu = ReLU(args, inplace=True)  # ReLuPCA(args, 64)

        self.layer1 = self._make_layer(args, block, fmaps[0], n, stride=1)
        self.layer2 = self._make_layer(args, block, fmaps[1], n, stride=2)
        self.layer3 = self._make_layer(args, block, fmaps[2], n, stride=2)

        self.avgpool = nn.AvgPool2d(kernel_size=8, stride=1)
        self.flatten = flatten
        self.fc = nn.Linear(fmaps[2] * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            #   nn.init.kaiming_uniform_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
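The layers above are built by _make_layer, which this example does not show. A minimal sketch assuming the conventional ResNet builder; the downsample construction in particular is an assumption, not the repository's code:

    def _make_layer(self, args, block, planes, blocks, stride=1):
        # Conventional ResNet layer builder sketch; assumed, not from source.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the shortcut matches the block's output shape
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(args, self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(args, self.inplanes, planes))
        return nn.Sequential(*layers)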
Example #4
    def __init__(self, args, in_planes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()

        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = ReLU(args, inplace=True, channel_count=planes,
                          entropy_approximation=args.ea)  # ReLuPCA(args, planes)

        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = ReLU(args, inplace=True, channel_count=planes,
                          entropy_approximation=args.ea)  # ReLuPCA(args, planes)

        self.downsample = downsample

        self.stride = stride
Example #5
    def __init__(self, args, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        if expand_ratio == 1:
            self.conv = nn.Sequential(
                # dw
                Conv2d(args,
                       hidden_dim,
                       hidden_dim,
                       3,
                       stride,
                       1,
                       groups=hidden_dim,
                       bias=False),
                BatchNorm2d(hidden_dim),
                ReLU(args, inplace=True, relu6=True),
                # pw-linear
                Conv2d(args, hidden_dim, oup, 1, 1, 0, bias=False),
                BatchNorm2d(oup),
            )
        else:
            self.conv = nn.Sequential(
                # pw
                Conv2d(args, inp, hidden_dim, 1, 1, 0, bias=False),
                BatchNorm2d(hidden_dim),
                ReLU(args, inplace=True, relu6=True),
                # dw
                Conv2d(args,
                       hidden_dim,
                       hidden_dim,
                       3,
                       stride,
                       1,
                       groups=hidden_dim,
                       bias=False),
                BatchNorm2d(hidden_dim),
                ReLU(args, inplace=True, relu6=True),
                # pw-linear
                Conv2d(args, hidden_dim, oup, 1, 1, 0, bias=False),
                BatchNorm2d(oup),
            )
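The forward pass is omitted here; in the usual MobileNetV2 pattern the shortcut is taken only when use_res_connect holds. A minimal sketch under that assumption:

    def forward(self, x):
        # Residual shortcut only when stride == 1 and inp == oup;
        # otherwise the block is a plain feed-forward stack.
        if self.use_res_connect:
            return x + self.conv(x)
        return self.conv(x)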
Example #6
    def Layer(self, input_):
        """
        This function creates the components inside a composite layer
        of a Dense Block.
        """
        with tf.variable_scope("Composite"):
            next_layer = BatchNorm(input_, isTrain=self.isTrain)
            next_layer = ReLU(next_layer)
            next_layer = Conv(next_layer,
                              kernel_size=3,
                              stride=1,
                              output_channels=self.growth_rate)
            next_layer = DropOut(next_layer, isTrain=self.isTrain, rate=0.2)

            return next_layer
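For context, composite layers like this one are typically chained by concatenating each output onto a running feature stack. A hypothetical TF1-style sketch of that wiring; DenseBlock and its scoping are assumptions, not the original code:

    def DenseBlock(self, input_, num_layers):
        # Hypothetical sketch: each composite layer's output is concatenated
        # onto the stack along the channel axis (NHWC layout assumed).
        stack = input_
        for i in range(num_layers):
            with tf.variable_scope("Layer_%d" % i):
                new_features = self.Layer(stack)
                stack = tf.concat([stack, new_features], axis=3)
        return stack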
Example #7
    def __init__(self, block, layers, args, zero_init_residual=False):
        super(ResNetImagenet, self).__init__()
        num_classes = 1000
        self.name = args.model
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = ReLU(args, inplace=True, channel_count=64,
                         entropy_approximation=args.ea)  # ReLuPCA(args, planes)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(args, block, 64, layers[0])
        self.layer2 = self._make_layer(args, block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(args, block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(args, block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
Example #8
    def __init__(self, block, layers, args, depth, zero_init_residual=False):
        super(ResNetImagenet, self).__init__()
        num_classes = 1000
        self.name = args.model
        if args.noQuantEdges:
            self.depth = depth - 2
        else:
            self.depth = depth - 1
        self.inplanes = 64
        self.conv1 = Conv2d(args, 3, 64, kernel_size=7, stride=2, padding=3,
                            bias=False)
        self.bn1 = BatchNorm2d(64)
        self.relu = ReLU(args, inplace=True)  # ReLuPCA(args, planes)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(args, block, 64, layers[0])
        self.layer2 = self._make_layer(args, block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(args, block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(args, block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.quantBlocks = self.buildQuantBlocks(args.noQuantEdges)
Example #9
def conv_1x1_bn(inp, oup, args):
    return nn.Sequential(
        Conv2d(args, inp, oup, 1, 1, 0, bias=False),
        BatchNorm2d(oup),
        ReLU(args, inplace=True, relu6=True),
    )
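A hedged usage sketch; the channel counts and input shape are illustrative, and args stands in for whatever config object the surrounding code threads through:

    import torch

    # Illustrative only: a 320 -> 1280 pointwise head, MobileNetV2-style
    head = conv_1x1_bn(320, 1280, args)
    y = head(torch.randn(1, 320, 7, 7))  # y.shape == torch.Size([1, 1280, 7, 7])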