def __init__(self, inplanes, planes, stride=1, downsample=None):
     """Build a BasicBlock: two 3x3 convs, each followed by batch norm.

     `NN.BatchNorm2d` is presumably a synchronized/multi-GPU BatchNorm
     wrapper defined elsewhere in this project — TODO confirm.
     """
     super(BasicBlock, self).__init__()
     # First 3x3 conv carries the stride (optional spatial downsampling).
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = NN.BatchNorm2d(planes)
     self.relu = nn.ReLU(inplace=True)
     # Second 3x3 conv keeps resolution and channel count.
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = NN.BatchNorm2d(planes)
     # Optional projection for the identity shortcut; may be None.
     self.downsample = downsample
     self.stride = stride
Exemplo n.º 2
0
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """Bottleneck block: 1x1 reduce -> 3x3 (strided) -> 1x1 expand."""
     super(Bottleneck, self).__init__()
     expanded = planes * self.expansion  # output width of the block
     # 1x1 conv reduces channels to `planes`.
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = NN.BatchNorm2d(planes)
     # 3x3 conv carries the stride (spatial downsampling happens here).
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                            padding=1, bias=False)
     self.bn2 = NN.BatchNorm2d(planes)
     # 1x1 conv expands channels by the block's expansion factor.
     self.conv3 = nn.Conv2d(planes, expanded, kernel_size=1, bias=False)
     self.bn3 = NN.BatchNorm2d(expanded)
     self.relu = nn.ReLU(inplace=True)
     # Optional projection for the identity shortcut; may be None.
     self.downsample = downsample
     self.stride = stride
    def __init__(self, block, layers, num_classes=1000):
        """Assemble a ResNet backbone: stem plus four residual stages.

        NOTE(review): the classifier head (avgpool/fc) is deliberately
        absent here, so `num_classes` is currently unused — it is kept
        for signature compatibility with the standard ResNet ctor.
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = NN.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # He initialization for convs; BN starts as identity (weight=1, bias=0).
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
Exemplo n.º 4
0
    def __init__(self, inchannels, outchannels, upfactor=2):
        """Adaptive output head: conv-BN-ReLU-conv, then bilinear upsample."""
        super(AO, self).__init__()
        self.inchannels = inchannels
        self.outchannels = outchannels
        self.upfactor = upfactor

        # The first conv halves the channel count before projecting out.
        half = int(self.inchannels / 2)
        self.adapt_conv = nn.Sequential(
            nn.Conv2d(in_channels=self.inchannels, out_channels=half,
                      kernel_size=3, padding=1, stride=1, bias=True),
            NN.BatchNorm2d(num_features=half),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=half, out_channels=self.outchannels,
                      kernel_size=3, padding=1, stride=1, bias=True),
            nn.Upsample(scale_factor=self.upfactor, mode='bilinear',
                        align_corners=True))

        self.init_params()
Exemplo n.º 5
0
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks to form one ResNet stage.

        Side effect: advances `self.inplanes` to the stage's output width.
        """
        # A projection shortcut is required whenever the residual branch
        # changes spatial size or channel count.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                NN.BatchNorm2d(planes * block.expansion),
            )

        # First block may downsample; the remaining blocks keep shape.
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        layers.extend(block(self.inplanes, planes) for _ in range(1, blocks))

        return nn.Sequential(*layers)
Exemplo n.º 6
0
    def __init__(self, inchannels, midchannels=512):
        """Feature transfer block: channel-adapting conv plus a conv branch."""
        super(FTB, self).__init__()
        self.in1 = inchannels
        self.mid = midchannels

        # Adapts input channels to the working width (no BN on this path).
        self.conv1 = nn.Conv2d(in_channels=self.in1, out_channels=self.mid,
                               kernel_size=3, padding=1, stride=1, bias=True)
        # Branch: ReLU -> conv -> BN -> ReLU -> conv.
        self.conv_branch = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=self.mid, out_channels=self.mid,
                      kernel_size=3, padding=1, stride=1, bias=True),
            NN.BatchNorm2d(num_features=self.mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=self.mid, out_channels=self.mid,
                      kernel_size=3, padding=1, stride=1, bias=True))
        self.relu = nn.ReLU(inplace=True)

        self.init_params()
# Parity check: NN.BatchNorm2d (presumably a synchronized, multi-GPU
# BatchNorm — TODO confirm) against the stock nn.BatchNorm2d.
if num_gpu < 2:
    print(
        "No multi-gpu found. NN.BatchNorm2d will act as normal nn.BatchNorm2d")

# Reference model: plain nn.BatchNorm2d on a single device.
m1 = nn.Sequential(
    nn.Conv2d(3, 3, 1, 1, bias=False),
    nn.BatchNorm2d(3),
    nn.ReLU(inplace=True),
    nn.Conv2d(3, 3, 1, 1, bias=False),
    nn.BatchNorm2d(3),
).cuda()
torch.manual_seed(123)  # fixed seed so both models start with identical weights
init_weight(m1)
# Candidate model: NN.BatchNorm2d, later wrapped in DataParallel.
m2 = nn.Sequential(
    nn.Conv2d(3, 3, 1, 1, bias=False),
    NN.BatchNorm2d(3),
    nn.ReLU(inplace=True),
    nn.Conv2d(3, 3, 1, 1, bias=False),
    NN.BatchNorm2d(3),
).cuda()
torch.manual_seed(123)  # re-seed so m2's init matches m1's exactly
init_weight(m2)
m2 = nn.DataParallel(m2, device_ids=range(num_gpu))
# One SGD optimizer per model, same hyper-parameters.
o1 = torch.optim.SGD(m1.parameters(), 1e-3)
o2 = torch.optim.SGD(m2.parameters(), 1e-3)
y = torch.ones(num_gpu).float().cuda()
torch.manual_seed(123)  # seed the random input stream used by the loop below
for _ in range(100):
    x = torch.rand(num_gpu, 3, 2, 2).cuda()
    o1.zero_grad()
    z1 = m1(x)