Code example #1: building an FPN-style bottom-up pathway from a torchvision ResNet
    def build_bottom_up(self, pretrained):
        backbone = self.params['backbone']

        if backbone == "resnet50":
            model = models.resnet50(pretrained=pretrained)
        elif backbone == "resnet101":
            model = models.resnet101(pretrained=pretrained)
        else:
            raise ValueError("unimplemented backbone %s" % backbone)

        # p3 ~ p5 are extracted from backbone
        p3 = nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool,
                           model.layer1, model.layer2)

        p4 = model.layer3
        p5 = model.layer4

        # build remaining layers
        in_channels = self.calc_in_channel_width(p5)
        p6 = nn.Sequential(nn.Conv2d(in_channels, 256, 3, 2, 1),
                           nn.BatchNorm2d(256))

        p7 = nn.Sequential(nn.ReLU(), nn.Conv2d(256, 256, 3, 2, 1),
                           nn.BatchNorm2d(256))

        # register bottom up layers
        self.bottom_up_layers = nn.ModuleList((p3, p4, p5, p6, p7))
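
The registered ModuleList makes the bottom-up pass a simple loop. A minimal sketch of how these layers could be consumed (the method name is an assumption, not part of the original):

    def run_bottom_up(self, x):
        # Each stage halves the spatial resolution; collect all five maps
        # so a later top-down FPN pass can merge them.
        features = []
        for layer in self.bottom_up_layers:  # p3, p4, p5, p6, p7 in order
            x = layer(x)
            features.append(x)
        return features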
Code example #2: a ResNet BasicBlock constructor using NN.BatchNorm2d
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
     self.downsample = downsample
     self.stride = stride
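
Only __init__ is shown above; in torchvision-style ResNets these layers are consumed by a residual forward pass. A sketch of that standard pattern (not part of the original excerpt):

 def forward(self, x):
     residual = x
     out = self.relu(self.bn1(self.conv1(x)))
     out = self.bn2(self.conv2(out))
     if self.downsample is not None:
         residual = self.downsample(x)  # project x so shapes match the skip
     out += residual
     return self.relu(out)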
Code example #3: a ResNet Bottleneck constructor using NN.BatchNorm2d
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
     self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                            padding=1, bias=False)
     self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
     self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
     self.bn3 = NN.BatchNorm2d(planes * self.expansion) #NN.BatchNorm2d
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Code example #4: a ConvBlock constructor with a projection shortcut
        def __init__(self, in_planes, out_planes):
            super(ConvBlock, self).__init__()
            self.bn1 = nn.BatchNorm2d(in_planes)
            conv3x3 = partial(nn.Conv2d, kernel_size=3, stride=1, padding=1, bias=False, dilation=1)
            self.conv1 = conv3x3(in_planes, int(out_planes / 2))
            self.bn2 = nn.BatchNorm2d(int(out_planes / 2))
            self.conv2 = conv3x3(int(out_planes / 2), int(out_planes / 4))
            self.bn3 = nn.BatchNorm2d(int(out_planes / 4))
            self.conv3 = conv3x3(int(out_planes / 4), int(out_planes / 4))

            self.downsample = None
            if in_planes != out_planes:
                self.downsample = nn.Sequential(nn.BatchNorm2d(in_planes),
                                                nn.ReLU(True),
                                                nn.Conv2d(in_planes, out_planes, 1, 1, bias=False))
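
The three convolutions produce out_planes/2, out_planes/4, and out_planes/4 channels, which suggests the usual hourglass pattern of concatenating them back to out_planes. A hedged sketch of such a forward pass (an assumption, since the excerpt shows only __init__; it presumes torch and torch.nn.functional as F are imported):

        def forward(self, x):
            out1 = self.conv1(F.relu(self.bn1(x)))      # out_planes/2 channels
            out2 = self.conv2(F.relu(self.bn2(out1)))   # out_planes/4 channels
            out3 = self.conv3(F.relu(self.bn3(out2)))   # out_planes/4 channels
            out = torch.cat((out1, out2, out3), dim=1)  # back to out_planes
            if self.downsample is not None:
                out = out + self.downsample(x)          # residual branch
            return out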
Code example #5: a 1×1 conv + upsample merge block constructor
    def __init__(self, in_channels, out_channels, size):
        super().__init__()

        conv = [
            nn.Conv2d(in_channels, out_channels, 1),
            nn.BatchNorm2d(out_channels)
        ]

        layer_conv = [
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels)
        ]

        self.conv = nn.Sequential(*conv)
        self.upsample = nn.Upsample(size)
        self.layer_conv = nn.Sequential(*layer_conv)
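
The names suggest a top-down merge step: a 1×1 lateral conv, an upsample to a fixed size, and a 3×3 smoothing conv. One plausible way to wire them (purely an assumption; the original forward is not shown):

    def forward(self, lateral_in, top_down):
        lateral = self.conv(lateral_in)             # 1x1 conv to align channels
        merged = lateral + self.upsample(top_down)  # upsample to `size`, then add
        return self.layer_conv(merged)              # 3x3 conv smooths aliasing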
Code example #6: the AO module constructor
    def __init__(self, inchannels, outchannels, upfactor=2):
        super(AO, self).__init__()
        self.inchannels = inchannels
        self.outchannels = outchannels
        self.upfactor = upfactor

        self.adapt_conv = nn.Sequential(
            nn.Conv2d(in_channels=self.inchannels, out_channels=self.inchannels // 2,
                      kernel_size=3, padding=1, stride=1, bias=True),
            NN.BatchNorm2d(num_features=self.inchannels // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=self.inchannels // 2, out_channels=self.outchannels,
                      kernel_size=3, padding=1, stride=1, bias=True),
            nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True))

        self.init_params()
Code example #7: the FAN base and stacking layers
        def __init__(self, num_modules=1, end_relu=False, num_landmarks=98, fname_pretrained=None):
            super(FAN, self).__init__()
            self.num_modules = num_modules
            self.end_relu = end_relu

            # Base part
            self.conv1 = CoordConvTh(256, 256, True, False,
                                     in_channels=3, out_channels=64,
                                     kernel_size=7, stride=2, padding=3)
            self.bn1 = nn.BatchNorm2d(64)
            self.conv2 = ConvBlock(64, 128)
            self.conv3 = ConvBlock(128, 128)
            self.conv4 = ConvBlock(128, 256)

            # Stacking part
            self.add_module('m0', HourGlass(1, 4, 256, first_one=True))
            self.add_module('top_m_0', ConvBlock(256, 256))
            self.add_module('conv_last0', nn.Conv2d(256, 256, 1, 1, 0))
            self.add_module('bn_end0', nn.BatchNorm2d(256))
            self.add_module('l0', nn.Conv2d(256, num_landmarks + 1, 1, 1, 0))

            if fname_pretrained is not None:
                self.load_pretrained_weights(fname_pretrained)
Code example #8: the ResNet _make_layer helper
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                NN.BatchNorm2d(planes * block.expansion), #NN.BatchNorm2d
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
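
To make the bookkeeping concrete, here is how one call expands, assuming block = Bottleneck (expansion == 4) and self.inplanes == 256 at that point (an illustration, not code from the source):

    # self.layer2 = self._make_layer(Bottleneck, 128, blocks=4, stride=2)
    #   downsample = 1x1 conv 256 -> 512 (stride 2) + NN.BatchNorm2d(512)
    #   layers     = [Bottleneck(256, 128, stride=2, downsample),
    #                 Bottleneck(512, 128), Bottleneck(512, 128), Bottleneck(512, 128)]
    #   self.inplanes is left at 512 for the next stage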
Code example #9: the FTB module constructor
    def __init__(self, inchannels, midchannels=512):
        super(FTB, self).__init__()
        self.in1 = inchannels
        self.mid = midchannels

        self.conv1 = nn.Conv2d(in_channels=self.in1,
                               out_channels=self.mid,
                               kernel_size=3,
                               padding=1,
                               stride=1,
                               bias=True)
        # NN.BatchNorm2d
        self.conv_branch = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=self.mid, out_channels=self.mid,
                      kernel_size=3, padding=1, stride=1, bias=True),
            NN.BatchNorm2d(num_features=self.mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=self.mid, out_channels=self.mid,
                      kernel_size=3, padding=1, stride=1, bias=True))
        self.relu = nn.ReLU(inplace=True)

        self.init_params()
Code example #10: a ResNet trunk with Kaiming initialization
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = NN.BatchNorm2d(64)  #NN.BatchNorm2d
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        #self.avgpool = nn.AvgPool2d(7, stride=1)
        #self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
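
For reference, the standard block/layer-count pairings give the familiar variants (a usage sketch; BasicBlock and Bottleneck are the blocks from code examples #2 and #3):

resnet50_trunk = ResNet(Bottleneck, [3, 4, 6, 3])    # resnet50-style layer counts
resnet101_trunk = ResNet(Bottleneck, [3, 4, 23, 3])  # resnet101-style layer counts
resnet18_trunk = ResNet(BasicBlock, [2, 2, 2, 2])    # resnet18-style layer counts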
Code example #11: a configurable ConvBnRelu2d block
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              padding=1,
              dilation=1,
              stride=1,
              groups=1,
              is_bn=True,
              is_relu=True):
     super(ConvBnRelu2d, self).__init__()
     self.conv = nn.Conv2d(in_channels,
                           out_channels,
                           kernel_size=kernel_size,
                           padding=padding,
                           stride=stride,
                           dilation=dilation,
                           groups=groups,
                           bias=False)
     self.bn = nn.BatchNorm2d(out_channels, eps=BN_EPS)
     self.relu = nn.ReLU(inplace=True)
     if is_bn is False:
         self.bn = None
     if is_relu is False:
         self.relu = None
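
Setting self.bn or self.relu to None only makes sense if the forward pass checks for it; a sketch of the forward these flags usually pair with (assumed, since the excerpt stops at __init__):

 def forward(self, x):
     x = self.conv(x)
     if self.bn is not None:    # skipped when is_bn=False
         x = self.bn(x)
     if self.relu is not None:  # skipped when is_relu=False
         x = self.relu(x)
     return x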
Code example #12: comparing nn.BatchNorm2d with NN.BatchNorm2d under DataParallel
if num_gpu < 2:
    print(
        "No multi-gpu found. NN.BatchNorm2d will act as normal nn.BatchNorm2d")

m1 = nn.Sequential(
    nn.Conv2d(3, 3, 1, 1, bias=False),
    nn.BatchNorm2d(3),
    nn.ReLU(inplace=True),
    nn.Conv2d(3, 3, 1, 1, bias=False),
    nn.BatchNorm2d(3),
).cuda()
torch.manual_seed(123)
init_weight(m1)
m2 = nn.Sequential(
    nn.Conv2d(3, 3, 1, 1, bias=False),
    NN.BatchNorm2d(3),
    nn.ReLU(inplace=True),
    nn.Conv2d(3, 3, 1, 1, bias=False),
    NN.BatchNorm2d(3),
).cuda()
torch.manual_seed(123)
init_weight(m2)
m2 = nn.DataParallel(m2, device_ids=range(num_gpu))
o1 = torch.optim.SGD(m1.parameters(), 1e-3)
o2 = torch.optim.SGD(m2.parameters(), 1e-3)
y = torch.ones(num_gpu).float().cuda()
torch.manual_seed(123)
for _ in range(100):
    x = torch.rand(num_gpu, 3, 2, 2).cuda()
    o1.zero_grad()
    z1 = m1(x)