Example #1
    def __init__(self, inplanes, planes, baseWidth, cardinality, stride=1, downsample=None):
        """ Constructor
        Args:
            inplanes: input channel dimensionality
            planes: output channel dimensionality
            baseWidth: base width.
            cardinality: num of convolution groups.
            stride: conv stride. Replaces pooling layer.
        """
        super(Bottleneck, self).__init__()

        D = int(math.floor(planes * (baseWidth / 64)))
        C = cardinality

        self.conv1 = L.Conv2d(inplanes, D * C, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = L.BatchNorm2d(D * C)
        self.conv2 = L.Conv2d(
            D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False
        )
        self.bn2 = L.BatchNorm2d(D * C)
        self.conv3 = L.Conv2d(D * C, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = L.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)

        self.downsample = downsample
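
For context, a minimal sketch of the forward pass such a block typically pairs with (the standard ResNeXt residual pattern; everything below the `def` line is an assumption, not part of the snippet):

    def forward(self, x):
        residual = x

        out = self.relu(self.bn1(self.conv1(x)))    # 1x1: project to D * C channels
        out = self.relu(self.bn2(self.conv2(out)))  # 3x3 grouped conv with C groups
        out = self.bn3(self.conv3(out))             # 1x1: expand to planes * 4

        if self.downsample is not None:
            residual = self.downsample(x)           # match shape on the skip path

        return self.relu(out + residual)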
Example #2
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Two 3x3 convs; only the first may stride to reduce spatial size.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = L.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = L.BatchNorm2d(planes)
        # Optional projection on the skip path when the shape changes.
        self.downsample = downsample
        self.stride = stride
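
The snippet relies on a `conv3x3` helper that is not shown. In torchvision's resnet.py it is defined as below; assuming the `L` alias is API-compatible with `torch.nn`, the same definition works here:

    def conv3x3(in_planes, out_planes, stride=1):
        """3x3 convolution with padding, as in torchvision's resnet.py."""
        return L.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                        padding=1, bias=False)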
Example #3
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 reduce -> 3x3 (possibly strided) -> 1x1 expand by the class
        # attribute `expansion` (see the helper sketch below).
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = L.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = L.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = L.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
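
`conv1x1` and the `expansion` class attribute are likewise referenced but not defined in the snippet. In torchvision's Bottleneck, `expansion = 4` and the helper is:

    def conv1x1(in_planes, out_planes, stride=1):
        """1x1 convolution, as in torchvision's resnet.py."""
        return L.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                        bias=False)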
Example #4
    def __init__(
        self,
        in_planes,
        out_planes,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=False,
    ):
        super(BasicConv2d, self).__init__()

        self.basicconv = nn.Sequential(
            L.Conv2d(
                in_planes,
                out_planes,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
                bias=bias,
            ),
            L.BatchNorm2d(out_planes),
            nn.ReLU(inplace=True),
        )
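
A minimal usage sketch (assumptions: `L` is API-compatible with `torch.nn`, and the module's `forward`, not shown, simply applies `self.basicconv`):

    import torch

    # 3x3 conv-BN-ReLU unit; padding=1 offsets the 3x3 kernel, so the spatial
    # size is preserved and only the channel count changes.
    unit = BasicConv2d(64, 128, kernel_size=3, padding=1)
    out = unit(torch.randn(2, 64, 32, 32))
    print(out.shape)  # torch.Size([2, 128, 32, 32])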
Example #5
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack n bottleneck modules where n is inferred from the depth of the network.
        Args:
            block: block type used to construct ResNext
            planes: number of output channels (need to multiply by block.expansion)
            blocks: number of blocks to be built
            stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
        Returns: a Module consisting of n sequential bottlenecks.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                L.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                L.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes, planes, self.baseWidth, self.cardinality,
                  stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, self.baseWidth, self.cardinality))

        return nn.Sequential(*layers)
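
Only the first block receives `stride` and `downsample`; the remaining `blocks - 1` are built with the defaults (stride 1, identity skip) at the already-expanded width. A worked trace of the bookkeeping, with hypothetical numbers:

    # _make_layer(Bottleneck, planes=128, blocks=4, stride=2), starting from
    # self.inplanes == 64 and block.expansion == 4:
    inplanes, planes, blocks, stride, expansion = 64, 128, 4, 2, 4
    print(stride != 1 or inplanes != planes * expansion)  # True -> 1x1 downsample
    print(planes * expansion)  # 512: self.inplanes after the first block
    # Blocks 1-3 then map 512 -> 512 channels with stride 1 and identity skip.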
Example #6
    def __init__(self, baseWidth, cardinality, layers, num_classes):
        """Constructor
        Args:
            baseWidth: baseWidth for ResNeXt.
            cardinality: number of convolution groups.
            layers: config of layers, e.g., [3, 4, 6, 3]
            num_classes: number of classes
        """
        super(ResNeXt, self).__init__()
        block = Bottleneck

        self.cardinality = cardinality
        self.baseWidth = baseWidth
        self.num_classes = num_classes
        self.inplanes = 64
        self.output_size = 64

        self.conv1 = L.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = L.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], 2)
        self.layer3 = self._make_layer(block, 256, layers[2], 2)
        self.layer4 = self._make_layer(block, 512, layers[3], 2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
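
For reference, the standard ResNeXt-50 (32x4d) configuration instantiates this constructor as below (a sketch; the `forward` chain is not shown in the snippet, and the fixed `AvgPool2d(7)` implies 224x224 inputs):

    import torch

    model = ResNeXt(baseWidth=4, cardinality=32, layers=[3, 4, 6, 3],
                    num_classes=1000)
    logits = model(torch.randn(1, 3, 224, 224))  # assumes the usual forward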
Example #7
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = L.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = L.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
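
The usual torchvision-style factory wraps this constructor; for example, ResNet-50 (a sketch, with pretrained-weight loading omitted):

    def resnet50(num_classes=1000, zero_init_residual=False):
        """ResNet-50: Bottleneck blocks with stage depths [3, 4, 6, 3]."""
        return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes,
                      zero_init_residual=zero_init_residual)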
Example #8
    def __init__(self, h_C, l_C):
        super(SIM, self).__init__()
        self.h2l_pool = nn.AvgPool2d((2, 2), stride=2)
        self.l2h_up = cus_sample

        self.h2l_0 = L.Conv2d(h_C, l_C, 3, 1, 1)
        self.h2h_0 = L.Conv2d(h_C, h_C, 3, 1, 1)
        self.bnl_0 = L.BatchNorm2d(l_C)
        self.bnh_0 = L.BatchNorm2d(h_C)

        self.h2h_1 = L.Conv2d(h_C, h_C, 3, 1, 1)
        self.h2l_1 = L.Conv2d(h_C, l_C, 3, 1, 1)
        self.l2h_1 = L.Conv2d(l_C, h_C, 3, 1, 1)
        self.l2l_1 = L.Conv2d(l_C, l_C, 3, 1, 1)
        self.bnl_1 = L.BatchNorm2d(l_C)
        self.bnh_1 = L.BatchNorm2d(h_C)

        self.h2h_2 = L.Conv2d(h_C, h_C, 3, 1, 1)
        self.l2h_2 = L.Conv2d(l_C, h_C, 3, 1, 1)
        self.bnh_2 = L.BatchNorm2d(h_C)

        self.relu = nn.ReLU(True)
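
The naming convention (h = high-resolution branch with `h_C` channels, l = low-resolution branch with `l_C` channels) suggests a two-scale interaction module; `cus_sample` is an external upsampling helper that is not shown. A hedged instantiation sketch:

    # Assumption: SIM derives a half-resolution branch (l_C channels) from the
    # high-resolution input via h2l_pool, exchanges information between the two
    # branches over three stages, and fuses back at the high resolution.
    sim = SIM(h_C=64, l_C=32)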
Example #9
    def __init__(self, in_hc=64, in_lc=256, out_c=64, main=0):
        super(conv_2nV1, self).__init__()
        self.main = main
        mid_c = min(in_hc, in_lc)
        self.relu = nn.ReLU(True)
        self.h2l_pool = nn.AvgPool2d((2, 2), stride=2)
        self.l2h_up = nn.Upsample(scale_factor=2, mode="nearest")

        # stage 0
        self.h2h_0 = L.Conv2d(in_hc, mid_c, 3, 1, 1)
        self.l2l_0 = L.Conv2d(in_lc, mid_c, 3, 1, 1)
        self.bnh_0 = L.BatchNorm2d(mid_c)
        self.bnl_0 = L.BatchNorm2d(mid_c)

        # stage 1
        self.h2h_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.h2l_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.l2h_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.l2l_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.bnl_1 = L.BatchNorm2d(mid_c)
        self.bnh_1 = L.BatchNorm2d(mid_c)

        if self.main == 0:
            # stage 2
            self.h2h_2 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
            self.l2h_2 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
            self.bnh_2 = L.BatchNorm2d(mid_c)

            # stage 3
            self.h2h_3 = L.Conv2d(mid_c, out_c, 3, 1, 1)
            self.bnh_3 = L.BatchNorm2d(out_c)

            self.identity = L.Conv2d(in_hc, out_c, 1)

        elif self.main == 1:
            # stage 2
            self.h2l_2 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
            self.l2l_2 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
            self.bnl_2 = L.BatchNorm2d(mid_c)

            # stage 3
            self.l2l_3 = L.Conv2d(mid_c, out_c, 3, 1, 1)
            self.bnl_3 = L.BatchNorm2d(out_c)

            self.identity = L.Conv2d(in_lc, out_c, 1)

        else:
            raise NotImplementedError
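
A usage sketch for both variants; `main` selects which branch carries the output (the 1x1 `identity` conv taps `in_hc` when `main == 0` and `in_lc` when `main == 1`):

    # main=0: output at the high-resolution scale, out_c channels
    fuse_high = conv_2nV1(in_hc=64, in_lc=256, out_c=64, main=0)
    # main=1: output at the low-resolution scale, out_c channels
    fuse_low = conv_2nV1(in_hc=64, in_lc=256, out_c=64, main=1)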
Example #10
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                L.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
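
A shape sketch for this generic version (hypothetical: `net` is a torchvision-style ResNet whose `self.inplanes` is 64 at this point, and `Bottleneck.expansion == 4`):

    import torch

    stage = net._make_layer(Bottleneck, planes=128, blocks=4, stride=2)
    x = torch.randn(1, 64, 56, 56)
    print(stage(x).shape)  # torch.Size([1, 512, 28, 28]); stride 2 halves H, W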
Example #11
    def __init__(self, in_hc=64, in_mc=256, in_lc=512, out_c=64):
        super(conv_3nV1, self).__init__()
        self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
        self.downsample = nn.AvgPool2d((2, 2), stride=2)

        mid_c = 64
        self.relu = nn.ReLU(True)

        # stage 0
        self.h2h_0 = L.Conv2d(in_hc, mid_c, 3, 1, 1)
        self.m2m_0 = L.Conv2d(in_mc, mid_c, 3, 1, 1)
        self.l2l_0 = L.Conv2d(in_lc, mid_c, 3, 1, 1)
        self.bnh_0 = L.BatchNorm2d(mid_c)
        self.bnm_0 = L.BatchNorm2d(mid_c)
        self.bnl_0 = L.BatchNorm2d(mid_c)

        # stage 1
        self.h2h_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.h2m_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.m2h_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.m2m_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.m2l_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.l2m_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.l2l_1 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.bnh_1 = L.BatchNorm2d(mid_c)
        self.bnm_1 = L.BatchNorm2d(mid_c)
        self.bnl_1 = L.BatchNorm2d(mid_c)

        # stage 2
        self.h2m_2 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.l2m_2 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.m2m_2 = L.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.bnm_2 = L.BatchNorm2d(mid_c)

        # stage 3
        self.m2m_3 = L.Conv2d(mid_c, out_c, 3, 1, 1)
        self.bnm_3 = L.BatchNorm2d(out_c)

        self.identity = L.Conv2d(in_mc, out_c, 1)
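
The same pattern extended to three scales. The `identity` path taps the middle branch (`in_mc`), so the fused output is expected at the middle resolution; a hedged instantiation sketch using the constructor's own defaults:

    # Assumption: the three inputs are feature maps at neighboring scales
    # (high at 2x the middle resolution, low at 1/2), e.g. adjacent backbone
    # stages with 64 / 256 / 512 channels.
    fuse3 = conv_3nV1(in_hc=64, in_mc=256, in_lc=512, out_c=64)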