Example #1
    def __init__(self, in_channels, out_channels, stride, norm="BN"):
        super().__init__()

        if not 1 <= stride <= 3:
            raise ValueError("illegal stride value")
        self.stride = stride

        branch_features = out_channels // 2
        assert stride != 1 or in_channels == branch_features << 1

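        # branch1 is the strided shortcut path. For stride 1 it stays empty:
        # the forward (not shown) splits the input and passes half of the
        # channels through it unchanged.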
        if stride > 1:
            self.branch1 = nn.Sequential(
                depthwise_conv2d(in_channels, in_channels, 3, stride, 1),
                get_norm(norm, in_channels),
                nn.Conv2d(in_channels, branch_features, 1, bias=False),
                get_norm(norm, branch_features), nn.ReLU(inplace=True))
        else:
            self.branch1 = nn.Sequential()
            in_channels = branch_features

        self.branch2 = nn.Sequential(
            nn.Conv2d(in_channels, branch_features, 1, bias=False),
            get_norm(norm, branch_features), nn.ReLU(inplace=True),
            depthwise_conv2d(branch_features, branch_features, 3, stride, 1),
            get_norm(norm, branch_features),
            nn.Conv2d(branch_features, branch_features, 1, bias=False),
            get_norm(norm, branch_features), nn.ReLU(inplace=True))
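This matches the ShuffleNetV2 unit from torchvision, with the norm layer made configurable. A minimal usage sketch, assuming the surrounding class is called InvertedResidual and the usual ShuffleNetV2 forward (channel split/concat plus channel shuffle, not shown here):

import torch

# Stride-2 unit: both branches see the full input; their outputs are
# concatenated, doubling branch_features back to out_channels.
block = InvertedResidual(in_channels=24, out_channels=48, stride=2, norm="BN")
x = torch.randn(1, 24, 56, 56)
out = block(x)  # expected shape: (1, 48, 28, 28)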
Example #2
    def __init__(self,
                 stages_repeats,
                 stages_out_channels,
                 inverted_residual=InvertedResidual,
                 norm="BN",
                 num_classes=1000,
                 out_features=None):
        super().__init__()

        if len(stages_repeats) != 3:
            raise ValueError(
                "expected stages_repeats as list of 3 positive ints")
        if len(stages_out_channels) != 5:
            raise ValueError(
                "expected stages_out_channels as list of 5 positive ints")
        self._stages_out_channels = stages_out_channels

        input_channels = 3
        output_channels = stages_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            get_norm(norm, output_channels))
        self._out_feature_strides = {"stem": 4}
        self._out_feature_channels = {"stem": output_channels}
        input_channels = output_channels

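        # Stages 2-4: the first unit of each stage downsamples (stride 2), the
        # remaining repeats - 1 units keep stride 1; the cumulative stride is
        # recorded per stage.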
        stride = 4
        stage_names = [f"stage{i}" for i in (2, 3, 4)]
        for name, repeats, output_channels in zip(
                stage_names, stages_repeats, self._stages_out_channels[1:]):
            seq = [inverted_residual(input_channels, output_channels, 2, norm)]
            for _ in range(repeats - 1):
                seq.append(
                    inverted_residual(output_channels, output_channels, 1,
                                      norm))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels
            self._out_feature_channels[name] = output_channels
            stride *= 2
            self._out_feature_strides[name] = stride

        output_channels = self._stages_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, bias=False),
            get_norm(norm, output_channels))
        self._out_feature_channels["conv5"] = output_channels
        self._out_feature_strides["conv5"] = stride

        if not out_features:
            out_features = ["linear"]
        if "linear" in out_features and num_classes is not None:
            self.fc = nn.Linear(output_channels, num_classes)
        self._out_features = out_features
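A hypothetical instantiation, assuming this __init__ belongs to a ShuffleNetV2 backbone class; the values below are the standard 1.0x configuration:

# ShuffleNetV2-1.0x: stages 2-4 repeated 4/8/4 times.
model = ShuffleNetV2(
    stages_repeats=[4, 8, 4],
    stages_out_channels=[24, 116, 232, 464, 1024],
    norm="BN",
    num_classes=1000,
)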
Example #3
    def _make_layers(self, block, channels, blocks, stride=1, dilate=False):
        norm = self.norm
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
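        # A 1x1 projection shortcut is needed whenever the stride or the
        # channel count changes across the block.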
        if stride != 1 or self.in_channels != channels * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.in_channels, channels * block.expansion, stride),
                get_norm(norm, channels * block.expansion))

        layers = [
            block(self.in_channels, channels, stride, downsample, self.groups,
                  self.base_width, previous_dilation, norm)
        ]
        self.in_channels = channels * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.in_channels,
                      channels,
                      groups=self.groups,
                      base_width=self.base_width,
                      dilation=self.dilation,
                      norm=norm))
        return nn.Sequential(*layers)
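This helper is called from the ResNet constructor in Example #8. A sketch of one such call, assuming the Bottleneck block (expansion = 4) from Example #4:

# Inside ResNet.__init__, e.g. for the second ResNet-50 stage:
# self.layer2 = self._make_layers(Bottleneck, 128, 4, stride=2)
# Only the first block downsamples and projects the shortcut; the remaining
# three run at stride 1 with self.in_channels already set to 128 * 4 = 512.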
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm="BN"):
        super().__init__()

        width = int(out_channels * (base_width / 64)) * groups
        self.conv1 = conv1x1(in_channels, width)
        self.bn1 = get_norm(norm, width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = get_norm(norm, width)
        self.conv3 = conv1x1(width, out_channels * self.expansion)
        self.bn3 = get_norm(norm, out_channels * self.expansion)
        self.downsample = downsample
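A minimal sketch, assuming this is the standard Bottleneck block with a class attribute expansion = 4 and the usual residual forward (not shown):

import torch

# 256 in, 64 * expansion = 256 out at stride 1: the identity shortcut
# suffices, so no downsample module is needed.
block = Bottleneck(in_channels=256, out_channels=64)
x = torch.randn(1, 256, 56, 56)
out = block(x)  # expected shape: (1, 256, 56, 56)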
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm="BN"):
        super().__init__()

        if groups != 1 or base_width != 64:
            raise ValueError(
                "BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError(
                "Dilation > 1 not supported in BasicBlock")

        self.conv1 = conv3x3(in_channels, out_channels, stride)
        self.bn1 = get_norm(norm, out_channels)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = get_norm(norm, out_channels)
        self.downsample = downsample
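A minimal sketch, assuming the usual BasicBlock residual forward (not shown) and expansion = 1:

import torch

block = BasicBlock(in_channels=64, out_channels=64)  # identity shortcut
x = torch.randn(1, 64, 56, 56)
out = block(x)  # expected shape: (1, 64, 56, 56)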
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 groups=1,
                 norm="BN"):
        padding = (kernel_size - 1) // 2
        super().__init__(
            nn.Conv2d(in_channels,
                      out_channels,
                      kernel_size,
                      stride,
                      padding,
                      groups=groups,
                      bias=False), get_norm(norm, out_channels),
            nn.ReLU6(inplace=True))
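Since ConvBNReLU subclasses nn.Sequential, an instance is directly callable. A quick sketch:

import torch

# Typical MobileNetV2 stem: 3x3 stride-2 conv with padding (3 - 1) // 2 = 1.
stem = ConvBNReLU(3, 32, kernel_size=3, stride=2)
x = torch.randn(1, 3, 224, 224)
out = stem(x)  # expected shape: (1, 32, 112, 112)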
Example #7
    def __init__(self, in_channels, out_channels, stride, expand_ratio, norm="BN"):
        super().__init__()

        assert stride in (1, 2)
        self.stride = stride

        hidden_dim = round(in_channels * expand_ratio)
        self.use_res_connect = stride == 1 and in_channels == out_channels

        layers = []
        if expand_ratio != 1:
            layers.append(ConvBNReLU(in_channels, hidden_dim, 1))
        layers.extend(
            [
                ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
                nn.Conv2d(hidden_dim, out_channels, 1, bias=False),
                get_norm(norm, out_channels)
            ]
        )
        self.conv = nn.Sequential(*layers)
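A minimal sketch, assuming the usual MobileNetV2 forward that applies self.conv and adds the input only when use_res_connect is true (not shown here):

import torch

# First MobileNetV2 block: expand_ratio=1 skips the pointwise expansion, and
# 32 != 16 channels means no residual connection despite stride=1.
block = InvertedResidual(in_channels=32, out_channels=16, stride=1, expand_ratio=1)
x = torch.randn(1, 32, 112, 112)
out = block(x)  # expected shape: (1, 16, 112, 112)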
Example #8
    def __init__(self,
                 block,
                 layers,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 norm="",
                 out_features=None,
                 freeze_at=0,
                 num_classes=1000):
        super().__init__()

        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]

        self.in_channels = 64
        self.dilation = 1
        self.groups = groups
        self.base_width = width_per_group
        self.norm = norm

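        # Stem: 7x7 stride-2 conv. _out_feature_strides records stride 4 for
        # the stem, so a stride-2 pooling presumably follows in forward (not
        # shown here).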
        self.conv1 = nn.Conv2d(3, self.in_channels, 7, 2, 3, bias=False)
        self.bn1 = get_norm(norm, self.in_channels)
        self.layer1 = self._make_layers(block, 64, layers[0])
        self.layer2 = self._make_layers(block, 128, layers[1], 2,
                                        replace_stride_with_dilation[0])
        self.layer3 = self._make_layers(block, 256, layers[2], 2,
                                        replace_stride_with_dilation[1])
        self.layer4 = self._make_layers(block, 512, layers[3], 2,
                                        replace_stride_with_dilation[2])
        self._out_feature_channels = {
            "stem": 64,
            "layer1": 64 * block.expansion,
            "layer2": 128 * block.expansion,
            "layer3": 256 * block.expansion,
            "layer4": 512 * block.expansion
        }
        self._out_feature_strides = {
            "stem": 4,
            "layer1": 4,
            "layer2": 8,
            "layer3": 16,
            "layer4": 32,
        }

        if not out_features:
            out_features = ["linear"]
        if "linear" in out_features and num_classes is not None:
            self.fc = nn.Linear(512 * block.expansion, num_classes)
        self._out_features = out_features

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                weight_init.c2_msra_fill(m)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

        self.freeze(freeze_at)
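A hypothetical ResNet-50 construction with this __init__, assuming the Bottleneck block from Example #4 and the freeze/forward methods not shown here. Note the default norm="", so a norm type should be passed explicitly:

model = ResNet(
    Bottleneck,
    layers=[3, 4, 6, 3],  # ResNet-50 stage depths
    norm="BN",
    num_classes=1000,
)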