Example #1
0
    def __init__(self, feature_levels=(3, 4, 5), pretrained=True):
        """Wrap a torchvision SqueezeNet 1.1 as a multi-stage feature backbone.

        Args:
            feature_levels: pyramid levels whose outputs are exposed
                (validated by ``_check_levels``).
            pretrained: if True, load ImageNet weights for the backbone.
        """
        super().__init__()
        _check_levels(feature_levels)
        self.forward_levels = tuple(range(1, feature_levels[-1] + 1))
        self.feature_levels = feature_levels

        from torchvision.models.squeezenet import Fire, squeezenet1_1
        net = squeezenet1_1(pretrained=pretrained)
        # Only the convolutional trunk is needed; drop the classifier head.
        del net.classifier
        features = net.features
        # NOTE(review): padding added to the stem conv — presumably to keep
        # feature-map sizes aligned across stages; confirm against callers.
        features[0].padding = (1, 1)

        # Split the sequential trunk into four stages.
        self.layer1 = features[:2]
        self.layer2 = features[2:5]
        self.layer3 = features[5:8]
        self.layer4 = features[8:]

        if 5 in feature_levels:
            # Extra downsampling stage only when level 5 is requested.
            self.layer5 = nn.Sequential(
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(512, 64, 256, 256),
            )

        stage_channels = [64, 128, 256, 512, 512]
        self.out_channels = [stage_channels[lvl - 1] for lvl in feature_levels]
Example #2
0
def get_squeezenet_features(input_n_channels):
    """Return the SqueezeNet v1.1-style feature layers as a tuple of modules.

    Args:
        input_n_channels: number of channels in the input image.

    Returns:
        A tuple of un-wired layers (stem conv + ReLU, then three
        pool-and-Fire groups) in forward order.
    """
    layers = [
        Conv2d(input_n_channels, 64, kernel_size=3, stride=1),
        ReLU(inplace=True),
        MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
        Fire(64, 16, 64, 64),
        Fire(128, 16, 64, 64),
        MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
        Fire(128, 32, 128, 128),
        Fire(256, 32, 128, 128),
        MaxPool2d(kernel_size=2, stride=2, ceil_mode=True),
        Fire(256, 48, 192, 192),
        Fire(384, 48, 192, 192),
        Fire(384, 64, 256, 256),
        Fire(512, 64, 256, 256),
    ]
    return tuple(layers)
Example #3
0
 def __init__(self, verion=1.0, num_classes=1000):
     """Build the sq_feature backbone: a SqueezeNet trunk followed by
     extra Fire/conv stages producing progressively smaller feature maps.

     Args:
         verion: version flag (NOTE: misspelled in the original API and
             unused by the body; name kept for backward compatibility).
         num_classes: stored on the instance; not used to build layers here.
     """
     super(sq_feature, self).__init__()
     self.num_classes = num_classes
     # SqueezeNet-style stem + first two Fire groups.
     self.conv1 = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=3, stride=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
         Fire(64, 16, 64, 64),
         Fire(128, 16, 64, 64),
         nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
         Fire(128, 32, 128, 128),
         Fire(256, 32, 128, 128),
     )
     self.Fire1 = nn.Sequential(
         nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
         Fire(256, 48, 192, 192),
         Fire(384, 48, 192, 192),
         Fire(384, 64, 256, 256),
         Fire(512, 64, 256, 256),
     )
     self.Fire2 = nn.Sequential(
         nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
         Fire(512, 96, 384, 384),
     )
     self.Fire3 = nn.Sequential(
         nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
         Fire(768, 96, 384, 384),
     )
     self.Fire4 = nn.Sequential(
         conv_bn(768, 128, 1, 1),
         conv_bn(128, 128, 2),
     )
     self.Fire5 = nn.Sequential(conv_bn(128, 64, 1, 1), conv_bn(64, 128, 2))
     # Kaiming-uniform init for every conv layer; biases zeroed.
     for m in self.modules():
         if isinstance(m, nn.Conv2d):
             # Fixed: nn.init.kaiming_uniform (no trailing underscore) is
             # the deprecated/removed API; use the in-place variant on the
             # parameter directly instead of going through .data.
             nn.init.kaiming_uniform_(m.weight)
             if m.bias is not None:
                 m.bias.data.zero_()
Example #4
0
    def __init__(self, num_classes=(10, 4), extract=('4', '7', '12')):
        """SqueezeNet 1.1 trunk with two 1x1-conv classifier heads
        (slide and domain), plus a list of layer indices to extract.

        Args:
            num_classes: (slide_classes, domain_classes) for the two heads.
            extract: string indices of feature layers to tap (stored only;
                used elsewhere in the class).
        """
        super().__init__()
        self.extract_layers = extract
        self.num_classes = num_classes
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(64, 16, 64, 64),
            Fire(128, 16, 64, 64),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(128, 32, 128, 128),
            Fire(256, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
            Fire(256, 48, 192, 192),
            Fire(384, 48, 192, 192),
            Fire(384, 64, 256, 256),
            Fire(512, 64, 256, 256),
        )

        # Final convolutions are initialized differently from the rest.
        final_conv = [
            nn.Conv2d(512, self.num_classes[0], kernel_size=1),
            nn.Conv2d(512, self.num_classes[1], kernel_size=1)
        ]

        self.classifier_slide = nn.Sequential(
            nn.Dropout(p=0.5), final_conv[0], nn.ReLU(inplace=True),
            nn.AvgPool2d(256, ceil_mode=True))
        self.classifier_domain = nn.Sequential(
            nn.Dropout(p=0.5), final_conv[1], nn.ReLU(inplace=True),
            nn.AvgPool2d(256, ceil_mode=True))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Membership check relies on identity (nn.Module does not
                # override __eq__), picking out the two head convs.
                if m in final_conv:
                    # Fixed: init.normal / init.kaiming_uniform (without the
                    # trailing underscore) are the deprecated torch APIs,
                    # removed in modern releases; use the in-place variants
                    # on the parameter rather than .data.
                    init.normal_(m.weight, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()

        self._initialize()
Example #5
0
    def __init__(self, version=1.0, num_classes=1000, pretrained=False):
        """Replicate SqueezeNet 1.0 but split its feature extractor into
        four stages so intermediate activations can be inspected.

        Args:
            version: only 1.0 is supported.
            num_classes: forwarded to the parent constructor.
            pretrained: if True, load weights from ``model_urls``.

        Raises:
            NotImplementedError: when *version* is not 1.0.
        """
        if version != 1.0:
            raise NotImplementedError("No support for version 1.1 yet")

        # The parent expects the version as a tag like "1_0".
        version_tag = "{:.1f}".format(version).replace('.', '_')
        super().__init__(version=version_tag, num_classes=num_classes)
        self.version = version

        # Each stage ends in a ReLU, so every stage output is >= 0.
        if version == 1.0:
            self.features_1 = nn.Sequential(
                nn.Conv2d(3, 96, kernel_size=7, stride=2),  # 0 - 0
                nn.ReLU(inplace=True),
            )
            self.features_2 = nn.Sequential(
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(96, 16, 64, 64),  # 3 - 1
                Fire(128, 16, 64, 64),  # 4 - 2
                Fire(128, 32, 128, 128),  # 5 - 3
            )
            self.features_3 = nn.Sequential(
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 32, 128, 128),  # 7 - 1
                Fire(256, 48, 192, 192),  # 8 - 2
                Fire(384, 48, 192, 192),  # 9 - 3
                Fire(384, 64, 256, 256),  # 10 4
            )
            self.features_4 = nn.Sequential(
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(512, 64, 256, 256),  # 12 - 1
            )

        if pretrained:
            self.load_state_dict(model_zoo.load_url(model_urls[version]))
Example #6
0
    def __init__(self, n_classes=21, pretrained=False):
        """SegNet-style encoder/decoder built from SqueezeNet Fire modules.

        Args:
            n_classes: number of segmentation classes in the final output.
            pretrained: forwarded to ``self.init_weights``.
        """
        super(segnet_squeeze, self).__init__()

        def indexed_pool():
            # Pooling that also returns indices, required by MaxUnpool2d
            # in the decoder.
            return nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True,
                                return_indices=True)

        # ---- Encoder (downsampling) ----
        self.conv1 = nn.Conv2d(3, 96, kernel_size=7, stride=2)
        self.relu1 = nn.ReLU(inplace=True)
        self.pool1 = indexed_pool()

        self.fire2 = Fire(96, 16, 64, 64)
        self.fire3 = Fire(128, 16, 64, 64)
        self.fire4 = Fire(128, 32, 128, 128)
        self.pool2 = indexed_pool()

        self.fire5 = Fire(256, 32, 128, 128)
        self.fire6 = Fire(256, 48, 192, 192)
        self.fire7 = Fire(384, 48, 192, 192)
        self.fire8 = Fire(384, 64, 256, 256)
        self.pool3 = indexed_pool()

        self.fire9 = Fire(512, 64, 256, 256)

        # Bottleneck 1x1 convs at the encoder/decoder junction.
        self.conv10 = nn.Conv2d(512, 1000, kernel_size=1)
        self.relu10 = nn.ReLU(inplace=True)

        # ---- Decoder (upsampling) ----
        self.conv10_D = nn.Conv2d(1000, 512, kernel_size=1)
        self.relu10_D = nn.ReLU(inplace=True)

        self.fire9_D = Fire(512, 64, 256, 256)

        self.unpool3 = nn.MaxUnpool2d(kernel_size=3, stride=2)
        self.fire8_D = Fire(512, 48, 192, 192)
        self.fire7_D = Fire(384, 48, 192, 192)
        self.fire6_D = Fire(384, 32, 128, 128)
        self.fire5_D = Fire(256, 32, 128, 128)

        self.unpool2 = nn.MaxUnpool2d(kernel_size=3, stride=2)
        self.fire4_D = Fire(256, 16, 64, 64)
        self.fire3_D = Fire(128, 16, 64, 64)
        self.fire2_D = Fire(128, 12, 48, 48)

        self.unpool1 = nn.MaxUnpool2d(kernel_size=3, stride=2)
        self.conv1_D = nn.ConvTranspose2d(96,
                                          n_classes,
                                          kernel_size=10,
                                          stride=2,
                                          padding=1)

        self.init_weights(pretrained)
Example #7
0
    def __init__(self, input_size, version='1_0', num_classes=10):
        """Build a SqueezeNet (v1.0 or v1.1) with a 1x1-conv classifier head.

        Args:
            input_size: accepted for API compatibility; not used by this
                constructor.
            version: '1_0' or '1_1', selecting the feature architecture.
            num_classes: number of output classes for the classifier head.

        Raises:
            ValueError: on an unsupported *version* string.
        """
        super(SqueezeNet, self).__init__()
        self.num_classes = num_classes
        # NOTE(review): hard-coded to 10 regardless of num_classes — confirm
        # this is intentional with callers.
        self.output_size = 10

        if version == '1_0':
            feature_layers = [
                nn.Conv2d(3, 96, kernel_size=7, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(96, 16, 64, 64),
                Fire(128, 16, 64, 64),
                Fire(128, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 32, 128, 128),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(512, 64, 256, 256),
            ]
        elif version == '1_1':
            feature_layers = [
                nn.Conv2d(3, 64, kernel_size=3, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(64, 16, 64, 64),
                Fire(128, 16, 64, 64),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(128, 32, 128, 128),
                Fire(256, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                Fire(512, 64, 256, 256),
            ]
        else:
            # FIXME: Is this needed? SqueezeNet should only be called from the
            # FIXME: squeezenet1_x() functions
            # FIXME: This checking is not done for the other models
            raise ValueError("Unsupported SqueezeNet version {version}:"
                             "1_0 or 1_1 expected".format(version=version))
        self.features = nn.Sequential(*feature_layers)

        # The final convolution is initialized differently from the rest.
        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1)),
        )

        for module in self.modules():
            if not isinstance(module, nn.Conv2d):
                continue
            if module is final_conv:
                init.normal_(module.weight, mean=0.0, std=0.01)
            else:
                init.kaiming_uniform_(module.weight)
            if module.bias is not None:
                init.constant_(module.bias, 0)
Example #8
0
    def __init__(self, version=1.0, num_classes=1000):
        """Single-channel (depth-image) SqueezeNet feature extractor.

        Args:
            version: 1.0 or 1.1, selecting the feature architecture.
            num_classes: stored on the instance; not used to build layers
                in this constructor.

        Raises:
            ValueError: if *version* is neither 1.0 nor 1.1.
        """
        super(SqueezeNetDepth, self).__init__()
        if version not in [1.0, 1.1]:
            raise ValueError("Unsupported SqueezeNet version {version}:"
                             "1.0 or 1.1 expected".format(version=version))
        self.num_classes = num_classes
        if version == 1.0:
            self.features = nn.Sequential(
                # -- Changed the input channel size
                nn.Conv2d(1, 96, kernel_size=7, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(96, 16, 64, 64),
                Fire(128, 16, 64, 64),
                Fire(128, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 32, 128, 128),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(512, 64, 256, 256),
            )
        else:
            self.features = nn.Sequential(
                # -- Changed the input channel size
                nn.Conv2d(1, 64, kernel_size=3, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(64, 16, 64, 64),
                Fire(128, 16, 64, 64),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(128, 32, 128, 128),
                Fire(256, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                Fire(512, 64, 256, 256),
            )

        # -- Kaiming-uniform initialization for all conv layers.
        #    (The original comment said "xavier", but the code uses kaiming.)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Fixed: init.kaiming_uniform (no trailing underscore) is the
                # deprecated/removed torch API; use the in-place variant on
                # the parameter directly instead of .data.
                init.kaiming_uniform_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
Example #9
0
    def __init__(self, num_in_channels=3, version="1_1"):
        """SqueezeNet feature extractor exposing each layer as an attribute.

        Args:
            num_in_channels: channels in the input image (default RGB = 3).
            version: "1_0" or "1_1", selecting the architecture.

        Raises:
            ValueError: on an unsupported *version* string.
        """
        super().__init__()
        self.version = version

        def make_pool():
            # Shared pooling configuration used between fire groups.
            return nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)

        if version == "1_0":
            self.conv1 = nn.Conv2d(num_in_channels, 96, kernel_size=7, stride=2)
            self.relu1 = nn.ReLU(inplace=True)
            self.max_pool1 = make_pool()
            self.fire1 = Fire(96, 16, 64, 64)
            self.fire2 = Fire(128, 16, 64, 64)
            self.fire3 = Fire(128, 32, 128, 128)
            self.max_pool2 = make_pool()
            self.fire4 = Fire(256, 32, 128, 128)
            self.fire5 = Fire(256, 48, 192, 192)
            self.fire6 = Fire(384, 48, 192, 192)
            self.fire7 = Fire(384, 64, 256, 256)
            self.max_pool3 = make_pool()
            self.fire8 = Fire(512, 64, 256, 256)
        elif version == "1_1":
            self.conv1 = nn.Conv2d(num_in_channels, 64, kernel_size=3, stride=2)
            self.relu1 = nn.ReLU(inplace=True)
            self.max_pool1 = make_pool()
            self.fire1 = Fire(64, 16, 64, 64)
            self.fire2 = Fire(128, 16, 64, 64)
            self.max_pool2 = make_pool()
            self.fire3 = Fire(128, 32, 128, 128)
            self.fire4 = Fire(256, 32, 128, 128)
            self.max_pool3 = make_pool()
            self.fire5 = Fire(256, 48, 192, 192)
            self.fire6 = Fire(384, 48, 192, 192)
            self.fire7 = Fire(384, 64, 256, 256)
            self.fire8 = Fire(512, 64, 256, 256)
        else:
            raise ValueError(f"Unsupported version {version}: 1_0 or 1_1 expected")

        # Kaiming-uniform weights and zero biases for every conv layer.
        for mod in self.modules():
            if not isinstance(mod, nn.Conv2d):
                continue
            init.kaiming_uniform_(mod.weight)
            if mod.bias is not None:
                init.constant_(mod.bias, 0)