Example no. 1
0
    def __init__(self,
                 num_classes,
                 init_conv_channels,
                 init_conv_stride,
                 channels,
                 mid_channels,
                 final_conv_channels,
                 strides,
                 in_channels=3,
                 in_size=(224, 224),
                 use_data_batchnorm=True):
        """Build the backbone (stem conv -> bottleneck stages -> final 1x1
        conv -> global pool) and the classifier head.

        Args:
            num_classes: number of classifier outputs.
            init_conv_channels: output channels of the stem conv.
            init_conv_stride: stride of the stem conv.
            channels: nested list; channels[stage][unit] is that unit's
                output channel count.
            mid_channels: nested list of bottleneck widths, indexed like
                ``channels``.
            final_conv_channels: output channels of the last 1x1 conv.
            strides: per-stage stride, applied to each stage's first unit.
            in_channels: number of input image channels.
            in_size: expected spatial input size (stored only).
            use_data_batchnorm: apply BatchNorm2d directly to the input.
        """
        super().__init__()
        self.use_data_batchnorm = use_data_batchnorm
        self.in_size = in_size

        backbone = torch.nn.Sequential()

        # Optional batchnorm on the raw input data.
        if self.use_data_batchnorm:
            backbone.add_module(
                "data_bn", torch.nn.BatchNorm2d(num_features=in_channels))

        # Stem: 3x3 conv with ReLU6.
        backbone.add_module(
            "init_conv",
            conv3x3_block(in_channels=in_channels,
                          out_channels=init_conv_channels,
                          stride=init_conv_stride,
                          activation="relu6"))

        # Bottleneck stages; only each stage's first unit uses the stage stride.
        width = init_conv_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = torch.nn.Sequential()
            for unit_idx, out_width in enumerate(stage_channels):
                unit = LinearBottleneck(
                    in_channels=width,
                    mid_channels=mid_channels[stage_idx][unit_idx],
                    out_channels=out_width,
                    stride=strides[stage_idx] if unit_idx == 0 else 1)
                stage.add_module("unit{}".format(unit_idx + 1), unit)
                width = out_width
            backbone.add_module("stage{}".format(stage_idx + 1), stage)

        # Final 1x1 conv followed by global average pooling.
        backbone.add_module(
            "final_conv",
            conv1x1_block(in_channels=width,
                          out_channels=final_conv_channels,
                          activation="relu6"))
        backbone.add_module("global_pool",
                            torch.nn.AdaptiveAvgPool2d(output_size=1))
        self.backbone = backbone

        # Classifier head.
        self.classifier = Classifier(in_channels=final_conv_channels,
                                     num_classes=num_classes)

        self.init_params()
Example no. 2
0
    def __init__(self,
                 channels,
                 num_classes,
                 preact=False,
                 init_unit_channels=64,
                 use_init_unit_large=True,
                 in_channels=3,
                 in_size=(224, 224),
                 use_data_batchnorm=True):
        """Build the network: optional data batchnorm, an init unit (large
        or small variant), residual stages, global average pooling, and a
        classifier head.

        Args:
            channels: nested list; ``channels[stage][unit]`` is the output
                channel count of that unit.
            num_classes: number of classifier outputs.
            preact: use pre-activation units; also appends a final
                post-activation module after the last stage.
            init_unit_channels: output channels of the init unit.
            use_init_unit_large: select ``InitUnitLarge`` over
                ``InitUnitSmall`` as the stem.
            in_channels: number of input image channels.
            in_size: expected spatial input size (stored, not enforced here).
            use_data_batchnorm: apply BatchNorm2d directly to the input data.
        """
        super().__init__()
        self.use_data_batchnorm = use_data_batchnorm
        self.use_init_unit_large = use_init_unit_large
        self.in_size = in_size

        self.backbone = torch.nn.Sequential()

        # data batchnorm
        if self.use_data_batchnorm:
            self.backbone.add_module("data_bn", torch.nn.BatchNorm2d(num_features=in_channels))

        # init unit
        if self.use_init_unit_large:
            self.backbone.add_module("init_unit", InitUnitLarge(in_channels=in_channels, out_channels=init_unit_channels))
        else:
            self.backbone.add_module("init_unit", InitUnitSmall(in_channels=in_channels, out_channels=init_unit_channels, preact=preact))

        # stages
        in_channels = init_unit_channels
        for stage_id, stage_channels in enumerate(channels):
            stage = torch.nn.Sequential()
            for unit_id, unit_channels in enumerate(stage_channels):
                # downsample at the first unit of every stage except the first
                stride = 2 if (unit_id == 0) and (stage_id != 0) else 1
                if preact:
                    stage.add_module("unit{}".format(unit_id + 1), PreactUnit(in_channels=in_channels, out_channels=unit_channels, stride=stride))
                else:
                    stage.add_module("unit{}".format(unit_id + 1), StandardUnit(in_channels=in_channels, out_channels=unit_channels, stride=stride))
                in_channels = unit_channels
            self.backbone.add_module("stage{}".format(stage_id + 1), stage)
        # pre-activation networks need one trailing BN+activation
        if preact:
            self.backbone.add_module("final_activation", PostActivation(in_channels))
        self.backbone.add_module("global_pool", torch.nn.AdaptiveAvgPool2d(output_size=1))

        # classifier
        self.classifier = Classifier(in_channels=in_channels, num_classes=num_classes)

        self.init_params()
Example no. 3
0
class ResNet(torch.nn.Module):
    """ResNet built from a nested channel configuration.

    ``channels[stage][unit]`` gives the output width of each unit; the
    first unit of every stage after the first downsamples with stride 2.
    """

    def __init__(self,
                 channels,
                 num_classes,
                 preact=False,
                 init_unit_channels=64,
                 use_init_unit_large=True,
                 in_channels=3,
                 in_size=(224, 224),
                 use_data_batchnorm=True):
        super().__init__()
        self.use_data_batchnorm = use_data_batchnorm
        self.use_init_unit_large = use_init_unit_large
        self.in_size = in_size

        backbone = torch.nn.Sequential()

        # Optional batchnorm on the raw input data.
        if self.use_data_batchnorm:
            backbone.add_module("data_bn", torch.nn.BatchNorm2d(num_features=in_channels))

        # Stem: large or small init unit.
        if self.use_init_unit_large:
            stem = InitUnitLarge(in_channels=in_channels, out_channels=init_unit_channels)
        else:
            stem = InitUnitSmall(in_channels=in_channels, out_channels=init_unit_channels, preact=preact)
        backbone.add_module("init_unit", stem)

        # Residual stages; unit flavor is fixed by ``preact``.
        unit_cls = PreactUnit if preact else StandardUnit
        width = init_unit_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = torch.nn.Sequential()
            for unit_idx, out_width in enumerate(stage_channels):
                # Downsample at the first unit of every stage but the first.
                stride = 2 if unit_idx == 0 and stage_idx != 0 else 1
                stage.add_module("unit{}".format(unit_idx + 1),
                                 unit_cls(in_channels=width, out_channels=out_width, stride=stride))
                width = out_width
            backbone.add_module("stage{}".format(stage_idx + 1), stage)
        # Pre-activation networks need one trailing BN+activation.
        if preact:
            backbone.add_module("final_activation", PostActivation(width))
        backbone.add_module("global_pool", torch.nn.AdaptiveAvgPool2d(output_size=1))
        self.backbone = backbone

        # Classifier head.
        self.classifier = Classifier(in_channels=width, num_classes=num_classes)

        self.init_params()

    def init_params(self):
        """Kaiming-initialize every backbone conv; the classifier
        initializes itself."""
        for module in self.backbone.modules():
            if isinstance(module, torch.nn.Conv2d):
                torch.nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    torch.nn.init.constant_(module.bias, 0)
        self.classifier.init_params()

    def forward(self, x):
        """Backbone features -> classifier output."""
        return self.classifier(self.backbone(x))
Example no. 4
0
class MobileNetV1(torch.nn.Module):
    """MobileNetV1: stem conv, depthwise-separable stages, global pooling,
    and a classifier head."""

    def __init__(self,
                 num_classes,
                 init_conv_channels,
                 init_conv_stride,
                 channels,
                 strides,
                 in_channels=3,
                 in_size=(224, 224),
                 use_data_batchnorm=True):
        super().__init__()
        self.use_data_batchnorm = use_data_batchnorm
        self.in_size = in_size

        backbone = torch.nn.Sequential()

        # Optional batchnorm on the raw input data.
        if self.use_data_batchnorm:
            backbone.add_module(
                "data_bn", torch.nn.BatchNorm2d(num_features=in_channels))

        # Stem convolution.
        backbone.add_module(
            "init_conv",
            conv3x3_block(in_channels=in_channels,
                          out_channels=init_conv_channels,
                          stride=init_conv_stride))

        # Stages; only each stage's first unit uses the stage stride.
        width = init_conv_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = torch.nn.Sequential()
            for unit_idx, out_width in enumerate(stage_channels):
                block = DepthwiseSeparableConvBlock(
                    in_channels=width,
                    out_channels=out_width,
                    stride=strides[stage_idx] if unit_idx == 0 else 1)
                stage.add_module("unit{}".format(unit_idx + 1), block)
                width = out_width
            backbone.add_module("stage{}".format(stage_idx + 1), stage)
        backbone.add_module("global_pool",
                            torch.nn.AdaptiveAvgPool2d(output_size=1))
        self.backbone = backbone

        # Classifier head.
        self.classifier = Classifier(in_channels=width,
                                     num_classes=num_classes)

        self.init_params()

    def init_params(self):
        """Kaiming-initialize every backbone conv; the classifier
        initializes itself."""
        for module in self.backbone.modules():
            if isinstance(module, torch.nn.Conv2d):
                torch.nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    torch.nn.init.constant_(module.bias, 0)
        self.classifier.init_params()

    def forward(self, x):
        """Backbone features -> classifier output."""
        return self.classifier(self.backbone(x))
Example no. 5
0
    def __init__(self,
                 num_classes,
                 init_conv_channels,
                 init_conv_stride,
                 final_conv_channels,
                 final_conv_se,
                 channels,
                 mid_channels,
                 strides,
                 se_units,
                 kernel_sizes,
                 activations,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 use_data_batchnorm=True):
        """Build the backbone (stem conv -> bottleneck stages -> head) and
        the classifier.

        Args:
            num_classes: number of classifier outputs.
            init_conv_channels: output channels of the stem conv.
            init_conv_stride: stride of the stem conv.
            final_conv_channels: list of one or two channel counts; the
                first 1x1 conv runs before global pooling, the optional
                second one runs after pooling (without batchnorm).
            final_conv_se: if truthy, insert an SE unit after the first
                final conv.
            channels: nested list; channels[stage][unit] is that unit's
                output channel count.
            mid_channels: nested list of bottleneck widths, indexed like
                ``channels``.
            strides: per-stage stride, applied to each stage's first unit.
            se_units: nested 0/1 flags enabling SE per unit.
            kernel_sizes: per-stage kernel size for the bottleneck units.
            activations: per-stage activation name for the bottleneck units.
            dropout_rate: dropout after the head (0.0 disables it).
            in_channels: number of input image channels.
            in_size: expected spatial input size (stored only).
            use_data_batchnorm: apply BatchNorm2d directly to the input.
        """
        super().__init__()
        self.use_data_batchnorm = use_data_batchnorm
        self.in_size = in_size
        self.dropout_rate = dropout_rate

        self.backbone = torch.nn.Sequential()

        # data batchnorm
        if self.use_data_batchnorm:
            self.backbone.add_module(
                "data_bn", torch.nn.BatchNorm2d(num_features=in_channels))

        # init conv
        self.backbone.add_module(
            "init_conv",
            conv3x3_block(in_channels=in_channels,
                          out_channels=init_conv_channels,
                          stride=init_conv_stride,
                          activation="hswish"))

        # stages
        in_channels = init_conv_channels
        for stage_id, stage_channels in enumerate(channels):
            stage = torch.nn.Sequential()
            for unit_id, unit_channels in enumerate(stage_channels):
                # only the first unit of a stage uses the stage stride
                stride = strides[stage_id] if unit_id == 0 else 1
                mid_channel = mid_channels[stage_id][unit_id]
                use_se = se_units[stage_id][unit_id] == 1
                kernel_size = kernel_sizes[stage_id]
                activation = activations[stage_id]
                stage.add_module(
                    "unit{}".format(unit_id + 1),
                    LinearBottleneck(in_channels=in_channels,
                                     mid_channels=mid_channel,
                                     out_channels=unit_channels,
                                     stride=stride,
                                     activation=activation,
                                     use_se=use_se,
                                     kernel_size=kernel_size))
                in_channels = unit_channels
            self.backbone.add_module("stage{}".format(stage_id + 1), stage)

        # head: 1x1 conv -> optional SE -> global pool -> optional second
        # 1x1 conv (after pooling, no BN) -> optional dropout
        self.backbone.add_module(
            "final_conv1",
            conv1x1_block(in_channels=in_channels,
                          out_channels=final_conv_channels[0],
                          activation="hswish"))
        in_channels = final_conv_channels[0]
        if final_conv_se:
            self.backbone.add_module(
                "final_se",
                SEUnit(channels=in_channels,
                       squeeze_factor=4,
                       squeeze_activation="relu",
                       excite_activation="hsigmoid"))
        self.backbone.add_module("final_pool",
                                 torch.nn.AdaptiveAvgPool2d(output_size=1))
        if len(final_conv_channels) > 1:
            self.backbone.add_module(
                "final_conv2",
                conv1x1_block(in_channels=in_channels,
                              out_channels=final_conv_channels[1],
                              activation="hswish",
                              use_bn=False))
            in_channels = final_conv_channels[1]
        if self.dropout_rate != 0.0:
            self.backbone.add_module("final_dropout",
                                     torch.nn.Dropout(dropout_rate))

        # classifier
        self.classifier = Classifier(in_channels=in_channels,
                                     num_classes=num_classes)

        self.init_params()
Example no. 6
0
class MobileNetV3(torch.nn.Module):
    """MobileNetV3 backbone plus classifier head.

    If you are in doubt, please use the high-level function `get_mobilenet`
    to obtain ready-to-use models.
    """

    def __init__(self,
                 num_classes,
                 init_conv_channels,
                 init_conv_stride,
                 final_conv_channels,
                 final_conv_se,
                 channels,
                 mid_channels,
                 strides,
                 se_units,
                 kernel_sizes,
                 activations,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 use_data_batchnorm=True):
        super().__init__()
        self.use_data_batchnorm = use_data_batchnorm
        self.in_size = in_size
        self.dropout_rate = dropout_rate

        backbone = torch.nn.Sequential()

        # Optional batchnorm on the raw input data.
        if self.use_data_batchnorm:
            backbone.add_module(
                "data_bn", torch.nn.BatchNorm2d(num_features=in_channels))

        # Stem: 3x3 conv with hard-swish.
        backbone.add_module(
            "init_conv",
            conv3x3_block(in_channels=in_channels,
                          out_channels=init_conv_channels,
                          stride=init_conv_stride,
                          activation="hswish"))

        # Bottleneck stages: per-stage kernel size, stride and activation;
        # per-unit SE flag and expansion width.
        width = init_conv_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = torch.nn.Sequential()
            for unit_idx, out_width in enumerate(stage_channels):
                unit = LinearBottleneck(
                    in_channels=width,
                    mid_channels=mid_channels[stage_idx][unit_idx],
                    out_channels=out_width,
                    stride=strides[stage_idx] if unit_idx == 0 else 1,
                    activation=activations[stage_idx],
                    use_se=se_units[stage_idx][unit_idx] == 1,
                    kernel_size=kernel_sizes[stage_idx])
                stage.add_module("unit{}".format(unit_idx + 1), unit)
                width = out_width
            backbone.add_module("stage{}".format(stage_idx + 1), stage)

        # Head: 1x1 conv -> optional SE -> global pool -> optional second
        # 1x1 conv (after pooling, no BN) -> optional dropout.
        backbone.add_module(
            "final_conv1",
            conv1x1_block(in_channels=width,
                          out_channels=final_conv_channels[0],
                          activation="hswish"))
        width = final_conv_channels[0]
        if final_conv_se:
            backbone.add_module(
                "final_se",
                SEUnit(channels=width,
                       squeeze_factor=4,
                       squeeze_activation="relu",
                       excite_activation="hsigmoid"))
        backbone.add_module("final_pool",
                            torch.nn.AdaptiveAvgPool2d(output_size=1))
        if len(final_conv_channels) > 1:
            backbone.add_module(
                "final_conv2",
                conv1x1_block(in_channels=width,
                              out_channels=final_conv_channels[1],
                              activation="hswish",
                              use_bn=False))
            width = final_conv_channels[1]
        if self.dropout_rate != 0.0:
            backbone.add_module("final_dropout",
                                torch.nn.Dropout(dropout_rate))
        self.backbone = backbone

        # Classifier head.
        self.classifier = Classifier(in_channels=width,
                                     num_classes=num_classes)

        self.init_params()

    def init_params(self):
        """Kaiming-initialize every backbone conv; the classifier
        initializes itself."""
        for module in self.backbone.modules():
            if isinstance(module, torch.nn.Conv2d):
                torch.nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    torch.nn.init.constant_(module.bias, 0)
        self.classifier.init_params()

    def forward(self, x):
        """Backbone features -> classifier output."""
        return self.classifier(self.backbone(x))
Example no. 7
0
class MobileNetV2(torch.nn.Module):
    """MobileNetV2 backbone plus classifier head.

    If you are in doubt, please use the high-level function `get_mobilenet`
    to obtain ready-to-use models.
    """

    def __init__(self,
                 num_classes,
                 init_conv_channels,
                 init_conv_stride,
                 channels,
                 mid_channels,
                 final_conv_channels,
                 strides,
                 in_channels=3,
                 in_size=(224, 224),
                 use_data_batchnorm=True):
        super().__init__()
        self.use_data_batchnorm = use_data_batchnorm
        self.in_size = in_size

        backbone = torch.nn.Sequential()

        # Optional batchnorm on the raw input data.
        if self.use_data_batchnorm:
            backbone.add_module(
                "data_bn", torch.nn.BatchNorm2d(num_features=in_channels))

        # Stem: 3x3 conv with ReLU6.
        backbone.add_module(
            "init_conv",
            conv3x3_block(in_channels=in_channels,
                          out_channels=init_conv_channels,
                          stride=init_conv_stride,
                          activation="relu6"))

        # LinearBottleneck stages; only each stage's first unit uses the
        # stage stride.
        width = init_conv_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = torch.nn.Sequential()
            for unit_idx, out_width in enumerate(stage_channels):
                unit = LinearBottleneck(
                    in_channels=width,
                    mid_channels=mid_channels[stage_idx][unit_idx],
                    out_channels=out_width,
                    stride=strides[stage_idx] if unit_idx == 0 else 1)
                stage.add_module("unit{}".format(unit_idx + 1), unit)
                width = out_width
            backbone.add_module("stage{}".format(stage_idx + 1), stage)

        # Final 1x1 conv followed by global average pooling.
        backbone.add_module(
            "final_conv",
            conv1x1_block(in_channels=width,
                          out_channels=final_conv_channels,
                          activation="relu6"))
        backbone.add_module("global_pool",
                            torch.nn.AdaptiveAvgPool2d(output_size=1))
        self.backbone = backbone

        # Classifier head.
        self.classifier = Classifier(in_channels=final_conv_channels,
                                     num_classes=num_classes)

        self.init_params()

    def init_params(self):
        """Kaiming-initialize every backbone conv; the classifier
        initializes itself."""
        for module in self.backbone.modules():
            if isinstance(module, torch.nn.Conv2d):
                torch.nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    torch.nn.init.constant_(module.bias, 0)
        self.classifier.init_params()

    def forward(self, x):
        """Backbone features -> classifier output."""
        return self.classifier(self.backbone(x))