Example #1
    def create_block_list(self):
        self.clear_list()

        backbone = PointNet(data_channel=self.data_channel,
                            feature_transform=self.feature_transform,
                            bn_name=self.bn_name,
                            activation_name=self.activation_name)
        base_out_channels = backbone.get_outchannel_list()
        self.add_block_list(BlockType.BaseNet, backbone, base_out_channels[-1])

        input_channel = self.block_out_channels[-1]
        fc1 = FcBNActivationBlock(input_channel,
                                  512,
                                  bnName=self.bn_name,
                                  activationName=self.activation_name)
        self.add_block_list(fc1.get_name(), fc1, 512)

        input_channel = 512
        fc2 = nn.Linear(input_channel, 256)
        self.add_block_list(LayerType.FcLinear, fc2, 256)

        input_channel = 256
        dropout = nn.Dropout(p=0.3)
        self.add_block_list(LayerType.Dropout, dropout, input_channel)

        normalize = NormalizeLayer(self.bn_name, input_channel)
        self.add_block_list(normalize.get_name(), normalize, input_channel)

        activate = ActivationLayer(self.activation_name, inplace=False)
        self.add_block_list(activate.get_name(), activate, input_channel)

        fc3 = nn.Linear(input_channel, self.class_number)
        self.add_block_list(LayerType.FcLinear, fc3, self.class_number)

        self.create_loss()
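
The head built here follows a common classification pattern: backbone global feature, then Linear + BN + activation, a second Linear, Dropout, BN, activation, and a final Linear producing the class scores. As a rough reference, the sketch below expresses the same stack in plain PyTorch; the 1024-dim backbone output is an assumption (the usual PointNet global feature size), and the wrapper blocks (FcBNActivationBlock, NormalizeLayer, ActivationLayer) are assumed to behave like the standard torch.nn layers shown.

# Hedged sketch: an equivalent classifier head in plain PyTorch.
# The 1024 input size and the class count are illustrative assumptions.
import torch
import torch.nn as nn

class PointNetClsHead(nn.Module):
    def __init__(self, in_channels=1024, num_classes=40):
        super().__init__()
        self.head = nn.Sequential(
            nn.Linear(in_channels, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 256),
            nn.Dropout(p=0.3),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        return self.head(x)

# usage: logits = PointNetClsHead()(torch.randn(8, 1024))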
Example #2
    def create_block_list(self):
        self.clear_list()

        backbone = self.factory.get_base_model(BackboneName.GhostNet,
                                               self.data_channel)
        base_out_channels = backbone.get_outchannel_list()
        self.add_block_list(BlockType.BaseNet, backbone, base_out_channels[-1])

        avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.add_block_list(LayerType.GlobalAvgPool, avgpool,
                            base_out_channels[-1])

        output_channel = 1280
        layer1 = FcLayer(base_out_channels[-1], output_channel)
        self.add_block_list(layer1.get_name(), layer1, output_channel)

        layer2 = NormalizeLayer(bn_name=NormalizationType.BatchNormalize1d,
                                out_channel=output_channel)
        self.add_block_list(layer2.get_name(), layer2, output_channel)

        layer3 = ActivationLayer(self.activation_name, inplace=False)
        self.add_block_list(layer3.get_name(), layer3, output_channel)

        layer4 = nn.Dropout(0.2)
        self.add_block_list(LayerType.Dropout, layer4, output_channel)

        layer5 = nn.Linear(output_channel, self.class_number)
        self.add_block_list(LayerType.FcLinear, layer5, self.class_number)

        self.create_loss()
Example #3
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1,
              bias=True,
              bn_name=NormalizationType.BatchNormalize2d,
              activation_name=ActivationType.ReLU):
     super().__init__(EncNetBlockName.SeperableConv2dNActivation)
     # depthwise convolution: groups=in_channels gives one spatial filter per input channel
     self.depthwise = nn.Conv2d(in_channels,
                                in_channels,
                                kernel_size,
                                stride,
                                padding,
                                dilation,
                                groups=in_channels,
                                bias=bias)
     self.norm_layer = NormalizeLayer(bn_name, in_channels)
     self.pointwise = ConvBNActivationBlock(in_channels=in_channels,
                                            out_channels=out_channels,
                                            kernel_size=1,
                                            bias=bias,
                                            bnName=bn_name,
                                            activationName=activation_name)
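
For reference, the depthwise + pointwise pattern above can be written in plain PyTorch as below. This is a generic sketch of a depthwise-separable convolution, not the repository's SeparableConv2dBNActivation; the normalization/activation placement (norm after the depthwise step, BN plus activation fused into the pointwise step) mirrors the example.

# Hedged sketch: a generic depthwise-separable convolution in plain PyTorch.
import torch
import torch.nn as nn

class SeparableConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, dilation=1, bias=False):
        super().__init__()
        # depthwise: groups=in_channels applies one spatial filter per channel
        self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size,
                                   stride, padding, dilation,
                                   groups=in_channels, bias=bias)
        self.bn_dw = nn.BatchNorm2d(in_channels)
        # pointwise: 1x1 convolution mixes information across channels
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=bias)
        self.bn_pw = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.bn_dw(self.depthwise(x))
        return self.relu(self.bn_pw(self.pointwise(x)))

# usage: y = SeparableConv2d(32, 64, 3, padding=1)(torch.randn(1, 32, 56, 56))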
Example #4
    def __init__(self,
                 in_channels,
                 nclass,
                 ncodes=32,
                 se_loss=True,
                 bn_name=NormalizationType.BatchNormalize2d,
                 activation_name=ActivationType.ReLU):
        super().__init__(EncNetBlockName.EncBlock)
        self.se_loss = se_loss
        self.encoding = nn.Sequential(
            ConvBNActivationBlock(in_channels=in_channels,
                                  out_channels=in_channels,
                                  kernel_size=1,
                                  bias=False,
                                  bnName=bn_name,
                                  activationName=activation_name),
            Encoding(D=in_channels, K=ncodes),
            NormalizeLayer(NormalizationType.BatchNormalize1d, ncodes),
            ActivationLayer(activation_name), MeanLayer(dim=1))
        self.fc = nn.Sequential(nn.Linear(in_channels, in_channels),
                                nn.Sigmoid())

        self.activate = ActivationLayer(activation_name)

        if self.se_loss:
            self.se_layer = nn.Linear(in_channels, nclass)
Example #5
    def __init__(self, in_channels, out_channels, stride=1, dilation=1,
                 start_with_relu=True, bn_name=NormalizationType.BatchNormalize2d,
                 activation_name=ActivationType.ReLU):
        super().__init__(XceptionBlockName.BlockA)
        if out_channels != in_channels or stride != 1:
            self.skip = ConvBNActivationBlock(in_channels=in_channels,
                                              out_channels=out_channels,
                                              kernel_size=1,
                                              stride=stride,
                                              bias=False,
                                              bnName=bn_name,
                                              activationName=ActivationType.Linear)
        else:
            self.skip = None

        self.relu = ActivationLayer(activation_name, inplace=False)
        rep = list()
        inter_channels = out_channels // 4

        if start_with_relu:
            rep.append(self.relu)
        rep.append(SeparableConv2dBNActivation(in_channels, inter_channels, 3, 1,
                                               dilation, bn_name=bn_name,
                                               activation_name=activation_name))
        rep.append(NormalizeLayer(bn_name, inter_channels))

        rep.append(self.relu)
        rep.append(SeparableConv2dBNActivation(inter_channels, inter_channels, 3, 1,
                                               dilation, bn_name=bn_name,
                                               activation_name=activation_name))
        rep.append(NormalizeLayer(bn_name, inter_channels))

        if stride != 1:
            rep.append(self.relu)
            rep.append(SeparableConv2dBNActivation(inter_channels, out_channels, 3, stride,
                                                   bn_name=bn_name,
                                                   activation_name=activation_name))
            rep.append(NormalizeLayer(bn_name, out_channels))
        else:
            rep.append(self.relu)
            rep.append(SeparableConv2dBNActivation(inter_channels, out_channels, 3, 1,
                                                   bn_name=bn_name,
                                                   activation_name=activation_name))
            rep.append(NormalizeLayer(bn_name, out_channels))
        self.rep = nn.Sequential(*rep)
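
This snippet only builds self.rep and self.skip; the forward pass is not shown. A typical Xception-style composition adds the main path to the (possibly projected) identity, as in the self-contained sketch below. Treat this as an assumption about how the pieces combine, not the repository's forward().

# Hedged sketch: combining a main path with an optional projection shortcut.
import torch
import torch.nn as nn

class ResidualWrapper(nn.Module):
    def __init__(self, rep, skip=None):
        super().__init__()
        self.rep = rep    # main path (e.g. stacked separable convs)
        self.skip = skip  # 1x1 projection when the shape changes, else None

    def forward(self, x):
        identity = self.skip(x) if self.skip is not None else x
        return self.rep(x) + identity

# usage with stand-in layers:
# block = ResidualWrapper(nn.Conv2d(64, 64, 3, padding=1))
# y = block(torch.randn(1, 64, 32, 32))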
Example #6
    def create_block_list(self):
        self.clear_list()

        layer1 = ConvBNActivationBlock(in_channels=self.data_channel,
                                       out_channels=self.num_init_features,
                                       kernel_size=7,
                                       stride=2,
                                       padding=3,
                                       bnName=self.bnName,
                                       activationName=self.activationName)
        self.add_block_list(layer1.get_name(), layer1, self.num_init_features)

        layer2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.add_block_list(LayerType.MyMaxPool2d, layer2,
                            self.num_init_features)

        self.in_channels = self.num_init_features
        for index, num_block in enumerate(self.num_blocks):
            self.make_densenet_layer(num_block, self.dilations[index],
                                     self.bn_size, self.growth_rate,
                                     self.drop_rate, self.bnName,
                                     self.activationName)
            self.in_channels = self.block_out_channels[-1]
            if index != len(self.num_blocks) - 1:
                trans = TransitionBlock(in_channel=self.in_channels,
                                        output_channel=self.in_channels // 2,
                                        stride=1,
                                        bnName=self.bnName,
                                        activationName=self.activationName)
                self.add_block_list(trans.get_name(), trans,
                                    self.in_channels // 2)
                avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)
                self.add_block_list(LayerType.GlobalAvgPool, avg_pool,
                                    self.block_out_channels[-1])
                self.in_channels = self.block_out_channels[-1]
        layer3 = NormalizeLayer(bn_name=self.bnName,
                                out_channel=self.in_channels)
        self.add_block_list(layer3.get_name(), layer3, self.in_channels)

        layer4 = ActivationLayer(self.activationName, False)
        self.add_block_list(layer4.get_name(), layer4, self.in_channels)
Example #7
 def __init__(self, in_channel, kernel_size,
              padding=0, stride=1, dilation=1, bias=False,
              bn_name=NormalizationType.BatchNormalize2d,
              activation_name=ActivationType.ReLU):
     super().__init__(BlockType.DepthwiseConv2dBlock)
     conv = nn.Conv2d(in_channel, in_channel, kernel_size,
                      padding=padding, stride=stride, dilation=dilation,
                      groups=in_channel, bias=bias)
     normal = NormalizeLayer(bn_name, in_channel)
     activation = ActivationLayer(activation_name)
     self.block = nn.Sequential(OrderedDict([
         (LayerType.Convolutional, conv),
         (bn_name, normal),
         (activation_name, activation)
     ]))
Example #8
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size,
              stride,
              bn_name=NormalizationType.BatchNormalize2d):
     super().__init__(PNASNetBlockName.SeparableConv)
     self.conv1 = nn.Conv2d(in_planes,
                            out_planes,
                            kernel_size,
                            stride,
                            padding=(kernel_size - 1) // 2,
                            bias=False,
                            groups=in_planes)
     self.bn1 = NormalizeLayer(bn_name, out_planes)
Example #9
    def __init__(self,
                 input_channel,
                 bn_name=NormalizationType.BatchNormalize2d,
                 activation_name=ActivationType.ReLU):
        super().__init__(InceptionBlockName.InceptionResNetA)
        self.branch3x3stack = nn.Sequential(
            ConvBNActivationBlock(input_channel,
                                  32,
                                  kernel_size=1,
                                  bnName=bn_name,
                                  activationName=activation_name),
            ConvBNActivationBlock(32,
                                  48,
                                  kernel_size=3,
                                  padding=1,
                                  bnName=bn_name,
                                  activationName=activation_name),
            ConvBNActivationBlock(48,
                                  64,
                                  kernel_size=3,
                                  padding=1,
                                  bnName=bn_name,
                                  activationName=activation_name))

        self.branch3x3 = nn.Sequential(
            ConvBNActivationBlock(input_channel,
                                  32,
                                  kernel_size=1,
                                  bnName=bn_name,
                                  activationName=activation_name),
            ConvBNActivationBlock(32,
                                  32,
                                  kernel_size=3,
                                  padding=1,
                                  bnName=bn_name,
                                  activationName=activation_name))

        self.branch1x1 = ConvBNActivationBlock(input_channel,
                                               32,
                                               kernel_size=1,
                                               bnName=bn_name,
                                               activationName=activation_name)

        self.reduction1x1 = nn.Conv2d(128, 384, kernel_size=1)
        self.shortcut = nn.Conv2d(input_channel, 384, kernel_size=1)
        self.bn = NormalizeLayer(bn_name=bn_name, out_channel=384)
        self.relu = ActivationLayer(activation_name=activation_name,
                                    inplace=False)
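
The forward() is not included in this snippet. Going by the channel sizes defined above (32 + 32 + 64 = 128 branch channels, a 1x1 reduction to 384, and a 384-channel shortcut), an Inception-ResNet-A block would typically compose them as in the sketch below; treat this as an assumption, not the repository's implementation.

# Hedged sketch: the usual composition of an Inception-ResNet-A block,
# written against the submodules defined in the example above.
import torch

def inception_resnet_a_forward(block, x):
    # three parallel branches concatenated along channels: 32 + 32 + 64 = 128
    residual = torch.cat([block.branch1x1(x),
                          block.branch3x3(x),
                          block.branch3x3stack(x)], dim=1)
    residual = block.reduction1x1(residual)   # 128 -> 384
    shortcut = block.shortcut(x)              # input_channel -> 384
    return block.relu(block.bn(residual + shortcut))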
Example #10
 def create_layer(self, module_def):
     if module_def['type'] == LayerType.MyMaxPool2d:
         kernel_size = int(module_def['size'])
         stride = int(module_def['stride'])
         maxpool = MyMaxPool2d(kernel_size, stride)
         self.addBlockList(LayerType.MyMaxPool2d, maxpool, self.filters)
         self.input_channels = self.filters
     elif module_def['type'] == LayerType.GlobalAvgPool:
         globalAvgPool = GlobalAvgPool2d()
         self.addBlockList(LayerType.GlobalAvgPool, globalAvgPool, self.filters)
         self.input_channels = self.filters
     elif module_def['type'] == LayerType.FcLayer:
         num_output = int(module_def['num_output'])
         self.filters = num_output
         layer = FcLayer(self.input_channels, num_output)
         self.addBlockList(LayerType.FcLayer, layer, num_output)
         self.input_channels = num_output
     elif module_def['type'] == LayerType.Upsample:
         scale = int(module_def['stride'])
         mode = module_def.get('model', 'bilinear')
         upsample = Upsample(scale_factor=scale, mode=mode)
         self.addBlockList(LayerType.Upsample, upsample, self.filters)
         self.input_channels = self.filters
     elif module_def['type'] == LayerType.MultiplyLayer:
         layer = MultiplyLayer(module_def['layers'])
         self.addBlockList(LayerType.MultiplyLayer, layer, self.filters)
         self.input_channels = self.filters
     elif module_def['type'] == LayerType.AddLayer:
         layer = AddLayer(module_def['layers'])
         self.addBlockList(LayerType.AddLayer, layer, self.filters)
         self.input_channels = self.filters
     elif module_def['type'] == LayerType.Dropout:
         probability = float(module_def['probability'])
         layer = nn.Dropout(p=probability, inplace=False)
         self.addBlockList(LayerType.Dropout, layer, self.filters)
         self.input_channels = self.filters
     elif module_def['type'] == LayerType.NormalizeLayer:
         bn_name = module_def['batch_normalize'].strip()
         layer = NormalizeLayer(bn_name, self.filters)
         self.addBlockList(LayerType.NormalizeLayer, layer, self.filters)
         self.input_channels = self.filters
     elif module_def['type'] == LayerType.ActivationLayer:
         activation_name = module_def['activation'].strip()
         layer = ActivationLayer(activation_name, inplace=False)
         self.addBlockList(LayerType.ActivationLayer, layer, self.filters)
         self.input_channels = self.filters
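
create_layer is a dispatch table: each cfg entry's 'type' picks a layer constructor, and the running channel count (self.filters / self.input_channels) is threaded through so later layers know their input size. The stripped-down sketch below illustrates the same pattern in plain PyTorch; the cfg keys and layer set here are illustrative, not the repository's.

# Hedged sketch: minimal config-driven layer dispatch.
import torch.nn as nn

def build_layer(module_def, in_channels):
    """Return (layer, out_channels) for one illustrative cfg entry."""
    layer_type = module_def['type']
    if layer_type == 'maxpool':
        return nn.MaxPool2d(int(module_def['size']), int(module_def['stride'])), in_channels
    if layer_type == 'avgpool':
        return nn.AdaptiveAvgPool2d(1), in_channels
    if layer_type == 'linear':
        num_output = int(module_def['num_output'])
        return nn.Linear(in_channels, num_output), num_output
    if layer_type == 'dropout':
        return nn.Dropout(p=float(module_def['probability'])), in_channels
    if layer_type == 'upsample':
        return nn.Upsample(scale_factor=int(module_def['stride']),
                           mode=module_def.get('mode', 'bilinear')), in_channels
    raise ValueError("unknown layer type: %s" % layer_type)

# usage: layer, channels = build_layer({'type': 'linear', 'num_output': '128'}, 512)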
Example #11
    def __init__(self,
                 input_channel,
                 bn_name=NormalizationType.BatchNormalize2d,
                 activation_name=ActivationType.ReLU):
        # Figure 19. The schema for 8×8 grid (Inception-ResNet-C)
        # module of the Inception-ResNet-v2 network.
        super().__init__(InceptionBlockName.InceptionResNetC)
        self.branch3x3 = nn.Sequential(
            ConvBNActivationBlock(input_channel,
                                  192,
                                  kernel_size=1,
                                  bnName=bn_name,
                                  activationName=activation_name),
            ConvBNActivationBlock(192,
                                  224,
                                  kernel_size=(1, 3),
                                  padding=(0, 1),
                                  bnName=bn_name,
                                  activationName=activation_name),
            ConvBNActivationBlock(224,
                                  256,
                                  kernel_size=(3, 1),
                                  padding=(1, 0),
                                  bnName=bn_name,
                                  activationName=activation_name))

        self.branch1x1 = ConvBNActivationBlock(input_channel,
                                               192,
                                               kernel_size=1,
                                               bnName=bn_name,
                                               activationName=activation_name)
        self.reduction1x1 = nn.Conv2d(448, 2048, kernel_size=1)
        self.shorcut = nn.Conv2d(input_channel, 2048, kernel_size=1)

        self.bn = NormalizeLayer(bn_name=bn_name, out_channel=2048)
        self.relu = ActivationLayer(activation_name=activation_name,
                                    inplace=False)
Example #12
    def __init__(self,
                 input_channel,
                 bn_name=NormalizationType.BatchNormalize2d,
                 activation_name=ActivationType.ReLU):
        super().__init__(InceptionBlockName.InceptionResNetB)
        self.branch7x7 = nn.Sequential(
            ConvBNActivationBlock(input_channel,
                                  128,
                                  kernel_size=1,
                                  bnName=bn_name,
                                  activationName=activation_name),
            ConvBNActivationBlock(128,
                                  160,
                                  kernel_size=(1, 7),
                                  padding=(0, 3),
                                  bnName=bn_name,
                                  activationName=activation_name),
            ConvBNActivationBlock(160,
                                  192,
                                  kernel_size=(7, 1),
                                  padding=(3, 0),
                                  bnName=bn_name,
                                  activationName=activation_name))

        self.branch1x1 = ConvBNActivationBlock(input_channel,
                                               192,
                                               kernel_size=1,
                                               bnName=bn_name,
                                               activationName=activation_name)

        self.reduction1x1 = nn.Conv2d(384, 1154, kernel_size=1)
        self.shortcut = nn.Conv2d(input_channel, 1154, kernel_size=1)

        self.bn = NormalizeLayer(bn_name=bn_name, out_channel=1154)
        self.relu = ActivationLayer(activation_name=activation_name,
                                    inplace=False)
Example #13
    def __init__(self,
                 in_channels,
                 channels,
                 stride=1,
                 dilation=1,
                 groups=1,
                 dropout=None,
                 dist_bn=False,
                 bn_name=NormalizationType.BatchNormalize2d,
                 activation_name=ActivationType.ReLU):
        super().__init__(WiderResNetBlockName.IdentityResidualBlock)
        # Check parameters for inconsistencies
        if len(channels) != 2 and len(channels) != 3:
            raise ValueError(
                "channels must contain either two or three values")
        if len(channels) == 2 and groups != 1:
            raise ValueError("groups > 1 are only valid if len(channels) == 3")
        self.dist_bn = dist_bn
        is_bottleneck = len(channels) == 3
        need_proj_conv = stride != 1 or in_channels != channels[-1]

        self.normal = NormalizeLayer(bn_name, in_channels)
        self.activate = ActivationLayer(activation_name, inplace=False)
        if not is_bottleneck:
            layers = [("conv1",
                       ConvBNActivationBlock(in_channels=in_channels,
                                             out_channels=channels[0],
                                             kernel_size=3,
                                             stride=stride,
                                             padding=dilation,
                                             dilation=dilation,
                                             bias=False,
                                             bnName=bn_name,
                                             activationName=activation_name)),
                      ("conv2",
                       nn.Conv2d(channels[0],
                                 channels[1],
                                 kernel_size=3,
                                 stride=1,
                                 padding=dilation,
                                 dilation=dilation,
                                 bias=False))]
            if dropout is not None:
                layers = [layers[0], ("dropout", dropout()), layers[1]]
        else:
            layers = [("conv1",
                       ConvBNActivationBlock(in_channels=in_channels,
                                             out_channels=channels[0],
                                             kernel_size=1,
                                             stride=stride,
                                             padding=0,
                                             bias=False,
                                             bnName=bn_name,
                                             activationName=activation_name)),
                      ("conv2",
                       ConvBNActivationBlock(in_channels=channels[0],
                                             out_channels=channels[1],
                                             kernel_size=3,
                                             stride=1,
                                             padding=dilation,
                                             dilation=dilation,
                                             groups=groups,
                                             bias=False,
                                             bnName=bn_name,
                                             activationName=activation_name)),
                      ("conv3",
                       nn.Conv2d(channels[1],
                                 channels[2],
                                 kernel_size=1,
                                 stride=1,
                                 padding=0,
                                 bias=False))]
            if dropout is not None:
                layers = [
                    layers[0], layers[1], ("dropout", dropout()), layers[2]
                ]

        self.convs = nn.Sequential(OrderedDict(layers))

        self.shortcut = nn.Sequential()
        if need_proj_conv:
            self.shortcut = nn.Conv2d(in_channels,
                                      channels[-1],
                                      kernel_size=1,
                                      stride=stride,
                                      padding=0,
                                      bias=False)
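
As with the Xception block, the forward pass is not part of this snippet. The fields suggest a pre-activation residual: normalize and activate the input once, feed the result to both the conv stack and the projection shortcut, and add. The self-contained sketch below shows that generic pattern in plain PyTorch; it is an illustration of the structure, not the repository's forward().

# Hedged sketch: a generic pre-activation residual block.
import torch
import torch.nn as nn

class PreActResidual(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.norm = nn.BatchNorm2d(in_channels)
        self.act = nn.ReLU(inplace=False)
        self.convs = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
        )
        # projection shortcut only when the spatial size or channel count changes
        self.shortcut = (nn.Conv2d(in_channels, out_channels, 1, stride, bias=False)
                         if stride != 1 or in_channels != out_channels
                         else nn.Identity())

    def forward(self, x):
        pre = self.act(self.norm(x))   # pre-activation shared by both paths
        return self.convs(pre) + self.shortcut(pre)

# usage: y = PreActResidual(64, 128, stride=2)(torch.randn(2, 64, 32, 32))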