Example #1
 def __init__(self, in_ch=3, num_classes=1000):
     '''
         AlexNet.
         Args:
             in_ch: int, the number of input channels
             num_classes: int, the number of classes to predict
         Reference:
             "One weird trick for parallelizing convolutional neural networks" <https://arxiv.org/abs/1404.5997>
     '''
     super(AlexNet, self).__init__()
     # feature extraction part
     self.features = M.Sequential(
         M.Conv2d(in_ch, 64, kernel_size=11, stride=4, padding=11 // 4),
         M.ReLU(),
         M.MaxPool2d(kernel_size=3, stride=2),
         M.Conv2d(64, 192, kernel_size=5, padding=2),
         M.ReLU(),
         M.MaxPool2d(kernel_size=3, stride=2),
         M.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
         M.ReLU(),
         M.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
         M.ReLU(),
         M.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
         M.ReLU(),
         M.MaxPool2d(kernel_size=3, stride=2),
     )
     # adaptive average pooling to a fixed 6x6 output
     self.avgpool = M.AdaptiveAvgPool2d((6, 6))
     # classifier part
     self.classifier = M.Sequential(M.Dropout(),
                                    M.Linear(256 * 6 * 6, 4096), M.ReLU(),
                                    M.Dropout(), M.Linear(4096, 4096),
                                    M.ReLU(), M.Linear(4096, num_classes))
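
Only the constructor is shown above; a minimal sketch of the matching forward, assuming the usual features -> avgpool -> flatten -> classifier chain (F stands for megengine.functional):

import megengine.functional as F

def forward(self, x):
    # convolutional feature extraction
    x = self.features(x)
    # pool to a fixed 6x6 spatial size so the input resolution may vary
    x = self.avgpool(x)
    # flatten everything after the batch axis: (N, 256 * 6 * 6)
    x = F.flatten(x, 1)
    return self.classifier(x)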
Example #2
    def __init__(self,
                 channels,
                 reduction=16,
                 norm_layer=M.BatchNorm2d,
                 activation=M.ReLU(),
                 attention_act=M.Sigmoid()):
        """

        Args:
            channels (int):
            reduction (int):
            norm_layer (M.Module):
            activation (M.Module):
            attention_act (M.Module):
        """
        super(SEModule, self).__init__()
        inter_ch = int(channels // reduction)
        self.fc = M.Sequential(
            M.AdaptiveAvgPool2d(1),
            Conv2d(channels,
                   inter_ch,
                   norm_layer=norm_layer,
                   activation=activation),
            Conv2d(inter_ch,
                   channels,
                   norm_layer=norm_layer,
                   activation=attention_act))
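
Note that the inner Conv2d here (no M. prefix) is assumed to be a repo-local conv + norm + activation wrapper rather than megengine's own layer. A hedged sketch of the matching forward, where the pooled attention weights rescale the input channels:

def forward(self, x):
    # self.fc yields per-channel weights of shape (N, C, 1, 1),
    # broadcast over H and W to recalibrate each channel
    return x * self.fc(x)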
Example #3
def test_adaptivepooling():
    pool1 = M.AdaptiveMaxPool2d((2, 2))
    pool2 = M.AdaptiveAvgPool2d((2, 2))

    @trace(symbolic=True, capture_as_const=True)
    def fwd(data):
        out = pool1(data)
        out = pool2(out)
        return out

    input = Tensor(np.random.random((1, 3, 32, 32)))
    result = fwd(input)
    check_pygraph_dump(fwd, [input], [result])
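
For reference, adaptive pooling fixes the output spatial size regardless of the input resolution; a minimal standalone check:

import numpy as np
import megengine as mge
import megengine.module as M

pool = M.AdaptiveAvgPool2d((2, 2))
x = mge.Tensor(np.random.random((1, 3, 32, 32)).astype("float32"))
print(pool(x).shape)  # (1, 3, 2, 2): any input H/W is reduced to 2x2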
Example #4
 def __init__(self,
              cfg,
              in_chs=3,
              num_classes=1000,
              norm_layer=M.BatchNorm2d,
              activation=M.ReLU(),
              features_only=False):
     super(GENet, self).__init__()
     self.current_channels = in_chs
     features = OrderedDict()
     for i in range(len(cfg)):
         features[f"layer_{i}"] = self._make_layer(cfg[i], norm_layer,
                                                   activation)
     self.features = M.Sequential(features)
     self.global_avg = M.AdaptiveAvgPool2d(1)
     self.classifier = M.Linear(self.current_channels, num_classes)
     self.features_only = features_only
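
_make_layer and the cfg format are not shown. A plausible forward sketch, assuming features_only means returning the feature map and skipping the classifier head (an assumption, since the flag is only stored here):

import megengine.functional as F

def forward(self, x):
    x = self.features(x)
    if self.features_only:
        # return the raw feature map, e.g. for use as a detection backbone
        return x
    x = self.global_avg(x)  # (N, C, 1, 1)
    x = F.flatten(x, 1)     # (N, C)
    return self.classifier(x)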
Example #5
    def __init__(self,
                 in_ch,
                 reduction=16,
                 norm_layer=None,
                 nolinear=M.ReLU(),
                 sigmoid=M.Sigmoid()):
        '''
            Initialize the module.
            @in_ch: int, the number of input channels
            @reduction: int, the coefficient of dimensionality reduction
            @norm_layer: M.Module, the batch normalization module
            @nolinear: M.Module, the non-linear activation module
            @sigmoid: M.Module, the gating function: H-Sigmoid in MobileNetV3, Sigmoid in SENet
        '''
        super(SEModule, self).__init__()
        if norm_layer is None:
            norm_layer = M.BatchNorm2d

        if nolinear is None:
            nolinear = M.ReLU()

        if sigmoid is None:
            sigmoid = M.Sigmoid()

        self.avgpool = M.AdaptiveAvgPool2d(1)
        self.fc = M.Sequential(
            M.Conv2d(in_ch,
                     in_ch // reduction,
                     kernel_size=1,
                     stride=1,
                     padding=0),
            norm_layer(in_ch // reduction),
            nolinear,
            M.Conv2d(in_ch // reduction,
                     in_ch,
                     kernel_size=1,
                     stride=1,
                     padding=0),
            norm_layer(in_ch),
            sigmoid,
        )
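
A minimal sketch of the corresponding forward, assuming the standard squeeze -> excite -> rescale pattern:

def forward(self, x):
    # squeeze: global context per channel, (N, C, 1, 1)
    w = self.avgpool(x)
    # excite: bottleneck convs ending in the gating function
    w = self.fc(w)
    # rescale the input channel-wise
    return x * w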
Example #6
    def __init__(self,
                 cfg,
                 num_classes=1000,
                 in_channels=3,
                 init_weights=True,
                 batch_norm=False):
        '''
            VGGNet from the paper
            "Very Deep Convolutional Networks for Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>
        '''
        super(VGG, self).__init__()
        self.features = self._make_layers(in_channels, cfg, batch_norm)
        self.avgpool = M.AdaptiveAvgPool2d((7, 7))
        self.classifier = M.Sequential(M.Linear(512 * 7 * 7, 4096), M.ReLU(),
                                       M.Dropout(), M.Linear(4096, 4096),
                                       M.ReLU(), M.Dropout(),
                                       M.Linear(4096, num_classes))

        if init_weights:
            self._init_weights()
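
As with the AlexNet example, only the constructor is shown; a forward sketch under the same assumed chain:

import megengine.functional as F

def forward(self, x):
    x = self.features(x)
    x = self.avgpool(x)  # fixed 7x7 spatial size
    x = F.flatten(x, 1)  # (N, 512 * 7 * 7)
    return self.classifier(x)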
Example #7
    def __init__(self,
                 block,
                 blocks,
                 in_ch=3,
                 num_classes=1000,
                 first_stride=2,
                 light_head=False,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 strides=[1, 2, 2, 2],
                 dilations=[1, 1, 1, 1],
                 multi_grids=[1, 1, 1],
                 norm_layer=None,
                 se_module=None,
                 reduction=16,
                 radix=0,
                 avd=False,
                 avd_first=False,
                 avg_layer=False,
                 avg_down=False,
                 stem_width=64):
        '''
            Modified ResNet according to https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
            Implements ResNet and its variants.
            Args:
                in_ch: int, the number of input channels
                block: BasicBlock or Bottleneck, the building block of the ResNet
                num_classes: int, the number of classes to predict
                first_stride: int, the stride of the first conv layer
                light_head: bool, whether to replace the first conv7x7 with three conv3x3 layers
                zero_init_residual: bool, whether to zero-initialize the last batch norm of each residual block
                groups: int, the number of groups for the convs in the net
                width_per_group: int, the width of the conv layers
                strides: list, the stride of each stage
                dilations: list, the dilation of each stage
                multi_grids: list, the multi-grid rates for the last stage, as in DeepLabV3
                norm_layer: megengine.module.Module, the normalization layer, batch normalization by default
                se_module: SEModule, the Squeeze-and-Excitation module
                reduction: int, the reduction rate of the SE module
                radix: int, the radix from ResNeSt
                avd: bool, whether to use the avd layer
                avd_first: bool, whether to apply the avd layer before the bottleneck's conv2
                stem_width: int, the channels of the conv3x3 layers when light_head replaces the conv7x7
            References:
                "Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>
                "Aggregated Residual Transformations for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>
                https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch
                DeepLabV3: https://arxiv.org/pdf/1706.05587.pdf
                DeepLabV3+: https://arxiv.org/pdf/1802.02611.pdf
                "Squeeze-and-Excitation Networks" <https://arxiv.org/abs/1709.01507>
                "ResNeSt: Split-Attention Networks" <https://arxiv.org/pdf/2004.08955.pdf>
        '''
        super(ResNet, self).__init__()

        if len(dilations) != 4:
            raise ValueError(
                "The length of dilations must be 4, but got {}".format(
                    len(dilations)))

        if len(strides) != 4:
            raise ValueError(
                "The length of strides must be 4, but got {}".format(
                    len(strides)))

        if len(multi_grids) > blocks[-1]:
            multi_grids = multi_grids[:blocks[-1]]
        elif len(multi_grids) < blocks[-1]:
            raise ValueError(
                "The length of multi_grids must be greater than or equal to the number of blocks in the last stage, but got {}/{}"
                .format(len(multi_grids), blocks[-1]))

        if norm_layer is None:
            norm_layer = M.BatchNorm2d

        self.base_width = width_per_group
        self.multi_grids = multi_grids
        self.inplanes = 64
        self.groups = groups
        self.norm_layer = norm_layer
        self.avg_layer = avg_layer
        self.avg_down = avg_down

        if light_head:
            self.conv1 = M.Sequential(
                conv3x3(in_ch, stem_width, stride=first_stride),
                norm_layer(stem_width),
                M.ReLU(),
                conv3x3(stem_width, stem_width, stride=1),
                norm_layer(stem_width),
                M.ReLU(),
                conv3x3(stem_width, self.inplanes, stride=1),
            )
        else:
            self.conv1 = M.Conv2d(in_ch,
                                  self.inplanes,
                                  kernel_size=7,
                                  stride=first_stride,
                                  padding=3,
                                  bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = M.ReLU()
        self.maxpool = M.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # the 4 stages
        self.layer1 = self._make_layer(block,
                                       64,
                                       blocks[0],
                                       stride=strides[0],
                                       dilation=dilations[0],
                                       se_module=se_module,
                                       reduction=reduction,
                                       radix=radix,
                                       avd=avd,
                                       avd_first=avd_first)
        self.layer2 = self._make_layer(block,
                                       128,
                                       blocks[1],
                                       stride=strides[1],
                                       dilation=dilations[1],
                                       se_module=se_module,
                                       reduction=reduction,
                                       radix=radix,
                                       avd=avd,
                                       avd_first=avd_first)
        self.layer3 = self._make_layer(block,
                                       256,
                                       blocks[2],
                                       stride=strides[2],
                                       dilation=dilations[2],
                                       se_module=se_module,
                                       reduction=reduction,
                                       radix=radix,
                                       avd=avd,
                                       avd_first=avd_first)
        self.layer4 = self._make_grid_layer(block,
                                            512,
                                            blocks[3],
                                            stride=strides[3],
                                            dilation=dilations[3],
                                            se_module=se_module,
                                            reduction=reduction,
                                            radix=radix,
                                            avd=avd,
                                            avd_first=avd_first)

        # classification part
        self.avgpool = M.AdaptiveAvgPool2d(1)
        self.fc = M.Linear(self.inplanes, num_classes)

        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight,
                                    mode="fan_out",
                                    nonlinearity="relu")
            elif isinstance(m, M.BatchNorm2d):
                M.init.fill_(m.weight, 1)
                M.init.zeros_(m.bias)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    M.init.zeros_(m.bn3.weight)
                elif isinstance(m, BasicBlock):
                    M.init.zeros_(m.bn2.weight)
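
A hypothetical instantiation, assuming Bottleneck is defined alongside this class in the repo; blocks gives the block count per stage, e.g. [3, 4, 6, 3] for a ResNet-50-style network:

model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000)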
Example #8
 def __init__(self):
     super().__init__()
     self.data = np.random.random((2, 512, 64, 64)).astype(np.float32)
     self.gap = M.AdaptiveAvgPool2d((2, 2))