Example #1
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(FirstCell, self).__init__()
        self.conv_1x1 = []
        self.conv_1x1.append(M.ReLU())
        self.conv_1x1.append(M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.append(M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = M.Sequential(*self.conv_1x1)

        self.relu = M.ReLU()
        self.path_1 = []
        self.path_1.append(M.AvgPool2d(1, stride=2))
        self.path_1.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.path_1 = M.Sequential(*self.path_1)

        self.path_2 = []
        # self.path_2.append(M.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.append(M.AvgPool2d(1, stride=2))
        self.path_2.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.path_2 = M.Sequential(*self.path_2)

        self.final_path_bn = M.BatchNorm2d(out_channels_left * 2, eps=0.001, momentum=0.1, affine=True)

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)

        self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)

        self.comb_iter_2_left = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_3_left = M.AvgPool2d(3, stride=1, padding=1)
        self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
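
Note: the path_1 / path_2 branches above downsample with M.AvgPool2d(1, stride=2). A 1x1 window averages nothing, so the layer simply keeps every other row and column. A minimal standalone sketch:

import numpy as np
import megengine as mge
import megengine.module as M

# a 1x1 average pool with stride 2 subsamples: each output value is the
# single input value at the top-left of its window
pool = M.AvgPool2d(1, stride=2)
x = mge.tensor(np.arange(16, dtype="float32").reshape(1, 1, 4, 4))
y = pool(x)
print(y.shape)          # (1, 1, 2, 2)
print(y.numpy()[0, 0])  # [[ 0.  2.]
                        #  [ 8. 10.]]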
Example #2
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell1, self).__init__()
        self.conv_prev_1x1 = []
        self.conv_prev_1x1.append(M.ReLU())
        self.conv_prev_1x1.append(M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.conv_prev_1x1.append(M.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
        self.conv_prev_1x1 = M.Sequential(*self.conv_prev_1x1)

        self.conv_1x1 = []
        self.conv_1x1.append(M.ReLU())
        self.conv_1x1.append(M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.append(M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = M.Sequential(*self.conv_1x1)

        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_1_left = M.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)

        self.comb_iter_2_left = M.AvgPool2d(3, stride=2, padding=1)
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)

        self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = M.MaxPool2d(3, stride=2, padding=1)
Example #3

 def __init__(self, in_channels, num_classes, conv_block=None):
     super(InceptionAux, self).__init__()
     if conv_block is None:
         conv_block = BasicConv2d
     self.avgpool1 = M.AvgPool2d(5, 3)
     self.conv0 = conv_block(in_channels, 128, kernel_size=1)
     self.conv1 = conv_block(128, 768, kernel_size=5)
     self.avgpool = M.AvgPool2d(1)
     self.conv1.stddev = 0.01
     self.fc = M.Linear(768, num_classes)
     self.fc.stddev = 0.001
Example #4

    def __init__(self, in_channels, conv_block=None):
        super(InceptionE, self).__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)

        self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = conv_block(384,
                                       384,
                                       kernel_size=(1, 3),
                                       padding=(0, 1))
        self.branch3x3_2b = conv_block(384,
                                       384,
                                       kernel_size=(3, 1),
                                       padding=(1, 0))

        self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = conv_block(384,
                                          384,
                                          kernel_size=(1, 3),
                                          padding=(0, 1))
        self.branch3x3dbl_3b = conv_block(384,
                                          384,
                                          kernel_size=(3, 1),
                                          padding=(1, 0))

        self.avgpool = M.AvgPool2d(3, 1, padding=1)
        self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
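
Only __init__ is shown; a hedged sketch of how these branches are usually combined (mirroring the torchvision InceptionE forward, not necessarily this repo's exact code):

import megengine.functional as F

def inception_e_forward(m, x):
    # run the four parallel branches and concatenate on the channel axis
    branch1x1 = m.branch1x1(x)                                               # 320 ch
    b3 = m.branch3x3_1(x)
    branch3x3 = F.concat([m.branch3x3_2a(b3), m.branch3x3_2b(b3)], axis=1)   # 768 ch
    bd = m.branch3x3dbl_2(m.branch3x3dbl_1(x))
    branch3x3dbl = F.concat([m.branch3x3dbl_3a(bd), m.branch3x3dbl_3b(bd)], axis=1)  # 768 ch
    branch_pool = m.branch_pool(m.avgpool(x))                                # 192 ch
    # 320 + 768 + 768 + 192 = 2048 output channels
    return F.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1)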
Example #5

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.avgpool = M.AvgPool2d(5, 3)  # kernel 5, stride 3: 14x14 -> 4x4, so 128 * 4 * 4 = 2048 matches fc1
        self.conv = BasicConv2d(in_channels, 128, kernel_size=1)

        self.fc1 = M.Linear(2048, 1024)
        self.fc2 = M.Linear(1024, num_classes)
Example #6

    def __init__(self,
                 channels,
                 exp_channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 use_relu,
                 use_se,
                 first_stride,
                 final_use_se,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(MobileNetV3, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = []
        init_block = conv3x3_block(in_channels=in_channels,
                                   out_channels=init_block_channels,
                                   stride=2,
                                   activation=HSwish())
        self.features.append(init_block)

        in_channels = init_block_channels

        for i, channels_per_stage in enumerate(channels):
            stage = []
            for j, out_channels in enumerate(channels_per_stage):
                exp_channels_ij = exp_channels[i][j]
                stride = 2 if (j == 0) and ((i != 0) or first_stride) else 1
                use_kernel3 = kernels3[i][j] == 1
                activation = M.ReLU() if use_relu[i][j] == 1 else HSwish()
                use_se_flag = use_se[i][j] == 1
                unit = MobileNetV3Unit(in_channels=in_channels,
                                       out_channels=out_channels,
                                       exp_channels=exp_channels_ij,
                                       use_kernel3=use_kernel3,
                                       stride=stride,
                                       activation=activation,
                                       use_se=use_se_flag)
                stage.append(unit)
                in_channels = out_channels
            self.features += stage
        final_block = MobileNetV3FinalBlock(in_channels=in_channels,
                                            out_channels=final_block_channels,
                                            use_se=final_use_se)
        self.features.append(final_block)
        in_channels = final_block_channels
        final_pool = M.AvgPool2d(kernel_size=7, stride=1)
        self.features.append(final_pool)
        self.features = M.Sequential(*self.features)

        self.output = MobileNetV3Classifier(
            in_channels=in_channels,
            out_channels=num_classes,
            mid_channels=classifier_mid_channels,
            dropout_rate=0.2)
Example #7
    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()

        self.norm = M.BatchNorm2d(num_input_features)
        self.relu = M.ReLU()
        self.conv = M.Conv2d(num_input_features,
                             num_output_features,
                             kernel_size=1,
                             stride=1,
                             bias=False)
        self.pool = M.AvgPool2d(kernel_size=2, stride=2)
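
This DenseNet transition halves the channel count with the 1x1 conv and the spatial size with the 2x2 average pool. A hedged shape check, assuming the full _Transition class (an M.Module with the __init__ above) is in scope and that its forward applies the four layers in declaration order:

import numpy as np
import megengine as mge

trans = _Transition(num_input_features=256, num_output_features=128)
x = mge.tensor(np.random.randn(1, 256, 28, 28).astype("float32"))
y = trans.pool(trans.conv(trans.relu(trans.norm(x))))
print(y.shape)  # (1, 128, 14, 14): channels and spatial size both halved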
Example #8

 def __init__(self):
     super().__init__()
     self.conv1 = M.Conv2d(3,
                           64,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)
     self.bn1 = M.BatchNorm2d(64)
     self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
     self.fc = M.Linear(64, 10)
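
For reference, with no padding a pooling layer outputs floor((H - k) / s) + 1 values per spatial dim. A quick standalone check of the 5/5 pool above (the 112x112 input size is a hypothetical example):

import numpy as np
import megengine as mge
import megengine.module as M

# floor((112 - 5) / 5) + 1 = 22
pool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
x = mge.tensor(np.random.randn(1, 64, 112, 112).astype("float32"))
print(pool(x).shape)  # (1, 64, 22, 22)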
Example #9
def SSIM(x, y, md=1):
    patch_size = 2 * md + 1
    C1 = 0.01**2
    C2 = 0.03**2

    mu_x = nn.AvgPool2d(patch_size, 1, 0, mode="average")(x)
    mu_y = nn.AvgPool2d(patch_size, 1, 0, mode="average")(y)
    mu_x_mu_y = mu_x * mu_y
    mu_x_sq = F.pow(mu_x, 2)
    mu_y_sq = F.pow(mu_y, 2)

    sigma_x = nn.AvgPool2d(patch_size, 1, 0, mode="average")(x * x) - mu_x_sq
    sigma_y = nn.AvgPool2d(patch_size, 1, 0, mode="average")(y * y) - mu_y_sq
    sigma_xy = nn.AvgPool2d(patch_size, 1, 0, mode="average")(
        x * y) - mu_x_mu_y

    SSIM_n = (2 * mu_x_mu_y + C1) * (2 * sigma_xy + C2)
    SSIM_d = (mu_x_sq + mu_y_sq + C1) * (sigma_x + sigma_y + C2)
    SSIM = SSIM_n / SSIM_d
    dist = F.clip((1 - SSIM) / 2, 0, 1)
    return dist
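
A hedged usage sketch for SSIM above; with md=1 the 3x3 valid-window pooling trims one pixel from each border (this assumes the same `nn` and `F` aliases the function relies on are imported):

import numpy as np
import megengine as mge

x = mge.tensor(np.random.rand(2, 3, 64, 64).astype("float32"))
y = x + 0.05 * mge.tensor(np.random.randn(2, 3, 64, 64).astype("float32"))
dist = SSIM(x, y, md=1)  # per-pixel dissimilarity in [0, 1]; 0 means identical patches
print(dist.shape)        # (2, 3, 62, 62)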
Example #10

    def __init__(self,
                 channels,
                 residuals,
                 init_block_kernel_size,
                 init_block_channels,
                 maxpool_pad,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(SqueezeNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.feature = []
        init_block = SqueezeInitBlock(in_channels=in_channels,
                                      out_channels=init_block_channels,
                                      kernel_size=init_block_kernel_size)
        self.feature.append(init_block)
        in_channels = init_block_channels

        for i, channels_per_stage in enumerate(channels):
            stage = []

            pool = M.MaxPool2d(kernel_size=3, stride=2, padding=maxpool_pad[i])
            stage.append(pool)

            for j, out_channels in enumerate(channels_per_stage):
                expand_channels = out_channels // 2
                squeeze_channels = out_channels // 8
                unit = FireUnit(in_channels=in_channels,
                                squeeze_channels=squeeze_channels,
                                expand1x1_channels=expand_channels,
                                expand3x3_channels=expand_channels,
                                residual=((residuals is not None)
                                          and (residuals[i][j] == 1)))
                stage.append(unit)
                in_channels = out_channels
            self.feature += stage
        self.feature.append(M.Dropout(drop_prob=0.5))
        self.feature = M.Sequential(*self.feature)

        self.output = []
        final_conv = M.Conv2d(in_channels=in_channels,
                              out_channels=num_classes,
                              kernel_size=1)
        self.output.append(final_conv)
        final_activ = M.ReLU()
        self.output.append(final_activ)
        final_pool = M.AvgPool2d(kernel_size=13, stride=1)
        self.output.append(final_pool)
        self.output = M.Sequential(*self.output)
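
In the head above, the 1x1 conv produces one map per class and AvgPool2d(kernel_size=13) global-pools the 13x13 map that a 224x224 input yields. A standalone sketch of that head (the 512 input channels are a hypothetical example):

import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F

head = M.Sequential(
    M.Conv2d(in_channels=512, out_channels=1000, kernel_size=1),
    M.ReLU(),
    M.AvgPool2d(kernel_size=13, stride=1),  # 13x13 -> 1x1 per class map
)
feat = mge.tensor(np.random.randn(1, 512, 13, 13).astype("float32"))
logits = F.flatten(head(feat), 1)
print(logits.shape)  # (1, 1000)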
Example #11
    def __init__(self, stem_filters, num_filters=42):
        super(CellStem0, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        self.conv_1x1 = []
        self.conv_1x1.append(M.ReLU())
        self.conv_1x1.append(M.Conv2d(self.stem_filters, self.num_filters, 1, stride=1, bias=False))
        self.conv_1x1.append(M.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = M.Sequential(*self.conv_1x1)

        self.comb_iter_0_left = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 7, 2, 3, bias=False)

        self.comb_iter_1_left = M.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 7, 2, 3, bias=False)

        self.comb_iter_2_left = M.AvgPool2d(3, stride=2, padding=1)
        self.comb_iter_2_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 5, 2, 2, bias=False)

        self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(self.num_filters, self.num_filters, 3, 1, 1, bias=False)
        self.comb_iter_4_right = M.MaxPool2d(3, stride=2, padding=1)
Example #12
 def __init__(self,
              in_ch,
              out_ch,
              ksize,
              stride=1,
              expansion=1.0,
              bias=False,
              norm_layer=M.BatchNorm2d,
              activation=M.ReLU()):
     super(XXBlock, self).__init__()
     if norm_layer is None:
         norm_layer = M.BatchNorm2d
     if activation is None:
         activation = M.ReLU()
     expansion_out_ch = round(out_ch * expansion)
     self.conv_block = M.Sequential(
         M.Conv2d(in_ch,
                  expansion_out_ch,
                  ksize,
                  stride=stride,
                  padding=ksize // 2), norm_layer(expansion_out_ch),
         activation,
         M.Conv2d(expansion_out_ch,
                  out_ch,
                  ksize,
                  stride=1,
                  padding=(ksize - 1) // 2,
                  bias=bias), norm_layer(out_ch))
     self.activation = activation
     self.shortcut = M.Sequential()
     if stride > 1 or in_ch != out_ch:
         if stride > 1:
             self.shortcut = M.Sequential(
                 M.AvgPool2d(kernel_size=stride + 1,
                             stride=stride,
                             padding=stride // 2),
                 M.Conv2d(in_ch,
                          out_ch,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=bias), norm_layer(out_ch))
         else:
             self.shortcut = M.Sequential(
                 M.Conv2d(in_ch,
                          out_ch,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=bias), norm_layer(out_ch))
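
For stride > 1 the shortcut above pools before the 1x1 projection, so every input pixel contributes to the identity path rather than only one in four being read by a strided conv. A standalone sketch of the stride=2 case (channel numbers hypothetical):

import numpy as np
import megengine as mge
import megengine.module as M

# stride=2 case: kernel = stride + 1 = 3, padding = stride // 2 = 1
shortcut = M.Sequential(
    M.AvgPool2d(kernel_size=3, stride=2, padding=1),
    M.Conv2d(64, 128, kernel_size=1, stride=1, padding=0, bias=False),
    M.BatchNorm2d(128),
)
x = mge.tensor(np.random.randn(1, 64, 56, 56).astype("float32"))
print(shortcut(x).shape)  # (1, 128, 28, 28)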
Example #13
    def __init__(self, stem_filters, num_filters):
        super(CellStem1, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        self.conv_1x1 = []
        self.conv_1x1.append(M.ReLU())
        self.conv_1x1.append(M.Conv2d(2*self.num_filters, self.num_filters, 1, stride=1, bias=False))
        self.conv_1x1.append(M.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = M.Sequential(*self.conv_1x1)

        self.relu = M.ReLU()
        self.path_1 = []
        self.path_1.append(M.AvgPool2d(1, stride=2))
        self.path_1.append(M.Conv2d(self.stem_filters, self.num_filters//2, 1, stride=1, bias=False))
        self.path_1 = M.Sequential(*self.path_1)

        self.path_2 = []
        # self.path_2.add_module('pad', M.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.append(M.AvgPool2d(1, stride=2))
        self.path_2.append(M.Conv2d(self.stem_filters, self.num_filters//2, 1, stride=1, bias=False))
        self.path_2 = M.Sequential(*self.path_2)

        self.final_path_bn = M.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True)

        self.comb_iter_0_left = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, bias=False)

        self.comb_iter_1_left = M.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, bias=False)

        self.comb_iter_2_left = M.AvgPool2d(3, stride=2, padding=1)
        self.comb_iter_2_right = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, bias=False)

        self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)

        self.comb_iter_4_left = BranchSeparables(self.num_filters, self.num_filters, 3, 1, 1, bias=False)
        self.comb_iter_4_right = M.MaxPool2d(3, stride=2, padding=1)
Example #14

    def __init__(self,
                 num_classes=1000,
                 aux_logits=True,
                 transform_input=False,
                 inception_blocks=None):
        super(Inception3, self).__init__()
        if inception_blocks is None:
            inception_blocks = [
                BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD,
                InceptionE, InceptionAux
            ]

        assert len(inception_blocks) == 7
        conv_block = inception_blocks[0]
        inception_a = inception_blocks[1]
        inception_b = inception_blocks[2]
        inception_c = inception_blocks[3]
        inception_d = inception_blocks[4]
        inception_e = inception_blocks[5]
        inception_aux = inception_blocks[6]

        self.aux_logits = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
        self.maxpool1 = M.MaxPool2d(kernel_size=3, stride=2)
        self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
        self.maxpool2 = M.MaxPool2d(kernel_size=3, stride=2)
        self.Mixed_5b = inception_a(192, pool_features=32)
        self.Mixed_5c = inception_a(256, pool_features=64)
        self.Mixed_5d = inception_a(288, pool_features=64)
        self.Mixed_6a = inception_b(288)
        self.Mixed_6b = inception_c(768, channels_7x7=128)
        self.Mixed_6c = inception_c(768, channels_7x7=160)
        self.Mixed_6d = inception_c(768, channels_7x7=160)
        self.Mixed_6e = inception_c(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = inception_aux(768, num_classes)
        self.Mixed_7a = inception_d(768)
        self.Mixed_7b = inception_e(1280)
        self.Mixed_7c = inception_e(2048)
        self.avgpool = M.AvgPool2d(8)
        self.dropout = M.Dropout()
        self.fc = M.Linear(2048, num_classes)
Example #15
    def __init__(self,
                 num_classes=1000,
                 aux_logits=True,
                 transform_input=False,
                 init_weights=True):
        super(GoogLeNet, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input

        self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = M.MaxPool2d(3, stride=2, padding=1)  # round the output size up (ceil)
        self.conv2 = BasicConv2d(64, 64, kernel_size=1)
        self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = M.MaxPool2d(3, stride=2, padding=1)

        self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = M.MaxPool2d(3, stride=2, padding=1)

        self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = M.MaxPool2d(2, stride=2, padding=0)

        self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)

        if aux_logits:
            self.aux1 = InceptionAux(512, num_classes)
            self.aux2 = InceptionAux(528, num_classes)

        # TODO
        self.avgpool = M.AvgPool2d(7)
        self.dropout = M.Dropout(0.2)
        self.fc = M.Linear(1024, num_classes)
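
The forward is not shown; in the standard GoogLeNet wiring the two auxiliary heads tap the trunk after inception4a and inception4d during training. A hedged sketch of that pattern, assuming aux_logits=True (function name hypothetical):

def googlenet_aux_taps(net, x):
    x = net.maxpool1(net.conv1(x))
    x = net.maxpool2(net.conv3(net.conv2(x)))
    x = net.maxpool3(net.inception3b(net.inception3a(x)))
    x = net.inception4a(x)
    aux1 = net.aux1(x)   # first auxiliary classifier (512-channel tap)
    x = net.inception4d(net.inception4c(net.inception4b(x)))
    aux2 = net.aux2(x)   # second auxiliary classifier (528-channel tap)
    x = net.maxpool4(net.inception4e(x))
    x = net.inception5b(net.inception5a(x))
    return x, aux1, aux2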
Example #16
 def __init__(self,
              in_ch,
              out_ch,
              stride=2,
              r_lim=7,
              K=4,
              refin=True,
              refin_ch=3):
     """
         Implementation of the Extremely Efficient Spatial Pyramid module introduced in
         "ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network"
         <https://arxiv.org/pdf/1811.11431.pdf>
         Parameters
         ----------
         in_ch (int): number of channels for input
         out_ch (int): number of channels for output
         stride (int): stride of the convs
         r_lim (int): A maximum value of receptive field allowed for EESP block
         K (int): number of parallel branches
         refin (bool): whether use the inference from input image
     """
     super(SESSP, self).__init__()
     eesp_out = out_ch - in_ch
     self.eesp = EESP(in_ch, eesp_out, stride=stride, r_lim=r_lim, K=K)
     self.avg_pool = M.AvgPool2d(3, stride=stride, padding=1)
     self.refin = refin
     self.stride = stride
     self.activation = M.PReLU(out_ch)
     if refin:
         self.refin_conv = M.Sequential(
             Conv2d(refin_ch,
                    refin_ch,
                    ksize=3,
                    stride=1,
                    padding=1,
                    activation=M.PReLU(refin_ch)),
             Conv2d(refin_ch, out_ch, activation=None))
Example #17

    def __init__(self, in_channels, channels_7x7, conv_block=None):
        super(InceptionC, self).__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)

        c7 = channels_7x7
        self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = conv_block(c7,
                                      c7,
                                      kernel_size=(1, 7),
                                      padding=(0, 3))
        self.branch7x7_3 = conv_block(c7,
                                      192,
                                      kernel_size=(7, 1),
                                      padding=(3, 0))

        self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = conv_block(c7,
                                         c7,
                                         kernel_size=(7, 1),
                                         padding=(3, 0))
        self.branch7x7dbl_3 = conv_block(c7,
                                         c7,
                                         kernel_size=(1, 7),
                                         padding=(0, 3))
        self.branch7x7dbl_4 = conv_block(c7,
                                         c7,
                                         kernel_size=(7, 1),
                                         padding=(3, 0))
        self.branch7x7dbl_5 = conv_block(c7,
                                         192,
                                         kernel_size=(1, 7),
                                         padding=(0, 3))

        self.avgpool = M.AvgPool2d(3, 1, padding=1)
        self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
Example #18
    def __init__(self):
        super(mobilenetv1, self).__init__()

        def conv_bn(inp, oup, stride):
            return M.Sequential(M.Conv2d(inp, oup, 3, stride, 1),
                                M.BatchNorm2d(oup), M.ReLU())

        def conv_dw(inp, oup, stride):
            return M.Sequential(
                M.Conv2d(inp, inp, 3, stride, 1, groups=inp),
                M.BatchNorm2d(inp),
                M.ReLU(),
                M.Conv2d(inp, oup, 1, 1, 0),
                M.BatchNorm2d(oup),
                M.ReLU(),
            )

        self.model = M.Sequential(
            conv_bn(3, 32, 2),
            conv_dw(32, 64, 1),
            conv_dw(64, 128, 2),
            conv_dw(128, 128, 1),
            conv_dw(128, 256, 2),
            conv_dw(256, 256, 1),
            conv_dw(256, 512, 2),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 512, 1),
            conv_dw(512, 1024, 2),
            conv_dw(1024, 1024, 1),
            M.AvgPool2d(7),
        )
        self.fc1 = M.Linear(1024, 1000)
        self.fc2 = M.Linear(1000, 10)
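
The snippet stops at __init__; a hedged usage sketch that applies the declared pieces in order, assuming the full mobilenetv1 class is in scope (for a 224x224 input the final feature map is 7x7, which M.AvgPool2d(7) collapses to 1x1):

import numpy as np
import megengine as mge
import megengine.functional as F

net = mobilenetv1()
x = mge.tensor(np.random.randn(1, 3, 224, 224).astype("float32"))
feat = net.model(x)                         # (1, 1024, 1, 1) after AvgPool2d(7)
out = net.fc2(net.fc1(F.flatten(feat, 1)))
print(out.shape)                            # (1, 10)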
Example #19
    def __init__(self, input_size=224, num_classes=1000, model_size="1.5x"):
        super(ShuffleNetV2, self).__init__()

        self.stage_repeats = [4, 8, 4]
        self.model_size = model_size
        if model_size == "0.5x":
            self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
        elif model_size == "1.0x":
            self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
        elif model_size == "1.5x":
            self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
        elif model_size == "2.0x":
            self.stage_out_channels = [-1, 24, 244, 488, 976, 2048]
        else:
            raise NotImplementedError

        # building first layer
        input_channel = self.stage_out_channels[1]
        self.first_conv = M.Sequential(
            M.Conv2d(3, input_channel, 3, 2, 1, bias=False),
            M.BatchNorm2d(input_channel),
            FReLU(input_channel),
        )

        self.maxpool = M.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.features = []
        for idxstage in range(len(self.stage_repeats)):
            numrepeat = self.stage_repeats[idxstage]
            output_channel = self.stage_out_channels[idxstage + 2]

            for i in range(numrepeat):
                if i == 0:
                    self.features.append(
                        ShuffleV2Block(
                            input_channel,
                            output_channel,
                            mid_channels=output_channel // 2,
                            ksize=3,
                            stride=2,
                        ))
                else:
                    self.features.append(
                        ShuffleV2Block(
                            input_channel // 2,
                            output_channel,
                            mid_channels=output_channel // 2,
                            ksize=3,
                            stride=1,
                        ))

                input_channel = output_channel

        self.features = M.Sequential(*self.features)

        self.conv_last = M.Sequential(
            M.Conv2d(input_channel,
                     self.stage_out_channels[-1],
                     1,
                     1,
                     0,
                     bias=False),
            M.BatchNorm2d(self.stage_out_channels[-1]),
            FReLU(self.stage_out_channels[-1]),
        )
        self.globalpool = M.AvgPool2d(7)
        if self.model_size == "2.0x":
            self.dropout = M.Dropout(0.2)
        self.classifier = M.Sequential(
            M.Linear(self.stage_out_channels[-1], num_classes, bias=False))
        self._initialize_weights()
Example #20
    def __init__(self, num_classes=1001, stem_filters=96, penultimate_filters=4032, filters_multiplier=2):
        super(NASNetALarge, self).__init__()
        self.num_classes = num_classes
        self.stem_filters = stem_filters
        self.penultimate_filters = penultimate_filters
        self.filters_multiplier = filters_multiplier

        filters = self.penultimate_filters // 24
        # 24 is default value for the architecture

        self.conv0 = []
        self.conv0.append(M.Conv2d(in_channels=3, out_channels=self.stem_filters, kernel_size=3, padding=0, stride=2,
                                   bias=False))
        self.conv0.append(M.BatchNorm2d(self.stem_filters, eps=0.001, momentum=0.1, affine=True))
        self.conv0 = M.Sequential(*self.conv0)

        self.cell_stem_0 = CellStem0(self.stem_filters, num_filters=filters // (filters_multiplier ** 2))
        self.cell_stem_1 = CellStem1(self.stem_filters, num_filters=filters // filters_multiplier)

        self.cell_0 = FirstCell(in_channels_left=filters, out_channels_left=filters//2,
                                in_channels_right=2*filters, out_channels_right=filters)
        self.cell_1 = NormalCell(in_channels_left=2*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_2 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_3 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_4 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_5 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)

        self.reduction_cell_0 = ReductionCell0(in_channels_left=6*filters, out_channels_left=2*filters,
                                               in_channels_right=6*filters, out_channels_right=2*filters)

        self.cell_6 = FirstCell(in_channels_left=6*filters, out_channels_left=filters,
                                in_channels_right=8*filters, out_channels_right=2*filters)
        self.cell_7 = NormalCell(in_channels_left=8*filters, out_channels_left=2*filters,
                                 in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_8 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                 in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_9 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                 in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_10 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                  in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_11 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                  in_channels_right=12*filters, out_channels_right=2*filters)

        self.reduction_cell_1 = ReductionCell1(in_channels_left=12*filters, out_channels_left=4*filters,
                                               in_channels_right=12*filters, out_channels_right=4*filters)

        self.cell_12 = FirstCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                 in_channels_right=16*filters, out_channels_right=4*filters)
        self.cell_13 = NormalCell(in_channels_left=16*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_14 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_15 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_16 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_17 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)

        self.relu = M.ReLU()
        self.avg_pool = M.AvgPool2d(11, stride=1, padding=0)
        self.dropout = M.Dropout()
        self.last_linear = M.Linear(24*filters, self.num_classes)
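
The final M.AvgPool2d(11, stride=1) assumes the 11x11 map that NASNet-A-Large's usual 331x331 input produces. A quick standalone shape check (4032 is the default penultimate_filters):

import numpy as np
import megengine as mge
import megengine.module as M

pool = M.AvgPool2d(11, stride=1, padding=0)
feat = mge.tensor(np.random.randn(1, 4032, 11, 11).astype("float32"))
print(pool(feat).shape)  # (1, 4032, 1, 1)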
Example #21
    def __init__(self,
                 inplanes,
                 outplanes,
                 stride=1,
                 dilation=1,
                 groups=1,
                 downsample=None,
                 base_width=64,
                 norm_layer=None,
                 se_module=None,
                 radix=2,
                 reduction=4,
                 avd=False,
                 avd_first=False,
                 is_first=False):
        '''
            Implementation of the bottleneck block.
            Args:
                inplanes (int): the number of channels of the input
                outplanes (int): the number of channels of the output (the number of kernels of the conv layers)
                stride (int, tuple or list): the stride of the second conv layer
                dilation (int): the dilation rate of the second conv layer of the block
                groups (int): the number of groups for the second conv layer
                downsample (megengine.module.Module or None): if not None, downsamples x for the shortcut
                base_width (int): the basic width of the layer
                norm_layer (None or megengine.module.Module): the normalization layer of the block, default is batch normalization
                se_module (SEModule): the Squeeze-and-Excitation module
                radix (int): the radix index
                reduction (int): the reduction factor
                avd (bool): whether to use the avd layer
                avd_first (bool): whether to use the avd layer before conv2
                is_first (bool): whether this is the first block of the stage
        '''
        super(Bottleneck, self).__init__()
        width = int((base_width / 64) * outplanes) * groups
        if norm_layer is None:
            norm_layer = M.BatchNorm2d
        self.avd = avd and (stride > 1 or is_first)
        self.avd_first = avd_first
        if self.avd:
            self.avd_layer = M.AvgPool2d(3, stride, padding=1)
            stride = 1
        self.radix = radix
        #layer1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        #layer2
        if self.radix >= 1:
            self.conv2 = SplAtConv2d(width,
                                     width,
                                     kernel_size=3,
                                     stride=stride,
                                     padding=dilation,
                                     dilation=dilation,
                                     groups=groups,
                                     radix=radix,
                                     reduction=reduction)
        else:
            self.conv2 = conv3x3(width,
                                 width,
                                 stride=stride,
                                 groups=groups,
                                 dilation=dilation)
            self.bn2 = norm_layer(width)
        #layer3
        self.conv3 = conv1x1(width, outplanes * self.expansion)
        self.bn3 = norm_layer(outplanes * self.expansion)

        #activation layer
        self.relu = M.ReLU()

        #downsample layer
        self.downsample = downsample
        #se module
        self.se = se_module

        #stride
        self.stride = stride
Example #22
 def _make_layer(self,
                 block,
                 planes,
                 blocks,
                 stride=1,
                 dilation=1,
                 se_module=None,
                 reduction=16,
                 radix=0,
                 avd=False,
                 avd_first=False):
      '''
          Implementation of a stage in resnet.
          Args:
              block: megengine.module.Module, the block module
              planes: int, the base channels
              blocks: int, the number of blocks for this stage
              stride: int, the stride for the first block in the stage
              dilation: int, the dilation (atrous) rate
              se_module: SEModule or None, the SE module from SENet
              reduction: int, the reduction rate
              radix: int, the radix index from ResNest
              avd: bool, whether to use the avd layer
              avd_first: bool, whether to use the avd layer before the bottleneck block's conv2
      '''
     norm_layer = self.norm_layer
     downsample = None
     se = None
     if se_module is not None:
         se = se_module(planes * block.expansion,
                        reduction,
                        norm_layer=self.norm_layer)
     if stride != 1 or self.inplanes != planes * block.expansion:
         down_layers = []
         down_stride = stride
         if self.avg_layer:
             if self.avg_down:
                 avg_layer = M.AvgPool2d(kernel_size=down_stride,
                                         stride=down_stride,
                                         padding=0)
                 down_stride = 1
             else:
                 avg_layer = M.AvgPool2d(kernel_size=1, stride=1, padding=0)
             down_layers.append(avg_layer)
         down_layers += [
             conv1x1(self.inplanes, planes * block.expansion, down_stride),
             norm_layer(planes * block.expansion)
         ]
         downsample = M.Sequential(*down_layers)
     layers = []
     layers.append(
         block(self.inplanes,
               planes,
               groups=self.groups,
               downsample=downsample,
               stride=stride,
               base_width=self.base_width,
               dilation=dilation,
               norm_layer=norm_layer,
               se_module=se,
               radix=radix,
               reduction=reduction,
               avd=avd,
               avd_first=avd_first,
               is_first=True))
     self.inplanes = planes * block.expansion
     if se_module is not None:
         se = se_module(self.inplanes,
                        reduction,
                        norm_layer=self.norm_layer)
     for _ in range(1, blocks):
         layers.append(
             block(self.inplanes,
                   planes,
                   groups=self.groups,
                   base_width=self.base_width,
                   dilation=dilation,
                   norm_layer=norm_layer,
                   se_module=se,
                   reduction=reduction,
                   avd=avd,
                   avd_first=avd_first))
     return M.Sequential(*layers)
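
When self.avg_down is set, the code above builds the ResNet-D style shortcut: average-pool with the full stride, then project with a stride-1 1x1 conv. A standalone comparison with the plain strided projection (channel numbers hypothetical):

import numpy as np
import megengine as mge
import megengine.module as M

# plain downsample: the strided 1x1 conv reads only 1 of every 4 pixels
plain = M.Sequential(
    M.Conv2d(256, 512, kernel_size=1, stride=2, bias=False),
    M.BatchNorm2d(512),
)
# avg_down: pool first, then project at stride 1, so all pixels contribute
avg_down = M.Sequential(
    M.AvgPool2d(kernel_size=2, stride=2, padding=0),
    M.Conv2d(256, 512, kernel_size=1, stride=1, bias=False),
    M.BatchNorm2d(512),
)
x = mge.tensor(np.random.randn(1, 256, 28, 28).astype("float32"))
print(plain(x).shape, avg_down(x).shape)  # both (1, 512, 14, 14)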
Example #23
 def _make_grid_layer(self,
                      block,
                      planes,
                      blocks,
                      stride=1,
                      dilation=1,
                      se_module=None,
                      reduction=16,
                      radix=0,
                      avd=False,
                      avd_first=False):
      '''
          Implementation of the Multi-Grid method in deeplabv3.
          Args:
              block: megengine.module.Module, the block module
              planes: int, the base channels
              blocks: int, the number of blocks for this stage
              stride: int, the stride for the first block in the stage
              dilation: int, the dilation (atrous) rate
              se_module: SEModule or None, the SE module from SENet
              reduction: int, the reduction rate
              radix: int, the radix index from ResNest
              avd: bool, whether to use the avd layer
              avd_first: bool, whether to use the avd layer before the bottleneck block's conv2
          Reference:
              "Rethinking Atrous Convolution for Semantic Image Segmentation" <https://arxiv.org/abs/1706.05587>
      '''
     norm_layer = self.norm_layer
     downsample = None
     se = None
     if se_module is not None:
         se = se_module(planes * block.expansion,
                        reduction,
                        norm_layer=self.norm_layer)
      if stride != 1 or self.inplanes != planes * block.expansion:
          down_layers = []
          down_stride = stride
          if self.avg_layer:
              if self.avg_down:
                  # pool with the full stride, then let the 1x1 conv run at stride 1
                  # (the original set stride = 1 before building the pool, which
                  # degenerated it to kernel 1 / stride 1; this mirrors _make_layer)
                  avg_layer = M.AvgPool2d(kernel_size=down_stride,
                                          stride=down_stride,
                                          padding=0)
                  down_stride = 1
              else:
                  avg_layer = M.AvgPool2d(kernel_size=1, stride=1, padding=0)
              down_layers.append(avg_layer)
          down_layers += [
              conv1x1(self.inplanes, planes * block.expansion, down_stride),
              norm_layer(planes * block.expansion)
          ]
          downsample = M.Sequential(*down_layers)
     layers = []
     layers.append(
         block(self.inplanes,
               planes,
               groups=self.groups,
               stride=stride,
               downsample=downsample,
               base_width=self.base_width,
               dilation=dilation * self.multi_grids[0],
               norm_layer=norm_layer,
               se_module=se,
               radix=radix,
               avd=avd,
               avd_first=avd_first))
     self.inplanes = planes * block.expansion
     if se_module is not None:
         se = se_module(self.inplanes,
                        reduction,
                        norm_layer=self.norm_layer)
     for i in range(1, blocks):
         layers.append(
             block(self.inplanes,
                   planes,
                   groups=self.groups,
                   base_width=self.base_width,
                   dilation=dilation * self.multi_grids[i],
                   norm_layer=norm_layer,
                   se_module=se,
                   radix=radix,
                   avd=avd,
                   avd_first=avd_first))
     return M.Sequential(*layers)