Example 1
    def __init__(self, num_classes=10):
        super(SqueezeNet, self).__init__()

        self.features = nn.SequentialCell([
            nn.Conv2d(3,
                      96,
                      kernel_size=7,
                      stride=2,
                      pad_mode='valid',
                      has_bias=True),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),
            Fire(96, 16, 64, 64),
            Fire(128, 16, 64, 64),
            Fire(128, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2),
            Fire(256, 32, 128, 128),
            Fire(256, 48, 192, 192),
            Fire(384, 48, 192, 192),
            Fire(384, 64, 256, 256),
            nn.MaxPool2d(kernel_size=3, stride=2),
            Fire(512, 64, 256, 256),
        ])

        # Final convolution is initialized differently from the rest
        self.final_conv = nn.Conv2d(512,
                                    num_classes,
                                    kernel_size=1,
                                    has_bias=True)
        self.dropout = nn.Dropout(keep_prob=0.5)
        self.relu = nn.ReLU()
        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.custom_init_weight()
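
A minimal standalone sketch (not taken from the example above; the input shape is illustrative, and it assumes MindSpore is installed) of how the 'valid' and 'same' pad modes used throughout these examples affect the pooled output size:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

x = Tensor(np.ones((1, 3, 224, 224), np.float32))
pool_valid = nn.MaxPool2d(kernel_size=3, stride=2)                  # pad_mode defaults to 'valid'
pool_same = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
print(pool_valid(x).shape)  # (1, 3, 111, 111): floor((224 - 3) / 2) + 1
print(pool_same(x).shape)   # (1, 3, 112, 112): ceil(224 / 2)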
Example 2
 def __init__(self, num_classes=10, is_training=True):
     super(InceptionV3, self).__init__()
     self.is_training = is_training
     self.Conv2d_1a = BasicConv2d(3,
                                  32,
                                  kernel_size=3,
                                  stride=2,
                                  pad_mode='valid')
     self.Conv2d_2a = BasicConv2d(32,
                                  32,
                                  kernel_size=3,
                                  stride=1,
                                  pad_mode='valid')
     self.Conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1)
     self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2)
     self.Conv2d_3b = BasicConv2d(64, 80, kernel_size=1)
     self.Conv2d_4a = BasicConv2d(80, 192, kernel_size=3, pad_mode='valid')
     self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2)
     self.Mixed_5b = Inception_A(192, pool_features=32)
     self.Mixed_5c = Inception_A(256, pool_features=64)
     self.Mixed_5d = Inception_A(288, pool_features=64)
     self.Mixed_6a = Inception_B(288)
     self.Mixed_6b = Inception_C(768, channels_7x7=128)
     self.Mixed_6c = Inception_C(768, channels_7x7=160)
     self.Mixed_6d = Inception_C(768, channels_7x7=160)
     self.Mixed_6e = Inception_C(768, channels_7x7=192)
     self.Mixed_7a = Inception_D(768)
     self.Mixed_7b = Inception_E(1280)
     self.Mixed_7c = Inception_E(2048)
     if is_training:
         self.aux_logits = AuxLogits(768, num_classes)
     self.logits = Logits(num_classes, dropout_keep_prob=0.5)
Example 3
    def __init__(self, backbone_shape, backbone, out_channel):
        super(YOLOv4, self).__init__()
        self.out_channel = out_channel
        self.backbone = backbone

        self.conv1 = _conv_bn_leakyrelu(1024, 512, ksize=1)
        self.conv2 = _conv_bn_leakyrelu(512, 1024, ksize=3)
        self.conv3 = _conv_bn_leakyrelu(1024, 512, ksize=1)

        self.maxpool1 = nn.MaxPool2d(kernel_size=5, stride=1, pad_mode='same')
        self.maxpool2 = nn.MaxPool2d(kernel_size=9, stride=1, pad_mode='same')
        self.maxpool3 = nn.MaxPool2d(kernel_size=13, stride=1, pad_mode='same')
        self.conv4 = _conv_bn_leakyrelu(2048, 512, ksize=1)

        self.conv5 = _conv_bn_leakyrelu(512, 1024, ksize=3)
        self.conv6 = _conv_bn_leakyrelu(1024, 512, ksize=1)
        self.conv7 = _conv_bn_leakyrelu(512, 256, ksize=1)

        self.conv8 = _conv_bn_leakyrelu(512, 256, ksize=1)
        self.backblock0 = YoloBlock(backbone_shape[-2], out_chls=backbone_shape[-3], out_channels=out_channel)

        self.conv9 = _conv_bn_leakyrelu(256, 128, ksize=1)
        self.conv10 = _conv_bn_leakyrelu(256, 128, ksize=1)
        self.conv11 = _conv_bn_leakyrelu(128, 256, ksize=3, stride=2)
        self.conv12 = _conv_bn_leakyrelu(256, 512, ksize=3, stride=2)

        self.backblock1 = YoloBlock(backbone_shape[-3], out_chls=backbone_shape[-4], out_channels=out_channel)
        self.backblock2 = YoloBlock(backbone_shape[-2], out_chls=backbone_shape[-3], out_channels=out_channel)
        self.backblock3 = YoloBlock(backbone_shape[-1], out_chls=backbone_shape[-2], out_channels=out_channel)

        self.concat = P.Concat(axis=1)
Example 4
    def __init__(self, num_classes, include_top=True):
        super(GoogleNet, self).__init__()
        self.conv1 = Conv2dBlock(3, 64, kernel_size=7, stride=2, padding=0)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.conv2 = Conv2dBlock(64, 64, kernel_size=1)
        self.conv3 = Conv2dBlock(64, 192, kernel_size=3, padding=0)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.block3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.block3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.block4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.block4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.block4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.block4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.block4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same")

        self.block5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.block5b = Inception(832, 384, 192, 384, 48, 128, 128)

        self.dropout = nn.Dropout(keep_prob=0.8)
        self.include_top = include_top
        if self.include_top:
            self.mean = P.ReduceMean(keep_dims=True)
            self.flatten = nn.Flatten()
            self.classifier = nn.Dense(1024, num_classes, weight_init=weight_variable(),
                                       bias_init=weight_variable())
Example 5
    def __init__(self, stem_filters, num_filters=42):
        super(CellStem0, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        self.conv_1x1 = nn.SequentialCell([
            nn.ReLU(),
            nn.Conv2d(in_channels=self.stem_filters,
                      out_channels=self.num_filters,
                      kernel_size=1,
                      stride=1,
                      pad_mode='pad',
                      has_bias=False),
            nn.BatchNorm2d(num_features=self.num_filters,
                           eps=0.001,
                           momentum=0.9,
                           affine=True)
        ])

        self.comb_iter_0_left = BranchSeparables(self.num_filters,
                                                 self.num_filters, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(self.stem_filters,
                                                      self.num_filters,
                                                      7,
                                                      2,
                                                      3,
                                                      bias=False)

        self.comb_iter_1_left = nn.MaxPool2d(kernel_size=3,
                                             stride=2,
                                             pad_mode='same')
        self.comb_iter_1_right = BranchSeparablesStem(self.stem_filters,
                                                      self.num_filters,
                                                      7,
                                                      2,
                                                      3,
                                                      bias=False)

        self.comb_iter_2_left = nn.AvgPool2d(kernel_size=3,
                                             stride=2,
                                             pad_mode='same')
        self.comb_iter_2_right = BranchSeparablesStem(self.stem_filters,
                                                      self.num_filters,
                                                      5,
                                                      2,
                                                      2,
                                                      bias=False)

        self.comb_iter_3_right = nn.AvgPool2d(kernel_size=3,
                                              stride=1,
                                              pad_mode='same')

        self.comb_iter_4_left = BranchSeparables(self.num_filters,
                                                 self.num_filters,
                                                 3,
                                                 1,
                                                 1,
                                                 bias=False)
        self.comb_iter_4_right = nn.MaxPool2d(kernel_size=3,
                                              stride=2,
                                              pad_mode='same')
Example 6
    def __init__(self, in_classes, kernel_size, padding, maxpool, has_bias):
        super(MusicTaggerCNN, self).__init__()
        self.in_classes = in_classes
        self.kernel_size = kernel_size
        self.maxpool = maxpool
        self.padding = padding
        self.has_bias = has_bias
        # build model
        self.conv1 = nn.Conv2d(self.in_classes[0], self.in_classes[1],
                               self.kernel_size[0])
        self.conv2 = nn.Conv2d(self.in_classes[1], self.in_classes[2],
                               self.kernel_size[1])
        self.conv3 = nn.Conv2d(self.in_classes[2], self.in_classes[3],
                               self.kernel_size[2])
        self.conv4 = nn.Conv2d(self.in_classes[3], self.in_classes[4],
                               self.kernel_size[3])

        self.bn1 = nn.BatchNorm2d(self.in_classes[1])
        self.bn2 = nn.BatchNorm2d(self.in_classes[2])
        self.bn3 = nn.BatchNorm2d(self.in_classes[3])
        self.bn4 = nn.BatchNorm2d(self.in_classes[4])

        self.pool1 = nn.MaxPool2d(maxpool[0], maxpool[0])
        self.pool2 = nn.MaxPool2d(maxpool[1], maxpool[1])
        self.pool3 = nn.MaxPool2d(maxpool[2], maxpool[2])
        self.pool4 = nn.MaxPool2d(maxpool[3], maxpool[3])
        self.poolreduce = P.ReduceMax(keep_dims=False)
        self.Act = nn.ReLU()
        self.flatten = nn.Flatten()
        self.dense = nn.Dense(2048, 50, activation='sigmoid')
        self.sigmoid = nn.Sigmoid()
Example 7
 def __init__(self, is_training=True):
     super(VGG, self).__init__()
     self.conv1 = Conv(3, 64, use_bn=True)
     self.conv2 = Conv(64, 128, use_bn=True)
     self.conv3 = Conv(128, 256, use_bn=True)
     self.conv4 = Conv(256, 256, use_bn=True)
     self.conv5 = Conv(256, 512, use_bn=True)
     self.conv6 = Conv(512, 512, use_bn=True)
     self.conv7 = Conv(512, 512, kernel_size=2, pad_mode='valid', use_bn=True)
     self.maxpool2d1 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')
     self.maxpool2d2 = nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1), pad_mode='same')
     self.bn1 = _bn(512)
Example 8
    def __init__(self):
        super(VGG16, self).__init__()
        self.b1 = _make_layer([3, 64, 64])
        self.b2 = _make_layer([64, 128, 128])
        self.b3 = _make_layer([128, 256, 256, 256])
        self.b4 = _make_layer([256, 512, 512, 512])
        self.b5 = _make_layer([512, 512, 512, 512])

        self.m1 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='SAME')
        self.m2 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='SAME')
        self.m3 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='SAME')
        self.m4 = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='SAME')
        self.m5 = nn.MaxPool2d(kernel_size=3, stride=1, pad_mode='SAME')
Example 9
    def __init__(self, num_classes=10, cut_layer=None):
        super().__init__()
        self.cut_layer = cut_layer

        self.conv1 = nn.Conv2d(1, 6, 5, pad_mode='valid')
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.flatten = nn.Flatten()

        self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
        self.relu3 = nn.ReLU()

        self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
        self.relu4 = nn.ReLU()

        self.fc3 = nn.Dense(84, num_classes, weight_init=Normal(0.02))

        # Prepare named layers so that the model can be split
        # across the client and the server
        self.layers = []
        self.layerdict = collections.OrderedDict()
        self.layerdict['conv1'] = self.conv1
        self.layerdict['relu1'] = self.relu1
        self.layerdict['pool1'] = self.pool1
        self.layerdict['conv2'] = self.conv2
        self.layerdict['relu2'] = self.relu2
        self.layerdict['pool2'] = self.pool2
        self.layerdict['flatten'] = self.flatten
        self.layerdict['fc1'] = self.fc1
        self.layerdict['relu3'] = self.relu3
        self.layerdict['fc2'] = self.fc2
        self.layerdict['relu4'] = self.relu4
        self.layerdict['fc3'] = self.fc3
        self.layers.append('conv1')
        self.layers.append('relu1')
        self.layers.append('pool1')
        self.layers.append('conv2')
        self.layers.append('relu2')
        self.layers.append('pool2')
        self.layers.append('flatten')
        self.layers.append('fc1')
        self.layers.append('relu3')
        self.layers.append('fc2')
        self.layers.append('relu4')
        self.layers.append('fc3')
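
A minimal forward-pass sketch for the registry above (an assumption, not part of the original snippet): apply every registered layer in insertion order; the cut_layer splitting logic is intentionally omitted.

    def construct(self, x):
        # Run the layers in the order they were registered above.
        for name in self.layers:
            x = self.layerdict[name](x)
        return x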
Example 10
 def __init__(self,
              num_classes=10,
              channel=1,
              phase='train',
              include_top=True):
     super().__init__()
     self.conv1 = conv(channel,
                       12,
                       11,
                       stride=2,
                       pad_mode="same",
                       has_bias=True)
     self.conv2 = conv(12, 20, 3, pad_mode="same", has_bias=True)
     self.relu = P.ReLU()
     self.max_pool2d = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    pad_mode='valid')
     self.include_top = include_top
     if self.include_top:
         dropout_ratio = 0.65
         if phase == 'test':
             dropout_ratio = 1.0
         self.flatten = nn.Flatten()
         self.fc1 = fc_with_initialize(20 * 3 * 3, 1024)
         self.fc2 = fc_with_initialize(1024, 1024)
         self.fc3 = fc_with_initialize(1024, num_classes)
         self.dropout = nn.Dropout(dropout_ratio)
Example 11
    def __init__(self, block, layer_nums, in_channels, out_channels, strides,
                 num_classes):
        super(resnet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError(
                "layer_nums, in_channels and out_channels must each have length 4!"
            )
        self.conv1 = ConvBNReLU(3, 64, kernel_size=7, stride=2)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])
Example 12
def _make_layer(base, args, batch_norm):
    """Make stage network of VGG."""
    layers = []
    in_channels = 3
    for v in base:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            weight = 'ones'
            if args.initialize_mode == "XavierUniform":
                weight_shape = (v, in_channels, 3, 3)
                weight = initializer('XavierUniform',
                                     shape=weight_shape,
                                     dtype=mstype.float32).to_tensor()

            conv2d = nn.Conv2d(in_channels=in_channels,
                               out_channels=v,
                               kernel_size=3,
                               padding=args.padding,
                               pad_mode=args.pad_mode,
                               has_bias=args.has_bias,
                               weight_init=weight)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.SequentialCell(layers)
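
A hypothetical usage sketch for the helper above (the configuration list and the args stand-in are assumptions, not from the original script): integer entries build Conv-BN-ReLU stages and 'M' entries insert 2x2 max pooling, shown here with the classic VGG-16 layout.

from types import SimpleNamespace

cfg_vgg16 = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M']
args = SimpleNamespace(initialize_mode="XavierUniform", padding=0,
                       pad_mode="same", has_bias=False)
features = _make_layer(cfg_vgg16, args, batch_norm=True)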
Example 13
 def __init__(self, in_channels, has_bias=False):
     super(Inception_D, self).__init__()
     self.concat = P.Concat(axis=1)
     self.branch0 = nn.SequentialCell([
         BasicConv2d(in_channels, 192, kernel_size=1, has_bias=has_bias),
         BasicConv2d(192,
                     320,
                     kernel_size=3,
                     stride=2,
                     pad_mode='valid',
                     has_bias=has_bias)
     ])
     self.branch1 = nn.SequentialCell([
         BasicConv2d(in_channels, 192, kernel_size=1, has_bias=has_bias),
         BasicConv2d(192, 192, kernel_size=(1, 7),
                     has_bias=has_bias),  # check
         BasicConv2d(192, 192, kernel_size=(7, 1), has_bias=has_bias),
         BasicConv2d(192,
                     192,
                     kernel_size=3,
                     stride=2,
                     pad_mode='valid',
                     has_bias=has_bias)
     ])
     self.branch_pool = nn.MaxPool2d(kernel_size=3, stride=2)
Example 14
    def __init__(self):
        super(BasicCell, self).__init__()

        self.conv3x3_1 = _conv3x3(128, 128)
        self.bn3x3_1 = _bn(128)
        self.conv3x3_2 = _conv3x3(128, 128)
        self.bn3x3_2 = _bn(128)
        self.conv3x3_3 = _conv3x3(128, 128)
        self.bn3x3_3 = _bn(128)
        self.mp = nn.MaxPool2d(kernel_size=3, stride=1, pad_mode="same")

        self.proj1 = _conv1x1(128, 64)
        self.bn1 = _bn(64)
        self.proj2 = _conv1x1(128, 64)
        self.bn2 = _bn(64)
        self.proj3 = _conv1x1(128, 64)
        self.bn3 = _bn(64)
        self.proj4 = _conv1x1(128, 64)
        self.bn4 = _bn(64)
        self.proj5 = _conv1x1(128, 64)
        self.bn5 = _bn(64)
        self.proj6 = _conv1x1(128, 64)
        self.bn6 = _bn(64)

        self.relu = P.ReLU()
        self.concat = ops.Concat(axis=1)
Example 15
    def __init__(self, block, num_classes=100, batch_size=32):
        """init"""
        super(ResNet, self).__init__()
        self.batch_size = batch_size
        self.num_classes = num_classes

        self.conv1 = conv7x7(3, 64, stride=2, padding=0)

        self.bn1 = bn_with_initialize(64)
        self.relu = ops.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = MakeLayer0(block,
                                 in_channels=64,
                                 out_channels=256,
                                 stride=1)
        self.layer2 = MakeLayer1(block,
                                 in_channels=256,
                                 out_channels=512,
                                 stride=2)
        self.layer3 = MakeLayer2(block,
                                 in_channels=512,
                                 out_channels=1024,
                                 stride=2)
        self.layer4 = MakeLayer3(block,
                                 in_channels=1024,
                                 out_channels=2048,
                                 stride=2)

        self.pool = ops.ReduceMean(keep_dims=True)
        self.squeeze = ops.Squeeze(axis=(2, 3))
        self.fc = fc_with_initialize(512 * block.expansion, num_classes)
Example 16
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 momentum=0.9):
        super(ResidualBlock, self).__init__()

        out_chls = out_channels // self.expansion
        self.conv1 = _conv1x1(in_channels, out_chls, stride=1)
        self.bn1 = _fused_bn(out_chls, momentum=momentum)

        self.conv2 = _conv3x3(out_chls, out_chls, stride=stride)
        self.bn2 = _fused_bn(out_chls, momentum=momentum)

        self.conv3 = _conv1x1(out_chls, out_channels, stride=1)
        self.bn3 = _fused_bn(out_channels, momentum=momentum)

        self.relu = P.ReLU()
        self.downsample = (in_channels != out_channels)
        self.stride = stride
        if self.downsample:
            self.conv_down_sample = _conv1x1(in_channels, out_channels,
                                             stride=stride)
            self.bn_down_sample = _fused_bn(out_channels, momentum=momentum)
        elif self.stride != 1:
            self.maxpool_down = nn.MaxPool2d(kernel_size=1, stride=2, pad_mode='same')

        self.add = P.Add()
Example 17
    def __init__(self, include_last_bn_relu=True):
        super(PAVNet, self).__init__()
        self.conv1_1 = ops._conv_bn_crelu(3, 32, 2, 7)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')

        self.conv2_1 = ops._mCReLU(64, [48, 48, 128], 1, False)
        self.conv2_2 = ops._mCReLU(128, [48, 48, 128], 1, True)
        self.conv2_3 = ops._mCReLU(128, [48, 48, 128], 1, True)

        self.conv3_1 = ops._mCReLU(128, [96, 96, 256], 2, True)
        self.conv3_2 = ops._mCReLU(256, [96, 96, 256], 1, True)
        self.conv3_3 = ops._mCReLU(256, [96, 96, 256], 1, True)
        self.conv3_4 = ops._mCReLU(256, [96, 96, 256], 1, True)

        self.conv4_1 = ops._inception(256, '128 96-256 48-96-96 256 512', 2,
                                      True)
        self.conv4_2 = ops._inception(512, '128 96-256 48-96-96 512', 1, True)
        self.conv4_3 = ops._inception(512, '128 96-256 48-96-96 512', 1, True)
        self.conv4_4 = ops._inception(512, '128 96-256 48-96-96 512', 1, True)

        self.conv5_1 = ops._inception(512, '128 192-384 64-128-128 256 768', 2,
                                      True)
        self.conv5_2 = ops._inception(768, '128 192-384 64-128-128 768', 1,
                                      True)
        self.conv5_3 = ops._inception(768, '128 192-384 64-128-128 768', 1,
                                      True)
        self.conv5_4 = ops._inception(768, '128 192-384 64-128-128 768', 1,
                                      True)

        self.include_last_bn_relu = include_last_bn_relu
        if include_last_bn_relu:
            self.last_bn = nn.BatchNorm2d(num_features=768)
            self.last_scale = ops._scale(768)
            self.last_relu = nn.ReLU()
Example 18
    def __init__(self,
                 in_channel,
                 n_class=2,
                 feature_scale=2,
                 use_deconv=True,
                 use_bn=True):
        super(UNet, self).__init__()
        self.in_channel = in_channel
        self.n_class = n_class
        self.feature_scale = feature_scale
        self.use_deconv = use_deconv
        self.use_bn = use_bn

        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]

        # Downsampling path
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same")
        self.conv0 = UnetConv2d(self.in_channel, filters[0], self.use_bn)
        self.conv1 = UnetConv2d(filters[0], filters[1], self.use_bn)
        self.conv2 = UnetConv2d(filters[1], filters[2], self.use_bn)
        self.conv3 = UnetConv2d(filters[2], filters[3], self.use_bn)
        self.conv4 = UnetConv2d(filters[3], filters[4], self.use_bn)

        # Upsampling path
        self.up_concat1 = UnetUp(filters[1], filters[0], self.use_deconv, 2)
        self.up_concat2 = UnetUp(filters[2], filters[1], self.use_deconv, 2)
        self.up_concat3 = UnetUp(filters[3], filters[2], self.use_deconv, 2)
        self.up_concat4 = UnetUp(filters[4], filters[3], self.use_deconv, 2)

        # Final convolution
        self.final = nn.Conv2d(filters[0], n_class, 1)
Example 19
def test_maxpool2d_error_padding():
    """ test_maxpool2d_error_padding """
    kernel_size = 3.5
    stride = 3
    padding = 1
    with pytest.raises(ValueError):
        nn.MaxPool2d(kernel_size, stride, padding=padding)
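
A companion sketch (an assumption, not in the original test file) showing a construction that is expected to succeed: integer kernel_size and stride with the default pad mode.

def test_maxpool2d_valid_args():
    """ integer kernel_size and stride should construct without raising """
    nn.MaxPool2d(kernel_size=3, stride=3)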
Example 20
    def __init__(self, num_classes=10):
        super(SqueezeNet_Residual, self).__init__()

        self.conv1 = nn.Conv2d(3,
                               96,
                               kernel_size=7,
                               stride=2,
                               pad_mode='valid',
                               has_bias=True)
        self.fire2 = Fire(96, 16, 64, 64)
        self.fire3 = Fire(128, 16, 64, 64)
        self.fire4 = Fire(128, 32, 128, 128)
        self.fire5 = Fire(256, 32, 128, 128)
        self.fire6 = Fire(256, 48, 192, 192)
        self.fire7 = Fire(384, 48, 192, 192)
        self.fire8 = Fire(384, 64, 256, 256)
        self.fire9 = Fire(512, 64, 256, 256)
        # Final convolution is initialized differently from the rest
        self.conv10 = nn.Conv2d(512, num_classes, kernel_size=1, has_bias=True)

        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2)
        self.add = P.TensorAdd()
        self.dropout = nn.Dropout(keep_prob=0.5)
        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.custom_init_weight()
Example 21
    def __init__(self, num_class=10, channel=1):
        super(LeNet5, self).__init__()
        self.type = "fusion"
        self.num_class = num_class

        # change `nn.Conv2d` to `nn.Conv2dBnAct`
        self.conv1 = nn.Conv2dBnAct(channel,
                                    6,
                                    5,
                                    pad_mode='valid',
                                    has_bn=True,
                                    activation='relu')
        self.conv2 = nn.Conv2dBnAct(6,
                                    16,
                                    5,
                                    pad_mode='valid',
                                    has_bn=True,
                                    activation='relu')
        # change `nn.Dense` to `nn.DenseBnAct`
        self.fc1 = nn.DenseBnAct(16 * 5 * 5, 120, activation='relu')
        self.fc2 = nn.DenseBnAct(120, 84, activation='relu')
        self.fc3 = nn.DenseBnAct(84, self.num_class)

        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
Example 22
    def __init__(self, block, num_classes=100):
        super(ResNet9, self).__init__()

        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, pad_mode='pad', padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)

        self.layer1 = self.MakeLayer(block,
                                     1,
                                     in_channels=64,
                                     out_channels=256,
                                     stride=1)
        self.layer2 = self.MakeLayer(block,
                                     1,
                                     in_channels=256,
                                     out_channels=512,
                                     stride=2)
        self.layer3 = self.MakeLayer(block,
                                     1,
                                     in_channels=512,
                                     out_channels=1024,
                                     stride=2)
        self.layer4 = self.MakeLayer(block,
                                     1,
                                     in_channels=1024,
                                     out_channels=2048,
                                     stride=2)

        self.avgpool = nn.AvgPool2d(7, 1)
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(512 * block.expansion, num_classes)
Example 23
    def __init__(self, in_str):
        super(LeNet5, self).__init__()

        (a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
         a11, a12, a13, a14, a15) = map(int, in_str.strip().split())

        self.conv1 = nn.Conv2d(a1, a2, a3, pad_mode="valid")
        self.conv2 = nn.Conv2d(a4, a5, a6, pad_mode="valid")
        self.fc1 = nn.Dense(a7 * a8 * a9, a10)
        self.fc2 = nn.Dense(a11, a12)
        self.fc3 = nn.Dense(a13, a14)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=a15)
        self.flatten = nn.Flatten()
Example 24
    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes,
                 use_se=False):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError(
                "layer_nums, in_channels and out_channels must each have length 4!"
            )
        self.use_se = use_se
        self.se_block = self.use_se

        if self.use_se:
            self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)
            self.bn1_0 = _bn(32)
            self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)
            self.bn1_1 = _bn(32)
            self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)
        else:
            self.conv1 = _conv7x7(3, 64, stride=2)
        self.bn1 = _bn(64)
        self.relu = P.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0],
                                       use_se=self.use_se)
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1],
                                       use_se=self.use_se)
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2],
                                       use_se=self.use_se,
                                       se_block=self.se_block)
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3],
                                       use_se=self.use_se,
                                       se_block=self.se_block)

        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3], num_classes, use_se=self.use_se)
Example 25
    def __init__(self, block, layer_num, num_classes=100, batch_size=32):
        super(ResNet, self).__init__()
        self.batch_size = batch_size
        self.num_classes = num_classes

        self.conv1 = conv7x7(3, 64, stride=2, padding=0)

        self.bn1 = bn_with_initialize(64)
        self.relu = P.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="SAME")

        self.layer1 = MakeLayer0(block,
                                 layer_num[0],
                                 in_channels=64,
                                 out_channels=256,
                                 stride=1)
        self.layer2 = MakeLayer1(block,
                                 layer_num[1],
                                 in_channels=256,
                                 out_channels=512,
                                 stride=2)
        self.layer3 = MakeLayer2(block,
                                 layer_num[2],
                                 in_channels=512,
                                 out_channels=1024,
                                 stride=2)
        self.layer4 = MakeLayer3(block,
                                 layer_num[3],
                                 in_channels=1024,
                                 out_channels=2048,
                                 stride=2)

        self.pool = P.ReduceMean(keep_dims=True)
        self.fc = fc_with_initialize(512 * block.expansion, num_classes)
        self.flatten = nn.Flatten()
Example 26
    def __init__(self):
        super(LeNet, self).__init__()
        self.relu = P.ReLU()
        self.batch_size = 32

        self.conv1 = nn.Conv2d(1,
                               6,
                               kernel_size=5,
                               stride=1,
                               padding=0,
                               has_bias=False,
                               pad_mode='valid')
        self.conv2 = nn.Conv2d(6,
                               16,
                               kernel_size=5,
                               stride=1,
                               padding=0,
                               has_bias=False,
                               pad_mode='valid')
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.reshape = P.Reshape()
        self.fc1 = nn.Dense(400, 120)
        self.fc1.matmul.add_prim_attr("primitive_target", "CPU")
        self.fc1.bias_add.add_prim_attr("primitive_target", "CPU")
        self.fc2 = nn.Dense(120, 84)
        self.fc2.matmul.add_prim_attr("primitive_target", "CPU")
        self.fc2.bias_add.add_prim_attr("primitive_target", "CPU")
        self.fc3 = nn.Dense(84, 10)
        self.fc3.matmul.add_prim_attr("primitive_target", "CPU")
        self.fc3.bias_add.add_prim_attr("primitive_target", "CPU")
Example 27
    def __init__(self, num_class=10, channel=1):
        super(LeNet5, self).__init__()
        self.num_class = num_class

        self.conv1 = nn.Conv2dBnFoldQuant(channel,
                                          6,
                                          5,
                                          pad_mode='valid',
                                          per_channel=True,
                                          quant_delay=900)
        self.conv2 = nn.Conv2dBnFoldQuant(6,
                                          16,
                                          5,
                                          pad_mode='valid',
                                          per_channel=True,
                                          quant_delay=900)
        self.fc1 = nn.DenseQuant(16 * 5 * 5,
                                 120,
                                 per_channel=True,
                                 quant_delay=900)
        self.fc2 = nn.DenseQuant(120, 84, per_channel=True, quant_delay=900)
        self.fc3 = nn.DenseQuant(84,
                                 self.num_class,
                                 per_channel=True,
                                 quant_delay=900)

        self.relu = nn.ActQuant(nn.ReLU(), per_channel=False, quant_delay=900)
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
Example 28
    def __init__(self, in_channels, out_channels):
        super().__init__()

        self.maxpool_conv = nn.SequentialCell([
            nn.MaxPool2d(kernel_size=2, stride=2),
            DoubleConv(in_channels, out_channels)
        ])
Example 29
    def __init__(self):
        super(LeNet, self).__init__()
        self.relu = P.ReLU()
        self.batch_size = 1
        weight1 = Tensor(np.ones([6, 3, 5, 5]).astype(np.float32) * 0.01)
        weight2 = Tensor(np.ones([16, 6, 5, 5]).astype(np.float32) * 0.01)
        self.conv1 = nn.Conv2d(3,
                               6, (5, 5),
                               weight_init=weight1,
                               stride=1,
                               padding=0,
                               pad_mode='valid')
        self.conv2 = nn.Conv2d(6,
                               16, (5, 5),
                               weight_init=weight2,
                               pad_mode='valid',
                               stride=1,
                               padding=0)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="valid")

        self.reshape = P.Reshape()
        self.reshape1 = P.Reshape()

        self.fc1 = Dense(400, 120)
        self.fc2 = Dense(120, 84)
        self.fc3 = Dense(84, 10)
Example 30
    def __init__(self):
        super(LeNet, self).__init__()
        self.relu = P.ReLU()
        self.batch_size = 32

        self.conv1 = nn.Conv2d(1,
                               6,
                               kernel_size=5,
                               stride=1,
                               padding=0,
                               has_bias=False,
                               pad_mode='valid')
        self.conv2 = nn.Conv2d(6,
                               16,
                               kernel_size=5,
                               stride=1,
                               padding=0,
                               has_bias=False,
                               pad_mode='valid')
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.pool.recompute()
        self.reshape = P.Reshape()
        self.fc1 = nn.Dense(400, 120)
        self.fc2 = nn.Dense(120, 84)
        self.fc3 = nn.Dense(84, 10)