def __init__(self, block, num_classes=100):
        super(ResNet9, self).__init__()

        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')

        self.layer1 = self.MakeLayer(
            block, 1, in_channels=64, out_channels=256, stride=1)
        self.layer2 = self.MakeLayer(
            block, 1, in_channels=256, out_channels=512, stride=2)
        self.layer3 = self.MakeLayer(
            block, 1, in_channels=512, out_channels=1024, stride=2)
        self.layer4 = self.MakeLayer(
            block, 1, in_channels=1024, out_channels=2048, stride=2)

        self.avgpool = nn.AvgPool2d(7, 1)
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(512 * block.expansion, num_classes)
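The snippet stops at the layer definitions; a minimal construct sketch in the usual ResNet ordering (hypothetical, not part of the original source) would be:

def construct(self, x):
    # stem: conv -> bn -> relu -> maxpool, then the four residual stages
    x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
    x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
    x = self.flatten(self.avgpool(x))
    return self.fc(x)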
Example 2
def _deep_conv_bn_relu(in_channel,
                       channel_multiplier,
                       ksize,
                       stride=1,
                       padding=0,
                       dilation=1,
                       pad_mode="pad",
                       use_batch_statistics=False):
    """Get a spacetobatch -> conv2d -> batchnorm -> relu -> batchtospace layer"""
    return nn.SequentialCell(
        [DepthwiseConv2dNative(in_channel,
                               channel_multiplier,
                               kernel_size=ksize,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               pad_mode=pad_mode),
         nn.BatchNorm2d(channel_multiplier * in_channel, use_batch_statistics=use_batch_statistics),
         nn.ReLU()]
    )
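A brief usage sketch for the helper above, assuming MindSpore is available and DepthwiseConv2dNative is the project's depthwise-convolution Cell:

import numpy as np
import mindspore as ms

# output channels = channel_multiplier * in_channel = 32
cell = _deep_conv_bn_relu(in_channel=32, channel_multiplier=1, ksize=3, padding=1)
x = ms.Tensor(np.ones((1, 32, 16, 16), dtype=np.float32))
y = cell(x)  # 3x3 depthwise conv with padding 1 keeps the 16x16 spatial size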
Example 3
def _conv_bn_relu(in_channel,
                  out_channel,
                  ksize,
                  stride=1,
                  padding=0,
                  dilation=1,
                  pad_mode="pad",
                  use_batch_statistics=False):
    """Get a conv2d -> batchnorm -> relu layer"""
    return nn.SequentialCell([
        nn.Conv2d(in_channel,
                  out_channel,
                  kernel_size=ksize,
                  stride=stride,
                  padding=padding,
                  dilation=dilation,
                  pad_mode=pad_mode),
        nn.BatchNorm2d(out_channel, use_batch_statistics=use_batch_statistics),
        nn.ReLU()
    ])
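For reference, a hedged usage sketch (assumes MindSpore; with pad_mode="pad" and padding=1 a 3x3 kernel preserves spatial size):

import numpy as np
import mindspore as ms

block = _conv_bn_relu(3, 16, ksize=3, padding=1)
y = block(ms.Tensor(np.ones((1, 3, 32, 32), dtype=np.float32)))  # shape (1, 16, 32, 32)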
Example 4
 def __init__(self,
              conv,
              n_feat,
              kernel_size,
              reduction,
              has_bias=True,
              bn=False,
              act=nn.ReLU(),
              res_scale=1):
     """rcan"""
     super(RCAB, self).__init__()
     self.modules_body = []
     for i in range(2):
         self.modules_body.append(
             conv(n_feat, n_feat, kernel_size, has_bias=has_bias))
         if bn: self.modules_body.append(nn.BatchNorm2d(n_feat))
         if i == 0: self.modules_body.append(act)
     self.modules_body.append(CALayer(n_feat, reduction))
     self.body = nn.SequentialCell(*self.modules_body)
     self.res_scale = res_scale
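The construct method is not shown here; in the reference RCAN design the body output is added back to the input, roughly as below (a sketch assuming the standard residual form; some variants multiply by res_scale first):

def construct(self, x):
    res = self.body(x)  # conv -> act -> conv -> channel attention
    return res + x      # residual connection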
Example 5
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        layers.extend([
            # dw: depthwise 3x3 convolution
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
            # pw-linear: pointwise projection without activation
            nn.Conv2d(hidden_dim, oup, kernel_size=1, stride=1, has_bias=False),
            nn.BatchNorm2d(oup),
        ])
        self.conv = nn.SequentialCell(layers)
        self.add = TensorAdd()
        self.cast = P.Cast()
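A construct sketch consistent with the flags above (hypothetical, assuming the standard MobileNetV2 residual rule):

def construct(self, x):
    if self.use_res_connect:  # stride 1 and matching channel counts
        return self.add(x, self.conv(x))
    return self.conv(x)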
Example 6
 def __init__(self,
              cin,
              cout,
              ks,
              stride=1,
              with_bn=True,
              bias_init='zero'):
     super(Convolution, self).__init__()
     pad = (ks - 1) // 2
     self.conv = nn.Conv2d(cin,
                           cout,
                           kernel_size=ks,
                           pad_mode='pad',
                           padding=pad,
                           stride=stride,
                           has_bias=not with_bn,
                           bias_init=bias_init)
     self.bn = nn.BatchNorm2d(
         cout, momentum=BN_MOMENTUM) if with_bn else nn.SequentialCell()
     self.relu = nn.ReLU()
Example 7
 def __init__(self, in_channels, out_channels, kernel_size, stride):
     super(Conv2dBatchReLU, self).__init__()
     # Parameters
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.kernel_size = kernel_size
     self.stride = stride
     if isinstance(kernel_size, (list, tuple)):
         self.padding = [int(ii / 2) for ii in kernel_size]
     else:
         self.padding = int(kernel_size / 2)
     self.conv = nn.Conv2d(self.in_channels,
                           self.out_channels,
                           self.kernel_size,
                           self.stride,
                           has_bias=False,
                           pad_mode='pad',
                           padding=self.padding)
     self.bn = nn.BatchNorm2d(self.out_channels, momentum=0.9, eps=1e-5)
     self.relu = nn.ReLU()
Example 8
def test_bn2d():
    """ut of nn.BatchNorm2d"""
    gamma = Tensor(np.array([0.1, 0.3, 0.4]).astype(np.float32))
    beta = Tensor(np.zeros((3), dtype=np.float32))
    moving_mean = Tensor(np.zeros((3), dtype=np.float32))
    moving_var = Tensor(np.ones((3), dtype=np.float32))

    bn = nn.BatchNorm2d(num_features=3,
                        eps=1e-5,
                        momentum=0.1,
                        gamma_init=gamma,
                        beta_init=beta,
                        moving_mean_init=moving_mean,
                        moving_var_init=moving_var)

    # random 3-channel RGB-like input (note: randint's upper bound is exclusive)
    input_data = Tensor(np.random.randint(0, 256, [1, 3, 224, 224]).astype(np.float32))
    output = bn(input_data)
    output_np = output.asnumpy()
    assert isinstance(output_np[0][0][0][0], (np.float32, np.float64))
Example 9
def _conv_bn_relu(in_channel,
                  out_channel,
                  ksize,
                  stride=1,
                  padding=0,
                  dilation=1,
                  alpha=0.1,
                  momentum=0.99,
                  pad_mode="same"):
    """Get a conv2d batchnorm and relu layer."""
    return nn.SequentialCell([
        nn.Conv2d(in_channel,
                  out_channel,
                  kernel_size=ksize,
                  stride=stride,
                  padding=padding,
                  dilation=dilation,
                  pad_mode=pad_mode),
        nn.BatchNorm2d(out_channel, momentum=momentum),
        nn.LeakyReLU(alpha)
    ])
Example 10
    def __init__(self):
        super(Discriminator, self).__init__()
        self.nf = 32
        self.current_scale = 0

        self.sub_discriminators = nn.CellList()

        first_discriminator = nn.CellList()  # plain list of sub-blocks

        first_discriminator.append(nn.SequentialCell(nn.Conv2d(3, self.nf, 3, 1, padding=1, has_bias=True, pad_mode='pad'),
                                                     nn.LeakyReLU(2e-1)))  # SequentialCell container
        for _ in range(3):
            first_discriminator.append(nn.SequentialCell(nn.Conv2d(self.nf, self.nf, 3, 1, padding=1, has_bias=True, pad_mode='pad'),
                                                         nn.BatchNorm2d(self.nf),
                                                         nn.LeakyReLU(2e-1)))

        first_discriminator.append(nn.SequentialCell([nn.Conv2d(self.nf, 1, 3, 1, padding=1, has_bias=True, pad_mode='pad')]))

        first_discriminator = nn.SequentialCell(*first_discriminator)

        self.sub_discriminators.append(first_discriminator)
Example 11
    def __init__(self, block, block_num, output_stride, use_batch_statistics=True):
        super(Resnet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, pad_mode='pad', padding=3,
                               weight_init='xavier_uniform')
        self.bn1 = nn.BatchNorm2d(self.inplanes, use_batch_statistics=use_batch_statistics)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
        self.layer1 = self._make_layer(block, 64, block_num[0], use_batch_statistics=use_batch_statistics)
        self.layer2 = self._make_layer(block, 128, block_num[1], stride=2, use_batch_statistics=use_batch_statistics)

        if output_stride == 16:
            self.layer3 = self._make_layer(block, 256, block_num[2], stride=2,
                                           use_batch_statistics=use_batch_statistics)
            self.layer4 = self._make_layer(block, 512, block_num[3], stride=1, base_dilation=2, grids=[1, 2, 4],
                                           use_batch_statistics=use_batch_statistics)
        elif output_stride == 8:
            self.layer3 = self._make_layer(block, 256, block_num[2], stride=1, base_dilation=2,
                                           use_batch_statistics=use_batch_statistics)
            self.layer4 = self._make_layer(block, 512, block_num[3], stride=1, base_dilation=4, grids=[1, 2, 4],
                                           use_batch_statistics=use_batch_statistics)
Example 12
def _conv(in_channels,
          out_channels,
          kernel_size=3,
          stride=1,
          padding=0,
          pad_mode='pad',
          weights_update=True):
    """Conv2D wrapper."""
    layers = []
    conv = nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=padding,
                     pad_mode=pad_mode,
                     has_bias=False)
    if not weights_update:
        conv.weight.requires_grad = False
    layers += [conv]
    layers += [nn.BatchNorm2d(out_channels)]
    return nn.SequentialCell(layers)
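Usage sketch, e.g. to build a frozen backbone stem during fine-tuning (values are illustrative):

# 7x7 stride-2 stem whose conv weights stay fixed; BN parameters still train
stem = _conv(3, 64, kernel_size=7, stride=2, padding=3, weights_update=False)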
Example 13
 def _make_layer(self, cfg, batch_norm=False):
     layers = []
     in_channels = 3
     for v in cfg:
         if v == 'M':
             layers += [
                 nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')
             ]
         else:
             conv2d = Conv2d(in_channels=in_channels,
                             out_channels=v,
                             kernel_size=3,
                             stride=1,
                             pad_mode='same',
                             has_bias=True)
             if batch_norm:
                 layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]
             else:
                 layers += [conv2d, nn.ReLU()]
             in_channels = v
     return nn.SequentialCell(layers)
Example 14
 def __init__(self,
              num_in,
              num_out,
              kernel_size=1,
              stride=1,
              padding=0,
              num_groups=1,
              use_act=True,
              act_type='relu'):
     super(Unit, self).__init__()
     self.conv = nn.Conv2d(in_channels=num_in,
                           out_channels=num_out,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=padding,
                           group=num_groups,
                           has_bias=False,
                           pad_mode='pad')
     self.bn = nn.BatchNorm2d(num_out)
     self.use_act = use_act
     self.act = Activation(act_type) if use_act else None
Example 15
    def _make_layer(self, block, planes, blocks, stride=1, base_dilation=1, grids=None, use_batch_statistics=True):
        downsample = None  # identity shortcut unless the shape changes below
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.SequentialCell([
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion, use_batch_statistics=use_batch_statistics)
            ])

        if grids is None:
            grids = [1] * blocks

        layers = [
            block(self.inplanes, planes, stride, downsample, dilation=base_dilation * grids[0],
                  use_batch_statistics=use_batch_statistics)
        ]
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, dilation=base_dilation * grids[i],
                      use_batch_statistics=use_batch_statistics))

        return nn.SequentialCell(layers)
Example 16
 def __init__(self):
     super(BlockNet, self).__init__()
     self.conv1 = nn.Conv2d(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            pad_mode="pad",
                            padding=3)
     self.bn1 = nn.BatchNorm2d(64)
     self.relu = nn.ReLU()
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
     self.block_down_sample = ResidualBlock(64,
                                            256,
                                            stride=1,
                                            down_sample=True)
     self.flatten = P.Flatten()
     self.weight = Parameter(Tensor(np.ones([1024, 10]).astype(np.float32)),
                             name="weight")
      self.bias = Parameter(Tensor(np.ones([10]).astype(np.float32)),
                            name="bias")
     self.fc = P.MatMul()
     self.biasAdd = P.BiasAdd()
Example 17
 def __init__(self):
     super(Net, self).__init__()
     self.conv = nn.Conv2d(input_channel,
                           output_channel,
                           kernel_size=1,
                           stride=1,
                           padding=0,
                           has_bias=False,
                           pad_mode="same")
     self.conv1 = nn.Conv2d(input_channel,
                            output_channel,
                            kernel_size=1,
                            stride=1,
                            padding=0,
                            has_bias=False,
                            pad_mode="same")
     self.bn = nn.BatchNorm2d(output_channel, momentum=0.1, eps=0.0001)
     self.add = P.Add()
     self.relu = P.ReLU()
     self.mean = P.ReduceMean(keep_dims=True)
     self.reshape = P.Reshape()
     self.dense = nn.Dense(output_channel, num_class)
Example 18
def Norm(channels,
         type='default',
         affine=None,
         track_running_stats=None,
         zero_init=False):
    if type in ['default', 'def']:
        type = 'bn'
    if type == 'bn':
        cfg = DEFAULTS['bn']
        if affine is None:
            affine = cfg['affine']
        if track_running_stats is None:
            track_running_stats = cfg['track_running_stats']
        if track_running_stats:
            use_batch_statistics = None
        else:
            use_batch_statistics = True
        gamma_init = 'zeros' if zero_init else 'ones'
        if cfg['sync']:
            bn = nn.GlobalBatchNorm(
                num_features=channels,
                momentum=cfg['momentum'],
                eps=cfg['eps'],
                affine=affine,
                gamma_init=gamma_init,
                use_batch_statistics=use_batch_statistics,
                device_num_each_group=cfg['device_num_each_group'])
        else:
            bn = nn.BatchNorm2d(num_features=channels,
                                momentum=cfg['momentum'],
                                eps=cfg['eps'],
                                affine=affine,
                                gamma_init=gamma_init,
                                use_batch_statistics=use_batch_statistics)
        return bn
    elif type == 'none':
        return nn.Identity()
    else:
        raise ValueError("Unsupported normalization type: %s" % type)
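Usage sketch, assuming DEFAULTS['bn'] supplies the momentum, eps, affine, and sync settings referenced above:

norm = Norm(64)                       # plain nn.BatchNorm2d built from the defaults
norm_zero = Norm(64, zero_init=True)  # gamma initialised to zeros (residual-branch trick)
passthrough = Norm(64, type='none')   # nn.Identity()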
Example 19
def test_bn2d():
    """ut of nn.BatchNorm2d"""
    gamma = Tensor(np.random.randn(64).astype(np.float32)*0.01)
    beta = Tensor(np.random.randn(64).astype(np.float32)*0.01)
    moving_mean = Tensor(np.random.randn(64).astype(np.float32)*0.01)
    moving_var = Tensor(np.random.randn(64).astype(np.float32)*0.01)

    bn = nn.BatchNorm2d(num_features=64,
                        eps=1e-5,
                        momentum=0.1,
                        gamma_init=gamma,
                        beta_init=beta,
                        moving_mean_init=moving_mean,
                        moving_var_init=moving_var)

    # 64-channel input matching num_features above
    input_data = Tensor(np.random.randint(0, 10, [1, 64, 56, 56]).astype(np.float32))
    # call construct directly to exercise the inference path
    output = bn.construct(input_data)

    output_np = output.asnumpy()
    assert isinstance(output_np[0][0][0][0], (np.float32, np.float64))
Example 20
    def __init__(self,
                 conv,
                 n_feats,
                 kernel_size,
                 bias=True,
                 bn=False,
                 act=nn.ReLU(),
                 res_scale=1):

        super(ResBlock, self).__init__()
        m = []
        for i in range(2):
            m.append(conv(n_feats, n_feats, kernel_size, has_bias=bias))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if i == 0:
                m.append(act)

        self.body = nn.SequentialCell(*m)
        self.res_scale = res_scale

        self.mul = P.Mul()
Example 21
def _make_layer(base, batch_norm):
    """Make stage network of VGG."""
    layers = []
    in_channels = 3
    for v in base:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            weight_shape = (v, in_channels, 3, 3)
            weight = initializer('XavierUniform', shape=weight_shape, dtype=mstype.float32)
            conv2d = nn.Conv2d(in_channels=in_channels,
                               out_channels=v,
                               kernel_size=3,
                               padding=0,
                               pad_mode='same',
                               weight_init=weight)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.SequentialCell(layers)
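Usage sketch with a hypothetical truncated VGG-style config, where 'M' denotes a max-pooling stage:

cfg = [64, 'M', 128, 'M', 256, 256, 'M']
features = _make_layer(cfg, batch_norm=True)  # conv-bn-relu stacks separated by pooling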
Example 22
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size=3,
              stride=1,
              groups=1):
     super(ConvBNReLU, self).__init__()
     padding = (kernel_size - 1) // 2
     if groups == 1:
         conv = nn.Conv2d(in_planes,
                          out_planes,
                          kernel_size,
                          stride,
                          pad_mode='pad',
                          padding=padding)
     else:
         conv = DepthwiseConv(in_planes,
                              kernel_size,
                              stride,
                              pad_mode='pad',
                              pad=padding)
     layers = [conv, nn.BatchNorm2d(out_planes), nn.ReLU6()]
     self.features = nn.SequentialCell(layers)
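Two usage sketches matching the two branches above (illustrative values; DepthwiseConv is assumed to be the project's depthwise Cell):

pw = ConvBNReLU(32, 64, kernel_size=1)        # groups == 1: plain nn.Conv2d branch
dw = ConvBNReLU(64, 64, stride=2, groups=64)  # groups > 1: DepthwiseConv branch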
Example 23
 def __init__(self,
              in_planes,
              planes,
              kernel_size,
              stride=1,
              pad_mode='pad',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='xavier_uniform',
              has_bn=True,
              use_batch_statistics=True,
              activation='relu',
              **kwargs):
     super(ConvModule2d, self).__init__()
     self.conv = nn.Conv2d(in_planes, planes, kernel_size, stride, pad_mode,
                           padding, dilation, group, has_bias, weight_init,
                           **kwargs)
     self.bn = nn.BatchNorm2d(
         planes,
         use_batch_statistics=use_batch_statistics) if has_bn else None
     self.act = nn.get_activation(activation) if activation else None
Example 24
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.SequentialCell(
                OrderedDict([
                    ('0',
                     nn.Conv2d(self.inplanes,
                               planes * block.expansion,
                               kernel_size=1,
                               stride=stride,
                               has_bias=False)),
                    ('1',
                     nn.BatchNorm2d(planes * block.expansion,
                                    momentum=BN_MOMENTUM)),
                ]))

        layers = OrderedDict()
        layers['0'] = block(self.inplanes, planes, stride, downsample)
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers['{}'.format(i)] = block(self.inplanes, planes)

        return nn.SequentialCell(layers)
Example 25
 def __init__(self,
              in_channels,
              out_channels,
              atrous_rate=1,
              use_batch_statistics=True):
     super(ASPPConv, self).__init__()
     if atrous_rate == 1:
         conv = nn.Conv2d(in_channels,
                          out_channels,
                          kernel_size=1,
                          has_bias=False,
                          weight_init='xavier_uniform')
     else:
         conv = nn.Conv2d(in_channels,
                          out_channels,
                          kernel_size=3,
                          pad_mode='pad',
                          padding=atrous_rate,
                          dilation=atrous_rate,
                          weight_init='xavier_uniform')
     bn = nn.BatchNorm2d(out_channels,
                         use_batch_statistics=use_batch_statistics)
     relu = nn.ReLU()
     self.aspp_conv = nn.SequentialCell([conv, bn, relu])
Example 26
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=1,
              stride=1,
              pad_mode="same",
              padding=0,
              weight_init="XavierUniform",
              with_relu=True,
              with_bn=True):
     super(Conv2dBlock, self).__init__()
     self.with_bn = with_bn
     self.with_relu = with_relu
     self.conv = nn.Conv2d(in_channels=in_channels,
                           out_channels=out_channels,
                           kernel_size=kernel_size,
                           stride=stride,
                           pad_mode=pad_mode,
                           padding=padding,
                           weight_init=weight_init)
      if with_bn:
          self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
      if with_relu:
          self.relu = nn.ReLU()
Example 27
def _fused_bn(channels, momentum=0.9):
    return nn.BatchNorm2d(channels, momentum=momentum)
Example 28
def _fused_bn(channels, momentum=0.9):
    """Get a fused batchnorm"""
    return nn.BatchNorm2d(channels, momentum=momentum)
Example 29
def _bn_last(channel):
    return nn.BatchNorm2d(channel)
Example 30
def _bn(channel):
    return nn.BatchNorm2d(channel)
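To close, an end-to-end sketch of the thin wrappers above (assumes MindSpore; shapes are illustrative):

import numpy as np
import mindspore as ms

bn = _bn(8)
x = ms.Tensor(np.random.randn(2, 8, 4, 4).astype(np.float32))
bn.set_train(False)  # evaluation mode: normalise with moving_mean / moving_var
print(bn(x).shape)   # (2, 8, 4, 4)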