Example #1
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size=3,
              stride=1,
              groups=1):
     super(ConvBNReLU, self).__init__()
     padding = 0
     conv = nn.Conv2d(in_planes,
                      out_planes,
                      kernel_size,
                      stride,
                      pad_mode='same',
                      padding=padding,
                      group=groups)
     layers = [conv, _bn(out_planes), nn.ReLU()]
     self.features = nn.SequentialCell(layers)
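A minimal usage sketch for a block like this (assuming `_bn` wraps `nn.BatchNorm2d` and that the full class runs `self.features` in its `construct`; the shapes are illustrative):

import numpy as np
from mindspore import Tensor

block = ConvBNReLU(in_planes=3, out_planes=16)   # 3x3 kernel, stride 1
x = Tensor(np.ones((1, 3, 32, 32), np.float32))  # NCHW input
y = block.features(x)  # pad_mode='same' keeps the 32x32 spatial size
print(y.shape)         # (1, 16, 32, 32)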
Example #2
def ms_conv2x2(in_channels,
               out_channels,
               stride=1,
               padding=0,
               pad_mode='same',
               has_bias=False):
    """Get a conv2d layer with 2x2 kernel size."""
    init_value = weight_variable((out_channels, in_channels, 2, 2))
    return nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size=2,
                     stride=stride,
                     padding=padding,
                     pad_mode=pad_mode,
                     weight_init=init_value,
                     has_bias=has_bias)
Example #3
def _conv1x1(in_channel, out_channel, stride=1, use_se=False):
    if use_se:
        weight = _conv_variance_scaling_initializer(in_channel,
                                                    out_channel,
                                                    kernel_size=1)
    else:
        weight_shape = (out_channel, in_channel, 1, 1)
        weight = Tensor(
            kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
    return nn.Conv2d(in_channel,
                     out_channel,
                     kernel_size=1,
                     stride=stride,
                     padding=0,
                     pad_mode='same',
                     weight_init=weight)
Example #4
 def __init__(self,
              cin,
              cout,
              kernel_size,
              stride=1,
              pad_mode="valid",
              padding=0,
              dilation=1,
              group=1,
              has_bias=True,
              weight_init='normal',
              bias_init='zeros'):
     super(Net, self).__init__()
     self.conv = nn.Conv2d(cin, cout, kernel_size, stride, pad_mode,
                           padding, dilation, group, has_bias, weight_init,
                           bias_init)
Example #5
def conv_block(in_channels, out_channels, kernel_size, stride, dilation=1):
    """Get a conv2d batchnorm and relu layer"""
    pad_mode = 'same'
    padding = 0

    return nn.SequentialCell([
        nn.Conv2d(in_channels,
                  out_channels,
                  kernel_size=kernel_size,
                  stride=stride,
                  padding=padding,
                  dilation=dilation,
                  pad_mode=pad_mode),
        nn.BatchNorm2d(out_channels, momentum=0.1),
        nn.ReLU()
    ])
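With pad_mode='same', MindSpore pads so the output spatial size is ceil(input_size / stride), regardless of kernel size. A quick shape check (a sketch, assuming MindSpore is installed):

import numpy as np
from mindspore import Tensor

block = conv_block(3, 8, kernel_size=3, stride=2)
y = block(Tensor(np.ones((1, 3, 32, 32), np.float32)))
print(y.shape)  # (1, 8, 16, 16): ceil(32 / 2) = 16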
Example #6
    def __init__(self, in_str):
        a, b, c, d, e, f, g, h = in_str.strip().split()
        a = int(a)
        b = int(b)
        c = int(c)
        d = int(d)
        e = int(e)
        f = int(f)
        g = int(g)
        h = int(h)

        super(Net, self).__init__()
        self.conv = nn.Conv2d(a, b, c, pad_mode="valid")
        self.bn = nn.BatchNorm2d(d)
        self.relu = nn.ReLU()
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(e * f * g, h)
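A sketch of the expected input string (eight whitespace-separated integers: conv in/out channels, kernel size, BatchNorm channels, the three flattened dimensions, and the Dense output width; the values below are illustrative):

net = Net("3 16 3 16 16 5 5 10")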
Example #7
def conv(in_channels,
         out_channels,
         kernel_size,
         stride=1,
         padding=0,
         pad_mode='pad',
         has_bias=True):
    """weight initial for conv layer"""
    weight = weight_variable()
    return nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=padding,
                     weight_init=weight,
                     has_bias=has_bias,
                     pad_mode=pad_mode)
Example #8
    def __init__(self, in_planes, out_planes, kernel_size, stride, padding,
                 groups, norm_layer):
        weight_shape = (out_planes, in_planes, kernel_size, kernel_size)
        kaiming_weight, _ = Init_KaimingUniform(weight_shape, a=math.sqrt(5))

        super(ConvBN, self).__init__(
            nn.Conv2d(in_planes,
                      out_planes,
                      kernel_size,
                      stride,
                      pad_mode='pad',
                      padding=padding,
                      group=groups,
                      has_bias=False,
                      weight_init=kaiming_weight),
            norm_layer(out_planes),
        )
Example #9
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=1,
              stride=1,
              padding=0,
              pad_mode="same"):
     super(Conv2dBlock, self).__init__()
     self.conv = nn.Conv2d(in_channels,
                           out_channels,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=padding,
                           pad_mode=pad_mode,
                           weight_init=weight_variable())
     self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
     self.relu = nn.ReLU()
Example #10
 def __init__(self, upsample_scales, mode="nearest",
              freq_axis_kernel_size=1, cin_pad=0, cin_channels=80):
     super(UpsampleNetwork, self).__init__()
     self.expand_op = P.ExpandDims()
     self.squeeze_op = P.Squeeze(1)
     up_layers = []
     total_scale = np.prod(upsample_scales)
     self.indent = cin_pad * total_scale
     for scale in upsample_scales:
         freq_axis_padding = (freq_axis_kernel_size - 1) // 2
         k_size = (freq_axis_kernel_size, scale * 2 + 1)
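          # MindSpore's 4-tuple `padding` is (top, bottom, left, right):
          # symmetric padding on the frequency axis, `scale` on each side of the time axis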
         padding = (freq_axis_padding, freq_axis_padding, scale, scale)
         stretch = Resize(scale, 1, mode)
         conv = nn.Conv2d(1, 1, kernel_size=k_size, has_bias=False, pad_mode='pad', padding=padding)
         up_layers.append(stretch)
         up_layers.append(conv)
     self.up_layers = nn.CellList(up_layers)
Example #11
 def __init__(self,
              cin,
              cout,
              kernel_size,
              stride=1,
              pad_mode='pad',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros'):
     super(Net, self).__init__()
     self.conv = nn.Conv2d(cin, cout, kernel_size, stride, pad_mode,
                           padding, dilation, group, has_bias, weight_init,
                           bias_init)
Example #12
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bn=False,
                 momentum=0.997,
                 eps=1e-5,
                 activation=None,
                 alpha=0.2,
                 after_fake=True):
        super(Conv2dBnAct, self).__init__()

        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              pad_mode=pad_mode,
                              padding=padding,
                              dilation=dilation,
                              group=group,
                              has_bias=has_bias,
                              weight_init=weight_init,
                              bias_init=bias_init)
        self.has_bn = Validator.check_bool(has_bn, "has_bn")
        self.has_act = activation is not None
        self.after_fake = Validator.check_bool(after_fake, "after_fake")
        if has_bn:
            self.batchnorm = BatchNorm2d(out_channels, eps, momentum)
        if activation == "leakyrelu":
            self.activation = LeakyReLU(alpha)
        else:
            self.activation = get_activation(activation) if isinstance(
                activation, str) else activation
            if activation is not None and not isinstance(
                    self.activation, (Cell, Primitive)):
                raise TypeError(
                    "The activation must be str or Cell or Primitive,"
                    " but got {}.".format(activation))
Example #13
def conv3x3(in_channels,
            out_channels,
            stride=1,
            groups=1,
            dilation=1,
            pad_mode="pad",
            padding=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size=3,
                     stride=stride,
                     pad_mode=pad_mode,
                     group=groups,
                     has_bias=False,
                     dilation=dilation,
                     padding=padding)
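With pad_mode='pad', the output size follows the usual formula floor((in + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1, so the default padding=1 makes a 3x3, stride-1 convolution size-preserving. A quick check (a sketch, assuming MindSpore is installed):

import numpy as np
from mindspore import Tensor

layer = conv3x3(3, 16)
y = layer(Tensor(np.ones((1, 3, 28, 28), np.float32)))
print(y.shape)  # (1, 16, 28, 28): (28 + 2 - 2 - 1) // 1 + 1 = 28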
Example #14
def _conv(in_channels,
          out_channels,
          kernel_size=3,
          stride=1,
          padding=0,
          pad_mode='pad'):
    """Conv2D wrapper."""
    shape = (out_channels, in_channels, kernel_size, kernel_size)
    weights = weight_init_ones(shape)
    return nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size=kernel_size,
                     stride=stride,
                     padding=padding,
                     pad_mode=pad_mode,
                     weight_init=weights,
                     has_bias=False)
Example #15
 def __init__(self, in_channels, out_channels, kernel_size, stride,
              padding):
     super(Conv2dBatch, self).__init__()
     # Parameters
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.kernel_size = kernel_size
     self.stride = stride
     self.padding = padding
     self.conv = nn.Conv2d(self.in_channels,
                           self.out_channels,
                           self.kernel_size,
                           self.stride,
                           has_bias=False,
                           pad_mode='pad',
                           padding=self.padding)
     self.bn = nn.BatchNorm2d(self.out_channels, momentum=0.9, eps=1e-5)
Example #16
def conv_bn_relu(in_channel,
                 out_channel,
                 kernel_size,
                 stride,
                 depthwise,
                 activation='relu6'):
    output = []
    output.append(
        nn.Conv2d(in_channel,
                  out_channel,
                  kernel_size,
                  stride,
                  pad_mode="same",
                  group=1 if not depthwise else in_channel))
    output.append(nn.BatchNorm2d(out_channel))
    if activation:
        output.append(nn.get_activation(activation))
    return nn.SequentialCell(output)
Example #17
 def __init__(self, cin, cout):
     super(Net, self).__init__()
     self.maxpool = nn.MaxPool2d(kernel_size=3,
                                 stride=2,
                                 pad_mode="same")
     self.conv = nn.Conv2d(cin,
                           cin,
                           kernel_size=1,
                           stride=1,
                           padding=0,
                           has_bias=False,
                           pad_mode="same")
     self.bn = nn.BatchNorm2d(cin, momentum=0.1, eps=0.0001)
     self.add = P.TensorAdd()
     self.relu = P.ReLU()
     self.mean = P.ReduceMean(keep_dims=True)
     self.reshape = P.Reshape()
     self.dense = nn.Dense(cin, cout)
Example #18
 def __init__(self, num_classes=10):
     super(ConvNet, self).__init__()
     self.conv1 = nn.Conv2d(3,
                            ConvNet.output_ch,
                            kernel_size=7,
                            stride=2,
                            pad_mode='pad',
                            padding=3)
     self.bn1 = nn.BatchNorm2d(ConvNet.output_ch)
     self.relu = nn.ReLU()
     self.maxpool = nn.MaxPool2d(kernel_size=3,
                                 stride=2,
                                 pad_mode='pad',
                                 padding=1)
     self.flatten = nn.Flatten()
     self.fc = nn.Dense(
         int(ConvNet.image_h * ConvNet.image_w * ConvNet.output_ch /
             (4 * 4)), num_classes)
Example #19
 def __init__(self,
              in_channel,
              out_channel,
              kernel_size,
              stride=1,
              pad_mode='same',
              padding=0):
     super(BasicConv2d, self).__init__()
     self.conv = nn.Conv2d(in_channel,
                           out_channel,
                           kernel_size=kernel_size,
                           stride=stride,
                           pad_mode=pad_mode,
                           padding=padding,
                           weight_init=XavierUniform(),
                           has_bias=True)
     self.bn = nn.BatchNorm2d(out_channel, eps=0.001, momentum=0.9997)
     self.relu = nn.ReLU()
Example #20
 def __init__(self,
              in_channel,
              out_channel,
              kernel_size=3,
              stride=1,
              use_bn=False,
              pad_mode='same'):
     super(Conv, self).__init__()
     self.conv = nn.Conv2d(in_channel,
                           out_channel,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=0,
                           pad_mode=pad_mode,
                           weight_init=TruncatedNormal(0.02))
     self.bn = _bn(out_channel)
     self.Relu = nn.ReLU()
     self.use_bn = use_bn
Example #21
    def __init__(self, inchannels=512, num_anchors=3):
        super(LandmarkHead, self).__init__()

        weight_shape = (num_anchors * 10, inchannels, 1, 1)
        kaiming_weight, kaiming_bias = Init_KaimingUniform(weight_shape,
                                                           a=math.sqrt(5),
                                                           has_bias=True)
        self.conv1x1 = nn.Conv2d(inchannels,
                                 num_anchors * 10,
                                 kernel_size=(1, 1),
                                 stride=1,
                                 padding=0,
                                 has_bias=True,
                                 weight_init=kaiming_weight,
                                 bias_init=kaiming_bias)

        self.permute = P.Transpose()
        self.reshape = P.Reshape()
Example #22
 def __init__(self,
              cin,
              cout,
              kernel_size,
              stride=1,
              pad_mode='pad',
              padding=0,
              dilation=1,
              group=1,
              has_bias=False,
              weight_init='normal',
              bias_init='zeros',
              strategy=None):
     super(NetConv, self).__init__()
     self.conv = nn.Conv2d(cin, cout, kernel_size, stride, pad_mode,
                           padding, dilation, group, has_bias, weight_init,
                           bias_init)
     self.conv.conv2d.shard(strategy)
Example #23
 def __init__(self,
              weight,
              in_channel,
              out_channel,
              kernel_size,
              stride=1,
              padding=0,
              has_bias=False,
              bias=None):
     super(Net, self).__init__()
     self.conv = nn.Conv2d(in_channels=in_channel,
                           out_channels=out_channel,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=padding,
                           has_bias=has_bias,
                           weight_init=weight,
                           bias_init=bias)
Example #24
    def __init__(self, in_channels, out_chls, out_channels):
        super(YoloBlock, self).__init__()
        out_chls_2 = out_chls * 2

        self.conv0 = _conv_bn_relu(in_channels, out_chls, ksize=1)
        self.conv1 = _conv_bn_relu(out_chls, out_chls_2, ksize=3)

        self.conv2 = _conv_bn_relu(out_chls_2, out_chls, ksize=1)
        self.conv3 = _conv_bn_relu(out_chls, out_chls_2, ksize=3)

        self.conv4 = _conv_bn_relu(out_chls_2, out_chls, ksize=1)
        self.conv5 = _conv_bn_relu(out_chls, out_chls_2, ksize=3)

        self.conv6 = nn.Conv2d(out_chls_2,
                               out_channels,
                               kernel_size=1,
                               stride=1,
                               has_bias=True)
Example #25
def conv_bn_relu(in_channel,
                 out_channel,
                 use_bn=True,
                 kernel_size=3,
                 stride=1,
                 pad_mode="same",
                 activation='relu'):
    output = []
    output.append(
        nn.Conv2d(in_channel,
                  out_channel,
                  kernel_size,
                  stride,
                  pad_mode=pad_mode))
    if use_bn:
        output.append(nn.BatchNorm2d(out_channel))
    if activation:
        output.append(nn.get_activation(activation))
    return nn.SequentialCell(output)
Example #26
def conv4x4(in_channels,
            out_channels,
            stride=1,
            groups=1,
            dilation=1,
            pad_mode="pad",
            padding=1,
            bias=True):
    """4x4 convolution with padding"""

    return nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size=4,
                     stride=stride,
                     pad_mode=pad_mode,
                     group=groups,
                     has_bias=bias,
                     dilation=dilation,
                     padding=padding)
Example #27
def _make_layer(base, batch_norm):
    """Make stage network of VGG."""
    layers = []
    in_channels = 3
    for v in base:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels=in_channels,
                               out_channels=v,
                               kernel_size=3,
                               padding=1,
                               pad_mode='pad')
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.SequentialCell(layers)
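A hedged usage sketch (the `base` list below is illustrative, following the usual VGG convention where integers are channel counts and 'M' marks a max-pool stage):

features = _make_layer([64, 64, 'M', 128, 128, 'M'], batch_norm=True)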
Example #28
def _last_conv2d(in_channel,
                 out_channel,
                 kernel_size=3,
                 stride=1,
                 pad_mode='same',
                 pad=0):
    in_channels = in_channel
    out_channels = in_channel
    depthwise_conv = nn.Conv2d(in_channels,
                               out_channels,
                               kernel_size,
                               stride,
                               pad_mode=pad_mode,
                               padding=pad,
                               group=in_channels)
    conv = _conv2d(in_channel, out_channel, kernel_size=1)
    return nn.SequentialCell(
        [depthwise_conv, _bn(in_channel),
         nn.ReLU6(), conv])
Example #29
def _last_conv2d(in_channel,
                 out_channel,
                 kernel_size=3,
                 stride=1,
                 pad_mode='same',
                 pad=0):
    depthwise_conv = nn.Conv2d(in_channel,
                               in_channel,
                               kernel_size,
                               stride,
                               pad_mode=pad_mode,
                               padding=pad,
                               has_bias=False,
                               group=in_channel,
                               weight_init='ones')
    conv = _conv2d(in_channel, out_channel, kernel_size=1)
    return nn.SequentialCell(
        [depthwise_conv, _bn(in_channel),
         nn.ReLU6(), conv])
Example #30
    def __init__(self, block, num_classes=100):
        super(ResNet50, self).__init__()

        weight_conv = Tensor(np.ones([64, 3, 7, 7]).astype(np.float32) * 0.01)
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               pad_mode='pad',
                               padding=3,
                               weight_init=weight_conv)
        self.bn1 = bn_with_initialize(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='pad', padding=1)

        self.layer1 = MakeLayer3(block,
                                 in_channels=64,
                                 out_channels=256,
                                 stride=1)
        self.layer2 = MakeLayer4(block,
                                 in_channels=256,
                                 out_channels=512,
                                 stride=2)
        self.layer3 = MakeLayer6(block,
                                 in_channels=512,
                                 out_channels=1024,
                                 stride=2)
        self.layer4 = MakeLayer3(block,
                                 in_channels=1024,
                                 out_channels=2048,
                                 stride=2)

        self.avgpool = nn.AvgPool2d(7, 1)
        self.flatten = nn.Flatten()

        weight_fc = Tensor(
            np.ones([num_classes, 512 * block.expansion]).astype(np.float32) *
            0.01)
        bias_fc = Tensor(np.ones([num_classes]).astype(np.float32) * 0.01)
        self.fc = nn.Dense(512 * block.expansion,
                           num_classes,
                           weight_init=weight_fc,
                           bias_init=bias_fc)