Code example #1
def __init__(self, **desc):
    """Initialize."""
    super(SimpleCnn, self).__init__()
    desc = Config(**desc)
    self.num_class = desc.num_class
    self.fp16 = desc.get('fp16', False)
    self.channels = desc.channels
    self.conv1 = ops.Conv2d(3, 32, padding=1, kernel_size=3)
    self.pool1 = ops.MaxPool2d(2, stride=2)
    self.blocks = self._blocks(self.channels, desc.blocks)
    self.pool2 = ops.MaxPool2d(2, stride=2)
    self.conv2 = ops.Conv2d(self.channels, 64, padding=1, kernel_size=3)
    # The 8x8 kernel with no padding collapses an 8x8 feature map
    # (e.g. a 32x32 input halved by each of the two pools) to 1x1.
    self.global_conv = ops.Conv2d(64, 64, kernel_size=8, padding=0)
    self.view = ops.View()
    self.fc = ops.Linear(64, self.num_class)
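
A hedged instantiation sketch for the constructor above: the keyword arguments mirror the fields read from desc (num_class, channels, blocks, optional fp16). The import path and the values are illustrative assumptions, not taken from the source.

from vega.networks.simple_cnn import SimpleCnn  # hypothetical import path

# Config(**desc) gives attribute-style access to these kwargs inside __init__.
model = SimpleCnn(num_class=10, channels=32, blocks=4)  # fp16 defaults to False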
Code example #2
File: spnet_backbone.py  Project: huawei-noah/vega
def _make_stem_layer(self):
    """Make stem layer."""
    self.conv1 = ops.Conv2d(
        3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
    self.norm1 = ops.BatchNorm2d(64)
    self.relu = ops.Relu(inplace=True)
    self.maxpool = ops.MaxPool2d(kernel_size=3, stride=2, padding=1)
Code example #3
File: parallel_fpn.py  Project: huawei-noah/vega
    def call(self, inputs):
        """Forward compute.

        :param inputs: input feature map
        :return: tuple of feature map
        """
        laterals = [conv(inputs[i]) for i, conv in enumerate(self.lateral_convs)]
        num_stage = len(laterals)
        for i in range(num_stage - 1, 0, -1):
            laterals[i - 1] += ops.InterpolateScale(size=laterals[i - 1].size()[2:], mode='nearest')(laterals[i])
        outs = [self.fpn_convs[i](laterals[i]) for i in self.code or range(num_stage)]
        outs.append(ops.MaxPool2d(1, stride=2)(outs[-1]))
        return {idx: out for idx, out in enumerate(outs)}
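
The final ops.MaxPool2d(1, stride=2) appends one extra, coarser pyramid level: with kernel size 1 the pool aggregates nothing and simply keeps every other position. A minimal shape check, assuming the default PyTorch backend where ops.MaxPool2d behaves like torch.nn.MaxPool2d:

import torch
import torch.nn as nn

x = torch.randn(1, 256, 32, 32)           # illustrative top-level FPN feature map
extra = nn.MaxPool2d(1, stride=2)(x)      # kernel 1, stride 2: pure subsampling
print(extra.shape)                        # torch.Size([1, 256, 16, 16])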
Code example #4
File: ms2vega.py  Project: huawei-noah/vega
def _transform_op(init_layer):
    """Transform the torch op to Vega op."""
    if isinstance(init_layer, nn.Conv2d):
        in_channels = init_layer.in_channels
        out_channels = init_layer.out_channels
        kernel_size = init_layer.kernel_size[0]
        stride = init_layer.stride
        padding = init_layer.padding
        # bias = init_layer.bias
        new_layer = ops.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               bias=False)
    elif isinstance(init_layer, nn.BatchNorm2d):
        num_features = init_layer.num_features
        new_layer = ops.BatchNorm2d(num_features=num_features)
    elif isinstance(init_layer, nn.ReLU):
        new_layer = ops.Relu()
    elif isinstance(init_layer, nn.MaxPool2d):
        kernel_size = init_layer.kernel_size
        stride = init_layer.stride
        # padding = init_layer.padding
        new_layer = ops.MaxPool2d(kernel_size=kernel_size, stride=stride)
    elif isinstance(init_layer, nn.AvgPool2d):
        kernel_size = init_layer.kernel_size
        stride = init_layer.stride
        padding = init_layer.padding
        new_layer = ops.AvgPool2d(kernel_size=kernel_size,
                                  stride=stride,
                                  padding=padding)
    elif isinstance(init_layer, P.ReduceMean):
        new_layer = ops.AdaptiveAvgPool2d()
    elif isinstance(init_layer, nn.Dense):
        in_features = init_layer.in_channels
        out_features = init_layer.out_channels
        # use_bias = init_layer.bias
        new_layer = ops.Linear(in_features=in_features,
                               out_features=out_features)
    elif isinstance(init_layer, nn.Dropout):
        prob = init_layer.p
        inplace = init_layer.inplace
        new_layer = ops.Dropout(prob=prob, inplace=inplace)
    elif isinstance(init_layer, nn.Flatten):
        new_layer = ops.View()
    else:
        raise ValueError("The op {} is not supported.".format(
            type(init_layer)))
    return new_layer
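
A hedged usage sketch for _transform_op: it is applied layer by layer when translating a MindSpore network into Vega modules. The aliases below (mindspore.nn as nn, mindspore.ops.operations as P) are assumptions matching how the snippet references nn and P; the layer values are illustrative.

import mindspore.nn as nn                  # assumed alias for the nn.* checks above
import mindspore.ops.operations as P       # assumed alias for P.ReduceMean
from vega.modules.operators import ops

ms_conv = nn.Conv2d(3, 64, kernel_size=3, stride=1, pad_mode='pad', padding=1)
vega_conv = _transform_op(ms_conv)         # returns an equivalent ops.Conv2d (bias is always disabled)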
Code example #5
    def __init__(self, init_plane):
        """Create InitialBlock layer.

        :param init_plane: input channel.
        :type init_plane: int
        """
        super(InitialBlock, self).__init__()
        self.conv = ops.Conv2d(in_channels=3,
                               out_channels=init_plane,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.batch = ops.BatchNorm2d(num_features=init_plane)
        self.relu = ops.Relu()
        self.maxpool2d = ops.MaxPool2d(kernel_size=3, stride=2, padding=1)
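
Examples #2 and #5 are both the standard ResNet-style stem: a 7x7 stride-2 convolution followed by a 3x3 stride-2 max pool, for a combined 4x spatial reduction. A quick shape sketch with plain torch layers (assuming a PyTorch backend; the 224x224 input is illustrative):

import torch
import torch.nn as nn

x = torch.randn(1, 3, 224, 224)
x = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)(x)   # -> (1, 64, 112, 112)
x = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)(x)                   # -> (1, 64, 56, 56)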
Code example #6
"""Import all torch operators."""
from vega.common import ClassType, ClassFactory
from vega.modules.operators import Seq, SeparatedConv, DilConv, GAPConv1x1, conv1X1, \
    conv3x3, conv5x5, conv7x7, FactorizedReduce
from vega.modules.operators import ops

OPS = {
    'none':
    lambda C, stride, affine, repeats=1: ops.Zero(stride),
    'avg_pool_3x3':
    lambda C, stride, affine, repeats=1: ops.AvgPool2d(
        3, stride=stride, padding=1, count_include_pad=False),
    'max_pool_3x3':
    lambda C, stride, affine, repeats=1: ops.MaxPool2d(
        3, stride=stride, padding=1),
    'global_average_pool':
    lambda C, stride, affine, repeats=1: Seq(GAPConv1x1(C, C)),
    'skip_connect':
    lambda C, stride, affine, repeats=1: ops.Identity()
    if stride == 1 else FactorizedReduce(C, C, affine=affine),
    'sep_conv_3x3':
    lambda C, stride, affine, repeats=1: SeparatedConv(
        C, C, 3, stride, 1, affine=affine),
    'sep_conv_5x5':
    lambda C, stride, affine, repeats=1: SeparatedConv(
        C, C, 5, stride, 2, affine=affine),
    'sep_conv_7x7':
    lambda C, stride, affine, repeats=1: SeparatedConv(
        C, C, 7, stride, 3, affine=affine),
    'dil_conv_3x3':
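
Each OPS entry is a factory keyed by op name; a search or decoding step looks one up and calls it with the channel count, stride, and affine flag. A short sketch using entries shown above (the values are illustrative; C and affine are simply ignored by the pooling ops):

pool = OPS['max_pool_3x3'](32, 2, True)    # -> ops.MaxPool2d(3, stride=2, padding=1)
skip = OPS['skip_connect'](32, 1, True)    # -> ops.Identity(), because stride == 1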
Code example #7
def _make_stem_layer(self):
    """Make stem layer."""
    self.conv1 = BN_Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
    self.maxpool = ops.MaxPool2d(kernel_size=3, stride=2, padding=1)