Example #1
File: erdb_esr.py  Project: ylfzr/vega
def channel_shuffle(x, groups):
    """Shuffle the channel of features.

    :param x: feature maps
    :type x: tensor
    :param groups: group number of channels
    :type groups: int
    :return: shuffled feature map
    :rtype: tensor
    """
    batchsize, num_channels, height, width = ops.get_shape(x)
    channels_per_group = num_channels // groups
    x = ops.View([batchsize, groups, channels_per_group, height, width])(x)
    x = ops.Transpose(1, 2)(x)
    x = ops.View([batchsize, num_channels, height, width])(x)
    return x
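For reference, the same view / transpose / view shuffle can be written directly in plain PyTorch. This is a minimal sketch assuming a PyTorch backend; the vega ops.View and ops.Transpose calls above presumably wrap equivalent tensor operations.

import torch

def channel_shuffle_torch(x: torch.Tensor, groups: int) -> torch.Tensor:
    # Split the channel dim into (groups, channels_per_group), swap those two
    # dimensions, then flatten back to the original (b, c, h, w) shape.
    b, c, h, w = x.shape
    x = x.view(b, groups, c // groups, h, w)
    x = x.transpose(1, 2).contiguous()
    return x.view(b, c, h, w)

x = torch.randn(2, 8, 4, 4)
assert channel_shuffle_torch(x, groups=2).shape == x.shape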
Example #2
File: mobilenetv3.py  Project: ylfzr/vega
    def __call__(self, x):
        """Forward compute of MobileNetV3 for classification."""
        x = self.features(x)
        x = self.avgpool(x)
        x = ops.View((x.shape[0], -1))(x)
        x = self.classifier(x)
        return x
Example #3
    def __init__(self,
                 cfgs,
                 mode='small',
                 input_channel=3,
                 feat_channels=16,
                 special_stride=1,
                 num_classes=10,
                 width_mult=1.,
                 block=InvertedResidualSE,
                 momentum=0.1,
                 is_prune_mode=False,
                 **kwargs):
        """Init MobileNetV3.

        :param cfgs: configuration list for MobileNetV3 blocks
        :type cfgs: list
        :param special_stride: stride of the first InvertedResidualSE block (1 for CIFAR-10, 2 for ImageNet)
        :type special_stride: int
        """
        super(MobileNetV3, self).__init__()
        self.cfgs = cfgs

        # building first layer
        if not is_prune_mode:
            feat_channels = _make_divisible(feat_channels * width_mult, 8)
        else:
            feat_channels = int(feat_channels * width_mult)
        layers = [
            ConvBnAct(input_channel,
                      feat_channels,
                      kernel_size=3,
                      momentum=momentum,
                      stride=special_stride,
                      padding=1,
                      activation='hswish')
        ]

        # building blocks
        # kernel_size, expand_ratio, output_channels, use_se, use_hs, stride
        for k, t, c, use_se, use_hs, s in self.cfgs:
            output_channel = (_make_divisible(c * width_mult, 8)
                              if not is_prune_mode else int(c * width_mult))
            hidden_dim = _make_divisible(t, 8) if not is_prune_mode else t
            layers.append(
                block(feat_channels, hidden_dim, output_channel, k, s, use_se,
                      use_hs, momentum))
            feat_channels = output_channel
        self.features = Sequential(*layers)

        # building last linear layer
        self.avgpool = ops.AdaptiveAvgPool2d((1, 1))
        chn = 1280 if mode == 'large' else 1024
        self.classifier = Sequential(ops.View(),
                                     ops.Linear(feat_channels, chn),
                                     ops.Hswish(), ops.Dropout(0.2),
                                     ops.Linear(chn, num_classes))
        self._initialize_weights()
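The _make_divisible helper used above is not part of this snippet. In MobileNet-family code it conventionally rounds a channel count to the nearest multiple of a divisor without falling below 90% of the original value; the sketch below shows that common definition and is an assumption, not necessarily vega's exact implementation.

def _make_divisible(v, divisor, min_value=None):
    """Round v to the nearest multiple of divisor, staying within 10% of v."""
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v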
Example #4
    def __init__(self, base_channel, num_classes):
        """Create layers.

        :param base_channel: number of input channels to the head
        :type base_channel: int
        :param num_classes: number of classes
        :type num_classes: int
        """
        super(LinearClassificationHead, self).__init__()
        self.avgpool = ops.AdaptiveAvgPool2d(output_size=(1, 1))
        self.view = ops.View()
        self.linear = ops.Linear(in_features=base_channel,
                                 out_features=num_classes)
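Only the constructor is shown here; a hypothetical __call__ consistent with these three layers could look like the sketch below. It assumes ops.View() without arguments flattens to (batch, features), as the classifier heads in examples #9 and #10 also rely on.

    def __call__(self, x):
        """Hypothetical forward: pool, flatten, then project to class logits."""
        x = self.avgpool(x)    # (b, c, h, w) -> (b, c, 1, 1)
        x = self.view(x)       # (b, c, 1, 1) -> (b, c)
        return self.linear(x)  # (b, c) -> (b, num_classes)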
Example #5
    def __init__(self, **desc):
        """Initialize."""
        super(SimpleCnn, self).__init__()
        desc = Config(**desc)
        self.num_class = desc.num_class
        self.fp16 = desc.get('fp16', False)
        self.channels = desc.channels
        self.conv1 = ops.Conv2d(3, 32, padding=1, kernel_size=3)
        self.pool1 = ops.MaxPool2d(2, stride=2)
        self.blocks = self._blocks(self.channels, desc.blocks)
        self.pool2 = ops.MaxPool2d(2, stride=2)
        self.conv2 = ops.Conv2d(self.channels, 64, padding=1, kernel_size=3)
        self.global_conv = ops.Conv2d(64, 64, kernel_size=8)
        self.view = ops.View()
        self.fc = ops.Linear(64, self.num_class)
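The matching __call__ is not included in this example. A hypothetical forward order based purely on the declaration order above might be the following; treating self.blocks as a directly callable Sequential is an assumption.

    def __call__(self, x):
        """Hypothetical forward order for the layers declared above."""
        x = self.pool1(self.conv1(x))
        x = self.blocks(x)
        x = self.pool2(x)
        x = self.conv2(x)
        x = self.global_conv(x)  # the 8x8 kernel collapses the spatial dims
        x = self.view(x)
        return self.fc(x)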
Example #6
    def __init__(self, encoding, n_class=1000):
        super(DNet, self).__init__()
        op_names = ["conv3", "conv1", "conv3_grp2", "conv3_grp4", "conv3_base1", "conv3_base32", "conv3_sep"]
        block_str, num_channel, macro_str = encoding.split('_')
        curr_channel, index = int(num_channel), 0
        _big_model = "*" in block_str
        if _big_model:
            block_encoding_list = block_str.split('*')
        # stem
        self.layers = Sequential(
            create_op('conv3', 3, curr_channel // 2, stride=2),
            ops.Relu(),
            create_op('conv3', curr_channel // 2, curr_channel // 2),
            ops.Relu(),
            create_op('conv3', curr_channel // 2, curr_channel, stride=2),
            ops.Relu()
        )

        # body
        if not _big_model:
            while index < len(macro_str):
                stride = 1
                if macro_str[index] == '-':
                    stride = 2
                    index += 1

                channel_increase = int(macro_str[index])
                block = EncodedBlock(block_str, curr_channel, op_names, stride, channel_increase)
                self.layers.append(block)
                curr_channel *= channel_increase
                index += 1
        else:
            block_encoding_index = 0
            while index < len(macro_str):
                stride = 1
                if macro_str[index] == '-':
                    stride = 2
                    index += 1
                    block_encoding_index += 1
                channel_increase = int(macro_str[index])
                block_encoding = block_encoding_list[block_encoding_index]
                block = EncodedBlock(block_encoding, curr_channel, op_names, stride, channel_increase)
                self.layers.append(block)
                curr_channel *= channel_increase
                index += 1
        self.layers.append(ops.AdaptiveAvgPool2d((1, 1)))
        self.view = ops.View()
        self.fc = ops.Linear(in_features=curr_channel, out_features=n_class)
Example #7
    def __init__(self, C, num_classes, input_size):
        """Init AuxiliaryHead."""
        super(AuxiliaryHead, self).__init__()
        stride = input_size - 5
        self.relu1 = ops.Relu(inplace=True)
        self.avgpool1 = ops.AvgPool2d(
            5, stride=stride, padding=0, count_include_pad=False)
        self.conv1 = ops.Conv2d(C, 128, 1, bias=False)
        self.batchnorm1 = ops.BatchNorm2d(128)
        self.relu2 = ops.Relu(inplace=True)
        self.conv2 = ops.Conv2d(128, 768, 2, bias=False)
        self.batchnorm2 = ops.BatchNorm2d(768)
        self.relu3 = ops.Relu(inplace=True)
        self.view = ops.View()
        self.classifier = ops.Linear(768, num_classes)
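The forward pass is not shown; in DARTS-style auxiliary heads the layers are typically applied in declaration order, roughly as in this hypothetical sketch.

    def __call__(self, x):
        """Hypothetical forward: 5x5 average pool, two conv-bn-relu stages, classify."""
        x = self.avgpool1(self.relu1(x))
        x = self.relu2(self.batchnorm1(self.conv1(x)))
        x = self.relu3(self.batchnorm2(self.conv2(x)))
        x = self.view(x)
        return self.classifier(x)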
Example #8
File: mobilenetv3.py  Project: ylfzr/vega
    def __call__(self, x):
        """Forward compute of SELayer."""
        b, c, _, _ = x.shape
        y = ops.View((b, c))(self.avg_pool(x))
        y = ops.View((b, c, 1, 1))(self.fc(y))
        return x * y
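Here ops.View does the squeeze-and-excite reshape round trip: the pooled (b, c, 1, 1) map is flattened to (b, c) for the fully connected gate, then reshaped back to (b, c, 1, 1) so the multiplication broadcasts over every spatial position of each channel. A plain-PyTorch sketch of the same pattern follows; the reduction ratio and gate layers are illustrative assumptions, not vega's exact code.

import torch
import torch.nn as nn

class SqueezeExcite(nn.Module):
    def __init__(self, channels, reduction=4):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        y = self.avg_pool(x).view(b, c)    # squeeze: (b, c, 1, 1) -> (b, c)
        y = self.fc(y).view(b, c, 1, 1)    # excite: back to a broadcastable shape
        return x * y                       # rescale each channel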
Example #9
    def __init__(self, encoding, n_class=1000):
        super().__init__()
        self.backbone = DNetBackbone(encoding)
        self.view = ops.View()
        self.fc = ops.Linear(out_features=n_class)
Example #10
    def __init__(self, encoding, n_class=1000):
        super().__init__()
        self.backbone = DNetBackbone(encoding)
        self.view = ops.View()
        out_plane = self.backbone.out_channels
        self.fc = ops.Linear(in_features=out_plane, out_features=n_class)