def __init__(self, in_channels, out_channels, stride=1, down_sample=None, use_se=False, platform="Ascend", **kwargs):
    """Build a basic residual block: two 3x3 conv-bn stages plus skip add.

    Args:
        in_channels: channel count of the input feature map.
        out_channels: channel count produced by both conv stages.
        stride: stride of the first 3x3 convolution (spatial down-sampling).
        down_sample: optional cell applied to the identity branch; stored
            only when provided.
        use_se: when True, attach a squeeze-and-excitation block.
        platform: accepted for signature compatibility — not used in this
            constructor (presumably consumed by a subclass or caller; verify).
    """
    super(BasicBlock, self).__init__()
    # Main branch: conv-bn-relu -> conv-bn. conv3x3/SEBlock are project helpers.
    self.conv1 = conv3x3(in_channels, out_channels, stride=stride)
    self.bn1 = nn.BatchNorm2d(out_channels)
    self.relu = P.ReLU()
    self.conv2 = conv3x3(out_channels, out_channels, stride=1)
    self.bn2 = nn.BatchNorm2d(out_channels)
    # Optional squeeze-and-excitation on the main branch output.
    self.use_se = use_se
    if use_se:
        self.se = SEBlock(out_channels)
    # Identity branch: only register a down-sample cell when one was given.
    self.down_sample_flag = down_sample is not None
    if self.down_sample_flag:
        self.down_sample = down_sample
    # Elementwise residual addition op.
    self.add = TensorAdd()
def __init__(self, in_channels, out_channels, stride=1, down_sample=None, base_width=64, groups=1, use_se=False, **kwargs):
    """Build a bottleneck residual block (1x1 -> grouped 3x3 -> 1x1).

    Args:
        in_channels: channel count of the input feature map.
        out_channels: base channel count; the block emits
            ``out_channels * self.expansion`` channels.
        stride: stride of the middle grouped 3x3 convolution.
        down_sample: optional cell applied to the identity branch; stored
            only when provided.
        base_width: per-group width scaling factor (64 keeps width unchanged).
        groups: number of groups for the middle convolution.
        use_se: when True, attach a squeeze-and-excitation block.
    """
    super(Bottleneck, self).__init__()
    # Bottleneck width scaled by base_width and replicated across groups.
    mid_channels = int(out_channels * (base_width / 64.0)) * groups
    self.groups = groups
    # Stage 1: channel reduction.
    self.conv1 = conv1x1(in_channels, mid_channels, stride=1)
    self.bn1 = nn.BatchNorm2d(mid_channels)
    self.relu = P.ReLU()
    # NOTE(review): this CellList is never populated here — looks vestigial,
    # kept for attribute-layout compatibility; confirm against construct().
    self.conv3x3s = nn.CellList()
    # Stage 2: grouped 3x3 convolution (project GroupConv helper), plus the
    # split/concat ops presumably used by construct() to route the groups.
    self.conv2 = GroupConv(mid_channels, mid_channels, 3, stride, pad=1, groups=groups)
    self.op_split = Split(axis=1, output_num=self.groups)
    self.op_concat = Concat(axis=1)
    self.bn2 = nn.BatchNorm2d(mid_channels)
    # Stage 3: channel expansion back up to out_channels * expansion.
    self.conv3 = conv1x1(mid_channels, out_channels * self.expansion, stride=1)
    self.bn3 = nn.BatchNorm2d(out_channels * self.expansion)
    # Optional squeeze-and-excitation on the expanded output.
    self.use_se = use_se
    if use_se:
        self.se = SEBlock(out_channels * self.expansion)
    # Identity branch: only register a down-sample cell when one was given.
    self.down_sample_flag = down_sample is not None
    if self.down_sample_flag:
        self.down_sample = down_sample
    self.cast = P.Cast()
    # Elementwise residual addition op.
    self.add = TensorAdd()
def __init__(self, in_channels, out_channels, stride=1, down_sample=None, base_width=64, groups=1, use_se=False, platform="Ascend", **kwargs):
    """Build a bottleneck residual block (1x1 -> grouped 3x3 -> 1x1).

    The middle convolution is backend-specific: on GPU the native grouped
    ``nn.Conv2d`` is used; otherwise a project ``GroupConv`` helper stands in.

    Args:
        in_channels: channel count of the input feature map.
        out_channels: base channel count; the block emits
            ``out_channels * self.expansion`` channels.
        stride: stride of the middle grouped 3x3 convolution.
        down_sample: optional cell applied to the identity branch; stored
            only when provided.
        base_width: per-group width scaling factor (64 keeps width unchanged).
        groups: number of groups for the middle convolution.
        use_se: when True, attach a squeeze-and-excitation block.
        platform: target backend; "GPU" selects nn.Conv2d, anything else
            selects GroupConv.
    """
    super(Bottleneck, self).__init__()
    # Bottleneck width scaled by base_width and replicated across groups.
    mid_channels = int(out_channels * (base_width / 64.0)) * groups
    self.groups = groups
    # Stage 1: channel reduction.
    self.conv1 = conv1x1(in_channels, mid_channels, stride=1)
    self.bn1 = nn.BatchNorm2d(mid_channels)
    self.relu = P.ReLU()
    # NOTE(review): this CellList is never populated here — looks vestigial,
    # kept for attribute-layout compatibility; confirm against construct().
    self.conv3x3s = nn.CellList()
    # Stage 2: grouped 3x3 convolution, chosen per backend.
    if platform == "GPU":
        self.conv2 = nn.Conv2d(mid_channels, mid_channels, 3, stride, pad_mode='pad', padding=1, group=groups)
    else:
        self.conv2 = GroupConv(mid_channels, mid_channels, 3, stride, pad=1, groups=groups)
    self.bn2 = nn.BatchNorm2d(mid_channels)
    # Stage 3: channel expansion back up to out_channels * expansion.
    self.conv3 = conv1x1(mid_channels, out_channels * self.expansion, stride=1)
    self.bn3 = nn.BatchNorm2d(out_channels * self.expansion)
    # Optional squeeze-and-excitation on the expanded output.
    self.use_se = use_se
    if use_se:
        self.se = SEBlock(out_channels * self.expansion)
    # Identity branch: only register a down-sample cell when one was given.
    self.down_sample_flag = down_sample is not None
    if self.down_sample_flag:
        self.down_sample = down_sample
    self.cast = P.Cast()
    # Elementwise residual addition op.
    self.add = TensorAdd()