def __init__(
    self,
    in_channels,
    channels,
    stride=1,
    groups=1,
    base_width=64,
    dilation=1,
    norm=M.BatchNorm2d,
):
    """Basic residual block: two 3x3 convs plus a shortcut.

    Only groups=1, base_width=64 and dilation=1 are supported; anything
    else raises before any submodule is built.
    """
    super(BasicBlock, self).__init__()
    if groups != 1 or base_width != 64:
        raise ValueError("BasicBlock only supports groups=1 and base_width=64")
    if dilation > 1:
        raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
    # dilation is guaranteed to be 1 at this point, so padding=dilation
    # is effectively padding=1 and preserves the spatial size of a 3x3 conv.
    self.conv1 = M.Conv2d(
        in_channels, channels, 3, stride, padding=dilation, bias=False
    )
    self.bn1 = norm(channels)
    self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=False)
    self.bn2 = norm(channels)
    # Shortcut: identity when the residual keeps its shape, otherwise a
    # 1x1 conv + norm projection to match channels/stride.
    if in_channels == channels and stride == 1:
        self.downsample = M.Identity()
    else:
        self.downsample = M.Sequential(
            M.Conv2d(in_channels, channels, 1, stride, bias=False),
            norm(channels),
        )
def __init__(self, in_channels: int, mid_channels: int, out_channels: int, stride: int = 1):
    """Residual block of two separable 5x5 convs with a projected shortcut.

    The first conv applies the stride and a ReLU; the second is linear.
    The shortcut is an identity when the shape is preserved, otherwise a
    separable 3x3 projection.
    """
    super().__init__()
    self.conv1 = Conv2D(
        in_channels,
        mid_channels,
        kernel_size=5,
        stride=stride,
        padding=2,
        is_seperable=True,
        has_relu=True,
    )
    self.conv2 = Conv2D(
        mid_channels,
        out_channels,
        kernel_size=5,
        stride=1,
        padding=2,
        is_seperable=True,
        has_relu=False,
    )
    # Shortcut path: project only when channels or resolution change.
    if stride == 1 and in_channels == out_channels:
        self.proj = M.Identity()
    else:
        self.proj = Conv2D(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            is_seperable=True,
            has_relu=False,
        )
    self.relu = M.ReLU()
def __init__(
    self,
    in_channels,
    channels,
    stride=1,
    groups=1,
    base_width=64,
    dilation=1,
    norm=M.BatchNorm2d,
):
    """Quantization-ready bottleneck built from fused conv+bn(+relu) modules.

    Structure: 1x1 reduce -> 3x3 (stride/groups/dilation) -> 1x1 expand,
    with a fused add+ReLU joining the shortcut.
    """
    assert norm is M.BatchNorm2d, "Quant mode only support BatchNorm2d currently."
    super(Bottleneck, self).__init__()
    # Channel count of the middle 3x3 stage, scaled by base_width and groups.
    mid = int(channels * (base_width / 64.0)) * groups
    out_channels = channels * self.expansion
    self.conv_bn_relu1 = M.ConvBnRelu2d(in_channels, mid, 1, 1, bias=False)
    self.conv_bn_relu2 = M.ConvBnRelu2d(
        mid,
        mid,
        3,
        stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
    self.conv_bn3 = M.ConvBn2d(mid, out_channels, 1, 1, bias=False)
    # Shortcut: identity when the residual keeps its shape, else a fused
    # 1x1 conv+bn projection.
    if in_channels == out_channels and stride == 1:
        self.downsample = M.Identity()
    else:
        self.downsample = M.ConvBn2d(in_channels, out_channels, 1, stride, bias=False)
    # Fused elementwise add + ReLU, required for quantized inference.
    self.add = M.Elemwise("FUSE_ADD_RELU")
def __init__(
    self,
    in_channels,
    channels,
    stride=1,
    groups=1,
    base_width=64,
    dilation=1,
    norm=M.BatchNorm2d,
):
    """Standard bottleneck block: 1x1 reduce, 3x3, 1x1 expand, plus shortcut."""
    super().__init__()
    # Channel count of the middle 3x3 stage, scaled by base_width and groups.
    mid = int(channels * (base_width / 64.0)) * groups
    out_channels = channels * self.expansion
    self.conv1 = M.Conv2d(in_channels, mid, 1, 1, bias=False)
    self.bn1 = norm(mid)
    self.conv2 = M.Conv2d(
        mid,
        mid,
        3,
        stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
    self.bn2 = norm(mid)
    self.conv3 = M.Conv2d(mid, out_channels, 1, 1, bias=False)
    self.bn3 = norm(out_channels)
    # Shortcut: identity when the residual keeps its shape, otherwise a
    # 1x1 conv + norm projection.
    if in_channels == out_channels and stride == 1:
        self.downsample = M.Identity()
    else:
        self.downsample = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, stride, bias=False),
            norm(out_channels),
        )
def __init__(
    self,
    in_channels,
    channels,
    stride=1,
    groups=1,
    base_width=64,
    dilation=1,
    norm=M.BatchNorm2d,
):
    """Quantization-ready basic block built from fused conv+bn(+relu) modules.

    Only groups=1, base_width=64 and dilation=1 are supported.
    """
    assert norm is M.BatchNorm2d, "Quant mode only support BatchNorm2d currently."
    super(BasicBlock, self).__init__()
    if groups != 1 or base_width != 64:
        raise ValueError("BasicBlock only supports groups=1 and base_width=64")
    if dilation > 1:
        raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
    # dilation is guaranteed 1 here, so padding=dilation keeps spatial size.
    self.conv_bn_relu1 = M.ConvBnRelu2d(
        in_channels, channels, 3, stride, padding=dilation, bias=False
    )
    self.conv_bn2 = M.ConvBn2d(channels, channels, 3, 1, padding=1, bias=False)
    # Shortcut: identity when the residual keeps its shape, else a fused
    # 1x1 conv+bn projection.
    if in_channels == channels and stride == 1:
        self.downsample = M.Identity()
    else:
        self.downsample = M.ConvBn2d(in_channels, channels, 1, stride, bias=False)
    # Fused elementwise add + ReLU, required for quantized inference.
    self.add = M.Elemwise("FUSE_ADD_RELU")
def __init__(self, inp, oup, stride):
    """Residual unit of two fused 3x3 conv+bn stages with a shortcut.

    The shortcut is an identity when input/output shapes match, otherwise
    a fused 1x1 conv+bn projection.
    """
    super().__init__()
    self.proj = (
        M.Identity()
        if inp == oup and stride == 1
        else M.ConvBn2d(inp, oup, 1, stride=stride, bias=False)
    )
    self.conv1 = M.ConvBnRelu2d(inp, oup, 3, padding=1, stride=stride, bias=False)
    self.conv2 = M.ConvBn2d(oup, oup, 3, padding=1, stride=1, bias=False)
def __init__(
    self, in_channels, bottleneck_channels, out_channels, stride, dilation=1
):
    """Bottleneck block with frozen batch-norm statistics.

    Structure: 1x1 reduce -> 3x3 (stride/dilation) -> 1x1 expand, with a
    shortcut that projects via 1x1 conv + FrozenBatchNorm2d when the
    residual changes shape.

    NOTE(review): a dead ``self.downsample = None`` that was immediately
    overwritten has been removed — behavior is unchanged.
    NOTE(review): ``has_bias`` is resolved from enclosing module scope —
    presumably a module-level flag; confirm it is defined at import time.
    """
    super(Bottleneck, self).__init__()
    # Shortcut: identity when the residual keeps its shape, otherwise a
    # 1x1 projection with frozen normalization.
    if in_channels == out_channels and stride == 1:
        self.downsample = M.Identity()
    else:
        self.downsample = M.Sequential(
            M.Conv2d(
                in_channels, out_channels, kernel_size=1, stride=stride, bias=has_bias
            ),
            FrozenBatchNorm2d(out_channels),
        )
    self.conv1 = M.Conv2d(
        in_channels, bottleneck_channels, kernel_size=1, stride=1, bias=has_bias
    )
    self.bn1 = FrozenBatchNorm2d(bottleneck_channels)
    self.conv2 = M.Conv2d(
        bottleneck_channels,
        bottleneck_channels,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        bias=has_bias,
        dilation=dilation,
    )
    self.bn2 = FrozenBatchNorm2d(bottleneck_channels)
    self.conv3 = M.Conv2d(
        bottleneck_channels, out_channels, kernel_size=1, stride=1, bias=has_bias
    )
    self.bn3 = FrozenBatchNorm2d(out_channels)
def __init__(
    self,
    in_channels,
    channels,
    stride=1,
    groups=1,
    base_width=64,
    dilation=1,
    norm=M.BatchNorm2d,
):
    """Basic block variant that registers both shortcut paths as submodules.

    Unlike the standard BasicBlock, the identity and the conv+norm
    shortcut are both created; presumably the forward pass selects one
    based on the stashed shape attributes — confirm against the caller.
    """
    super().__init__()
    # Stashed so later code can tell whether a projection is needed.
    self.tmp_in_channels = in_channels
    self.tmp_channels = channels
    self.stride = stride
    if groups != 1 or base_width != 64:
        raise ValueError("BasicBlock only supports groups=1 and base_width=64")
    if dilation > 1:
        raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
    # dilation is guaranteed 1 here, so padding=dilation keeps spatial size.
    self.conv1 = M.Conv2d(
        in_channels, channels, 3, stride, padding=dilation, bias=False
    )
    self.bn1 = norm(channels)
    self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=False)
    self.bn2 = norm(channels)
    # Both shortcut variants are built unconditionally.
    self.downsample_id = M.Identity()
    self.downsample_conv = M.Conv2d(in_channels, channels, 1, stride, bias=False)
    self.downsample_norm = norm(channels)
def __init__(self, name):
    """Module holding Identity submodules as a plain attribute, a list and a dict.

    Also creates a single-element zero tensor as ``param``.
    """
    super().__init__(name)
    self.identity = M.Identity()
    # Containers of submodules: two in a list, two keyed "0"/"1" in a dict.
    self.identity_list = [M.Identity() for _ in range(2)]
    self.identity_dict = {str(i): M.Identity() for i in range(2)}
    self.param = F.zeros((1,))