Example #1: a quantization-aware squeeze-and-excitation constructor. When quantized=True it attaches a TensorQuantizer to each input of the SE multiply; otherwise it substitutes nn.Identity so the forward path stays uniform.
 def __init__(self, in_channels, squeeze, activation, quantized=False):
     super().__init__(in_channels, squeeze, activation)
     self.quantized = quantized
     if quantized:
         assert quant_nn is not None, "pytorch_quantization is not available"
         self.mul_a_quantizer = quant_nn.TensorQuantizer(
             quant_nn.QuantConv2d.default_quant_desc_input)
         self.mul_b_quantizer = quant_nn.TensorQuantizer(
             quant_nn.QuantConv2d.default_quant_desc_input)
     else:
         self.mul_a_quantizer = nn.Identity()
         self.mul_b_quantizer = nn.Identity()
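These constructors use quant_nn without importing it. A minimal sketch of the guarded import they appear to assume (the warning text and the None fallback are illustrative, not taken from the snippets):

    import warnings

    import torch.nn as nn

    try:
        # Provides TensorQuantizer and QuantConv2d.default_quant_desc_input.
        from pytorch_quantization import nn as quant_nn
    except ImportError:
        warnings.warn("pytorch_quantization not found; quantized=True paths will fail")
        quant_nn = None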
Example #2: a leaner variant of the same constructor; the quantizers are created only in the quantized branch, with no nn.Identity fallback and no availability assert.
 def __init__(self, in_channels, squeeze, activation, quantized=False):
     super().__init__(
         in_channels,
         squeeze,
         activation,
     )
     self.quantized = quantized
     if quantized:
         self.mul_a_quantizer = quant_nn.TensorQuantizer(
             quant_nn.QuantConv2d.default_quant_desc_input)
         self.mul_b_quantizer = quant_nn.TensorQuantizer(
             quant_nn.QuantConv2d.default_quant_desc_input)
Example #3: a ResNet Bottleneck constructor extended with a quantize flag. The conv1x1/conv3x3 helpers accept quantize=, and a residual_quantizer is created for the skip connection.
 def __init__(self,
              inplanes: int,
              planes: int,
              stride: int = 1,
              downsample: Optional[nn.Module] = None,
              groups: int = 1,
              base_width: int = 64,
              dilation: int = 1,
              norm_layer: Optional[Callable[..., nn.Module]] = None,
              quantize: bool = False) -> None:
     super(Bottleneck, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     width = int(planes * (base_width / 64.)) * groups
     # Both self.conv2 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv1x1(inplanes, width, quantize=quantize)
     self.bn1 = norm_layer(width)
     self.conv2 = conv3x3(width,
                          width,
                          stride,
                          groups,
                          dilation,
                          quantize=quantize)
     self.bn2 = norm_layer(width)
     self.conv3 = conv1x1(width, planes * self.expansion, quantize=quantize)
     self.bn3 = norm_layer(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
     self._quantize = quantize
     if self._quantize:
         self.residual_quantizer = quant_nn.TensorQuantizer(
             quant_nn.QuantConv2d.default_quant_desc_input)
Example #4: the matching BasicBlock constructor, with the same quantize plumbing plus the usual groups/base_width/dilation validation.
 def __init__(self,
              inplanes: int,
              planes: int,
              stride: int = 1,
              downsample: Optional[nn.Module] = None,
              groups: int = 1,
              base_width: int = 64,
              dilation: int = 1,
              norm_layer: Optional[Callable[..., nn.Module]] = None,
              quantize: bool = False) -> None:
     super(BasicBlock, self).__init__()
     if norm_layer is None:
         norm_layer = nn.BatchNorm2d
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             "Dilation > 1 not supported in BasicBlock")
     # Both self.conv1 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv3x3(inplanes, planes, stride, quantize=quantize)
     self.bn1 = norm_layer(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes, quantize=quantize)
     self.bn2 = norm_layer(planes)
     self.downsample = downsample
     self.stride = stride
     self._quantize = quantize
     if self._quantize:
         self.residual_quantizer = quant_nn.TensorQuantizer(
             quant_nn.QuantConv2d.default_quant_desc_input)
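Examples #3 and #4 only construct residual_quantizer. For context, a hedged sketch of how such a quantizer is typically consumed in a BasicBlock-style forward (the forward body is not part of these snippets and is an assumption here):

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        # Quantizing the skip input gives the residual add matching
        # quantization scales on both operands.
        if self._quantize:
            out += self.residual_quantizer(identity)
        else:
            out += identity
        return self.relu(out)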
Example #5: an MBConv-style block (expand, depthwise-separable conv, SE, projection). It selects a TRT-friendly SE module when trt or quantized is set and quantizes the residual input before the add.
    def __init__(
        self,
        builder: LayerBuilder,
        depsep_kernel_size: int,
        in_channels: int,
        out_channels: int,
        expand_ratio: int,
        stride: int,
        squeeze_excitation_ratio: float,
        squeeze_hidden=False,
        survival_prob: float = 1.0,
        quantized: bool = False,
        trt: bool = False,
    ):
        super().__init__()
        self.quantized = quantized
        self.residual = stride == 1 and in_channels == out_channels
        hidden_dim = in_channels * expand_ratio
        squeeze_base = hidden_dim if squeeze_hidden else in_channels
        squeeze_dim = max(1, int(squeeze_base * squeeze_excitation_ratio))

        self.expand = (None if in_channels == hidden_dim else builder.conv1x1(
            in_channels, hidden_dim, bn=True, act=True))
        self.depsep = builder.convDepSep(depsep_kernel_size,
                                         hidden_dim,
                                         hidden_dim,
                                         stride,
                                         bn=True,
                                         act=True)
        if trt or self.quantized:
            # Need TRT mode for quantized in order to automatically insert quantization before pooling
            self.se: nn.Module = SequentialSqueezeAndExcitationTRT(
                hidden_dim, squeeze_dim, builder.activation(), self.quantized)
        else:
            self.se: nn.Module = SequentialSqueezeAndExcitation(
                hidden_dim, squeeze_dim, builder.activation(), self.quantized)

        self.proj = builder.conv1x1(hidden_dim, out_channels, bn=True)

        if survival_prob == 1.0:
            self.residual_add = torch.add
        else:
            self.residual_add = StochasticDepthResidual(
                survival_prob=survival_prob)
        if self.quantized and self.residual:
            assert quant_nn is not None, "pytorch_quantization is not available"
            self.residual_quantizer = quant_nn.TensorQuantizer(
                quant_nn.QuantConv2d.default_quant_desc_input
            )  # TODO QuantConv2d ?!?
        else:
            self.residual_quantizer = nn.Identity()
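A hedged sketch of the forward that would pair with this constructor, showing where residual_quantizer and residual_add come into play (assumed, not part of the snippet):

    def forward(self, x):
        b = self.proj(self.se(self.depsep(x if self.expand is None else self.expand(x))))
        if not self.residual:
            return b
        # residual_quantizer is nn.Identity in the non-quantized case,
        # so this is a no-op unless quantized=True.
        return self.residual_add(self.residual_quantizer(x), b)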
Example #6: a variant of the same block that routes TRT handling through a single SequentialSqueezeAndExcitation via use_conv=trt and stores survival_prob instead of building a residual-add module up front.
    def __init__(
        self,
        builder: LayerBuilder,
        depsep_kernel_size: int,
        in_channels: int,
        out_channels: int,
        expand_ratio: int,
        stride: int,
        squeeze_excitation_ratio: float,
        squeeze_hidden=False,
        survival_prob: float = 1.0,
        quantized: bool = False,
        trt: bool = False,
    ):
        super().__init__()
        self.quantized = quantized
        self.residual = stride == 1 and in_channels == out_channels
        hidden_dim = in_channels * expand_ratio
        squeeze_base = hidden_dim if squeeze_hidden else in_channels
        squeeze_dim = max(1, int(squeeze_base * squeeze_excitation_ratio))

        self.expand = (None if in_channels == hidden_dim else builder.conv1x1(
            in_channels, hidden_dim, bn=True, act=True))
        self.depsep = builder.convDepSep(depsep_kernel_size,
                                         hidden_dim,
                                         hidden_dim,
                                         stride,
                                         bn=True,
                                         act=True)
        self.se = SequentialSqueezeAndExcitation(hidden_dim,
                                                 squeeze_dim,
                                                 builder.activation(),
                                                 self.quantized,
                                                 use_conv=trt)
        self.proj = builder.conv1x1(hidden_dim, out_channels, bn=True)

        self.survival_prob = survival_prob

        if self.quantized and self.residual:
            assert quant_nn is not None, "pytorch_quantization is not available"
            self.residual_quantizer = quant_nn.TensorQuantizer(
                quant_nn.QuantConv2d.default_quant_desc_input
            )  # TODO QuantConv2d ?!?
Example #7: a JasperBlock constructor that threads quantize= through every conv/norm layer, the optional SqueezeExcite, and the residual 1x1 convolutions, guarded by PYTORCH_QUANTIZATION_AVAILABLE.
    def __init__(
        self,
        inplanes,
        planes,
        repeat=3,
        kernel_size=11,
        kernel_size_factor=1,
        stride=1,
        dilation=1,
        padding='same',
        dropout=0.2,
        activation=None,
        residual=True,
        groups=1,
        separable=False,
        heads=-1,
        normalization="batch",
        norm_groups=1,
        residual_mode='add',
        residual_panes=[],
        conv_mask=False,
        se=False,
        se_reduction_ratio=16,
        se_context_window=-1,
        se_interpolation_mode='nearest',
        stride_last=False,
        future_context: int = -1,
        quantize=False,
    ):
        super(JasperBlock, self).__init__()

        if padding != "same":
            raise ValueError("currently only 'same' padding is supported")

        kernel_size_factor = float(kernel_size_factor)
        if type(kernel_size) in (list, tuple):
            kernel_size = [compute_new_kernel_size(k, kernel_size_factor) for k in kernel_size]
        else:
            # Wrap in a list so the kernel_size[0] indexing below also works
            # for scalar inputs.
            kernel_size = [compute_new_kernel_size(kernel_size, kernel_size_factor)]

        if future_context < 0:
            padding_val = get_same_padding(kernel_size[0], stride[0], dilation[0])
        else:
            padding_val = get_asymtric_padding(kernel_size[0], stride[0], dilation[0], future_context)

        self.conv_mask = conv_mask
        self.separable = separable
        self.residual_mode = residual_mode
        self.se = se
        self.quantize = quantize

        inplanes_loop = inplanes
        conv = nn.ModuleList()

        for _ in range(repeat - 1):
            # Stride last means only the last convolution in block will have stride
            if stride_last:
                stride_val = [1]
            else:
                stride_val = stride

            conv.extend(
                self._get_conv_bn_layer(
                    inplanes_loop,
                    planes,
                    kernel_size=kernel_size,
                    stride=stride_val,
                    dilation=dilation,
                    padding=padding_val,
                    groups=groups,
                    heads=heads,
                    separable=separable,
                    normalization=normalization,
                    norm_groups=norm_groups,
                    quantize=quantize,
                )
            )

            conv.extend(self._get_act_dropout_layer(drop_prob=dropout, activation=activation))

            inplanes_loop = planes

        conv.extend(
            self._get_conv_bn_layer(
                inplanes_loop,
                planes,
                kernel_size=kernel_size,
                stride=stride,
                dilation=dilation,
                padding=padding_val,
                groups=groups,
                heads=heads,
                separable=separable,
                normalization=normalization,
                norm_groups=norm_groups,
                quantize=quantize,
            )
        )

        if se:
            conv.append(
                SqueezeExcite(
                    planes,
                    reduction_ratio=se_reduction_ratio,
                    context_window=se_context_window,
                    interpolation_mode=se_interpolation_mode,
                    activation=activation,
                    quantize=quantize,
                )
            )

        self.mconv = conv

        res_panes = residual_panes.copy()
        self.dense_residual = residual

        if residual:
            res_list = nn.ModuleList()

            if residual_mode == 'stride_add':
                stride_val = stride
            else:
                stride_val = [1]

            if len(residual_panes) == 0:
                res_panes = [inplanes]
                self.dense_residual = False
            for ip in res_panes:
                res = nn.ModuleList(
                    self._get_conv_bn_layer(
                        ip,
                        planes,
                        kernel_size=1,
                        normalization=normalization,
                        norm_groups=norm_groups,
                        stride=stride_val,
                        quantize=quantize,
                    )
                )

                res_list.append(res)

            self.res = res_list
            if PYTORCH_QUANTIZATION_AVAILABLE and self.quantize:
                self.residual_quantizer = quant_nn.TensorQuantizer(quant_nn.QuantConv2d.default_quant_desc_input)
            elif not PYTORCH_QUANTIZATION_AVAILABLE and quantize:
                raise ImportError(
                    "pytorch-quantization is not installed. Install from "
                    "https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
                )
        else:
            self.res = None

        self.mout = nn.Sequential(*self._get_act_dropout_layer(drop_prob=dropout, activation=activation))
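Example #7 checks a PYTORCH_QUANTIZATION_AVAILABLE flag rather than asserting on quant_nn. A minimal sketch of that module-level guard (the flag name follows the snippet; the try/except shape is an assumption):

    try:
        from pytorch_quantization import nn as quant_nn

        PYTORCH_QUANTIZATION_AVAILABLE = True
    except ImportError:
        PYTORCH_QUANTIZATION_AVAILABLE = False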