def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
    """Build the quantized Bilinear wrapper and attach a quant/dequant handle.

    NOTE(review): torch.nn.Bilinear's constructor takes
    (in1_features, in2_features, out_features, bias) — confirm the parent
    class reached by super() really accepts (in_features, out_features, bias).
    """
    super().__init__(in_features, out_features, bias)
    # Helper object that quantizes and dequantizes tensors on the GPU.
    self.quant_handle = Q.QuantAndDeQuantGPU()
    # Slot for a backup of the full-precision weight; not filled in here.
    self.weight_origin = None
def __init__(self, kernel_size, stride=None, padding=0,
             ceil_mode: bool = False, count_include_pad: bool = True) -> None:
    """Construct the average-pool layer and attach a quant/dequant handle.

    All pooling parameters are forwarded unchanged to the parent class.
    """
    super().__init__(kernel_size, stride, padding, ceil_mode, count_include_pad)
    # Helper object that quantizes and dequantizes tensors on the GPU.
    self.quant_handle = Q.QuantAndDeQuantGPU()
def __init__(self, norm_type, kernel_size, stride=None,
             ceil_mode: bool = False) -> None:
    """Construct the LP-pool layer and attach a quant/dequant handle.

    Parameters are forwarded to the parent by keyword, exactly as received.
    """
    super().__init__(norm_type=norm_type, kernel_size=kernel_size,
                     stride=stride, ceil_mode=ceil_mode)
    # Helper object that quantizes and dequantizes tensors on the GPU.
    self.quant_handle = Q.QuantAndDeQuantGPU()
def __init__(self, kernel_size, output_size=None, output_ratio=None,
             return_indices: bool = False, _random_samples=None) -> None:
    """Construct the fractional max-pool layer and attach a quant/dequant handle.

    Bug fix: the previous super() call forwarded
    ``(kernel_size, output_ratio, return_indices, _random_samples)``
    positionally, silently dropping ``output_size`` — ``output_ratio`` landed
    in the parent's ``output_size`` slot and every later argument shifted by
    one position. torch.nn.FractionalMaxPool2d's signature is
    ``(kernel_size, output_size=None, output_ratio=None,
    return_indices=False, _random_samples=None)``, so all five arguments are
    now forwarded by keyword to guarantee each reaches the right parameter.
    """
    super().__init__(
        kernel_size,
        output_size=output_size,
        output_ratio=output_ratio,
        return_indices=return_indices,
        _random_samples=_random_samples,
    )
    # Helper object that quantizes and dequantizes tensors on the GPU.
    self.quant_handle = Q.QuantAndDeQuantGPU()
def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
             return_indices: bool = False, ceil_mode: bool = False) -> None:
    """Construct the max-pool layer and attach a quant/dequant handle.

    All pooling parameters are forwarded unchanged to the parent class.
    """
    super().__init__(kernel_size, stride, padding, dilation,
                     return_indices, ceil_mode)
    # Helper object that quantizes and dequantizes tensors on the GPU.
    self.quant_handle = Q.QuantAndDeQuantGPU()
def __init__(self, in_channels: int, out_channels: int, kernel_size,
             stride=1, padding=0, dilation=1, groups: int = 1,
             bias: bool = True):
    """Construct the convolution layer and set up quantization bookkeeping.

    All convolution parameters are forwarded unchanged to the parent class.
    """
    super().__init__(in_channels, out_channels, kernel_size, stride,
                     padding, dilation, groups, bias)
    # Helper object that quantizes and dequantizes tensors on the GPU.
    self.quant_handle = Q.QuantAndDeQuantGPU()
    # Slot for a backup of the full-precision weight; not filled in here.
    self.weight_origin = None
    # Quantization width in bits; initialized to 8 here.
    self._bit_width = 8
def __init__(self, output_size, return_indices: bool = False) -> None:
    """Construct the adaptive max-pool layer and attach a quant/dequant handle.

    Parameters are forwarded to the parent by keyword, exactly as received.
    """
    super().__init__(output_size=output_size, return_indices=return_indices)
    # Helper object that quantizes and dequantizes tensors on the GPU.
    self.quant_handle = Q.QuantAndDeQuantGPU()
def __init__(self, kernel_size, stride=None, padding=0) -> None:
    """Construct the max-unpool layer and attach a quant/dequant handle.

    All unpooling parameters are forwarded unchanged to the parent class.
    """
    super().__init__(kernel_size, stride, padding)
    # Helper object that quantizes and dequantizes tensors on the GPU.
    self.quant_handle = Q.QuantAndDeQuantGPU()
def __init__(self, output_size) -> None:
    """Construct the adaptive average-pool layer and attach a quant/dequant handle.

    The target output size is forwarded unchanged to the parent class.
    """
    super().__init__(output_size)
    # Helper object that quantizes and dequantizes tensors on the GPU.
    self.quant_handle = Q.QuantAndDeQuantGPU()