def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             padding=0,
             dilation=1,
             groups=1,
             bias=False,
             cast_func=void_cast_func,
             n_train_sample=1):
    # Bit-centering setup: the full-precision (fp) path uses F.conv2d, while the
    # low-precision (lp) path uses the dedicated bit_center_conv2d functional.
    BitCenterLayer.__init__(
        self,
        fp_functional=F.conv2d,
        lp_functional=bit_center_conv2d,
        bias=bias,
        cast_func=cast_func,
        n_train_sample=n_train_sample)
    Conv2d.__init__(
        self,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
    # weight_delta is the delta tensor in the algorithm, while weight_lp is the
    # cached low-precision version of the weight offset.
    self.setup_bit_center_vars()
    self.cuda()
    self.reset_parameters_bit_center()
    # Cache the gradient w.r.t. the output during backward for the lp steps.
    self.register_backward_hook(self.update_grad_output_cache)
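
# A minimal usage sketch, under loud assumptions: the constructor above is taken
# to belong to a BitCenterConv2d class from a HALP-style bit-centering codebase,
# and the import paths below are assumptions rather than confirmed API. Note
# that __init__ calls self.cuda(), so a CUDA device is required.
import torch
from halp.layers.conv_layer import BitCenterConv2d  # assumed module path
from halp.utils.utils import void_cast_func         # assumed module path

layer = BitCenterConv2d(
    in_channels=3,
    out_channels=16,
    kernel_size=5,
    padding=2,
    cast_func=void_cast_func,  # identity cast: lp copies stay in fp32
    n_train_sample=50000)      # number of training samples whose activations are cached
y = layer(torch.randn(8, 3, 32, 32).cuda())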
def __init__(self,
             in_channels: int,
             out_channels: int,
             kernel_size: Union[int, Tuple[int, int]],
             stride: Union[int, Tuple[int, int]] = 1,
             padding: Union[int, Tuple[int, int]] = 0,
             dilation: Union[int, Tuple[int, int]] = 1,
             groups: int = 1,
             bias: bool = True,
             padding_type: str = 'standard',
             weight_quant: Union[WeightQuantProxyProtocol, Type[Injector]] = Int8WeightPerTensorFloat,
             bias_quant: Union[BiasQuantProxyProtocol, Type[Injector]] = FloatBias,
             input_quant: Union[ActQuantProxyProtocol, Type[Injector]] = None,
             output_quant: Union[ActQuantProxyProtocol, Type[Injector]] = None,
             return_quant_tensor: bool = False,
             **kwargs) -> None:
    Conv2d.__init__(
        self,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
    QuantWBIOL.__init__(
        self,
        weight=self.weight,
        bias=self.bias,
        weight_quant=weight_quant,
        bias_quant=bias_quant,
        input_quant=input_quant,
        output_quant=output_quant,
        return_quant_tensor=return_quant_tensor,
        **kwargs)
    # Only zero padding is supported, and 'same' padding is applied dynamically
    # at call time, so it cannot be combined with an explicit padding value.
    assert self.padding_mode == 'zeros'
    assert not (padding_type == 'same' and padding != 0)
    self.padding_type = padding_type
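
# A minimal usage sketch, assuming the constructor above is the Injector-driven
# QuantConv2d from Brevitas; the import paths below follow that library's layout
# and are assumptions about the exact version this snippet came from.
import torch
from brevitas.nn import QuantConv2d
from brevitas.quant import Int8WeightPerTensorFloat

conv = QuantConv2d(
    in_channels=3,
    out_channels=16,
    kernel_size=3,
    padding=1,
    weight_quant=Int8WeightPerTensorFloat,  # the default above, made explicit
    return_quant_tensor=False)
y = conv(torch.randn(1, 3, 32, 32))  # plain torch.Tensor, since return_quant_tensor=False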
def __init__(self,
             in_channels: int,
             out_channels: int,
             kernel_size: Union[int, Tuple[int, int]],
             stride: Union[int, Tuple[int, int]] = 1,
             padding: Union[int, Tuple[int, int]] = 0,
             padding_type: PaddingType = PaddingType.STANDARD,
             dilation: Union[int, Tuple[int, int]] = 1,
             groups: int = 1,
             bias: bool = True,
             bias_quant_type: QuantType = QuantType.FP,
             bias_narrow_range: bool = False,
             bias_bit_width: int = None,
             weight_quant_override: WeightQuantProxy = None,
             weight_quant_type: QuantType = QuantType.FP,
             weight_narrow_range: bool = False,
             weight_scaling_override: Optional[Module] = None,
             weight_bit_width_impl_override: Union[BitWidthParameter, BitWidthConst] = None,
             weight_bit_width_impl_type: BitWidthImplType = BitWidthImplType.CONST,
             weight_restrict_bit_width_type: RestrictValueType = RestrictValueType.INT,
             weight_bit_width: int = 32,
             weight_min_overall_bit_width: Optional[int] = 2,
             weight_max_overall_bit_width: Optional[int] = None,
             weight_scaling_impl_type: ScalingImplType = ScalingImplType.STATS,
             weight_scaling_const: Optional[float] = None,
             weight_scaling_stats_op: StatsOp = StatsOp.MAX,
             weight_scaling_per_output_channel: bool = False,
             weight_ternary_threshold: float = 0.5,
             weight_restrict_scaling_type: RestrictValueType = RestrictValueType.LOG_FP,
             weight_scaling_stats_sigma: float = 3.0,
             weight_scaling_min_val: float = SCALING_MIN_VAL,
             weight_override_pretrained_bit_width: bool = False,
             compute_output_scale: bool = False,
             compute_output_bit_width: bool = False,
             return_quant_tensor: bool = False) -> None:
    QuantLayer.__init__(
        self,
        compute_output_scale=compute_output_scale,
        compute_output_bit_width=compute_output_bit_width,
        return_quant_tensor=return_quant_tensor)
    Conv2d.__init__(
        self,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
    if weight_quant_type == QuantType.FP and compute_output_bit_width:
        raise Exception("Computing output bit width requires enabling quantization")
    if bias_quant_type != QuantType.FP and not (compute_output_scale and compute_output_bit_width):
        raise Exception("Quantizing bias requires to compute output scale and output bit width")

    # 2 ops (multiply + add) per kernel element over the per-group input channels.
    self.per_elem_ops = 2 * self.kernel_size[0] * self.kernel_size[1] * (in_channels // groups)
    self.padding_type = padding_type
    self.weight_reg = WeightReg()

    if weight_quant_override is not None:
        self.weight_quant = weight_quant_override
        self.weight_quant.add_tracked_parameter(self.weight)
    else:
        weight_scaling_stats_input_concat_dim = 1
        if weight_scaling_per_output_channel:
            weight_stats_input_view_shape_impl = StatsInputViewShapeImpl.OVER_OUTPUT_CHANNELS
            weight_scaling_shape = self.per_output_channel_broadcastable_shape
            weight_scaling_stats_reduce_dim = 1
        else:
            weight_stats_input_view_shape_impl = StatsInputViewShapeImpl.OVER_TENSOR
            weight_scaling_shape = SCALING_SCALAR_SHAPE
            weight_scaling_stats_reduce_dim = None

        # MAX_AVE stats always view the weight per output channel, regardless of
        # the per-channel scaling flag.
        if weight_scaling_stats_op == StatsOp.MAX_AVE:
            weight_stats_input_view_shape_impl = StatsInputViewShapeImpl.OVER_OUTPUT_CHANNELS
            weight_scaling_stats_reduce_dim = 1

        self.weight_quant = WeightQuantProxy(
            bit_width=weight_bit_width,
            quant_type=weight_quant_type,
            narrow_range=weight_narrow_range,
            scaling_override=weight_scaling_override,
            restrict_scaling_type=weight_restrict_scaling_type,
            scaling_const=weight_scaling_const,
            scaling_stats_op=weight_scaling_stats_op,
            scaling_impl_type=weight_scaling_impl_type,
            scaling_stats_reduce_dim=weight_scaling_stats_reduce_dim,
            scaling_shape=weight_scaling_shape,
            bit_width_impl_type=weight_bit_width_impl_type,
            bit_width_impl_override=weight_bit_width_impl_override,
            restrict_bit_width_type=weight_restrict_bit_width_type,
            min_overall_bit_width=weight_min_overall_bit_width,
            max_overall_bit_width=weight_max_overall_bit_width,
            tracked_parameter_list_init=self.weight,
            ternary_threshold=weight_ternary_threshold,
            scaling_stats_input_view_shape_impl=weight_stats_input_view_shape_impl,
            scaling_stats_input_concat_dim=weight_scaling_stats_input_concat_dim,
            scaling_stats_sigma=weight_scaling_stats_sigma,
            scaling_min_val=weight_scaling_min_val,
            override_pretrained_bit_width=weight_override_pretrained_bit_width)
    self.bias_quant = BiasQuantProxy(
        quant_type=bias_quant_type,
        bit_width=bias_bit_width,
        narrow_range=bias_narrow_range)
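
# A minimal usage sketch, assuming the constructor above is the older
# enum-configured QuantConv2d from Brevitas (pre-Injector API); the import paths
# below match that layout and are assumptions about the exact release.
from brevitas.nn import QuantConv2d
from brevitas.core.quant import QuantType

conv = QuantConv2d(
    in_channels=3,
    out_channels=16,
    kernel_size=3,
    weight_quant_type=QuantType.INT,         # enable INT weight quantization
    weight_bit_width=4,
    weight_scaling_per_output_channel=True)  # one scale factor per output channel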