Example 1
def __init__(self):
    super(QNN_HARnn, self).__init__()
    # 2-bit integer hard-tanh activations with a learned (PARAMETER) scale restricted to the log domain
    self.hardtanh0 = qnn.QuantHardTanh(
        quant_type=QuantType.INT, bit_width=2, narrow_range=True, bit_width_impl_type=BitWidthImplType.CONST,
        min_val=-1.0, max_val=1.0, restrict_scaling_type=RestrictValueType.LOG_FP,
        scaling_per_channel=False, scaling_impl_type=ScalingImplType.PARAMETER)
    self.dropout0 = torch.nn.Dropout(p=DROPOUT)
    # Binary (1-bit) weights with AVE scaling statistics, per output channel for the hidden layer
    self.linear1 = qnn.QuantLinear(
        8, 128, bias=False, weight_quant_type=QuantType.BINARY, weight_bit_width=1,
        weight_scaling_stats_op=StatsOp.AVE, weight_scaling_stats_sigma=0.001,
        weight_scaling_per_output_channel=True, weight_narrow_range=True,
        weight_bit_width_impl_type=BitWidthImplType.CONST)
    self.hardtanh1 = qnn.QuantHardTanh(
        quant_type=QuantType.INT, bit_width=2, narrow_range=True, bit_width_impl_type=BitWidthImplType.CONST,
        min_val=-1.0, max_val=1.0, restrict_scaling_type=RestrictValueType.LOG_FP,
        scaling_per_channel=False, scaling_impl_type=ScalingImplType.PARAMETER)
    self.dropout1 = torch.nn.Dropout(p=DROPOUT)
    # Output layer: binary weights with a single per-tensor scale
    self.linear2 = qnn.QuantLinear(
        128, 3, bias=False, weight_quant_type=QuantType.BINARY, weight_bit_width=1,
        weight_scaling_stats_op=StatsOp.AVE, weight_scaling_stats_sigma=0.001,
        weight_scaling_per_output_channel=False, weight_narrow_range=True,
        weight_bit_width_impl_type=BitWidthImplType.CONST)
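The constructor above relies on the older enum-driven Brevitas API; a hedged sketch of the imports such a module typically needs is shown below (exact module paths vary across Brevitas releases, and DROPOUT is a constant defined elsewhere in the original file).

# Assumed imports for the snippet above (older Brevitas releases; paths may differ by version)
import torch
import brevitas.nn as qnn
from brevitas.core.quant import QuantType
from brevitas.core.bit_width import BitWidthImplType
from brevitas.core.restrict_val import RestrictValueType
from brevitas.core.scaling import ScalingImplType
from brevitas.core.stats import StatsOp
# DROPOUT is a module-level constant defined elsewhere in the original code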
Example 2
def make_hardtanh_activation(bit_width, return_quant_tensor=False):
    return quant_nn.QuantHardTanh(bit_width=bit_width,
                                  max_val=ACT_MAX_VAL,
                                  min_val=ACT_MIN_VAL,
                                  quant_type=QUANT_TYPE,
                                  scaling_impl_type=ACT_SCALING_IMPL_TYPE,
                                  scaling_min_val=SCALING_MIN_VAL,
                                  return_quant_tensor=return_quant_tensor)
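A hedged usage sketch for this helper, assuming the module-level constants it references (ACT_MAX_VAL, ACT_MIN_VAL, QUANT_TYPE, ACT_SCALING_IMPL_TYPE, SCALING_MIN_VAL) are defined elsewhere in the same file:

# Illustrative only: build a 4-bit hard-tanh activation and run a tensor through it.
import torch

act = make_hardtanh_activation(bit_width=4, return_quant_tensor=False)
y = act(torch.randn(1, 64))  # float tensor in, fake-quantized float tensor out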
Example 3
def __init__(self, quant_type, bit_width, scaling_impl_type=ScalingImplType.CONST,
             restrict_scaling_type=RestrictValueType.LOG_FP):
    super(ZeroAct, self).__init__()
    # Quantizer range and target output range are both [0, 255], so the rescaling factor is 1
    self._min_val_act = 0
    self._max_val_act = 255
    self._min_val = 0
    self._max_val = 255
    self._act = qnn.QuantHardTanh(scaling_impl_type=scaling_impl_type,
                                  restrict_scaling_type=restrict_scaling_type,
                                  min_val=self._min_val_act, max_val=self._max_val_act,
                                  quant_type=quant_type, bit_width=bit_width)
    self._scale = (self._max_val - self._min_val) / (self._max_val_act - self._min_val_act)
Example 4
def __init__(self, max_val, quant_type, bit_width, scaling_impl_type=ScalingImplType.CONST,
             restrict_scaling_type=RestrictValueType.LOG_FP):
    super(MyQuantReLU, self).__init__()
    # Quantize over the symmetric range [-1, 1 - 2/2**bit_width] (largest representable level),
    # then rescale the result onto the target output range [0, max_val] via _scale
    self._min_val_act = -1
    self._max_val_act = 1 - 2 / (2 ** bit_width)
    self._min_val = 0
    self._max_val = max_val
    self._act = qnn.QuantHardTanh(scaling_impl_type=scaling_impl_type,
                                  restrict_scaling_type=restrict_scaling_type,
                                  min_val=self._min_val_act, max_val=self._max_val_act,
                                  quant_type=quant_type, bit_width=bit_width)
    self._scale = (self._max_val - self._min_val) / (self._max_val_act - self._min_val_act)
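The constructor only stores the quantizer and the rescaling factor; one plausible, purely illustrative forward pass consistent with that _scale (the original project's forward may differ) is sketched here:

# Hypothetical forward for MyQuantReLU, consistent with the stored _scale:
# map the input into the quantizer's range, quantize, then map back onto [0, max_val].
def forward(self, x):
    a = x / self._scale + self._min_val_act        # [0, max_val] -> [min_val_act, max_val_act]
    q = self._act(a)                               # quantized hard-tanh
    return (q - self._min_val_act) * self._scale   # back to [0, max_val]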
Example 5
def make_quantization_input(bit_width, absolute_act_val, scaling_per_channel):
    return quant_nn.QuantHardTanh(
        bit_width=bit_width,
        scaling_per_channel=scaling_per_channel,
        quant_type=QUANT_TYPE,
        scaling_impl_type=ACT_SCALING_IMPL_TYPE,
        scaling_min_val=SCALING_MIN_VAL,
        restrict_scaling_type=ACT_RESTRICT_SCALING_TYPE,
        max_val=absolute_act_val,
        min_val=-absolute_act_val,
        return_quant_tensor=False)
Example 6
def make_norm_scale(bit_width, absolute_act_val, scaling_per_channel):
    return quant_nn.QuantHardTanh(
        bit_width=bit_width,
        scaling_per_channel=scaling_per_channel,
        quant_type=QUANT_TYPE,
        scaling_impl_type=ACT_SCALING_IMPL_TYPE,
        scaling_min_val=SCALING_MIN_VAL,
        restrict_scaling_type=ACT_RESTRICT_SCALING_TYPE,
        max_val=absolute_act_val,
        min_val=-absolute_act_val,
        scaling_stats_permute_dims=(1, 0, 2),
        return_quant_tensor=True)
Example 7
def make_quant_hard_tanh(bit_width,
                         quant_type=QUANT_TYPE,
                         scaling_impl_type=ACT_SCALING_IMPL_TYPE,
                         scaling_per_channel=ACT_SCALING_PER_CHANNEL,
                         restrict_scaling_type=ACT_SCALING_RESTRICT_SCALING_TYPE,
                         scaling_min_val=SCALING_MIN_VAL,
                         threshold=HARD_TANH_THRESHOLD,
                         return_quant_tensor=ACT_RETURN_QUANT_TENSOR,
                         per_channel_broadcastable_shape=ACT_PER_CHANNEL_BROADCASTABLE_SHAPE):
    return qnn.QuantHardTanh(bit_width=bit_width,
                             quant_type=quant_type,
                             scaling_per_channel=scaling_per_channel,
                             scaling_impl_type=scaling_impl_type,
                             restrict_scaling_type=restrict_scaling_type,
                             scaling_min_val=scaling_min_val,
                             max_val=threshold,
                             min_val=-threshold,
                             per_channel_broadcastable_shape=per_channel_broadcastable_shape,
                             return_quant_tensor=return_quant_tensor)
Example 8
def make_quant_hard_tanh(bit_width,
                         scaling_impl_type               = ACT_SCALING_IMPL_TYPE,
                         scaling_per_channel             = ACT_SCALING_PER_CHANNEL,
                         restrict_scaling_type           = ACT_SCALING_RESTRICT_SCALING_TYPE,
                         scaling_min_val                 = SCALING_MIN_VAL,
                         threshold                       = HARD_TANH_THRESHOLD,
                         min_overall_bit_width           = MIN_OVERALL_BW,
                         max_overall_bit_width           = MAX_OVERALL_BW,
                         return_quant_tensor             = ACT_RETURN_QUANT_TENSOR,
                         per_channel_broadcastable_shape = ACT_PER_CHANNEL_BROADCASTABLE_SHAPE):
    '''Helper for Hard Tanh activation layers'''
    quant_type = get_quant_type(bit_width)
    return qnn.QuantHardTanh(bit_width=bit_width,
                             quant_type=quant_type,
                             scaling_per_channel             = scaling_per_channel,
                             scaling_impl_type               = scaling_impl_type,
                             restrict_scaling_type           = restrict_scaling_type,
                             scaling_min_val                 = scaling_min_val,
                             max_val                         = threshold,
                             min_val                         = -threshold,
                             min_overall_bit_width           = min_overall_bit_width,
                             max_overall_bit_width           = max_overall_bit_width,
                             per_channel_broadcastable_shape = per_channel_broadcastable_shape,
                             return_quant_tensor             = return_quant_tensor)
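Example 8 calls a get_quant_type helper that is not shown; a minimal sketch of what such a helper commonly looks like in Brevitas-based example code (an assumption, not necessarily the original implementation):

# Assumed helper: pick the quantization type from the requested bit width.
def get_quant_type(bit_width):
    if bit_width is None:
        return QuantType.FP        # no quantization
    elif bit_width == 1:
        return QuantType.BINARY    # 1-bit quantization
    else:
        return QuantType.INT       # generic integer quantization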