def __init__(self, units, activation=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
  """Quantization-aware dense layer.

  Forwards every layer argument unchanged to the base class, then attaches
  the quantizer and bookkeeping state used by the NNDCT framework.
  """
  super().__init__(units,
                   activation=activation,
                   use_bias=use_bias,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer,
                   kernel_regularizer=kernel_regularizer,
                   bias_regularizer=bias_regularizer,
                   activity_regularizer=activity_regularizer,
                   kernel_constraint=kernel_constraint,
                   bias_constraint=bias_constraint,
                   **kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # maybe_get_quantizer() can return None for quant_mode (the factory
  # functions in this file test for exactly that), and `None > 0` raises
  # TypeError in Python 3. Guard first, matching the
  # `self.quant_mode and self.quant_mode > 0` pattern used by the sibling
  # __init__ that takes **kwargs.
  if self.quant_mode and self.quant_mode > 0:
    self._quant_vars = {}
  # Graph/quantizer bookkeeping, populated later by the framework.
  self.valid_inputs = None
  self.valid_output = None
  self.quant_info = None
  self.params_name = None
  self.node = None
  self.quant_vars_initialized = False
  self.params_quantized = False
def __init__(self, *args, **kwargs):
  """Quantization-aware batch normalization.

  Delegates construction to the base class, then attaches the NNDCT
  quantizer and parameter-state flags.
  """
  super().__init__(*args, **kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # Filled in later by the quantization framework.
  self.params_name = None
  self.node = None
  # Track whether float params were saved / quantized params applied.
  self.param_saved = False
  self.param_quantized = False
def __init__(self, *args, **kwargs):
  """Quantization-aware MaxPool2d.

  Delegates to the base pooling layer and sets up quantizer state.
  """
  super().__init__(*args, **kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # Activation bookkeeping, assigned by the framework at trace time.
  self.valid_inputs = None
  self.valid_output = None
  self.node = None
  # Pooling output is re-quantized by default.
  self.need_quant_output = True
def __init__(self, *args, **kwargs):
  """Quantization-aware LeakyReLU.

  Only the fixed approximate slope (when enabled) and the inplace flag
  are supported.
  """
  super().__init__(*args, **kwargs)
  if NndctOption.nndct_leaky_relu_approximate.value:
    # 0.1015625 == 26/256; overrides the caller-supplied slope with a
    # fixed-point-friendly value — presumably a hardware approximation
    # of the default slope, TODO confirm.
    self.negative_slope = 0.1015625
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.node = None
def __init__(self, *args, **kwargs):
  """Quantization-aware interpolation wrapper; attaches quantizer state."""
  super().__init__(*args, **kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # Framework bookkeeping, populated after graph construction.
  self.valid_inputs = None
  self.valid_output = None
  self.params_name = None
  self.node = None
def __init__(self):
  """Quantization-aware elementwise subtraction module."""
  super().__init__()
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # Graph/quantizer bookkeeping, assigned later by the framework.
  self.valid_inputs = None
  self.valid_output = None
  self.quant_info = None
  self.params_name = None
  self.node = None
def __init__(self, op_type, *args, **kwargs):
  """Generic quantization wrapper for the operator named ``op_type``."""
  super().__init__()
  self.op_type = op_type
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # Assigned by the framework once the graph node is known.
  self.valid_inputs = None
  self.valid_output = None
  self.node = None
  # The wrapped operator's output is quantized by default.
  self.need_quant_output = True
def Mean(*args, **kwargs):
  """Factory for a mean operation.

  Returns the plain ``torch.mean`` function when quantization is disabled
  (no active quantizer, or quantization switched off by option), otherwise
  a quantization-aware ``deephi_Mean`` module built from the arguments.
  """
  quant_mode, _ = maybe_get_quantizer()
  quant_disabled = quant_mode is None or NndctOption.nndct_quant_off.value
  if quant_disabled:
    # No quantizer active: hand back the raw torch function itself
    # (not a call result).
    return torch.mean
  return deephi_Mean(*args, **kwargs)
def __init__(self, *args, **kwargs):
  """Quantization-aware ReLU."""
  super().__init__(*args, **kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # Framework bookkeeping.
  self.valid_inputs = None
  self.valid_output = None
  self.params_name = None
  self.node = None
  # Output quantization is off by default for ReLU — presumably the
  # output shares its producer's scale; TODO confirm.
  self.need_quant_output = False
def __init__(self, caller, *args, **kwargs):
  """Quantization wrapper around an arbitrary callable ``caller``."""
  super().__init__()
  self.caller = caller
  self._match_inputs = []
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # Assigned by the framework once the graph node is known.
  self.valid_inputs = None
  self.valid_output = None
  self.node = None
  # The wrapped call's output is quantized by default.
  self.need_quant_output = True
def __init__(self, *args, **kwargs):
  """Quantization-aware tensor concatenation.

  Constructor arguments are accepted for interface compatibility but are
  not forwarded to the base class.
  """
  super().__init__()
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # Framework bookkeeping, populated after graph construction.
  self.valid_inputs = None
  self.valid_output = None
  self.params_name = None
  self.node = None
  # Concatenation output is re-quantized by default.
  self.need_quant_output = True
def __init__(self):
  """Attach NNDCT quantizer and bookkeeping state to this module."""
  super().__init__()
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # maybe_get_quantizer() can return None for quant_mode (the factory
  # functions in this file test for exactly that), and `None > 0` raises
  # TypeError in Python 3. Guard first, matching the
  # `self.quant_mode and self.quant_mode > 0` pattern used by the sibling
  # __init__ that takes **kwargs.
  if self.quant_mode and self.quant_mode > 0:
    self._quant_vars = {}
  # Graph/quantizer bookkeeping, assigned later by the framework.
  self.valid_inputs = None
  self.valid_output = None
  self.quant_info = None
  self.params_name = None
  self.node = None
def __init__(self, *args, **kwargs):
  """Quantization-aware linear layer with separate bias bookkeeping."""
  super().__init__(*args, **kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # Activation bookkeeping.
  self.valid_inputs = None
  self.valid_output = None
  self.params_name = None
  # Dedicated bookkeeping for the bias path.
  self.bias_valid_inputs = None
  self.bias_valid_output = None
  self.node = None
  self.param_quantized = False
  # Layer output is quantized by default.
  self.need_quant_output = True
def __init__(self, **kwargs):
  """Forward keyword arguments to the base layer and attach quantizer
  bookkeeping state."""
  super().__init__(**kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  # quant_mode may be None when no quantizer is active; the truthiness
  # check avoids comparing None against an int.
  if self.quant_mode and self.quant_mode > 0:
    self._quant_vars = {}
  # Framework bookkeeping, populated after graph construction.
  self.valid_inputs = None
  self.valid_output = None
  self.quant_info = None
  self.params_name = None
  self.node = None
  self.quant_vars_initialized = False
  self.params_quantized = False
def BatchNorm(*args, **kwargs):
  """Factory for a batch-norm layer.

  Returns a generic ``torch.nn.modules.batchnorm._BatchNorm`` when no
  quantizer is active (with its input-rank check disabled so it accepts
  inputs of any dimensionality), otherwise the quantization-aware
  ``deephi_BatchNorm``.
  """
  import types  # hoisted to the top of the function (was mid-body)

  quant_mode, _ = maybe_get_quantizer()
  # PEP 8: compare against None with `is`, not `==`.
  if quant_mode is None:

    def _check_input_dim(self, input):
      # Intentionally a no-op: skip _BatchNorm's rank validation.
      pass

    # Renamed from `nn` to avoid shadowing the conventional torch.nn alias.
    bn = torch.nn.modules.batchnorm._BatchNorm(*args, **kwargs)
    bn._check_input_dim = types.MethodType(_check_input_dim, bn)
    return bn
  return deephi_BatchNorm(*args, **kwargs)
def __init__(self, *args, **kwargs):
  """Quantization-aware Conv2d with state for parameter correction."""
  super().__init__(*args, **kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.params_name = None
  self.node = None
  self.param_saved = False
  self.param_quantized = False
  # Float copies of the parameters — presumably backups used during
  # quantization analysis; TODO confirm against callers.
  self.weight_f = None
  self.bias_f = None
  self.err = None
  # Iterative search state. Note: the `efficency` spelling is kept
  # as-is because code elsewhere reads this attribute by name.
  self.stop = False
  self.efficency = 0.0
  self.deviation = 0.0
def __init__(self, *args, **kwargs):
  """Quantization-aware linear layer with bias-correction support."""
  super().__init__(*args, **kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.params_name = None
  self.node = None
  self.param_saved = False
  self.param_quantized = False
  # self.weight / self.bias remain unquantized float parameters; the
  # *_bak attributes hold float backups used during bias correction.
  self.weight_bak = None  # backup of the float weight
  self.bias_bak = None    # backup of the float bias
  self.stop = False
  self.rate = NndctOption.nndct_param_corr_rate.value
  # `efficency` spelling preserved — external code reads this attribute.
  self.efficency = 0.0
  self.deviation = 0.0
def MaxPool2d(*args, **kwargs):
  """Factory for a 2-D max-pooling layer.

  Returns the plain ``torch.nn.MaxPool2d`` when no quantizer is active,
  otherwise the quantization-aware ``deephi_MaxPool2d``.
  """
  quant_mode, _ = maybe_get_quantizer()
  # PEP 8: compare against None with `is`, not `==`.
  if quant_mode is None:
    return torch.nn.MaxPool2d(*args, **kwargs)
  return deephi_MaxPool2d(*args, **kwargs)
def __init__(self, *args, **kwargs):
  """Quantization-aware MaxPool2d (minimal bookkeeping variant)."""
  super().__init__(*args, **kwargs)
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.node = None
def __init__(self, *args, **kwargs):
  """Quantization-aware concatenation (minimal bookkeeping variant).

  Constructor arguments are accepted for interface compatibility but are
  not forwarded to the base class.
  """
  super().__init__()
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.node = None
def ConvTranspose2d(*args, **kwargs):
  """Factory for a 2-D transposed convolution.

  Returns the plain ``torch.nn.ConvTranspose2d`` when no quantizer is
  active, otherwise the quantization-aware ``deephi_ConvTranspose2d``.
  """
  quant_mode, _ = maybe_get_quantizer()
  # PEP 8: compare against None with `is`, not `==`.
  if quant_mode is None:
    return torch.nn.ConvTranspose2d(*args, **kwargs)
  return deephi_ConvTranspose2d(*args, **kwargs)
def Sigmoid(*args, **kwargs):
  """Factory for a sigmoid activation.

  Returns the plain ``torch.nn.Sigmoid`` when no quantizer is active,
  otherwise the quantization-aware ``deephi_Sigmoid``.
  """
  quant_mode, _ = maybe_get_quantizer()
  # PEP 8: compare against None with `is`, not `==`.
  if quant_mode is None:
    # NOTE(review): torch.nn.Sigmoid takes no constructor arguments;
    # args/kwargs are forwarded unchanged to preserve existing behavior.
    return torch.nn.Sigmoid(*args, **kwargs)
  return deephi_Sigmoid(*args, **kwargs)
def __init__(self):
  """Quantization-aware Tanh module."""
  super().__init__()
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.node = None
def LeakyReLU(*args, **kwargs):
  """Factory for a leaky-ReLU activation.

  Returns the plain ``torch.nn.LeakyReLU`` when quantization is disabled
  (no active quantizer, or switched off by option), otherwise the
  quantization-aware ``deephi_LeakyReLU``.
  """
  quant_mode, _ = maybe_get_quantizer()
  quant_disabled = quant_mode is None or NndctOption.nndct_quant_off.value
  if quant_disabled:
    return torch.nn.LeakyReLU(*args, **kwargs)
  return deephi_LeakyReLU(*args, **kwargs)
def __init__(self, *args, **kwargs):
  """Quantization-aware LeakyReLU.

  Only the fixed slope set below and the inplace flag are supported.
  """
  super().__init__(*args, **kwargs)
  # 0.1015625 == 26/256; unconditionally overrides any caller-supplied
  # slope with a fixed-point-friendly value — presumably a hardware
  # approximation, TODO confirm.
  self.negative_slope = 0.1015625
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.node = None
def __init__(self, caller, *args, **kwargs):
  """Minimal quantization wrapper around the callable ``caller``."""
  super().__init__()
  self.caller = caller
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.node = None
def Linear(*args, **kwargs):
  """Factory for a linear (fully-connected) layer.

  Returns the plain ``torch.nn.Linear`` when no quantizer is active,
  otherwise the quantization-aware ``deephi_Linear``.
  """
  quant_mode, _ = maybe_get_quantizer()
  # PEP 8: compare against None with `is`, not `==`.
  if quant_mode is None:
    return torch.nn.Linear(*args, **kwargs)
  return deephi_Linear(*args, **kwargs)
def __init__(self, inplace=False, *args, **kwargs):
  """Quantization-aware Hardswish.

  Args:
    inplace: stored for interface compatibility; extra positional and
      keyword arguments are accepted but not forwarded to the base class.
  """
  super().__init__()
  self.inplace = inplace
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.node = None
def Tanh(*args, **kwargs):
  """Factory for a tanh activation.

  Returns the plain ``torch.nn.Tanh`` when no quantizer is active,
  otherwise the quantization-aware ``deephi_Tanh``.
  """
  quant_mode, _ = maybe_get_quantizer()
  # PEP 8: compare against None with `is`, not `==`.
  if quant_mode is None:
    # NOTE(review): torch.nn.Tanh takes no constructor arguments;
    # args/kwargs are forwarded unchanged to preserve existing behavior.
    return torch.nn.Tanh(*args, **kwargs)
  return deephi_Tanh(*args, **kwargs)
def __init__(self, dim=None):
  """Quantization-aware Softmax applied along dimension ``dim``."""
  super().__init__()
  self.dim = dim
  self.quant_mode, self.quantizer = maybe_get_quantizer()
  self.node = None