def __init__(self, config, xnor=False):
    """Set up a binary quantizer from a QKeras layer config.

    With ``xnor=True`` the +/-1 (XNOR) representation is used; otherwise
    values are kept as a signed 2-bit integer.
    """
    if xnor:
        self.bits = 1
        self.hls_type = XnorPrecisionType()
    else:
        self.bits = 2
        self.hls_type = IntegerPrecisionType(width=2, signed=True)
    self.alpha = config['config']['alpha']
    # Use the QKeras quantizer to handle any stochastic / alpha stuff
    self.quantizer_fn = get_quantizer(config)
    # Then we use our BinaryQuantizer to convert to '0,1' format
    self.binary_quantizer = BinaryQuantizer(self.bits)
def __init__(self, config):
    """Wrap a generic QKeras quantizer config and derive the matching HLS precision type.

    Dispatches on the QKeras ``class_name``: ``quantized_bits`` takes its
    precision from the config; ternary/binary families get fixed 2-bit /
    1-bit types; anything else falls back to a 16-bit fixed-point default.
    """
    self.quantizer_fn = get_quantizer(config)
    self.alpha = config['config'].get('alpha', None)
    class_name = config['class_name']
    if class_name == 'quantized_bits':
        self.bits = config['config']['bits']
        self.hls_type = get_type(config)
    elif 'ternary' in class_name:  # also matches stochastic_ternary
        self.bits = 2
        self.hls_type = IntegerPrecisionType(width=2, signed=True)
    elif 'binary' in class_name:  # also matches stochastic_binary
        self.bits = 1
        self.hls_type = XnorPrecisionType()
    else:
        print("Unsupported quantizer: " + class_name)
        self.bits = 16
        self.hls_type = FixedPrecisionType(width=16, integer=6, signed=True)
def __init__(self, config):
    """Set up a power-of-two quantizer: an exponent-only precision type
    whose width comes from the QKeras config's ``bits`` field."""
    width = config['config']['bits']
    self.bits = width
    self.quantizer_fn = get_quantizer(config)
    self.hls_type = ExponentPrecisionType(width=width, signed=True)