Example #1
def _get_precision_from_quantizer(quantizer):
    import qkeras
    if isinstance(quantizer, str):
        quantizer_obj = qkeras.get_quantizer(quantizer)
        quantizer = {}
        # Some activations are classes with get_config method
        if hasattr(quantizer_obj, 'get_config'):
            quantizer['class_name'] = quantizer_obj.__class__.__name__
            quantizer['config'] = quantizer_obj.get_config()
        # Some activations are just functions
        else:
            quantizer['class_name'] = quantizer_obj.__name__

    supported_quantizers = ['quantized_bits', 'quantized_relu', 'quantized_tanh']
    if quantizer['class_name'] in supported_quantizers:
        bits = int(quantizer['config']['bits']) + 1
        integer = int(quantizer['config']['integer']) + 1
        
    elif quantizer['class_name'] in ['binary', 'stochastic_binary', 'binary_tanh']:
        bits = 2
        integer = 2
    
    elif quantizer['class_name'] in ['ternary', 'stochastic_ternary', 'ternary_tanh']:
        bits = 2
        integer = 2
    else:
        raise Exception('ERROR: Unsupported quantizer: {}'.format(quantizer['class_name']))

    decimal = bits - integer
    if decimal > 0:
        return 'ap_fixed<{},{}>'.format(bits, integer)
    else:
        return 'ap_int<{}>'.format(bits)
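A minimal usage sketch for the function above (not part of the original example; it assumes QKeras is installed and that qkeras.get_quantizer accepts a serialized quantizer string such as 'quantized_bits(6, 2)'):

# Hypothetical call: with bits=6 and integer=2, the function above widens both
# fields by one bit and should return 'ap_fixed<7,3>'.
print(_get_precision_from_quantizer('quantized_bits(6, 2)'))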
Example #2
def _get_precision_from_quantizer(quantizer):
    import qkeras
    if isinstance(quantizer, str):
        quantizer_obj = qkeras.get_quantizer(quantizer)
        quantizer = {}
        # Some activations are classes with get_config method
        if hasattr(quantizer_obj, 'get_config'):
            quantizer['class_name'] = quantizer_obj.__class__.__name__
            quantizer['config'] = quantizer_obj.get_config()
        # Some activations are just functions
        else:
            quantizer['class_name'] = quantizer_obj.__name__

    supported_quantizers = [
        'quantized_bits', 'quantized_relu', 'quantized_tanh', 'quantized_po2',
        'quantized_relu_po2'
    ]
    signed = True
    if quantizer['class_name'] in supported_quantizers:
        bits = int(quantizer['config']['bits'])
        # if integer isn't specified, it should be the same as bits
        integer = int(quantizer['config'].get('integer', bits - 1)) + 1
        if quantizer['class_name'] == 'quantized_relu':
            signed = False
            integer -= 1
    elif quantizer['class_name'] in [
            'binary', 'stochastic_binary', 'binary_tanh'
    ]:
        bits = 2
        integer = 2

    elif quantizer['class_name'] in [
            'ternary', 'stochastic_ternary', 'ternary_tanh'
    ]:
        bits = 2
        integer = 2
    else:
        raise Exception('ERROR: Unsupported quantizer: {}'.format(
            quantizer['class_name']))

    decimal = bits - integer
    signed = '' if signed else 'u'
    if decimal > 0:
        return 'ap_{}fixed<{},{}>'.format(signed, bits, integer)
    else:
        return 'ap_{}int<{}>'.format(signed, bits)
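Compared with Example #1, this version also tracks signedness, so unsigned quantizers map to ap_ufixed/ap_uint types. A minimal sketch (assumes QKeras is installed; the quantizer string is illustrative):

# quantized_relu(4, 2) is unsigned: bits=4, and integer drops back to 2 after
# the unsigned adjustment, so this should return 'ap_ufixed<4,2>'.
print(_get_precision_from_quantizer('quantized_relu(4, 2)'))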
Example #3
    def __init__(self, config):
        self.bits = config['config']['bits']
        self.quantizer_fn = get_quantizer(config)
        self.hls_type = ExponentPrecisionType(width=self.bits, signed=True)
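For context, a sketch of the serialized-quantizer dict this constructor appears to expect (only config['config']['bits'] is read directly; the class_name value is an assumption, and get_quantizer and ExponentPrecisionType are provided by the surrounding module):

# Hypothetical input dict; field names beyond 'bits' are illustrative.
example_config = {'class_name': 'quantized_po2', 'config': {'bits': 4}}
# With this dict, self.bits would be 4 and self.hls_type a 4-bit signed exponent type.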
Example #4
    def _act_size(self, layer):
        """Computes size of activations of a layer in bits."""
        i_size = self.input_bits
        o_size = self.output_bits
        t_size = self.ref_bits
        output_size = np.prod(layer.output.shape[1:])
        # we compute activation sizes for inputs and outputs
        if layer.__class__.__name__ in ["InputLayer"]:
            return i_size * output_size
        elif layer.__class__.__name__ in [
                "Dense", "Conv2D", "Conv1D", "DepthwiseConv2D"
        ]:
            if layer.activation is not None and layer.activation.__name__ != "linear":
                return t_size * output_size
            else:
                return 0
        elif layer.__class__.__name__ in [
                "QDense", "QConv2D", "QConv1D", "QDepthwiseConv2D"
        ]:
            if layer.activation is None:
                is_softmax = False
                is_linear = False
            else:
                if isinstance(layer.activation, six.string_types):
                    is_softmax = layer.activation == "softmax"
                    is_linear = layer.activation == "linear"
                elif hasattr(layer.activation, "__name__"):
                    is_softmax = layer.activation.__name__ == "softmax"
                    is_linear = layer.activation.__name__ == "linear"
                else:
                    is_softmax = False
                    is_linear = False

                if is_softmax:
                    bits = o_size
                elif is_linear:
                    bits = 0
                else:
                    assert not isinstance(layer.activation, six.string_types)
                    if hasattr(layer.activation, "bits"):
                        bits = layer.activation.bits
                    else:
                        bits = t_size

                return bits * np.prod(layer.output.shape.as_list()[1:])
        elif layer.__class__.__name__ in ["QActivation", "Activation"]:
            if isinstance(layer.activation, six.string_types):
                is_linear = layer.activation == "linear"
                is_softmax = layer.activation == "softmax"
                is_sigmoid = layer.activation == "sigmoid"
            else:
                is_linear = layer.activation.__name__ == "linear"
                is_softmax = layer.activation.__name__ == "softmax"
                is_sigmoid = layer.activation.__name__ == "sigmoid"

            if is_linear:
                bits = 0
            elif is_softmax or is_sigmoid:
                bits = o_size
            else:
                if isinstance(layer.activation, six.string_types):
                    activation = get_quantizer(layer.activation)
                else:
                    activation = layer.activation
                if hasattr(activation, "bits"):
                    bits = activation.bits
                else:
                    bits = t_size
            return bits * output_size
        return 0
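A worked example of the accounting above (illustrative values, not from the original source), showing how a QDense layer with activation quantized_relu(4) and output shape (None, 16) falls into the QDense branch and takes bits from the activation's bits attribute:

bits = 4           # quantized_relu(4).bits
output_size = 16   # product of the non-batch output dims, here (16,)
print(bits * output_size)  # 64 bits of activation storage for this layer

A linear activation (or no activation at all) on the same layer would contribute 0 bits.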