Example #1
from tensorflow import keras

from qkeras import QActivation, QConv2D, quantizers


def qconv_model():
    """Builds a small quantized CNN with binary, ternary and 4-bit weights."""
    x = x_in = keras.layers.Input((23, 23, 1), name="input")
    # Quantize the input activations to 4-bit unsigned relu values.
    x = QActivation("quantized_relu(4)", name="QA_0")(x)
    x = QConv2D(16,
                2,
                2,
                kernel_quantizer=quantizers.binary(),
                bias_quantizer=quantizers.ternary(),
                name="qconv2d_1")(x)
    x = QConv2D(8,
                2,
                2,
                kernel_quantizer=quantizers.quantized_bits(4, 0, 1),
                bias_quantizer=quantizers.quantized_bits(4, 0, 1),
                activation=quantizers.quantized_relu(6, 2),
                name="qconv2D_2")(x)
    x = QConv2D(2,
                2,
                2,
                kernel_quantizer=quantizers.quantized_bits(4, 0, 1),
                bias_quantizer=quantizers.quantized_bits(4, 0, 1),
                activation=quantizers.quantized_relu(6, 2),
                name="qconv2d_3")(x)
    x = QActivation("quantized_bits(6, 0, 1)", name="QA_4")(x)

    model = keras.Model(inputs=[x_in], outputs=[x])
    return model
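
A minimal usage sketch (nothing beyond the imports above is assumed): build the model and print the layer stack to confirm the quantizer wiring.

if __name__ == "__main__":
    model = qconv_model()
    model.summary()  # input 23x23x1, three stride-2 QConv2D stages, 6-bit output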
Example #2
import pytest

from qkeras import quantizers
# Import path assumed from the qkeras/qtools source layout.
from qkeras.qtools.quantized_operators import divider_factory


def test_invalid_denominator_qbn():
    # qbn_model and run are helpers defined elsewhere in this test module.
    input_quantizers = None
    act = "binary(use_01=0)"
    # The same ternary quantizer is used for both gamma and variance.
    gamma = quantizers.ternary()
    variance = gamma
    model = qbn_model(act=act,
                      gamma=gamma,
                      variance=variance,
                      beta=None,
                      mean=None)
    with pytest.raises(divider_factory.UnacceptedQuantizerError):
        run(model, input_quantizers)
Example #3
    def convert_to_qkeras_quantizer(self,
                                    alpha=None,
                                    threshold=None,
                                    use_stochastic_rounding=False,
                                    number_of_unrolls=5):
        """convert qtools quantizer to qkeras quantizer."""

        return quantizers.ternary(
            alpha=alpha,
            threshold=threshold,
            use_stochastic_rounding=use_stochastic_rounding,
            number_of_unrolls=number_of_unrolls)
Example #4
from numpy.testing import assert_equal

from qkeras import quantizers
# Import path assumed from the qkeras/qtools source layout.
from qkeras.qtools.quantized_operators import quantizer_impl


def test_Ternary():

  # Round-trip: qkeras quantizer -> qtools representation -> qkeras quantizer.
  qkeras_quantizer = quantizers.ternary()
  qtools_quantizer = quantizer_impl.Ternary()
  qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer)
  new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer(
      alpha=qkeras_quantizer.alpha, threshold=qkeras_quantizer.threshold,
      use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding,
      number_of_unrolls=qkeras_quantizer.number_of_unrolls)

  result = new_quantizer.__dict__
  for (key, val) in result.items():
    assert_equal(val, qkeras_quantizer.__dict__[key])
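
For reference, the attribute set this round trip must preserve can be inspected directly on the QKeras quantizer; a minimal sketch, assuming only that qkeras is installed:

from qkeras import quantizers

q = quantizers.ternary()
# alpha, threshold, use_stochastic_rounding and number_of_unrolls all
# show up here and drive the assert_equal loop above.
for key, val in sorted(q.__dict__.items()):
    print(key, "=", val)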
Example #5
               name='dense'))
    if use_batchnorm:
        model.add(BatchNormalization(name='bn'))
    model.add(QActivation(activation=activation_quantizer))
    model.compile()
    return model, is_xnor, test_no


@pytest.fixture(scope='module')
def randX_100_10():
    return randX(100, 10)


@pytest.mark.parametrize(
    'test_no,N,kernel_quantizer,bias_quantizer,activation_quantizer,use_batchnorm,is_xnor',
    [(1, 10, ternary(alpha=1), quantized_bits(5, 2), 'binary_tanh', False, False),
     (2, 10, binary(), quantized_bits(5, 2), 'binary_tanh', False, True),
     (3, 10, ternary(alpha='auto'), quantized_bits(5, 2), binary(), True, True),
     (4, 10, ternary(alpha='auto'), quantized_bits(5, 2), 'ternary', True, False),
     (5, 10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.2), True, False),
     (6, 10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.8), True, False),
     (7, 10, binary(), quantized_bits(5, 2), binary(), False, True)])
def test_btnn(make_btnn, randX_100_10):
    model, is_xnor, test_no = make_btnn
    X = randX_100_10
    cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
Example #6
               name='dense'))
    if use_batchnorm:
        model.add(BatchNormalization(name='bn'))
    model.add(QActivation(activation=activation_quantizer))
    model.compile()
    return model, is_xnor


@pytest.fixture(scope='module')
def randX_100_10():
    return randX(100, 10)


@pytest.mark.parametrize(
    'N,kernel_quantizer,bias_quantizer,activation_quantizer,use_batchnorm,is_xnor',
    [(10, ternary(alpha=1), quantized_bits(5, 2), 'binary_tanh', False, False),
     (10, binary(), quantized_bits(5, 2), 'binary_tanh', False, True),
     (10, ternary(alpha='auto'), quantized_bits(5, 2), binary(), True, True),
     (10, ternary(alpha='auto'), quantized_bits(5, 2), 'ternary', True, False),
     (10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.2), True, False),
     (10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.8), True, False),
     (10, binary(), quantized_bits(5, 2), binary(), False, True)])
def test_btnn(make_btnn, randX_100_10):
    model, is_xnor = make_btnn
    X = randX_100_10
    cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                           output_dir='btnn',
                                                           hls_config=cfg)
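
The snippet ends at the conversion call. A typical continuation, sketched from the standard hls4ml flow rather than taken from the original test, compiles the HLS model and compares its output with Keras:

    # Continuation sketch (assumes numpy is imported as np, as is usual
    # in these tests); hls_model.compile() and hls_model.predict() are
    # the standard hls4ml model methods.
    hls_model.compile()
    y_keras = model.predict(X)
    y_hls = hls_model.predict(np.ascontiguousarray(X))
    # For the XNOR-style configurations (is_xnor=True) the outputs are
    # +/-1, so agreement is usually checked on signs rather than values.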