Example 1
from tensorflow import keras
from qkeras import QActivation, QConv2D, quantizers


def qconv_model():
    x = x_in = keras.layers.Input((23, 23, 1), name="input")
    x = QActivation("quantized_relu(4)", name="QA_0")(x)
    x = QConv2D(16,
                2,
                2,
                kernel_quantizer=quantizers.binary(),
                bias_quantizer=quantizers.ternary(),
                name="qconv2d_1")(x)
    x = QConv2D(8,
                2,
                2,
                kernel_quantizer=quantizers.quantized_bits(4, 0, 1),
                bias_quantizer=quantizers.quantized_bits(4, 0, 1),
                activation=quantizers.quantized_relu(6, 2),
                name="qconv2D_2")(x)
    x = QConv2D(2,
                2,
                2,
                kernel_quantizer=quantizers.quantized_bits(4, 0, 1),
                bias_quantizer=quantizers.quantized_bits(4, 0, 1),
                activation=quantizers.quantized_relu(6, 2),
                name="qconv2d_3")(x)
    x = QActivation("quantized_bits(6, 0, 1)", name="QA_4")(x)

    model = keras.Model(inputs=[x_in], outputs=[x])
    return model
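A minimal smoke test for the model above (a sketch; the random batch just matches the declared input shape):

import numpy as np

model = qconv_model()
model.summary()
# Three stride-2 convolutions shrink 23x23 down to 2x2, so expect (1, 2, 2, 2).
print(model.predict(np.random.rand(1, 23, 23, 1)).shape)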
Example 2
    # Method of qtools' quantizer_impl.Binary, shown out of its class context
    # (Example 3 below drives it end to end).
    def convert_to_qkeras_quantizer(self,
                                    alpha=None,
                                    use_stochastic_rounding=False):
        """Convert a qtools quantizer to the equivalent qkeras quantizer."""

        return quantizers.binary(
            use_01=self.use_01,
            alpha=alpha,
            use_stochastic_rounding=use_stochastic_rounding)
Example 3
from numpy.testing import assert_equal

from qkeras import quantizers
from qkeras.qtools.quantized_operators import quantizer_impl


def test_Binary():
  qkeras_quantizer = quantizers.binary()
  qtools_quantizer = quantizer_impl.Binary()
  qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer)
  new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer(
      alpha=qkeras_quantizer.alpha,
      use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding)

  result = new_quantizer.__dict__
  for (key, val) in result.items():
    assert_equal(val, qkeras_quantizer.__dict__[key])
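The entry-by-entry __dict__ comparison works because binary() keeps its whole configuration in instance attributes, so it verifies that the round trip through convert_qkeras_quantizer and convert_to_qkeras_quantizer drops no settings.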
Example 4
from tensorflow import keras
from qkeras import QActivation, QDense, quantizers


def test_util_layers():
    input_quantizers = None  # quantizers.quantized_bits(4, 0, 1)

    act = "quantized_bits(6, 0, 1)"
    x = x_in = keras.layers.Input((24, 24, 1), name="input")
    x = QActivation(act, name="QA_0")(x)
    x = keras.layers.Reshape((12 * 12, 4, 1), name="reshape_1")(x)
    x = keras.layers.MaxPooling2D(pool_size=(2, 2), name="maxpooling_2")(x)
    x = keras.layers.Flatten(name="flatten_3")(x)
    x = QDense(30,
               kernel_quantizer=quantizers.binary(use_01=1),
               bias_quantizer=quantizers.binary(use_01=1),
               activation=quantizers.quantized_po2(3, 2),
               name="qdense_4")(x)

    model = keras.Model(inputs=[x_in], outputs=[x])
    # `run` is a helper defined elsewhere in the test module; it maps the
    # model through qtools and returns the per-layer datatype dictionary.
    dtype_dict = run(model, input_quantizers)

    multiplier = dtype_dict["qdense_4"]["multiplier"]
    assert multiplier["quantizer_type"] == "quantized_bits"
    assert multiplier["bits"] == 6
    assert multiplier["int_bits"] == 1
    assert multiplier["is_signed"] == 1
    assert multiplier["op_type"] == "and"

    accumulator = dtype_dict["qdense_4"]["accumulator"]
    assert accumulator["quantizer_type"] == "quantized_bits"
    assert accumulator["bits"] == 15
    assert accumulator["int_bits"] == 10
    assert accumulator["is_signed"] == 1
    assert accumulator["op_type"] == "add"

    output = dtype_dict["qdense_4"]["output_quantizer"]
    assert output["quantizer_type"] == "quantized_po2"
    assert output["bits"] == 3
    assert output["is_signed"] == 1
    assert output["max_value"] == 2
Example 5
from tensorflow import keras
from qkeras import QActivation, QDense, quantizers


def qdense_model_fork():
    x = x_in = keras.layers.Input((23, ), name="input")
    x = QDense(10,
               kernel_quantizer=quantizers.quantized_bits(5, 0, 1),
               bias_quantizer=quantizers.quantized_bits(5, 0, 1),
               activation=quantizers.quantized_po2(3, 1),
               name="qdense_0")(x)
    x = QDense(20,
               kernel_quantizer=quantizers.quantized_bits(5, 0, 1),
               bias_quantizer=quantizers.quantized_bits(5, 0, 1),
               activation=quantizers.quantized_relu(6, 2),
               name="qdense_1")(x)
    x = QActivation("quantized_relu(4)", name="QA_2")(x)
    x_1 = QDense(30,
                 kernel_quantizer=quantizers.binary(),
                 bias_quantizer=quantizers.binary(),
                 name="qdense_3")(x)
    x_2 = QActivation("quantized_relu(6,2)", name="QA_3")(x)

    model = keras.Model(inputs=[x_in], outputs=[
        x_1,
        x_2,
    ])
    return model
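Because the graph forks after QA_2, the model returns two tensors; a quick sketch of calling it (random data, output widths read off the layer definitions above):

import numpy as np

model = qdense_model_fork()
y_1, y_2 = model.predict(np.random.rand(4, 23))
print(y_1.shape, y_2.shape)  # (4, 30) from qdense_3 and (4, 20) from QA_3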
Example 6
    # ... tail of the make_btnn fixture (head of the function not shown) ...
    if use_batchnorm:
        model.add(BatchNormalization(name='bn'))
    model.add(QActivation(activation=activation_quantizer))
    model.compile()
    return model, is_xnor, test_no


@pytest.fixture(scope='module')
def randX_100_10():
    # randX is a helper defined elsewhere in the test module; it returns a
    # random array of the given shape.
    return randX(100, 10)


@pytest.mark.parametrize(
    'test_no,N,kernel_quantizer,bias_quantizer,activation_quantizer,use_batchnorm,is_xnor',
    [(1, 10, ternary(alpha=1), quantized_bits(5, 2), 'binary_tanh', False, False),
     (2, 10, binary(), quantized_bits(5, 2), 'binary_tanh', False, True),
     (3, 10, ternary(alpha='auto'), quantized_bits(5, 2), binary(), True, True),
     (4, 10, ternary(alpha='auto'), quantized_bits(5, 2), 'ternary', True, False),
     (5, 10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.2), True, False),
     (6, 10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.8), True, False),
     (7, 10, binary(), quantized_bits(5, 2), binary(), False, True)])
def test_btnn(make_btnn, randX_100_10):
    model, is_xnor, test_no = make_btnn
    X = randX_100_10
    cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model, output_dir='btnn', hls_config=cfg)  # same call as in Example 8
    hls_model.compile()
Example 7
import pytest
from tensorflow import keras
from qkeras import QActivation, QAdaptiveActivation, QDense, quantizers
from qkeras.qtools import run_qtools
from qkeras.qtools import settings as qtools_settings


def test_qenergy():
    x = x_in = keras.layers.Input((784, ), name="input")
    x = QDense(300,
               kernel_quantizer=quantizers.binary(),
               bias_quantizer=quantizers.binary(),
               name="d0")(x)
    x = QActivation("quantized_relu(4,0)", name="d0_qr4")(x)
    x = QDense(100,
               kernel_quantizer=quantizers.quantized_bits(4, 0, 1),
               bias_quantizer=quantizers.quantized_bits(4, 0, 1),
               name="d1")(x)
    x = QAdaptiveActivation("quantized_relu", 4, name="d1_qr4")(x)
    x = QDense(10,
               kernel_quantizer=quantizers.quantized_bits(4, 0, 1),
               bias_quantizer=quantizers.quantized_bits(4, 0, 1),
               name="d2")(x)
    x = keras.layers.Activation("softmax", name="softmax")(x)

    model = keras.Model(inputs=[x_in], outputs=[x])
    # print(model.summary())

    reference_internal = "int8"
    reference_accumulator = "int32"

    # get reference energy cost
    q = run_qtools.QTools(model,
                          process="horowitz",
                          source_quantizers=reference_internal,
                          is_inference=False,
                          weights_path=None,
                          keras_quantizer=reference_internal,
                          keras_accumulator=reference_accumulator,
                          for_reference=True)

    ref_energy_dict = q.pe(weights_on_memory="sram",
                           activations_on_memory="sram",
                           min_sram_size=8 * 16 * 1024 * 1024,
                           rd_wr_on_io=False)
    reference_size = q.extract_energy_sum(qtools_settings.cfg.include_energy,
                                          ref_energy_dict)

    # get trial energy cost
    q = run_qtools.QTools(model,
                          process="horowitz",
                          source_quantizers=reference_internal,
                          is_inference=False,
                          weights_path=None,
                          keras_quantizer=reference_internal,
                          keras_accumulator=reference_accumulator,
                          for_reference=False)
    trial_energy_dict = q.pe(weights_on_memory="sram",
                             activations_on_memory="sram",
                             min_sram_size=8 * 16 * 1024 * 1024,
                             rd_wr_on_io=False)
    trial_size = q.extract_energy_sum(qtools_settings.cfg.include_energy,
                                      trial_energy_dict)

    # Reference energy number is now updated with keras_accumulator as
    # output quantizer
    tmp = ref_energy_dict["d0"]["energy"]
    assert tmp["inputs"] == pytest.approx(372.77, abs=0.1)
    assert tmp["outputs"] == pytest.approx(570.57, abs=0.1)
    assert tmp["parameters"] == pytest.approx(111975.96, abs=0.1)
    assert tmp["op_cost"] == pytest.approx(70560.0, abs=0.1)

    tmp = ref_energy_dict["d1"]["energy"]
    assert tmp["inputs"] == pytest.approx(570.57, abs=0.1)
    assert tmp["outputs"] == pytest.approx(190.19, abs=0.1)
    assert tmp["parameters"] == pytest.approx(14313.66, abs=0.1)
    assert tmp["op_cost"] == pytest.approx(26500.0, abs=0.1)

    tmp = ref_energy_dict["d2"]["energy"]
    assert tmp["inputs"] == pytest.approx(190.19, abs=0.1)
    assert tmp["outputs"] == pytest.approx(19.02, abs=0.1)
    assert tmp["parameters"] == pytest.approx(483.08, abs=0.1)
    assert tmp["op_cost"] == pytest.approx(883.33, abs=0.1)

    # Trial
    tmp = trial_energy_dict["d0"]["energy"]
    assert tmp["inputs"] == pytest.approx(372.77, abs=0.1)
    assert tmp["outputs"] == pytest.approx(342.34, abs=0.1)
    assert tmp["parameters"] == pytest.approx(13997.95, abs=0.1)
    assert tmp["op_cost"] == pytest.approx(15729.0, abs=0.1)

    tmp = trial_energy_dict["d1"]["energy"]
    assert tmp["inputs"] == pytest.approx(72.27, abs=0.1)
    assert tmp["outputs"] == pytest.approx(110.31, abs=0.1)
    assert tmp["parameters"] == pytest.approx(7158.73, abs=0.1)
    assert tmp["op_cost"] == pytest.approx(3250.0, abs=0.1)

    tmp = trial_energy_dict["d2"]["energy"]
    assert tmp["inputs"] == pytest.approx(26.63, abs=0.1)
    assert tmp["outputs"] == pytest.approx(11.41, abs=0.1)
    assert tmp["parameters"] == pytest.approx(243.44, abs=0.1)
    assert tmp["op_cost"] == pytest.approx(102.08, abs=0.1)

    # print(ref_energy_dict)
    # print(trial_energy_dict)
    assert int(reference_size) == 226629
    assert int(trial_size) == 41070
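The closing asserts are the headline numbers: scored with uniform int8/int32 reference types (for_reference=True), the model costs roughly 5.5x the energy of the trial run that honors each layer's own binary and 4-bit quantizers. extract_energy_sum only totals the per-layer terms enabled in qtools_settings.cfg.include_energy.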
Example 8
    # ... tail of the make_btnn fixture (head of the function not shown) ...
    if use_batchnorm:
        model.add(BatchNormalization(name='bn'))
    model.add(QActivation(activation=activation_quantizer))
    model.compile()
    return model, is_xnor


@pytest.fixture(scope='module')
def randX_100_10():
    return randX(100, 10)


@pytest.mark.parametrize(
    'N,kernel_quantizer,bias_quantizer,activation_quantizer,use_batchnorm,is_xnor',
    [(10, ternary(alpha=1), quantized_bits(5, 2), 'binary_tanh', False, False),
     (10, binary(), quantized_bits(5, 2), 'binary_tanh', False, True),
     (10, ternary(alpha='auto'), quantized_bits(5, 2), binary(), True, True),
     (10, ternary(alpha='auto'), quantized_bits(5, 2), 'ternary', True, False),
     (10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.2), True, False),
     (10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.8), True, False),
     (10, binary(), quantized_bits(5, 2), binary(), False, True)])
def test_btnn(make_btnn, randX_100_10):
    model, is_xnor = make_btnn
    X = randX_100_10
    cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(model,
                                                           output_dir='btnn',
                                                           hls_config=cfg)
    hls_model.compile()