Code example #1
0
def test_conv2d():
    """Check inferred multiplier/accumulator datatypes for QActivation -> QConv2D."""
    activation = "quantized_bits(6, 0, 1)"
    po2_quantizer = quantizers.quantized_relu_po2(4, 2)

    x_in = keras.layers.Input((23, 23, 1), name="input")
    stream = QActivation(activation, name="QA_0")(x_in)
    stream = QConv2D(16,
                     2,
                     2,
                     kernel_quantizer=po2_quantizer,
                     bias_quantizer=po2_quantizer,
                     name="qconv2d_1")(stream)

    model = keras.Model(inputs=[x_in], outputs=[stream])

    # No explicit input quantizers for this model.
    dtype_dict = run(model, None)
    conv_info = dtype_dict["qconv2d_1"]
    multiplier = conv_info["multiplier"]
    accumulator = conv_info["accumulator"]
    op_count = conv_info["operation_count"]

    # Po2 weights make the multiplier a shifter-type operation.
    assert multiplier["quantizer_type"] == "quantized_bits"
    assert multiplier["bits"] == 15
    assert multiplier["int_bits"] == 2
    assert multiplier["is_signed"] == 1
    assert multiplier["op_type"] == "shifter"
    # Accumulator widens the multiplier output to cover the reduction.
    assert accumulator["quantizer_type"] == "quantized_bits"
    assert accumulator["bits"] == 18
    assert accumulator["int_bits"] == 5
    assert accumulator["is_signed"] == 1
    assert accumulator["op_type"] == "add"
    assert op_count == 7744
Code example #2
0
# Sentinel so "argument omitted" is distinguishable from an explicit None
# (None is a meaningful value here: it disables the quantizer).
_QBN_DEFAULT = object()


def qbn_model(act="binary(use_01=0)",
              gamma=_QBN_DEFAULT,
              variance=_QBN_DEFAULT,
              beta=None,
              mean=None):
    """Build a QActivation -> QBatchNormalization test model.

    Args:
      act: quantizer spec string for the leading QActivation layer.
      gamma: gamma quantizer; when omitted, a fresh
        quantized_relu_po2(4, 2) is created per call.
      variance: variance quantizer; when omitted, a fresh
        quantized_relu_po2(4, 2) is created per call.
      beta: beta quantizer (None passes None through to the layer).
      mean: mean quantizer (None passes None through to the layer).

    Returns:
      A keras.Model wrapping the two layers.
    """
    # Build the default quantizers inside the call: the original code
    # created them in the `def` line, so a single (potentially stateful)
    # quantizer object was shared by every model built by this function.
    if gamma is _QBN_DEFAULT:
        gamma = quantizers.quantized_relu_po2(4, 2)
    if variance is _QBN_DEFAULT:
        variance = quantizers.quantized_relu_po2(4, 2)

    x = x_in = keras.layers.Input((23, 23, 1), name="input")
    x = QActivation(act, name="QA_0")(x)
    x = QBatchNormalization(gamma_quantizer=gamma,
                            variance_quantizer=variance,
                            beta_quantizer=beta,
                            mean_quantizer=mean,
                            gamma_range=8,
                            beta_range=4,
                            name="qbn_1")(x)

    model = keras.Model(inputs=[x_in], outputs=[x])

    return model
Code example #3
0
def qbn_model_inference():
    """Build a Conv/BatchNorm/Conv model and load quantized weights into qbn_2."""
    x_in = keras.layers.Input((23, 23, 1), name="input")
    net = QConv2D(4,
                  2,
                  23,
                  kernel_quantizer=quantizers.quantized_ulaw(4, 1, 1),
                  bias_quantizer=quantizers.stochastic_ternary(),
                  use_bias=False,
                  name="qconv2d_1")(x_in)
    net = QBatchNormalization(
        gamma_quantizer=quantizers.quantized_relu_po2(3, 2),
        variance_quantizer=quantizers.quantized_po2(
            3, 2, quadratic_approximation=False),
        beta_quantizer=quantizers.quantized_bits(6, 0, 1),
        scale=False,
        center=False,
        gamma_range=8,
        beta_range=4,
        name="qbn_2")(net)

    net = QConv2D(2,
                  1,
                  1,
                  kernel_quantizer=quantizers.quantized_po2(3, 0),
                  bias_quantizer=quantizers.quantized_po2(3, 2),
                  name="qconv2d_3")(net)

    model = keras.Model(inputs=[x_in], outputs=[net])

    bn_layer = model.get_layer("qbn_2")

    raw_weights = [
        np.array([3, 4, 1, 7]),
        np.array([6, 4, 1, -7]),
        np.array([2, 7, -8, 2]),
        np.array([-1, -7, 4, 9]),
    ]

    # Run each weight tensor through its matching layer quantizer (skip
    # slots whose quantizer is None).
    for idx, q in enumerate(bn_layer.get_quantizers()):
        if q is not None:
            raw_weights[idx] = keras.backend.eval(
                q(keras.backend.constant(raw_weights[idx])))

    # scale=False / center=False each remove one weight slot from the layer.
    slot_count = 4
    if not bn_layer.scale:
        slot_count -= 1
    if not bn_layer.center:
        slot_count -= 1

    bn_layer.set_weights(raw_weights[:slot_count])

    return model
Code example #4
0
def test_ReluPowerOfTwo():
  """Round-trip quantized_relu_po2 through the qtools ReluPowerOfTwo type."""
  original = quantizers.quantized_relu_po2()
  impl = quantizer_impl.ReluPowerOfTwo()
  impl.convert_qkeras_quantizer(original)
  round_tripped = impl.convert_to_qkeras_quantizer(
      negative_slope=original.negative_slope,
      use_stochastic_rounding=original.use_stochastic_rounding,
      quadratic_approximation=original.quadratic_approximation)

  # Every attribute of the reconstructed quantizer must match the original.
  expected = original.__dict__
  for key, val in round_tripped.__dict__.items():
    assert_equal(val, expected[key])
Code example #5
0
    def convert_to_qkeras_quantizer(self,
                                    negative_slope=0,
                                    use_stochastic_rounding=False,
                                    quadratic_approximation=False):
        """Convert this qtools quantizer to a qkeras quantized_relu_po2."""
        # NOTE(review): presumably a negative max_val_po2 marks "no maximum
        # set"; it is mapped to None so qkeras applies no clipping.
        max_value = None
        if self.max_val_po2 >= 0:
            max_value = self.max_val_po2

        return quantizers.quantized_relu_po2(
            bits=self.bits,
            max_value=max_value,
            negative_slope=negative_slope,
            use_stochastic_rounding=use_stochastic_rounding,
            quadratic_approximation=quadratic_approximation)