Code Example #1
# Imports shared by the code examples in this section; the module paths follow
# the QKeras source tree (QKeras layers and quantizers on top of TF Keras).
import numpy as np
from numpy.testing import assert_equal
from tensorflow import keras

from qkeras import QActivation, QBatchNormalization, QConv2D, QDense
from qkeras import quantizers
from qkeras.qtools.quantized_operators import quantizer_impl


def qbn_model_inference():

    x = x_in = keras.layers.Input((23, 23, 1), name="input")
    x = QConv2D(4,
                2,
                23,
                kernel_quantizer=quantizers.quantized_ulaw(4, 1, 1),
                bias_quantizer=quantizers.stochastic_ternary(),
                use_bias=False,
                name="qconv2d_1")(x)
    x = QBatchNormalization(
        gamma_quantizer=quantizers.quantized_relu_po2(3, 2),
        variance_quantizer=quantizers.quantized_po2(
            3, 2, quadratic_approximation=False),
        beta_quantizer=quantizers.quantized_bits(6, 0, 1),
        scale=False,
        center=False,
        gamma_range=8,
        beta_range=4,
        name="qbn_2")(x)

    x = QConv2D(2,
                1,
                1,
                kernel_quantizer=quantizers.quantized_po2(3, 0),
                bias_quantizer=quantizers.quantized_po2(3, 2),
                name="qconv2d_3")(x)

    model = keras.Model(inputs=[x_in], outputs=[x])

    layer = model.get_layer("qbn_2")

    weight_arr = [
        np.array([3, 4, 1, 7]),
        np.array([6, 4, 1, -7]),
        np.array([2, 7, -8, 2]),
        np.array([-1, -7, 4, 9])
    ]

    # quantize the weights
    quantizer_list = layer.get_quantizers()
    for (i, quantizer) in enumerate(quantizer_list):
        if quantizer is not None:
            weight_arr[i] = keras.backend.eval(
                quantizer(keras.backend.constant(weight_arr[i])))

    # With scale=False and center=False the layer does not store gamma/beta,
    # so only the first num_weights arrays are assigned.
    num_weights = 4
    if not layer.scale:
        num_weights -= 1
    if not layer.center:
        num_weights -= 1

    layer.set_weights(weight_arr[:num_weights])

    return model
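The effect of the quantization step above can be checked directly on the returned model. The snippet below is a minimal, illustrative inspection that only reuses the function and imports defined in this section.

model = qbn_model_inference()
bn = model.get_layer("qbn_2")
# With scale=False and center=False only the moving statistics remain,
# so two weight arrays are expected here.
print(bn.get_weights())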
Code Example #2
def hybrid_model():
    """hybrid model that mixes qkeras and keras layers."""

    x = x_in = keras.layers.Input((784, ), name="input")
    x = keras.layers.Dense(300, name="d0")(x)
    x = keras.layers.Activation("relu", name="d0_act")(x)
    x = QDense(100,
               kernel_quantizer=quantizers.quantized_po2(4),
               bias_quantizer=quantizers.quantized_po2(4),
               name="d1")(x)
    x = QActivation("quantized_relu(4,0)", name="d1_qr4")(x)
    x = QDense(10,
               kernel_quantizer=quantizers.quantized_po2(4),
               bias_quantizer=quantizers.quantized_po2(4),
               name="d2")(x)
    x = keras.layers.Activation("softmax", name="softmax")(x)

    return keras.Model(inputs=[x_in], outputs=[x])
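Since QKeras layers are drop-in Keras layers, the hybrid model trains with the ordinary Keras workflow. The sketch below uses random, purely illustrative data shaped like a flattened 28x28 image with 10 one-hot classes.

model = hybrid_model()
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])

# Synthetic stand-in data; only the shapes matter for this check.
x_train = np.random.rand(32, 784).astype("float32")
y_train = np.eye(10)[np.random.randint(0, 10, size=32)]
model.fit(x_train, y_train, epochs=1, batch_size=8, verbose=0)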
Code Example #3
def test_PowerOfTwo():
  qkeras_quantizer = quantizers.quantized_po2()
  qtools_quantizer = quantizer_impl.PowerOfTwo(is_signed=True)
  qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer)
  new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer(
      negative_slope=None,
      use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding,
      quadratic_approximation=qkeras_quantizer.quadratic_approximation)

  result = new_quantizer.__dict__
  for (key, val) in result.items():
    assert_equal(val, qkeras_quantizer.__dict__[key])
Code Example #4
def float_po2_model():
    x = x_in = keras.layers.Input((23, 23, 1), name="input")
    x = QConv2D(16,
                2,
                2,
                kernel_quantizer=quantizers.quantized_po2(5, 0),
                bias_quantizer=quantizers.quantized_po2(5, 0),
                name="qconv2d_1")(x)
    x = QActivation("quantized_relu_po2(3, 2)", name="QA_0")(x)
    x = QConv2D(10,
                2,
                2,
                kernel_quantizer=quantizers.quantized_bits(5, 2, 1),
                bias_quantizer=quantizers.quantized_bits(5, 2, 1),
                name="qconv2d_0")(x)
    model = keras.Model(inputs=[x_in], outputs=[x])

    # Print each layer together with its output shape for quick inspection.
    for layer in model.layers:
        print(layer)
        print(layer.output_shape)
    return model
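A quick way to confirm the printed shapes is to push a random input through the model; the batch below is synthetic and only illustrates the expected dimensions.

model = float_po2_model()
dummy = np.random.rand(1, 23, 23, 1).astype("float32")  # one random 23x23x1 image
preds = model.predict(dummy)
# With "valid" padding and stride 2 in both convolutions this should be (1, 5, 5, 10).
print(preds.shape)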
Code Example #5
    # Method of the qtools quantizer class exercised in Code Example #3
    # (PowerOfTwo in quantizer_impl).
    def convert_to_qkeras_quantizer(self,
                                    negative_slope=0,
                                    use_stochastic_rounding=False,
                                    quadratic_approximation=False):
        """Convert this qtools quantizer back to a QKeras quantizer."""

        if self.is_signed:
            # quantized_po2
            return quantizers.quantized_po2(
                bits=self.bits,
                max_value=self.max_val_po2 if self.max_val_po2 >= 0 else None,
                use_stochastic_rounding=use_stochastic_rounding,
                quadratic_approximation=quadratic_approximation)
        else:
            # quantized_relu_po2
            return quantizers.quantized_relu_po2(
                bits=self.bits,
                max_value=self.max_val_po2 if self.max_val_po2 >= 0 else None,
                negative_slope=negative_slope,
                use_stochastic_rounding=use_stochastic_rounding,
                quadratic_approximation=quadratic_approximation)
Code Example #6
def test_util_layers():
    input_quantizers = None  # quantizers.quantized_bits(4, 0, 1)

    act = "quantized_bits(6, 0, 1)"
    x = x_in = keras.layers.Input((24, 24, 1), name="input")
    x = QActivation(act, name="QA_0")(x)
    x = keras.layers.Reshape((12 * 12, 4, 1), name="reshape_1")(x)
    x = keras.layers.MaxPooling2D(pool_size=(2, 2), name="maxpooling_2")(x)
    x = keras.layers.Flatten(name="flatten_3")(x)
    x = QDense(30,
               kernel_quantizer=quantizers.binary(use_01=1),
               bias_quantizer=quantizers.binary(use_01=1),
               activation=quantizers.quantized_po2(3, 2),
               name="qdense_4")(x)

    model = keras.Model(inputs=[x_in], outputs=[x])
    # `run` is a helper defined elsewhere in the test module; it pushes the
    # model through qtools and returns a per-layer datatype dictionary.
    dtype_dict = run(model, input_quantizers)

    multiplier = dtype_dict["qdense_4"]["multiplier"]
    assert multiplier["quantizer_type"] == "quantized_bits"
    assert multiplier["bits"] == 6
    assert multiplier["int_bits"] == 1
    assert multiplier["is_signed"] == 1
    assert multiplier["op_type"] == "and"

    accumulator = dtype_dict["qdense_4"]["accumulator"]
    assert accumulator["quantizer_type"] == "quantized_bits"
    assert accumulator["bits"] == 15
    assert accumulator["int_bits"] == 10
    assert accumulator["is_signed"] == 1
    assert accumulator["op_type"] == "add"

    output = dtype_dict["qdense_4"]["output_quantizer"]
    assert output["quantizer_type"] == "quantized_po2"
    assert output["bits"] == 3
    assert output["is_signed"] == 1
    assert output["max_value"] == 2
Code Example #7
def qdense_model_fork():
    x = x_in = keras.layers.Input((23, ), name="input")
    x = QDense(10,
               kernel_quantizer=quantizers.quantized_bits(5, 0, 1),
               bias_quantizer=quantizers.quantized_bits(5, 0, 1),
               activation=quantizers.quantized_po2(3, 1),
               name="qdense_0")(x)
    x = QDense(20,
               kernel_quantizer=quantizers.quantized_bits(5, 0, 1),
               bias_quantizer=quantizers.quantized_bits(5, 0, 1),
               activation=quantizers.quantized_relu(6, 2),
               name="qdense_1")(x)
    x = QActivation("quantized_relu(4)", name="QA_2")(x)
    x_1 = QDense(30,
                 kernel_quantizer=quantizers.binary(),
                 bias_quantizer=quantizers.binary(),
                 name="qdense_3")(x)
    x_2 = QActivation("quantized_relu(6,2)", name="QA_3")(x)

    model = keras.Model(inputs=[x_in], outputs=[x_1, x_2])
    return model
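Because the graph forks after QA_2, predict returns one tensor per output. A minimal, illustrative check with random data:

model = qdense_model_fork()
dummy = np.random.rand(4, 23).astype("float32")  # random batch of 23-feature inputs
out_dense, out_act = model.predict(dummy)
print(out_dense.shape, out_act.shape)  # expected (4, 30) and (4, 20)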