Example #1
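The snippets below are test and demo excerpts from QKeras and omit their import preamble. A plausible preamble for all nine examples (module paths are an assumption based on the QKeras public API, not copied from the original files):

import logging
import warnings

import numpy as np
from numpy.testing import assert_allclose, assert_equal

from tensorflow.keras import backend as K
from tensorflow.keras.backend import clear_session
from tensorflow.keras.layers import Activation, Flatten, Input
from tensorflow.keras.models import Model

# Quantizers, quantized layers, and helpers used across the examples.
from qkeras import (QActivation, QAveragePooling2D, QConv2D, QConv2DTranspose,
                    QDense, QGlobalAveragePooling2D, QSeparableConv2D,
                    bernoulli, binary, get_weight_scale, hard_sigmoid,
                    hard_tanh, quantized_bits, quantized_po2, quantized_relu,
                    quantized_relu_po2, quantized_tanh, set_internal_sigmoid,
                    smooth_sigmoid, smooth_tanh, stochastic_binary,
                    stochastic_ternary, ternary)
from qkeras.utils import (model_save_quantized_weights,
                          quantized_model_from_json)
from qkeras.qtools import generate_layer_data_type_map, interface, qgraph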
def test_qconv2dtranspose():
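    # An all-ones 4x4 input passed through a stride-1 transposed convolution
    # with an all-ones 3x3 kernel yields a 6x6 map whose entries count the
    # overlapping input pixels; the all-ones bias adds 1 (corners: 1 + 1 = 2,
    # center: 9 + 1 = 10), matching the expected output below.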
    x = Input((
        4,
        4,
        1,
    ))
    y = QConv2DTranspose(1,
                         kernel_size=(3, 3),
                         kernel_quantizer=binary(),
                         bias_quantizer=binary(),
                         name='conv2d_tran')(x)
    model = Model(inputs=x, outputs=y)
    data = np.ones(shape=(1, 4, 4, 1))
    kernel = np.ones(shape=(3, 3, 1, 1))
    bias = np.ones(shape=(1, ))
    model.get_layer('conv2d_tran').set_weights([kernel, bias])
    actual_output = model.predict(data).astype(np.float16)
    expected_output = np.array([[2., 3., 4., 4., 3., 2.],
                                [3., 5., 7., 7., 5., 3.],
                                [4., 7., 10., 10., 7., 4.],
                                [4., 7., 10., 10., 7., 4.],
                                [3., 5., 7., 7., 5., 3.],
                                [2., 3., 4., 4., 3., 2.]]).reshape(
                                    (1, 6, 6, 1)).astype(np.float16)
    assert_allclose(actual_output, expected_output, rtol=1e-4)
Example #2
def test_binary_auto_po2():
    """Test binary auto_po2 scale quantizer."""

    np.random.seed(42)
    N = 1000000
    m_list = [1.0, 0.1, 0.01, 0.001]

    for m in m_list:
        x = np.random.uniform(-m, m, (N, 10)).astype(K.floatx())
        x = K.constant(x)

        quantizer_ref = binary(alpha="auto")
        quantizer = binary(alpha="auto_po2")
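        # "auto_po2" should match the "auto" scale rounded to the nearest
        # power of two (in log2 space); `expected` below computes exactly
        # that from the reference scale.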

        q_ref = K.eval(quantizer_ref(x))
        q = K.eval(quantizer(x))

        ref = get_weight_scale(quantizer_ref, q_ref)

        expected = np.power(2.0, np.round(np.log2(ref)))
        result = get_weight_scale(quantizer, q)

        assert_allclose(result, expected, rtol=0.0001)
Example #3
def test_binary_auto():
    """Test binary auto scale quantizer."""

    np.random.seed(42)
    N = 1000000
    m_list = [1.0, 0.1, 0.01, 0.001]

    for m in m_list:
        x = np.random.uniform(-m, m, (N, 10)).astype(K.floatx())
        x = K.constant(x)

        quantizer = binary(alpha="auto")
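        # For x ~ Uniform(-m, m), the expected "auto" scale is E[|x|] = m / 2.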
        q = K.eval(quantizer(x))

        result = get_weight_scale(quantizer, q)
        expected = m / 2.0
        logging.info("expect %s", expected)
        logging.info("result %s", result)
        assert_allclose(result, expected, rtol=0.02)
Example #4
def main():
  # check the mean value of samples from stochastic_rounding for po2
  np.random.seed(42)
  count = 100000
  val = 42
  a = K.constant([val] * count)
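  # With stochastic rounding, averaging the quantized power-of-two samples
  # over many draws should land close to the original value.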
  b = quantized_po2(use_stochastic_rounding=True)(a)
  res = np.sum(K.eval(b)) / count
  print(res, "should be close to ", val)
  b = quantized_relu_po2(use_stochastic_rounding=True)(a)
  res = np.sum(K.eval(b)) / count
  print(res, "should be close to ", val)
  a = K.constant([-1] * count)
  b = quantized_relu_po2(use_stochastic_rounding=True)(a)
  res = np.sum(K.eval(b)) / count
  print(res, "should be all ", 0)

  # non-stochastic rounding quantizer.
  # unused: the single-value constant below overrides this multi-value example
  # a = K.constant([-3.0, -2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0, 3.0])
  a = K.constant([0.194336])
  print(" a =", K.eval(a).astype(np.float16))
  print("qa =", K.eval(quantized_relu(6,2)(a)).astype(np.float16))
  print("ss =", K.eval(smooth_sigmoid(a)).astype(np.float16))
  print("hs =", K.eval(hard_sigmoid(a)).astype(np.float16))
  print("ht =", K.eval(hard_tanh(a)).astype(np.float16))
  print("st =", K.eval(smooth_tanh(a)).astype(np.float16))
  c = K.constant(np.arange(-1.5, 1.51, 0.3))
  print(" c =", K.eval(c).astype(np.float16))
  print("qb_111 =", K.eval(quantized_bits(1,1,1)(c)).astype(np.float16))
  print("qb_210 =", K.eval(quantized_bits(2,1,0)(c)).astype(np.float16))
  print("qb_211 =", K.eval(quantized_bits(2,1,1)(c)).astype(np.float16))
  print("qb_300 =", K.eval(quantized_bits(3,0,0)(c)).astype(np.float16))
  print("qb_301 =", K.eval(quantized_bits(3,0,1)(c)).astype(np.float16))
  c_1000 = K.constant(np.array([list(K.eval(c))] * 1000))
  b = np.sum(K.eval(bernoulli()(c_1000)).astype(np.int32), axis=0) / 1000.0
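  # The empirical Bernoulli firing rate computed above should track
  # hard_sigmoid(c), which is printed next for comparison.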
  print("       hs =", K.eval(hard_sigmoid(c)).astype(np.float16))
  print("    b_all =", b.astype(np.float16))
  t = K.eval(stochastic_ternary(alpha="auto")(c_1000))
  for i in range(10):
    print("stochastic_ternary({}) =".format(i), t[i])
  print("   st_all =", np.round(
      np.sum(t.astype(np.float32), axis=0).astype(np.float16) /
      1000.0, 2).astype(np.float16))
  print("  ternary =", K.eval(ternary(threshold=0.5)(c)).astype(np.int32))
  c = K.constant(np.arange(-1.5, 1.51, 0.3))
  print(" c =", K.eval(c).astype(np.float16))
  print(" b_10 =", K.eval(binary(1)(c)).astype(np.float16))
  print("qr_10 =", K.eval(quantized_relu(1,0)(c)).astype(np.float16))
  print("qr_11 =", K.eval(quantized_relu(1,1)(c)).astype(np.float16))
  print("qr_20 =", K.eval(quantized_relu(2,0)(c)).astype(np.float16))
  print("qr_21 =", K.eval(quantized_relu(2,1)(c)).astype(np.float16))
  print("qr_101 =", K.eval(quantized_relu(1,0,1)(c)).astype(np.float16))
  print("qr_111 =", K.eval(quantized_relu(1,1,1)(c)).astype(np.float16))
  print("qr_201 =", K.eval(quantized_relu(2,0,1)(c)).astype(np.float16))
  print("qr_211 =", K.eval(quantized_relu(2,1,1)(c)).astype(np.float16))
  print("qt_200 =", K.eval(quantized_tanh(2,0)(c)).astype(np.float16))
  print("qt_210 =", K.eval(quantized_tanh(2,1)(c)).astype(np.float16))
  print("qt_201 =", K.eval(quantized_tanh(2,0,1)(c)).astype(np.float16))
  print("qt_211 =", K.eval(quantized_tanh(2,1,1)(c)).astype(np.float16))
  set_internal_sigmoid("smooth")
  print("with smooth sigmoid")
  print("qr_101 =", K.eval(quantized_relu(1,0,1)(c)).astype(np.float16))
  print("qr_111 =", K.eval(quantized_relu(1,1,1)(c)).astype(np.float16))
  print("qr_201 =", K.eval(quantized_relu(2,0,1)(c)).astype(np.float16))
  print("qr_211 =", K.eval(quantized_relu(2,1,1)(c)).astype(np.float16))
  print("qt_200 =", K.eval(quantized_tanh(2,0)(c)).astype(np.float16))
  print("qt_210 =", K.eval(quantized_tanh(2,1)(c)).astype(np.float16))
  print("qt_201 =", K.eval(quantized_tanh(2,0,1)(c)).astype(np.float16))
  print("qt_211 =", K.eval(quantized_tanh(2,1,1)(c)).astype(np.float16))
  set_internal_sigmoid("real")
  print("with real sigmoid")
  print("qr_101 =", K.eval(quantized_relu(1,0,1)(c)).astype(np.float16))
  print("qr_111 =", K.eval(quantized_relu(1,1,1)(c)).astype(np.float16))
  print("qr_201 =", K.eval(quantized_relu(2,0,1)(c)).astype(np.float16))
  print("qr_211 =", K.eval(quantized_relu(2,1,1)(c)).astype(np.float16))
  print("qt_200 =", K.eval(quantized_tanh(2,0)(c)).astype(np.float16))
  print("qt_210 =", K.eval(quantized_tanh(2,1)(c)).astype(np.float16))
  print("qt_201 =", K.eval(quantized_tanh(2,0,1)(c)).astype(np.float16))
  print("qt_211 =", K.eval(quantized_tanh(2,1,1)(c)).astype(np.float16))
  set_internal_sigmoid("hard")
  print(" c =", K.eval(c).astype(np.float16))
  print("q2_31 =", K.eval(quantized_po2(3,1)(c)).astype(np.float16))
  print("q2_32 =", K.eval(quantized_po2(3,2)(c)).astype(np.float16))
  print("qr2_21 =", K.eval(quantized_relu_po2(2,1)(c)).astype(np.float16))
  print("qr2_22 =", K.eval(quantized_relu_po2(2,2)(c)).astype(np.float16))
  print("qr2_44 =", K.eval(quantized_relu_po2(4,1)(c)).astype(np.float16))

  # stochastic rounding
  c = K.constant(np.arange(-1.5, 1.51, 0.3))
  print("q2_32_2 =", K.eval(quantized_relu_po2(32,2)(c)).astype(np.float16))
  b = K.eval(stochastic_binary()(c_1000)).astype(np.int32)
  for i in range(5):
    print("sbinary({}) =".format(i), b[i])
  print("sbinary =", np.round(np.sum(b, axis=0) / 1000.0, 2).astype(np.float16))
  print(" binary =", K.eval(binary()(c)).astype(np.int32))
  print(" c      =", K.eval(c).astype(np.float16))
  for i in range(10):
    print(" s_bin({}) =".format(i),
          K.eval(binary(use_stochastic_rounding=1)(c)).astype(np.int32))
  for i in range(10):
    print(" s_po2({}) =".format(i),
          K.eval(quantized_po2(use_stochastic_rounding=1)(c)).astype(np.int32))
  for i in range(10):
    print(
        " s_relu_po2({}) =".format(i),
        K.eval(quantized_relu_po2(use_stochastic_rounding=1)(c)).astype(
            np.int32))
Example #5
def test_binary(use_01, alpha, test_values, expected_values):
    x = K.placeholder(ndim=2)
    f = K.function([x], [binary(use_01, alpha)(x)])
    result = f([test_values])[0]
    assert_allclose(result, expected_values, rtol=1e-05)
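This test is presumably driven by a @pytest.mark.parametrize decorator that is not part of this excerpt. A purely illustrative invocation, with values chosen here rather than taken from the original fixture:

# Illustrative values only: with use_01=False and alpha=1.0, binary() maps
# negative inputs to -1.0 and positive inputs to +1.0.
test_binary(use_01=False,
            alpha=1.0,
            test_values=np.array([[-2.5, -0.1, 0.3, 2.0]], dtype=np.float32),
            expected_values=np.array([[-1.0, -1.0, 1.0, 1.0]],
                                     dtype=np.float32))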
Example #6
def test_qnetwork():
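    """Builds a small quantized CNN and checks the quantized weight sums
    and forward-pass outputs against stored signatures."""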
    x = x_in = Input((28, 28, 1), name='input')
    x = QSeparableConv2D(32, (2, 2),
                         strides=(2, 2),
                         depthwise_quantizer=binary(),
                         pointwise_quantizer=quantized_bits(4, 0, 1),
                         depthwise_activation=quantized_bits(6, 2, 1),
                         bias_quantizer=quantized_bits(4, 0, 1),
                         name='conv2d_0_m')(x)
    x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x)
    x = QConv2D(64, (3, 3),
                strides=(2, 2),
                kernel_quantizer=ternary(),
                bias_quantizer=quantized_bits(4, 0, 1),
                name='conv2d_1_m')(x)
    x = QActivation('quantized_relu(6, 3, 1)', name='act1_m')(x)
    x = QConv2D(64, (2, 2),
                strides=(2, 2),
                kernel_quantizer=quantized_bits(6, 2, 1),
                bias_quantizer=quantized_bits(4, 0, 1),
                name='conv2d_2_m')(x)
    x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x)
    x = Flatten(name='flatten')(x)
    x = QDense(10,
               kernel_quantizer=quantized_bits(6, 2, 1),
               bias_quantizer=quantized_bits(4, 0, 1),
               name='dense')(x)
    x = Activation('softmax', name='softmax')(x)

    model = Model(inputs=[x_in], outputs=[x])

    # deterministically initialize the weights so every run yields the same values

    np.random.seed(42)
    for layer in model.layers:
        all_weights = []
        for i, weights in enumerate(layer.get_weights()):
            input_size = np.prod(layer.input.shape.as_list()[1:])
            if input_size is None:
                input_size = 576 * 10  # fallback when the shape cannot be inferred
            shape = weights.shape
            assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
            # He-normal initialization with a scale factor of 2.0
            all_weights.append(
                10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
        if all_weights:
            layer.set_weights(all_weights)

    # apply quantizer to weights
    model_save_quantized_weights(model)

    all_weights = []

    for layer in model.layers:
        for i, weights in enumerate(layer.get_weights()):
            w = np.sum(weights)
            all_weights.append(w)

    all_weights = np.array(all_weights)

    # test_qnetwork_weight_quantization
    all_weights_signature = np.array(
        [2.0, -6.75, -0.625, -2.0, -0.25, -56.0, 1.125, -2.625, -0.75])
    assert all_weights.size == all_weights_signature.size
    assert np.all(all_weights == all_weights_signature)

    # test_qnetwork_forward:
    y = np.array([[
        0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 5.341e-02,
        9.468e-01, 0.000e+00, 0.000e+00, 0.000e+00
    ],
                  [
                      0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 5.960e-08,
                      0.000e+00, 1.919e-01, 0.000e+00, 0.000e+00, 8.081e-01
                  ],
                  [
                      0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 2.378e-04,
                      0.000e+00, 0.000e+00, 0.000e+00, 2.843e-05, 9.995e-01
                  ],
                  [
                      0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00,
                      0.000e+00, 1.000e+00, 0.000e+00, 0.000e+00, 0.000e+00
                  ],
                  [
                      0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00,
                      0.000e+00, 1.000e+00, 0.000e+00, 2.623e-06, 0.000e+00
                  ],
                  [
                      0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00,
                      7.749e-07, 0.000e+00, 0.000e+00, 1.634e-04, 1.000e+00
                  ],
                  [
                      0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00,
                      0.000e+00, 1.000e+00, 0.000e+00, 0.000e+00, 0.000e+00
                  ],
                  [
                      0.000e+00, 1.000e+00, 0.000e+00, 0.000e+00, 0.000e+00,
                      0.000e+00, 6.557e-07, 0.000e+00, 0.000e+00, 0.000e+00
                  ],
                  [
                      0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 1.000e+00,
                      0.000e+00, 5.960e-08, 0.000e+00, 0.000e+00, 0.000e+00
                  ],
                  [
                      0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 9.125e-03,
                      9.907e-01, 9.418e-06, 0.000e+00, 5.597e-05, 0.000e+00
                  ]]).astype(np.float16)

    inputs = 2 * np.random.rand(10, 28, 28, 1)
    p = model.predict(inputs).astype(np.float16)
    assert np.all(p == y)
Example #7
def test_qpooling_in_qtools():
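    """Checks the data types QTools infers for QAveragePooling2D and
    QGlobalAveragePooling2D layers."""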
    input_size = (16, 16, 3)
    pool_size = (2, 2)
    input_quantizers = [quantized_bits(8, 0, 1)]
    is_inference = False

    x = Input(input_size)
    xin = x
    x = QAveragePooling2D(pool_size=pool_size,
                          average_quantizer=binary(),
                          activation=quantized_bits(4, 0, 1),
                          name="pooling")(x)
    x = QGlobalAveragePooling2D(average_quantizer=quantized_bits(4, 0, 1),
                                activation=ternary(),
                                name="global_pooling")(x)
    model = Model(inputs=xin, outputs=x)

    (graph,
     source_quantizer_list) = qgraph.CreateGraph(model, input_quantizers)

    qgraph.GraphPropagateActivationsToEdges(graph)

    layer_map = generate_layer_data_type_map.generate_layer_data_type_map(
        graph, source_quantizer_list, is_inference)

    dtype_dict = interface.map_to_json(layer_map)

    # Checks the QAveragePooling2D layer data types
    multiplier = dtype_dict["pooling"]["pool_avg_multiplier"]
    accumulator = dtype_dict["pooling"]["pool_sum_accumulator"]
    average_quantizer = dtype_dict["pooling"]["average_quantizer"]
    output = dtype_dict["pooling"]["output_quantizer"]

    assert_equal(multiplier["quantizer_type"], "quantized_bits")
    assert_equal(multiplier["bits"], 10)
    assert_equal(multiplier["int_bits"], 3)
    assert_equal(multiplier["is_signed"], 1)
    assert_equal(multiplier["op_type"], "mux")

    assert_equal(accumulator["quantizer_type"], "quantized_bits")
    assert_equal(accumulator["bits"], 10)
    assert_equal(accumulator["int_bits"], 3)
    assert_equal(accumulator["is_signed"], 1)
    assert_equal(accumulator["op_type"], "add")

    assert_equal(output["quantizer_type"], "quantized_bits")
    assert_equal(output["bits"], 4)
    assert_equal(output["int_bits"], 1)
    assert_equal(output["is_signed"], 1)

    assert_equal(average_quantizer["quantizer_type"], "binary")
    assert_equal(average_quantizer["bits"], 1)
    assert_equal(average_quantizer["int_bits"], 1)
    assert_equal(average_quantizer["is_signed"], 1)

    # Checks the QGlobalAveragePooling2D layer data types
    multiplier = dtype_dict["global_pooling"]["pool_avg_multiplier"]
    accumulator = dtype_dict["global_pooling"]["pool_sum_accumulator"]
    average_quantizer = dtype_dict["global_pooling"]["average_quantizer"]
    output = dtype_dict["global_pooling"]["output_quantizer"]

    assert_equal(multiplier["quantizer_type"], "quantized_bits")
    assert_equal(multiplier["bits"], 13)
    assert_equal(multiplier["int_bits"], 7)
    assert_equal(multiplier["is_signed"], 1)
    assert_equal(multiplier["op_type"], "mul")

    assert_equal(accumulator["quantizer_type"], "quantized_bits")
    assert_equal(accumulator["bits"], 10)
    assert_equal(accumulator["int_bits"], 7)
    assert_equal(accumulator["is_signed"], 1)
    assert_equal(accumulator["op_type"], "add")

    assert_equal(output["quantizer_type"], "ternary")
    assert_equal(output["bits"], 2)
    assert_equal(output["int_bits"], 2)
    assert_equal(output["is_signed"], 1)

    assert_equal(average_quantizer["quantizer_type"], "quantized_bits")
    assert_equal(average_quantizer["bits"], 4)
    assert_equal(average_quantizer["int_bits"], 1)
    assert_equal(average_quantizer["is_signed"], 1)
Example #8
def test_qnetwork():
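    """Like the earlier test_qnetwork, but with fixed-alpha quantizers and a
    JSON save/reload round trip before the signature checks."""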
    K.set_learning_phase(1)
    x = x_in = Input((28, 28, 1), name='input')
    x = QSeparableConv2D(32, (2, 2),
                         strides=(2, 2),
                         depthwise_quantizer=binary(alpha=1.0),
                         pointwise_quantizer=quantized_bits(4, 0, 1,
                                                            alpha=1.0),
                         depthwise_activation=quantized_bits(6,
                                                             2,
                                                             1,
                                                             alpha=1.0),
                         bias_quantizer=quantized_bits(4, 0, 1),
                         name='conv2d_0_m')(x)
    x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x)
    x = QConv2D(64, (3, 3),
                strides=(2, 2),
                kernel_quantizer=ternary(alpha=1.0),
                bias_quantizer=quantized_bits(4, 0, 1),
                name='conv2d_1_m',
                activation=quantized_relu(6, 3, 1))(x)
    x = QConv2D(64, (2, 2),
                strides=(2, 2),
                kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
                bias_quantizer=quantized_bits(4, 0, 1),
                name='conv2d_2_m')(x)
    x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x)
    x = Flatten(name='flatten')(x)
    x = QDense(10,
               kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
               bias_quantizer=quantized_bits(4, 0, 1),
               name='dense')(x)
    x = Activation('softmax', name='softmax')(x)

    model = Model(inputs=[x_in], outputs=[x])

    # reload the model to ensure saving/loading works
    json_string = model.to_json()
    clear_session()
    model = quantized_model_from_json(json_string)

    # deterministically initialize the weights so every run yields the same values
    np.random.seed(42)
    for layer in model.layers:
        all_weights = []
        for i, weights in enumerate(layer.get_weights()):
            input_size = np.prod(layer.input.shape.as_list()[1:])
            if input_size is None:
                input_size = 576 * 10  # fallback when the shape cannot be inferred
            shape = weights.shape
            assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
            # He-normal initialization with a scale factor of 2.0
            all_weights.append(
                10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
        if all_weights:
            layer.set_weights(all_weights)

    # apply quantizer to weights
    model_save_quantized_weights(model)

    all_weights = []

    for layer in model.layers:
        for i, weights in enumerate(layer.get_weights()):
            w = np.sum(weights)
            all_weights.append(w)

    all_weights = np.array(all_weights)

    # test_qnetwork_weight_quantization
    all_weights_signature = np.array(
        [2., -6.75, -0.625, -2., -0.25, -56., 1.125, -1.625, -1.125])

    assert all_weights.size == all_weights_signature.size
    assert np.all(all_weights == all_weights_signature)

    # test_qnetwork_forward:
    expected_output = np.array([[
        0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00,
        0.e+00
    ],
                                [
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
                                    0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00
                                ],
                                [
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
                                    0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00
                                ],
                                [
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
                                    0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00
                                ],
                                [
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
                                    0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00
                                ],
                                [
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
                                    0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00
                                ],
                                [
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
                                    0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00
                                ],
                                [
                                    0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00,
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00
                                ],
                                [
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00,
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00
                                ],
                                [
                                    0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
                                    1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00
                                ]]).astype(np.float16)
    inputs = 2 * np.random.rand(10, 28, 28, 1)
    actual_output = model.predict(inputs).astype(np.float16)
    assert_allclose(actual_output, expected_output, rtol=1e-4)
Example #9
def main():
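  # Quantizer demo; largely mirrors the main() in Example #4, adding a
  # warnings check for the 32-bit power-of-two quantizer at the end.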
  np.random.seed(42)
  # unused: the single-value constant below overrides this multi-value example
  # a = K.constant([-3.0, -2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0, 3.0])
  a = K.constant([0.194336])
  print(" a =", K.eval(a).astype(np.float16))
  print("qa =", K.eval(quantized_relu(6,2)(a)).astype(np.float16))
  print("ss =", K.eval(smooth_sigmoid(a)).astype(np.float16))
  print("hs =", K.eval(hard_sigmoid(a)).astype(np.float16))
  print("ht =", K.eval(hard_tanh(a)).astype(np.float16))
  print("st =", K.eval(smooth_tanh(a)).astype(np.float16))
  c = K.constant(np.arange(-1.5, 1.51, 0.3))
  print(" c =", K.eval(c).astype(np.float16))
  print("qb_111 =", K.eval(quantized_bits(1,1,1)(c)).astype(np.float16))
  print("qb_210 =", K.eval(quantized_bits(2,1,0)(c)).astype(np.float16))
  print("qb_211 =", K.eval(quantized_bits(2,1,1)(c)).astype(np.float16))
  print("qb_300 =", K.eval(quantized_bits(3,0,0)(c)).astype(np.float16))
  print("qb_301 =", K.eval(quantized_bits(3,0,1)(c)).astype(np.float16))
  c_1000 = K.constant(np.array([list(K.eval(c))] * 1000))
  b = np.sum(K.eval(bernoulli()(c_1000)).astype(np.int32), axis=0) / 1000.0
  print("       hs =", K.eval(hard_sigmoid(c)).astype(np.float16))
  print("    b_all =", b.astype(np.float16))
  T = 0.0
  t = K.eval(stochastic_ternary(threshold=T)(c_1000)).astype(np.int32)
  for i in range(10):
    print("sternary({}) =".format(i), t[i])
  print("   st_all =", np.round(
      np.sum(t.astype(np.float32), axis=0).astype(np.float16) /
      1000.0, 2).astype(np.float16))
  print("  ternary =", K.eval(ternary(threshold=0.5)(c)).astype(np.int32))
  b = K.eval(stochastic_binary()(c_1000)).astype(np.int32)
  for i in range(5):
    print("sbinary({}) =".format(i), b[i])
  print("sbinary =", np.round(np.sum(b, axis=0) / 1000.0, 2).astype(np.float16))
  print(" binary =", K.eval(binary()(c)).astype(np.int32))
  c = K.constant(np.arange(-1.5, 1.51, 0.3))
  print(" c =", K.eval(c).astype(np.float16))
  print(" b_10 =", K.eval(binary(1)(c)).astype(np.float16))
  print("qr_10 =", K.eval(quantized_relu(1,0)(c)).astype(np.float16))
  print("qr_11 =", K.eval(quantized_relu(1,1)(c)).astype(np.float16))
  print("qr_20 =", K.eval(quantized_relu(2,0)(c)).astype(np.float16))
  print("qr_21 =", K.eval(quantized_relu(2,1)(c)).astype(np.float16))
  print("qr_101 =", K.eval(quantized_relu(1,0,1)(c)).astype(np.float16))
  print("qr_111 =", K.eval(quantized_relu(1,1,1)(c)).astype(np.float16))
  print("qr_201 =", K.eval(quantized_relu(2,0,1)(c)).astype(np.float16))
  print("qr_211 =", K.eval(quantized_relu(2,1,1)(c)).astype(np.float16))
  print("qt_200 =", K.eval(quantized_tanh(2,0)(c)).astype(np.float16))
  print("qt_210 =", K.eval(quantized_tanh(2,1)(c)).astype(np.float16))
  print("qt_201 =", K.eval(quantized_tanh(2,0,1)(c)).astype(np.float16))
  print("qt_211 =", K.eval(quantized_tanh(2,1,1)(c)).astype(np.float16))
  set_internal_sigmoid("smooth")
  print("with smooth sigmoid")
  print("qr_101 =", K.eval(quantized_relu(1,0,1)(c)).astype(np.float16))
  print("qr_111 =", K.eval(quantized_relu(1,1,1)(c)).astype(np.float16))
  print("qr_201 =", K.eval(quantized_relu(2,0,1)(c)).astype(np.float16))
  print("qr_211 =", K.eval(quantized_relu(2,1,1)(c)).astype(np.float16))
  print("qt_200 =", K.eval(quantized_tanh(2,0)(c)).astype(np.float16))
  print("qt_210 =", K.eval(quantized_tanh(2,1)(c)).astype(np.float16))
  print("qt_201 =", K.eval(quantized_tanh(2,0,1)(c)).astype(np.float16))
  print("qt_211 =", K.eval(quantized_tanh(2,1,1)(c)).astype(np.float16))
  set_internal_sigmoid("real")
  print("with real sigmoid")
  print("qr_101 =", K.eval(quantized_relu(1,0,1)(c)).astype(np.float16))
  print("qr_111 =", K.eval(quantized_relu(1,1,1)(c)).astype(np.float16))
  print("qr_201 =", K.eval(quantized_relu(2,0,1)(c)).astype(np.float16))
  print("qr_211 =", K.eval(quantized_relu(2,1,1)(c)).astype(np.float16))
  print("qt_200 =", K.eval(quantized_tanh(2,0)(c)).astype(np.float16))
  print("qt_210 =", K.eval(quantized_tanh(2,1)(c)).astype(np.float16))
  print("qt_201 =", K.eval(quantized_tanh(2,0,1)(c)).astype(np.float16))
  print("qt_211 =", K.eval(quantized_tanh(2,1,1)(c)).astype(np.float16))
  set_internal_sigmoid("hard")
  print(" c =", K.eval(c).astype(np.float16))
  print("q2_31 =", K.eval(quantized_po2(3,1)(c)).astype(np.float16))
  print("q2_32 =", K.eval(quantized_po2(3,2)(c)).astype(np.float16))
  print("qr2_21 =", K.eval(quantized_relu_po2(2,1)(c)).astype(np.float16))
  print("qr2_22 =", K.eval(quantized_relu_po2(2,2)(c)).astype(np.float16))
  print("qr2_44 =", K.eval(quantized_relu_po2(4,1)(c)).astype(np.float16))
  with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
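    # quantized_relu_po2 with 32 bits is expected to emit exactly one
    # UserWarning, which is captured and printed below.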
    print("q2_32_2 =", K.eval(quantized_relu_po2(32,2)(c)).astype(np.float16))
    assert len(w) == 1
    assert issubclass(w[-1].category, UserWarning)
    print(str(w[-1].message))