# Imports assumed from the QKeras / tf.keras package layout; adjust if your
# installed version exposes these helpers elsewhere.
import os
import tempfile

import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_allclose
from tensorflow.keras.backend import clear_session
from tensorflow.keras.layers import Activation, Flatten, Input
from tensorflow.keras.models import Model

from qkeras import (QActivation, QConv1D, QConv2D, QDense, QSeparableConv1D,
                    quantized_bits, quantized_relu)
from qkeras.estimate import extract_model_operations, print_qstats
from qkeras.utils import (load_qmodel, model_save_quantized_weights,
                          quantized_model_from_json)


def test_sequential_qnetwork():
    model = tf.keras.Sequential()
    model.add(Input((28, 28, 1), name='input'))
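    # Three quantized conv blocks: QConv2D with 4-bit kernel/bias quantizers
    # (quantized_bits(4, 0, 1): 4 bits, 0 integer bits), each followed by a
    # 4-bit quantized ReLU activation.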
    model.add(
        QConv2D(32, (2, 2),
                strides=(2, 2),
                kernel_quantizer=quantized_bits(4, 0, 1),
                bias_quantizer=quantized_bits(4, 0, 1),
                name='conv2d_0_m'))
    model.add(QActivation(quantized_relu(4, 0), name='act0_m'))
    model.add(
        QConv2D(64, (3, 3),
                strides=(2, 2),
                kernel_quantizer=quantized_bits(4, 0, 1),
                bias_quantizer=quantized_bits(4, 0, 1),
                name='conv2d_1_m'))
    model.add(QActivation(quantized_relu(4, 0), name='act1_m'))
    model.add(
        QConv2D(64, (2, 2),
                strides=(2, 2),
                kernel_quantizer=quantized_bits(4, 0, 1),
                bias_quantizer=quantized_bits(4, 0, 1),
                name='conv2d_2_m'))
    model.add(QActivation(quantized_relu(4, 0), name='act2_m'))
    model.add(Flatten())
    model.add(
        QDense(10,
               kernel_quantizer=quantized_bits(4, 0, 1),
               bias_quantizer=quantized_bits(4, 0, 1),
               name='dense'))
    model.add(Activation('softmax', name='softmax'))

    # Check that all model operations were found correctly
    model_ops = extract_model_operations(model)
    for layer in model_ops.keys():
        assert model_ops[layer]['type'][0] != 'null'
    return model
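

# A second, standalone example: the same extract_model_operations check for a
# QConv1D model, plus JSON and H5 save/reload round trips.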
def test_qconv1d():
  np.random.seed(33)
  x = Input((4, 4,))
  y = QConv1D(
      2, 1,
      kernel_quantizer=quantized_bits(6, 2, 1),
      bias_quantizer=quantized_bits(4, 0, 1),
      name='qconv1d')(
          x)
  model = Model(inputs=x, outputs=y)

  # Extract model operations
  model_ops = extract_model_operations(model)

  # Assertion about the number of operations for this Conv1D layer
  assert model_ops['qconv1d']["number_of_operations"] == 32

  # Print qstats to make sure it works with Conv1D layer
  print_qstats(model) 

  # reload the model to ensure saving/loading works
  json_string = model.to_json()
  clear_session()
  model = quantized_model_from_json(json_string)

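  # Re-initialize every layer's weights with a scaled He-style normal
  # (std = sqrt(2 / fan_in), scaled by 10); the fixed seed set above makes the
  # hard-coded outputs checked below reproducible.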
  for layer in model.layers:
    all_weights = []
    for i, weights in enumerate(layer.get_weights()):
      input_size = np.prod(layer.input.shape.as_list()[1:])
      if input_size is None:
        input_size = 10 * 10
      shape = weights.shape
      assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
      all_weights.append(
          10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
    if all_weights:
      layer.set_weights(all_weights)
    
  # Save the model as an h5 file using Keras's model.save()
  fd, fname = tempfile.mkstemp('.h5')
  model.save(fname)
  del model  # Delete the existing model

  # Returns a compiled model identical to the previous one
  model = load_qmodel(fname)

  # Clean the created h5 file after loading the model
  os.close(fd)
  os.remove(fname)

  # apply quantizer to weights
  model_save_quantized_weights(model)

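  # Run inference on random inputs and check against expected (golden)
  # float16 outputs.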
  inputs = np.random.rand(2, 4, 4)
  p = model.predict(inputs).astype(np.float16)
  '''
  y = np.array([[[0.1309, -1.229], [-0.4165, -2.639], [-0.08105, -2.299],
                 [1.981, -2.195]],
                [[-0.3174, -3.94], [-0.3352, -2.316], [0.105, -0.833],
                 [0.2115, -2.89]]]).astype(np.float16)
  '''
  y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317],
                 [-1.659, 0.9834]],
                [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905],
                 [-2.652, -0.467]]]).astype(np.float16)
  assert np.all(p == y)


# layer_cls is supplied by pytest parametrization over the two layer types
# exercised below.
@pytest.mark.parametrize('layer_cls', ['QConv1D', 'QSeparableConv1D'])
def test_qconv1d(layer_cls):
    np.random.seed(33)
    if layer_cls == "QConv1D":
        x = Input((4, 4,))
        y = QConv1D(2,
                    1,
                    kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
                    bias_quantizer=quantized_bits(4, 0, 1),
                    name='qconv1d')(x)
        model = Model(inputs=x, outputs=y)
    else:
        x = Input((4, 4,))
        y = QSeparableConv1D(2,
                             2,
                             depthwise_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
                             pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0),
                             bias_quantizer=quantized_bits(4, 0, 1),
                             name='qconv1d')(x)
        model = Model(inputs=x, outputs=y)

    # Extract model operations
    model_ops = extract_model_operations(model)

    # Check that the model operation for the qconv1d layer was found correctly
    assert model_ops['qconv1d']['type'][0] != 'null'

    # Assertion about the number of operations for this (Separable)Conv1D layer
    if layer_cls == "QConv1D":
        assert model_ops['qconv1d']['number_of_operations'] == 32
    else:
        assert model_ops['qconv1d']['number_of_operations'] == 30

    # Print qstats to make sure it works with Conv1D layer
    print_qstats(model)

    # reload the model to ensure saving/loading works
    # json_string = model.to_json()
    # clear_session()
    # model = quantized_model_from_json(json_string)

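    # Re-initialize weights deterministically (seeded, He-style scaling) so the
    # golden outputs below are reproducible.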
    for layer in model.layers:
        all_weights = []
        for i, weights in enumerate(layer.get_weights()):
            input_size = np.prod(layer.input.shape.as_list()[1:])
            if input_size is None:
                input_size = 10 * 10
            shape = weights.shape
            assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
            all_weights.append(
                10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
        if all_weights:
            layer.set_weights(all_weights)
    # Save the model as an h5 file using Keras's model.save()
    fd, fname = tempfile.mkstemp('.h5')
    model.save(fname)
    del model  # Delete the existing model

    # Return a compiled model identical to the previous one
    model = load_qmodel(fname)

    # Clean the created h5 file after loading the model
    os.close(fd)
    os.remove(fname)

    # apply quantizer to weights
    model_save_quantized_weights(model)

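    # Run inference and compare against per-layer-class golden float16 outputs.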
    inputs = np.random.rand(2, 4, 4)
    p = model.predict(inputs).astype(np.float16)
    if layer_cls == "QConv1D":
        y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317],
                       [-1.659, 0.9834]],
                      [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905],
                       [-2.652, -0.467]]]).astype(np.float16)
    else:
        y = np.array([[[-2.275, -3.178], [-0.4358, -3.262], [1.987, 0.3987]],
                      [[-0.01251, -0.376], [0.3928, -1.328],
                       [-1.243, -2.43]]]).astype(np.float16)
    assert_allclose(p, y, rtol=1e-4)
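

# Minimal entry point so the file can also be run directly; a sketch, since
# these tests are normally collected and run by pytest.
if __name__ == '__main__':
    pytest.main([__file__])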