Code example #1
File: qpooling_test.py  Project: laurilaatu/qkeras
def test_qpooling_in_model_quantize():
    input_size = (16, 16, 3)
    pool_size = (2, 2)

    x = Input(input_size)
    xin = x
    x = AveragePooling2D(pool_size=pool_size, name="pooling")(x)
    x = GlobalAveragePooling2D(name="global_pooling")(x)
    model = Model(inputs=xin, outputs=x)

    quantize_config = {
        "QAveragePooling2D": {
            "average_quantizer": "binary",
            "activation_quantizer": "binary"
        },
        "QGlobalAveragePooling2D": {
            "average_quantizer": "quantized_bits(4, 0, 1)",
            "activation_quantizer": "ternary"
        }
    }

    qmodel = model_quantize(model, quantize_config, 4)
    print_qstats(qmodel)
    assert_equal(str(qmodel.layers[1].average_quantizer_internal), "binary()")
    assert_equal(str(qmodel.layers[1].activation), "binary()")
    assert_equal(str(qmodel.layers[2].average_quantizer_internal),
                 "quantized_bits(4,0,1)")
    assert_equal(str(qmodel.layers[2].activation), "ternary()")
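These snippets are shown without their import headers. One plausible set of imports for the example above (an assumption, not the exact header of qpooling_test.py) is:

# Hypothetical imports for the snippet above; the real test file may differ.
from numpy.testing import assert_equal
from tensorflow.keras.layers import AveragePooling2D, GlobalAveragePooling2D, Input
from tensorflow.keras.models import Model
from qkeras.estimate import print_qstats
from qkeras.utils import model_quantize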
Code example #2
def QDenseModel(weights_f, load_weights=False):
    """Construct QDenseModel."""

    x = x_in = Input((RESHAPED, ), name="input")
    x = QActivation("quantized_relu(4)", name="act_i")(x)
    x = QDense(N_HIDDEN,
               kernel_quantizer=ternary(),
               bias_quantizer=quantized_bits(4, 0, 1),
               name="dense0")(x)
    x = QActivation("quantized_relu(2)", name="act0")(x)
    x = QDense(NB_CLASSES,
               kernel_quantizer=quantized_bits(4, 0, 1),
               bias_quantizer=quantized_bits(4, 0, 1),
               name="dense2")(x)
    x = Activation("softmax", name="softmax")(x)

    model = Model(inputs=[x_in], outputs=[x])
    model.summary()
    model.compile(loss="categorical_crossentropy",
                  optimizer=OPTIMIZER,
                  metrics=["accuracy"])

    if load_weights and weights_f:
        model.load_weights(weights_f)

    print_qstats(model)
    return model
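QDenseModel relies on module-level constants (RESHAPED, N_HIDDEN, NB_CLASSES, OPTIMIZER) that are not part of the snippet. A hypothetical MNIST-style definition, purely as an assumption, would be:

# Hypothetical constants assumed by QDenseModel; the original script may use
# different values.
from tensorflow.keras.optimizers import Adam

RESHAPED = 28 * 28      # flattened MNIST image
N_HIDDEN = 100          # hidden layer width
NB_CLASSES = 10         # digit classes
OPTIMIZER = Adam(learning_rate=0.001)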
Code example #3
File: qpooling_test.py  Project: laurilaatu/qkeras
def test_q_average_pooling(pooling, input_size, pool_size, strides, padding,
                           data_format, average_quantizer,
                           activation_quantizer, y):
    """q_average_pooling test utility."""

    np.random.seed(33)

    x = Input(input_size)
    xin = x
    if pooling == 'QAveragePooling2D':
        x = QAveragePooling2D(pool_size=pool_size,
                              strides=strides,
                              padding=padding,
                              data_format=data_format,
                              average_quantizer=average_quantizer,
                              activation=activation_quantizer,
                              name='qpooling')(x)
    else:
        x = QGlobalAveragePooling2D(data_format=data_format,
                                    average_quantizer=average_quantizer,
                                    activation=activation_quantizer,
                                    name='qpooling')(x)
    model = Model(inputs=xin, outputs=x)

    # Prints qstats to make sure it works with the pooling layers
    print_qstats(model)

    size = (2, ) + input_size
    inputs = np.random.rand(size[0], size[1], size[2], size[3])

    if data_format == 'channels_first':
        assert_raises(tf.errors.InvalidArgumentError, model.predict, inputs)
    else:
        p = model.predict(inputs).astype(np.float16)
        assert_allclose(p, y, rtol=1e-4)

        # Reloads the model to ensure saving/loading works
        json_string = model.to_json()
        clear_session()
        reload_model = quantized_model_from_json(json_string)
        p = reload_model.predict(inputs).astype(np.float16)
        assert_allclose(p, y, rtol=1e-4)

        # Saves the model as an h5 file using Keras's model.save()
        fd, fname = tempfile.mkstemp(".h5")
        model.save(fname)
        del model  # Delete the existing model

        # Returns a compiled model identical to the previous one
        loaded_model = load_qmodel(fname)

        # Cleans the created h5 file after loading the model
        os.close(fd)
        os.remove(fname)

        # Applies quantizer to weights
        model_save_quantized_weights(loaded_model)
        p = loaded_model.predict(inputs).astype(np.float16)
        assert_allclose(p, y, rtol=1e-4)
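Because pooling layers carry no trainable weights, an architecture-only JSON round trip preserves the outputs, which is why the test above can compare predictions before and after reloading. A minimal standalone sketch of that idea, with shapes and quantizers chosen as illustrative assumptions:

# Minimal sketch, not taken from qpooling_test.py.
import numpy as np
from tensorflow.keras.backend import clear_session
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from qkeras.qpooling import QAveragePooling2D
from qkeras.quantizers import binary, quantized_bits
from qkeras.utils import quantized_model_from_json

x_in = Input((4, 4, 3))
x_out = QAveragePooling2D(pool_size=(2, 2),
                          average_quantizer=binary(),
                          activation=quantized_bits(4, 0, 1),
                          name="qpooling")(x_in)
model = Model(inputs=x_in, outputs=x_out)

data = np.random.rand(2, 4, 4, 3)
p_before = model.predict(data)

# Architecture-only round trip; no weights to lose, so outputs must match.
json_string = model.to_json()
clear_session()
reloaded = quantized_model_from_json(json_string)
p_after = reloaded.predict(data)
np.testing.assert_allclose(p_before, p_after, rtol=1e-4)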
Code example #4
def UseNetwork(weights_f, load_weights=False):
    """Use DenseModel.

  Args:
    weights_f: weight file location.
    load_weights: load weights when it is True.
  """
    model = QDenseModel(weights_f, load_weights)

    batch_size = BATCH_SIZE
    (x_train_, y_train_), (x_test_, y_test_) = mnist.load_data()

    x_train_ = x_train_.reshape(60000, RESHAPED)
    x_test_ = x_test_.reshape(10000, RESHAPED)
    x_train_ = x_train_.astype("float32")
    x_test_ = x_test_.astype("float32")

    x_train_ /= 255
    x_test_ /= 255

    print(x_train_.shape[0], "train samples")
    print(x_test_.shape[0], "test samples")

    y_train_ = to_categorical(y_train_, NB_CLASSES)
    y_test_ = to_categorical(y_test_, NB_CLASSES)

    if not load_weights:
        model.fit(x_train_,
                  y_train_,
                  batch_size=batch_size,
                  epochs=NB_EPOCH,
                  verbose=VERBOSE,
                  validation_split=VALIDATION_SPLIT)

        if weights_f:
            model.save_weights(weights_f)

    score = model.evaluate(x_test_, y_test_, verbose=VERBOSE)
    print_qstats(model)
    print("Test score:", score[0])
    print("Test accuracy:", score[1])
Code example #5
def test_qconv1d():
  np.random.seed(33)
  x = Input((4, 4,))
  y = QConv1D(
      2, 1,
      kernel_quantizer=quantized_bits(6, 2, 1),
      bias_quantizer=quantized_bits(4, 0, 1),
      name='qconv1d')(x)
  model = Model(inputs=x, outputs=y)

  # Extract model operations
  model_ops = extract_model_operations(model)

  # Assertion about the number of operations for this Conv1D layer
  assert model_ops['qconv1d']["number_of_operations"] == 32

  # Print qstats to make sure it works with Conv1D layer
  print_qstats(model) 

  # reload the model to ensure saving/loading works
  json_string = model.to_json()
  clear_session()
  model = quantized_model_from_json(json_string)

  for layer in model.layers:
    all_weights = []
    for i, weights in enumerate(layer.get_weights()):
      input_size = np.prod(layer.input.shape.as_list()[1:])
      if input_size is None:
        input_size = 10 * 10
      shape = weights.shape
      assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
      all_weights.append(
          10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
    if all_weights:
      layer.set_weights(all_weights)
    
  # Save the model as an h5 file using Keras's model.save()
  fd, fname = tempfile.mkstemp('.h5')
  model.save(fname)
  del model  # Delete the existing model

  # Returns a compiled model identical to the previous one
  model = load_qmodel(fname)

  # Clean the created h5 file after loading the model
  os.close(fd)
  os.remove(fname)

  # apply quantizer to weights
  model_save_quantized_weights(model)

  inputs = np.random.rand(2, 4, 4)
  p = model.predict(inputs).astype(np.float16)
  # Unused expected values retained from the original test:
  '''
  y = np.array([[[0.1309, -1.229], [-0.4165, -2.639], [-0.08105, -2.299],
                 [1.981, -2.195]],
                [[-0.3174, -3.94], [-0.3352, -2.316], [0.105, -0.833],
                 [0.2115, -2.89]]]).astype(np.float16)
  '''
  y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317],
                 [-1.659, 0.9834]],
                [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905],
                 [-2.652, -0.467]]]).astype(np.float16)
  assert np.all(p == y)
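The expected count of 32 operations can be checked by hand, assuming extract_model_operations counts multiply-accumulates: with kernel size 1, stride 1, and 'valid' padding the output length equals the input length of 4.

# Back-of-the-envelope check of the expected operation count (assuming the
# metric is multiply-accumulates).
output_length = 4   # length-4 input, kernel size 1, stride 1, 'valid' padding
kernel_size = 1
in_channels = 4
filters = 2
assert output_length * kernel_size * in_channels * filters == 32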
Code example #6
def test_qconv1d(layer_cls):
    np.random.seed(33)
    if layer_cls == "QConv1D":
        x = Input((4, 4,))
        y = QConv1D(2,
                    1,
                    kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
                    bias_quantizer=quantized_bits(4, 0, 1),
                    name='qconv1d')(x)
        model = Model(inputs=x, outputs=y)
    else:
        x = Input((4, 4,))
        y = QSeparableConv1D(2,
                             2,
                             depthwise_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
                             pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0),
                             bias_quantizer=quantized_bits(4, 0, 1),
                             name='qconv1d')(x)
        model = Model(inputs=x, outputs=y)

    # Extract model operations
    model_ops = extract_model_operations(model)

    # Check the input layer model operation was found correctly
    assert model_ops['qconv1d']['type'][0] != 'null'

    # Assertion about the number of operations for this (Separable)Conv1D layer
    if layer_cls == "QConv1D":
        assert model_ops['qconv1d']['number_of_operations'] == 32
    else:
        assert model_ops['qconv1d']['number_of_operations'] == 30

    # Print qstats to make sure it works with Conv1D layer
    print_qstats(model)

    # reload the model to ensure saving/loading works
    # json_string = model.to_json()
    # clear_session()
    # model = quantized_model_from_json(json_string)

    for layer in model.layers:
        all_weights = []
        for i, weights in enumerate(layer.get_weights()):
            input_size = np.prod(layer.input.shape.as_list()[1:])
            if input_size is None:
                input_size = 10 * 10
            shape = weights.shape
            assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
            all_weights.append(
                10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
        if all_weights:
            layer.set_weights(all_weights)
    # Save the model as an h5 file using Keras's model.save()
    fd, fname = tempfile.mkstemp('.h5')
    model.save(fname)
    del model  # Delete the existing model

    # Return a compiled model identical to the previous one
    model = load_qmodel(fname)

    # Clean the created h5 file after loading the model
    os.close(fd)
    os.remove(fname)

    # apply quantizer to weights
    model_save_quantized_weights(model)

    inputs = np.random.rand(2, 4, 4)
    p = model.predict(inputs).astype(np.float16)
    if layer_cls == "QConv1D":
        y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317],
                       [-1.659, 0.9834]],
                      [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905],
                       [-2.652, -0.467]]]).astype(np.float16)
    else:
        y = np.array([[[-2.275, -3.178], [-0.4358, -3.262], [1.987, 0.3987]],
                      [[-0.01251, -0.376], [0.3928, -1.328],
                       [-1.243, -2.43]]]).astype(np.float16)
    assert_allclose(p, y, rtol=1e-4)
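The layer_cls argument above is presumably supplied by a pytest parametrization along these lines (an assumption about the test harness, not shown in the snippet):

# Hypothetical pytest driver for the parameterized test above.
import pytest

@pytest.mark.parametrize("layer_cls", ["QConv1D", "QSeparableConv1D"])
def test_qconv1d(layer_cls):
    ...  # body as in Code example #6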