Example #1
def qconv_model():
    x = x_in = keras.layers.Input((23, 23, 1), name="input")
    x = QActivation("quantized_relu(4)", name="QA_0")(x)
    x = QConv2D(16,
                2,
                2,
                kernel_quantizer=quantizers.binary(),
                bias_quantizer=quantizers.ternary(),
                name="qconv2d_1")(x)
    x = QConv2D(8,
                2,
                2,
                kernel_quantizer=quantizers.quantized_bits(4, 0, 1),
                bias_quantizer=quantizers.quantized_bits(4, 0, 1),
                activation=quantizers.quantized_relu(6, 2),
                name="qconv2D_2")(x)
    x = QConv2D(2,
                2,
                2,
                kernel_quantizer=quantizers.quantized_bits(4, 0, 1),
                bias_quantizer=quantizers.quantized_bits(4, 0, 1),
                activation=quantizers.quantized_relu(6, 2),
                name="qconv2d_3")(x)
    x = QActivation("quantized_bits(6, 0, 1)", name="QA_4")(x)

    model = keras.Model(inputs=[x_in], outputs=[x])
    return model
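
A minimal usage sketch of the constructor above, assuming keras is tensorflow.keras and that QActivation, QConv2D, and quantizers have been imported from qkeras as in the original file:

import numpy as np

model = qconv_model()
model.summary()

# Push a dummy 23x23 single-channel image through the quantized network.
dummy = np.random.rand(1, 23, 23, 1).astype("float32")
print(model.predict(dummy).shape)  # (1, 2, 2, 2) after three stride-2 convolutions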
Example #2
def qdense_model(Inputs, l1Reg=0, bits=6, ints=0, h5fName=None):
    x = QDense(21,
               activation=None,
               kernel_initializer='lecun_uniform',
               kernel_regularizer=l1(l1Reg),
               bias_regularizer=l1(l1Reg),
               kernel_quantizer=quantized_bits(bits, ints, alpha=1),
               bias_quantizer=quantized_bits(6, 0, alpha=1),
               name="Dense_Layer_1")(Inputs)

    x = QActivation(activation=quantized_relu(bits, ints),
                    name="Relu_Layer_1")(x)

    x = QDense(22,
               activation=None,
               kernel_initializer='lecun_uniform',
               kernel_regularizer=l1(l1Reg),
               bias_regularizer=l1(l1Reg),
               kernel_quantizer=quantized_bits(bits, ints, alpha=1),
               bias_quantizer=quantized_bits(bits, ints, alpha=1),
               name="Dense_Layer_2")(x)

    x = QActivation(activation=quantized_relu(bits, ints),
                    name="Relu_Layer_2")(x)

    x = QDense(8,
               activation=None,
               kernel_initializer='lecun_uniform',
               kernel_regularizer=l1(l1Reg),
               bias_regularizer=l1(l1Reg),
               kernel_quantizer=quantized_bits(bits, ints, alpha=1),
               bias_quantizer=quantized_bits(bits, ints, alpha=1),
               name="Dense_Layer_3")(x)

    x = QActivation(activation=quantized_relu(bits), name="Relu_Layer_3")(x)

    x = QDense(1,
               activation=None,
               kernel_initializer='lecun_uniform',
               kernel_regularizer=l1(l1Reg),
               bias_regularizer=l1(l1Reg),
               kernel_quantizer=quantized_bits(bits, ints, alpha=1),
               bias_quantizer=quantized_bits(bits, ints, alpha=1),
               name="Dense_Layer_4")(x)

    #x = QActivation("quantized_bits(20,5)",name="Final_quantization")(x)

    predictions = Activation(activation='sigmoid',
                             name="Sigmoid_Output_Layer")(x)

    model = Model(inputs=Inputs, outputs=predictions)

    return model
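
A hedged usage sketch for this constructor; the 16-feature input width is an illustrative assumption, and the keras/qkeras imports used by the snippet (QDense, QActivation, quantized_bits, quantized_relu, l1, Model, Activation) are assumed to be in scope:

from tensorflow import keras

# Hypothetical input width; use whatever matches your feature vector.
inputs = keras.layers.Input(shape=(16,), name="input")
model = qdense_model(inputs, l1Reg=1e-4, bits=6, ints=0)
model.summary()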
Example #3
def test_QuantizedRelu():
  qkeras_quantizer = quantizers.quantized_relu()
  qtools_quantizer = quantizer_impl.QuantizedRelu()
  qtools_quantizer.convert_qkeras_quantizer(qkeras_quantizer)
  new_quantizer = qtools_quantizer.convert_to_qkeras_quantizer(
      use_sigmoid=qkeras_quantizer.use_sigmoid,
      negative_slope=qkeras_quantizer.negative_slope,
      use_stochastic_rounding=qkeras_quantizer.use_stochastic_rounding,
      relu_upper_bound=qkeras_quantizer.relu_upper_bound,
      is_quantized_clip=qkeras_quantizer.is_quantized_clip,
      qnoise_factor=qkeras_quantizer.qnoise_factor)

  result = new_quantizer.__dict__
  for (key, val) in result.items():
    assert_equal(val, qkeras_quantizer.__dict__[key])
Example #4
    def convert_to_qkeras_quantizer(self,
                                    use_sigmoid=0,
                                    negative_slope=0.0,
                                    use_stochastic_rounding=False,
                                    relu_upper_bound=None,
                                    is_quantized_clip=True,
                                    qnoise_factor=1.0):
        """convert qtools quantizer to qkeras quantizer."""

        return quantizers.quantized_relu(
            bits=self.bits,
            integer=self.int_bits,
            use_sigmoid=use_sigmoid,
            negative_slope=negative_slope,
            use_stochastic_rounding=use_stochastic_rounding,
            relu_upper_bound=relu_upper_bound,
            is_quantized_clip=is_quantized_clip,
            qnoise_factor=qnoise_factor)
Example #5
def test_single_dense_activation_exact(randX_100_16, bits, alpha):
    '''
    Test a single Dense -> Activation layer topology for
    bit exactness with the number-of-bits parameter.
    '''
    X = randX_100_16
    model = Sequential()
    model.add(
        QDense(16,
               input_shape=(16, ),
               name='fc1',
               kernel_quantizer=quantized_bits(bits, 0, alpha=alpha),
               bias_quantizer=quantized_bits(bits, 0, alpha=1),
               kernel_initializer='lecun_uniform'))
    model.add(QActivation(activation=quantized_relu(bits, 0), name='relu1'))
    model.compile()

    hls4ml.model.optimizer.get_optimizer(
        'output_rounding_saturation_mode').configure(
            layers=['relu1'],
            rounding_mode='AP_RND_CONV',
            saturation_mode='AP_SAT')
    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        output_dir=str(
            test_root_path /
            'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}'.format(
                bits, alpha)),
        part='xcu250-figd2104-2L-e')
    hls4ml.model.optimizer.get_optimizer(
        'output_rounding_saturation_mode').configure(layers=[])
    hls_model.compile()

    y_qkeras = model.predict(X)
    y_hls4ml = hls_model.predict(X)
    # Goal is to get it passing with all equal
    #np.testing.assert_array_equal(y_qkeras, y_hls4ml)
    # For now allow matching within 1 bit
    np.testing.assert_allclose(y_qkeras.ravel(),
                               y_hls4ml.ravel(),
                               atol=2**-bits,
                               rtol=1.0)
Example #6
def gen_model(img_shape):
    img_input = x = keras.Input(shape=img_shape)
    x = QConv2D(filters=5,
                kernel_size=4,
                strides=4,
                kernel_quantizer=quantizers.quantized_bits(
                    8, 3, alpha="auto_po2"),
                bias_quantizer=quantizers.quantized_bits(8, 3),
                name="conv")(x)
    x = QActivation(activation=quantizers.quantized_relu(4, 0),
                    name="act")(x)
    x = keras.layers.Flatten(name="flatten")(x)
    x = QDense(5,
               kernel_quantizer=quantizers.quantized_bits(
                   8, 0, alpha="auto_po2"),
               bias_quantizer=quantizers.quantized_bits(8, 3),
               name="dense")(x)
    model = keras.Model(inputs=img_input, outputs=[x])
    return model
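
For reference, an example invocation; the 28x28x1 image shape is only an assumption for illustration:

model = gen_model(img_shape=(28, 28, 1))
model.summary()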
Example #7
def qdense_model_fork():
    x = x_in = keras.layers.Input((23, ), name="input")
    x = QDense(10,
               kernel_quantizer=quantizers.quantized_bits(5, 0, 1),
               bias_quantizer=quantizers.quantized_bits(5, 0, 1),
               activation=quantizers.quantized_po2(3, 1),
               name="qdense_0")(x)
    x = QDense(20,
               kernel_quantizer=quantizers.quantized_bits(5, 0, 1),
               bias_quantizer=quantizers.quantized_bits(5, 0, 1),
               activation=quantizers.quantized_relu(6, 2),
               name="qdense_1")(x)
    x = QActivation("quantized_relu(4)", name="QA_2")(x)
    x_1 = QDense(30,
                 kernel_quantizer=quantizers.binary(),
                 bias_quantizer=quantizers.binary(),
                 name="qdense_3")(x)
    x_2 = QActivation("quantized_relu(6,2)", name="QA_3")(x)

    model = keras.Model(inputs=[x_in], outputs=[
        x_1,
        x_2,
    ])
    return model
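
The fork model produces two outputs; a quick shape check, assuming numpy and the keras/qkeras imports used by the snippet:

import numpy as np

model = qdense_model_fork()
y_dense, y_act = model.predict(np.random.rand(4, 23).astype("float32"))
print(y_dense.shape, y_act.shape)  # (4, 30) from qdense_3 and (4, 20) from QA_3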
Example #8
def test_qnoise_quantized_relu():
    # 0 sign bit, 1 integer bit, and 3 fractional bits.
    bits = 4
    integer = 1
    use_sigmoid = False
    negative_slope = 0
    use_stochastic_rounding = False

    # input to quantized relu
    inputs = np.array([0.0, 0.5, -0.5, 0.6, 2.0, 3.0], dtype=np.float32)
    # float relu
    x = np.array([0.0, 0.5, 0.0, 0.6, 2.0, 3.0], dtype=np.float32)
    # float relu with upper bound 1.5
    x_ub = np.array([0.0, 0.5, 0.0, 0.6, 1.5, 1.5], dtype=np.float32)
    # float relu with quantized clipping
    x_clipped = np.array([0.0, 0.5, 0.0, 0.6, 1.875, 1.875], dtype=np.float32)
    # quantized relu
    xq = np.array([0.0, 0.5, 0.0, 0.625, 1.875, 1.875], dtype=np.float32)

    # mixing half and half
    x_xq = 0.5 * (x + xq)
    x_clipped_xq = 0.5 * (x_clipped + xq)
    x_ub_xq = 0.5 * (x_ub + xq)

    #########################################
    # No relu upper bound
    # No quantized clip for float relu
    #########################################
    qr_qc_false = quantized_relu(
        bits=bits,
        integer=integer,
        use_sigmoid=use_sigmoid,
        negative_slope=negative_slope,
        use_stochastic_rounding=use_stochastic_rounding,
        relu_upper_bound=None,
        is_quantized_clip=False,
        use_variables=True)
    # no quantization
    qr_qc_false.update_qnoise_factor(qnoise_factor=0.0)
    x_q_0 = qr_qc_false(inputs)
    assert_equal(x_q_0, x)

    # full quantization
    qr_qc_false.update_qnoise_factor(qnoise_factor=1.0)
    x_q_1 = qr_qc_false(inputs)
    assert_equal(x_q_1, xq)

    # mixing half and half
    qr_qc_false.update_qnoise_factor(qnoise_factor=0.5)
    x_q_05 = qr_qc_false(inputs)
    assert_equal(x_q_05, x_xq)

    #########################################
    # No relu upper bound
    # Quantized clip for float relu
    #########################################
    qr_qc_true = quantized_relu(
        bits=bits,
        integer=integer,
        use_sigmoid=use_sigmoid,
        negative_slope=negative_slope,
        use_stochastic_rounding=use_stochastic_rounding,
        relu_upper_bound=None,
        is_quantized_clip=True,
        use_variables=True)
    # no quantization
    qr_qc_true.update_qnoise_factor(qnoise_factor=0.0)
    x_q_0 = qr_qc_true(inputs)
    assert_equal(x_q_0, x_clipped)

    # full quantization
    qr_qc_true.update_qnoise_factor(qnoise_factor=1.0)
    x_q_1 = qr_qc_true(inputs)
    assert_equal(x_q_1, xq)

    # mixing half and half
    qr_qc_true.update_qnoise_factor(qnoise_factor=0.5)
    x_q_05 = qr_qc_true(inputs)
    assert_equal(x_q_05, x_clipped_xq)

    #########################################
    # Relu upper bound
    # No quantized clip for float relu
    #########################################
    qr_ub_qc_false = quantized_relu(
        bits=bits,
        integer=integer,
        use_sigmoid=use_sigmoid,
        negative_slope=negative_slope,
        use_stochastic_rounding=use_stochastic_rounding,
        relu_upper_bound=1.5,
        is_quantized_clip=False,
        use_variables=True)
    # no quantization
    qr_ub_qc_false.update_qnoise_factor(qnoise_factor=0.0)
    x_q_0 = qr_ub_qc_false(inputs)
    assert_equal(x_q_0, np.clip(x_ub, a_min=None, a_max=1.5))

    # full quantization
    qr_ub_qc_false.update_qnoise_factor(qnoise_factor=1.0)
    x_q_1 = qr_ub_qc_false(inputs)
    assert_equal(x_q_1, np.clip(xq, a_min=None, a_max=1.5))

    # mixing half and half
    qr_ub_qc_false.update_qnoise_factor(qnoise_factor=0.5)
    x_q_05 = qr_ub_qc_false(inputs)
    assert_equal(x_q_05, np.clip(x_ub_xq, a_min=None, a_max=1.5))

    #########################################
    # Relu upper bound
    # Quantized clip for float relu
    # (The quantized clip has precedence over the relu upper bound.)
    #########################################
    qr_ub_qc_true = quantized_relu(
        bits=bits,
        integer=integer,
        use_sigmoid=use_sigmoid,
        negative_slope=negative_slope,
        use_stochastic_rounding=use_stochastic_rounding,
        relu_upper_bound=1.5,
        is_quantized_clip=True,
        use_variables=True)
    # no quantization
    qr_ub_qc_true.update_qnoise_factor(qnoise_factor=0.0)
    x_q_0 = qr_ub_qc_true(inputs)
    assert_equal(x_q_0, x_clipped)

    # full quantization
    qr_ub_qc_true.update_qnoise_factor(qnoise_factor=1.0)
    x_q_1 = qr_ub_qc_true(inputs)
    assert_equal(x_q_1, xq)

    # mixing half and half
    qr_ub_qc_true.update_qnoise_factor(qnoise_factor=0.5)
    x_q_05 = qr_ub_qc_true(inputs)
    assert_equal(x_q_05, x_clipped_xq)
Example #9
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, MaxPooling2D, Flatten
from qkeras.qlayers import QDense, QActivation
from qkeras.qconvolutional import QConv2D
from qkeras.quantizers import quantized_bits, quantized_relu

model = Sequential()

model.add(
    QConv2D(8, (4, 4),
            strides=(1, 1),
            input_shape=(32, 32, 1),
            kernel_quantizer=quantized_bits(14, 2),
            bias_quantizer=quantized_bits(14, 2),
            name="conv2d_0_m"))

model.add(QActivation(activation=quantized_relu(14, 2), name='relu1'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max1'))

model.add(
    QConv2D(16, (2, 2),
            strides=(1, 1),
            kernel_quantizer=quantized_bits(14, 2),
            bias_quantizer=quantized_bits(14, 2),
            name="conv2d_1_m"))

model.add(QActivation(activation=quantized_relu(14, 2), name='relu2'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max2'))
model.add(Flatten())
model.add(
    QDense(120,
           name='fc1',
Example #10
    assert sum(wrong) / len(wrong) < 0.005


@pytest.fixture(scope='module')
def randX_1000_1():
    return randX(1000, 1)


# TODO: include quantized_relu tests when they are made to pass
# https://github.com/fastmachinelearning/hls4ml/issues/377
@pytest.mark.parametrize('quantizer', [(quantized_bits(8, 0)),
                                       (quantized_bits(8, 4)),
                                       (quantized_bits(4, 2)),
                                       (quantized_bits(4, 0)),
                                       (quantized_bits(10, 0)),
                                       (quantized_relu(4)),
                                       (quantized_relu(4, 2)),
                                       (quantized_relu(8)),
                                       (quantized_relu(8, 4)),
                                       (quantized_relu(10)),
                                       (quantized_relu(10, 5))])
def test_quantizer(randX_1000_1, quantizer):
    '''
    Test a single quantizer as an Activation function.
    Checks that the type inference through the conversion is correct, without
    simply reusing the same logic.
    '''
    X = randX_1000_1
    X = np.round(X * 2**10) * 2**-10  # make it an exact ap_fixed<16,6>
    model = Sequential()
    model.add(
Example #11
def test_qnoise_quantized_relu():
    # 0 sign bit, 1 integer bit, and 3 fractional bits.
    bits = 4
    integer = 1
    use_sigmoid = False
    negative_slope = 0
    use_stochastic_rounding = False

    # input to quantized relu
    inputs = np.array([0.0, 0.5, -0.5, 0.6, 2.0, 3.0], dtype=np.float32)
    # float relu
    x = np.array([0.0, 0.5, 0.0, 0.6, 2.0, 3.0], dtype=np.float32)
    # float relu with upper bound 1.5
    x_ub = np.array([0.0, 0.5, 0.0, 0.6, 1.5, 1.5], dtype=np.float32)
    # float relu with quantized clipping
    x_clipped = np.array([0.0, 0.5, 0.0, 0.6, 1.875, 1.875], dtype=np.float32)
    # quantized relu
    xq = np.array([0.0, 0.5, 0.0, 0.625, 1.875, 1.875], dtype=np.float32)

    # mixing half and half
    x_xq = 0.5 * (x + xq)
    x_clipped_xq = 0.5 * (x_clipped + xq)
    x_ub_xq = 0.5 * (x_ub + xq)

    ######################
    # No relu upper bound
    ######################
    relu_upper_bound = None
    qr = quantized_relu(bits=bits,
                        integer=integer,
                        use_sigmoid=use_sigmoid,
                        negative_slope=negative_slope,
                        use_stochastic_rounding=use_stochastic_rounding,
                        relu_upper_bound=relu_upper_bound)

    ######################
    # Relu upper bound
    ######################
    relu_upper_bound = 1.5
    qr_ub = quantized_relu(bits=bits,
                           integer=integer,
                           use_sigmoid=use_sigmoid,
                           negative_slope=negative_slope,
                           use_stochastic_rounding=use_stochastic_rounding,
                           relu_upper_bound=relu_upper_bound)

    #########################################
    # No relu upper bound
    # No quantized clip for float relu
    #########################################
    qr.is_quantized_clip = False

    # no quantization
    x_q_0 = qr(inputs, qnoise_factor=0.0)
    assert_equal(x_q_0, x)

    # full quantization
    x_q_1 = qr(inputs, qnoise_factor=1.0)
    assert_equal(x_q_1, xq)

    # mixing half and half
    x_q_05 = qr(inputs, qnoise_factor=0.5)
    assert_equal(x_q_05, x_xq)

    #########################################
    # No relu upper bound
    # Quantized clip for float relu
    #########################################
    qr.is_quantized_clip = True

    # no quantization
    x_q_0 = qr(inputs, qnoise_factor=0.0)
    assert_equal(x_q_0, x_clipped)

    # full quantization
    x_q_1 = qr(inputs, qnoise_factor=1.0)
    assert_equal(x_q_1, xq)

    # mixing half and half
    x_q_05 = qr(inputs, qnoise_factor=0.5)
    assert_equal(x_q_05, x_clipped_xq)

    #########################################
    # Relu upper bound
    # No quantized clip for float relu
    #########################################
    qr_ub.is_quantized_clip = False

    # no quantization
    x_q_0 = qr_ub(inputs, qnoise_factor=0.0)
    assert_equal(x_q_0, x_ub)

    # full quantization
    x_q_1 = qr_ub(inputs, qnoise_factor=1.0)
    assert_equal(x_q_1, xq)

    # mixing half and half
    x_q_05 = qr_ub(inputs, qnoise_factor=0.5)
    assert_equal(x_q_05, x_ub_xq)

    #########################################
    # Relu upper bound
    # Quantized clip for float relu
    # (The quantized clip has precedence over the relu upper bound.)
    #########################################
    qr_ub.is_quantized_clip = True

    # no quantization
    x_q_0 = qr_ub(inputs, qnoise_factor=0.0)
    assert_equal(x_q_0, x_clipped)

    # full quantization
    x_q_1 = qr_ub(inputs, qnoise_factor=1.0)
    assert_equal(x_q_1, xq)

    # mixing half and half
    x_q_05 = qr_ub(inputs, qnoise_factor=0.5)
    assert_equal(x_q_05, x_clipped_xq)
Example #12
def Q_baseline_model(size, epochs, optimizer, X_training, y_training,
                     X_validation, y_validation, output_name):
    '''
    NN model constructor that also produces loss and prediction plots.

    Parameters
    ----------
    size : int
        Batch size used in the training process.
    epochs : int
        Number of epochs for which the model will be trained.
    optimizer : keras.optimizer
        Optimizer function.
    X_training : Numpy array
        Training data set.
    y_training : Numpy array
        True labels for the training set.
    X_validation : Numpy array
        Validation data set.
    y_validation : Numpy array
        True labels for the validation set.
    output_name : str
        Name used for saved plots.

    Returns
    -------
    model : qkeras.sequential
        QKeras model.
    w : numpy array
        Array of final weights used in the model for later inference.

    '''
    pruning = False
    # create model
    name = "RMSE validation"
    name2 = "RMSE training"
    history = History()
    model = Sequential()
    model.add(
        QDense(60,
               input_shape=(27, ),
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1),
               kernel_initializer='random_normal'))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu1'))
    model.add(
        QDense(50,
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu2'))
    # model.add(Dropout(rate=0.2))
    model.add(
        QDense(30,
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu3'))
    model.add(
        QDense(40,
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu4'))
    model.add(
        QDense(15,
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu5'))

    # model.add(QDense(80,  input_shape=(27,),kernel_quantizer=quantized_bits(16,1),bias_quantizer=quantized_bits(16,1), kernel_initializer='random_normal'))
    # model.add(QActivation(activation=quantized_relu(16,1), name='relu1'))
    # model.add(QDense(50,kernel_quantizer=quantized_bits(16,1),bias_quantizer=quantized_bits(16,1)))
    # model.add(QActivation(activation=quantized_relu(16,1), name='relu2'))
    # model.add(QDense(35,kernel_quantizer=quantized_bits(16,1),bias_quantizer=quantized_bits(16,1)))
    # model.add(QActivation(activation=quantized_relu(16,1), name='relu3'))
    # # # model.add(Dropout(rate=0.2))
    model.add(QDense(1, kernel_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu6'))
    #model.add(Activation("sigmoid"))
    # model.add(QActivation(activation=quantized_tanh(16,1),name='tanh'))
    if pruning:
        print("////////////////////////Training Model with pruning")
        pruning_params = {
            "pruning_schedule":
            pruning_schedule.ConstantSparsity(0.75,
                                              begin_step=2000,
                                              frequency=100)
        }
        model = prune.prune_low_magnitude(model, **pruning_params)
        model.compile(loss='mean_squared_error', optimizer=optimizer)
        model.fit(X_training,
                  y_training,
                  batch_size=size,
                  epochs=epochs,
                  verbose=1,
                  validation_data=(X_validation, y_validation),
                  callbacks=[history,
                             pruning_callbacks.UpdatePruningStep()])

        model = strip_pruning(model)
        w = model.layers[0].weights[0].numpy()
        h, b = np.histogram(w, bins=100)
        plt.figure(figsize=(7, 7))
        plt.bar(b[:-1], h, width=b[1] - b[0])
        plt.semilogy()
        plt.savefig("Zeros' distribution", format='png')
        print('% of zeros = {}'.format(np.sum(w == 0) / np.size(w)))
    else:
        print("////////////////////////Training Model WITHOUT pruning")
        model.compile(loss='mean_squared_error', optimizer=optimizer)
        model.fit(X_training,
                  y_training,
                  batch_size=size,
                  epochs=epochs,
                  verbose=1,
                  validation_data=(X_validation, y_validation),
                  callbacks=[history])
    # Compile model
    # model.compile(loss='mean_squared_error', optimizer=optimizer)
    # model.fit(X_training, y_training,
    #       batch_size=size,
    #       epochs=epochs,
    #       verbose=1,
    #       validation_data=(X_validation, y_validation),callbacks=[history])

    w = []
    for layer in model.layers:
        print(layer)
        w.append(layer.get_weights())

    #print(w)
    train_predictions = model.predict(X_training)
    predictions = model.predict(X_validation)
    lin_mse = mean_squared_error(y_validation, predictions)
    lin_rmse = np.sqrt(lin_mse)
    lin_mse2 = mean_squared_error(y_training, train_predictions)
    lin_rmse2 = np.sqrt(lin_mse2)
    msg = "%s: %f" % (name, lin_rmse)
    msg2 = "%s: %f" % (name2, lin_rmse2)
    print(msg)
    print(msg2)
    fig, ax = plt.subplots()
    # xy=np.vstack([y_validation, predictions])
    #z=gaussian_kde(xy)
    ax.scatter(y_validation, predictions, edgecolors=(0, 0, 0))
    ax.set_title('Regression model predictions (validation set)')
    ax.set_xlabel('Measured $p_T$ (GeV/c)')
    ax.set_ylabel('Predicted $p_T$ (GeV/c)')
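    # NOTE: Y and outrootname (used below) are module-level globals defined
    # elsewhere in the original script; they are not part of this snippet.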
    ax.plot([Y.min(), Y.max()], [Y.min(), Y.max()], 'k--', lw=4)
    plt.rc('font', size=20)
    plt.rc('axes', titlesize=18)
    plt.rc('axes', labelsize=18)
    plt.rc('xtick', labelsize=18)
    plt.rc('ytick', labelsize=18)
    plt.rc('legend', fontsize=18)
    plt.rc('figure', titlesize=18)
    plt.tight_layout()
    plt.savefig(outrootname + '/' + '1' + output_name, format='png', dpi=800)
    fig2, ax2 = plt.subplots()
    ax2.plot(history.history['loss'], label='loss')
    ax2.plot(history.history['val_loss'], label='val_loss')
    ax2.set_title('Training and Validation loss per epoch')
    ax2.set_xlabel('# Epoch')
    ax2.set_ylabel('loss')
    plt.legend()
    plt.tight_layout()
    plt.savefig(outrootname + '/' + '2' + output_name, format='png', dpi=800)
    #plt.show()
    del ax, ax2

    return model, w
Example #13
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1
from qkeras.qlayers import QDense, QActivation
from qkeras.quantizers import quantized_bits, quantized_relu
from callbacks import all_callbacks

model = Sequential()
model.add(
    QDense(32,
           input_shape=(16, ),
           name='fc1',
           kernel_quantizer=quantized_bits(6, 0, alpha=1),
           bias_quantizer=quantized_bits(6, 0, alpha=1),
           kernel_initializer='lecun_uniform',
           kernel_regularizer=l1(0.0001)))
model.add(QActivation(activation=quantized_relu(6), name='relu1'))
model.add(
    QDense(5,
           name='output',
           kernel_quantizer=quantized_bits(6, 0, alpha=1),
           bias_quantizer=quantized_bits(6, 0, alpha=1),
           kernel_initializer='lecun_uniform',
           kernel_regularizer=l1(0.0001)))
model.add(Activation(activation='softmax', name='softmax'))

from tensorflow_model_optimization.python.core.sparsity.keras import prune, pruning_callbacks, pruning_schedule
from tensorflow_model_optimization.sparsity.keras import strip_pruning
pruning_params = {
    "pruning_schedule":
    pruning_schedule.ConstantSparsity(0.75, begin_step=2000, frequency=100)
}
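
The snippet stops after defining pruning_params. A sketch of the typical next step, mirroring Example #12: wrap the model, compile, train with the pruning callback, and strip the wrappers before export. The optimizer and loss settings here are assumptions, not taken from the original snippet.

model = prune.prune_low_magnitude(model, **pruning_params)
model.compile(optimizer=Adam(learning_rate=3e-3),  # assumed settings
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(X_train, y_train, ..., callbacks=[pruning_callbacks.UpdatePruningStep()])
model = strip_pruning(model)  # remove pruning wrappers once training is done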
Example #14
def get_qkeras_model(inputDim,
                     hiddenDim=128,
                     encodeDim=8,
                     bits=7,
                     intBits=0,
                     reluBits=7,
                     reluIntBits=3,
                     lastBits=7,
                     lastIntBits=7,
                     l1reg=0,
                     batchNorm=True,
                     qBatchNorm=False,
                     input_batchNorm=False,
                     halfcode_layers=4,
                     fan_in_out=64,
                     **kwargs):
    """
    define the keras model
    the model based on the simple dense auto encoder 
    (128*128*128*128*8*128*128*128*128)
    """
    inputLayer = Input(shape=(inputDim, ))
    kwargs = {
        'kernel_quantizer': quantized_bits(bits, intBits, alpha=1),
        'bias_quantizer': quantized_bits(bits, intBits, alpha=1),
        'kernel_initializer': 'lecun_uniform',
        'kernel_regularizer': l1(l1reg)
    }

    # Declare encoder network
    for i in range(halfcode_layers):
        if i == 0:
            h = QDense(fan_in_out, **kwargs)(inputLayer)
        else:
            h = QDense(hiddenDim, **kwargs)(h)
        if batchNorm:
            if qBatchNorm:
                h = QBatchNormalization()(h)
            else:
                h = BatchNormalization()(h)
        h = QActivation(activation=quantized_relu(reluBits, reluIntBits))(h)

    # Declare latent space
    if halfcode_layers == 0:
        h = QDense(encodeDim, **kwargs)(inputLayer)
    else:
        h = QDense(encodeDim, **kwargs)(h)
    if batchNorm:
        if qBatchNorm:
            h = QBatchNormalization()(h)
        else:
            h = BatchNormalization()(h)
    h = QActivation(activation=quantized_relu(reluBits, reluIntBits))(h)

    # Declare decoder network
    for i in range(halfcode_layers):
        if i == halfcode_layers - 1:
            h = QDense(fan_in_out, **kwargs)(h)
        else:
            h = QDense(hiddenDim, **kwargs)(h)
        if batchNorm:
            if qBatchNorm:
                h = QBatchNormalization()(h)
            else:
                h = BatchNormalization()(h)
        h = QActivation(activation=quantized_relu(reluBits, reluIntBits))(h)

    kwargslast = {
        'kernel_quantizer': quantized_bits(lastBits, lastIntBits, alpha=1),
        'bias_quantizer': quantized_bits(lastBits, lastIntBits, alpha=1),
        'kernel_initializer': 'lecun_uniform',
        'kernel_regularizer': l1(l1reg)
    }
    h = QDense(inputDim, **kwargslast)(h)

    return Model(inputs=inputLayer, outputs=h)
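
A hypothetical instantiation; inputDim=640 and the compile settings are assumptions for illustration, and the keras/qkeras imports used by the snippet (Input, Model, QDense, QActivation, QBatchNormalization, BatchNormalization, quantized_bits, quantized_relu, l1) are assumed to be in scope:

autoencoder = get_qkeras_model(inputDim=640, hiddenDim=128, encodeDim=8)
autoencoder.compile(optimizer='adam', loss='mse')
autoencoder.summary()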