Example #1
def qdense_model(Inputs, l1Reg=0, bits=6, ints=0, h5fName=None):
    x = QDense(21,
               activation=None,
               kernel_initializer='lecun_uniform',
               kernel_regularizer=l1(l1Reg),
               bias_regularizer=l1(l1Reg),
               kernel_quantizer=quantized_bits(bits, ints, alpha=1),
               bias_quantizer=quantized_bits(6, 0, alpha=1),
               name="Dense_Layer_1")(Inputs)

    x = QActivation(activation=quantized_relu(bits, ints),
                    name="Relu_Layer_1")(x)

    x = QDense(22,
               activation=None,
               kernel_initializer='lecun_uniform',
               kernel_regularizer=l1(l1Reg),
               bias_regularizer=l1(l1Reg),
               kernel_quantizer=quantized_bits(bits, ints, alpha=1),
               bias_quantizer=quantized_bits(bits, ints, alpha=1),
               name="Dense_Layer_2")(x)

    x = QActivation(activation=quantized_relu(bits, ints),
                    name="Relu_Layer_2")(x)

    x = QDense(8,
               activation=None,
               kernel_initializer='lecun_uniform',
               kernel_regularizer=l1(l1Reg),
               bias_regularizer=l1(l1Reg),
               kernel_quantizer=quantized_bits(bits, ints, alpha=1),
               bias_quantizer=quantized_bits(bits, ints, alpha=1),
               name="Dense_Layer_3")(x)

    x = QActivation(activation=quantized_relu(bits), name="Relu_Layer_3")(x)

    x = QDense(1,
               activation=None,
               kernel_initializer='lecun_uniform',
               kernel_regularizer=l1(l1Reg),
               bias_regularizer=l1(l1Reg),
               kernel_quantizer=quantized_bits(bits, ints, alpha=1),
               bias_quantizer=quantized_bits(bits, ints, alpha=1),
               name="Dense_Layer_4")(x)

    #x = QActivation("quantized_bits(20,5)",name="Final_quantization")(x)

    predictions = Activation(activation='sigmoid',
                             name="Sigmoid_Output_Layer")(x)

    model = Model(inputs=Inputs, outputs=predictions)

    return model
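A minimal usage sketch (not from the original source; the 16-feature input width, optimizer, and loss are assumptions made for illustration). The imports listed are the ones the constructor above relies on:

from tensorflow.keras.layers import Input, Activation
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l1
from qkeras.qlayers import QDense, QActivation
from qkeras.quantizers import quantized_bits, quantized_relu

# Build a 6-bit model on a hypothetical 16-feature input; the sigmoid output
# suggests binary targets, hence binary cross-entropy here.
Inputs = Input(shape=(16,), name='model_input')
model = qdense_model(Inputs, l1Reg=1e-4, bits=6, ints=0)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()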
Example #2
def test_quantizer(randX_1000_1, quantizer):
    '''
    Test a single quantizer as an Activation function.
    Checks that the type inference through the conversion is correct,
    rather than simply re-using the same logic in the test.
    '''
    X = randX_1000_1
    X = np.round(X * 2**10) * 2**-10  # make it an exact ap_fixed<16,6>
    model = Sequential()
    model.add(
        QActivation(input_shape=(1, ), activation=quantizer, name='quantizer'))
    model.compile()

    hls4ml.model.optimizer.get_optimizer(
        'output_rounding_saturation_mode').configure(
            layers=['quantizer'],
            rounding_mode='AP_RND_CONV',
            saturation_mode='AP_SAT')
    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    output_dir = str(
        test_root_path / 'hls4mlprj_qkeras_quantizer_{}_{}_{}'.format(
            quantizer.__class__.__name__, quantizer.bits, quantizer.integer))
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        output_dir=output_dir,
        part='xcu250-figd2104-2L-e')
    hls4ml.model.optimizer.get_optimizer(
        'output_rounding_saturation_mode').configure(layers=[])
    hls_model.compile()

    y_qkeras = model.predict(X)
    y_hls4ml = hls_model.predict(X)
    # Goal is to get it passing with all equal
    np.testing.assert_array_equal(y_qkeras, y_hls4ml)
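The test above relies on a pytest fixture named randX_1000_1 and a parametrized quantizer argument. A sketch of how they might be supplied (the fixture body and the particular quantizer instances are assumptions, loosely modelled on the hls4ml QKeras tests):

import numpy as np
import pytest
from qkeras.quantizers import quantized_bits, quantized_relu

@pytest.fixture(scope='module')
def randX_1000_1():
    # 1000 samples with a single feature each, uniform in [0, 1)
    return np.random.rand(1000, 1)

@pytest.mark.parametrize('quantizer', [quantized_bits(8, 0),
                                       quantized_bits(8, 4),
                                       quantized_relu(8, 4)])
def test_quantizer(randX_1000_1, quantizer):
    ...  # body as shown above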
Example #3
def make_btnn(test_no, N, kernel_quantizer, bias_quantizer,
              activation_quantizer, use_batchnorm, is_xnor):
    shape = (N, )
    model = Sequential()
    model.add(
        QDense(10,
               input_shape=shape,
               kernel_quantizer=kernel_quantizer,
               bias_quantizer=bias_quantizer,
               name='dense'))
    if use_batchnorm:
        model.add(BatchNormalization(name='bn'))
    model.add(QActivation(activation=activation_quantizer))
    model.compile()
    return model, is_xnor, test_no
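A sketch of how make_btnn might be called, e.g. with the binary and ternary quantizers from qkeras (the parameter values here are assumptions):

from qkeras.quantizers import binary, ternary, quantized_bits

# binary weights and binary activation (XNOR-style dense layer)
model, is_xnor, test_no = make_btnn(test_no=1, N=10,
                                    kernel_quantizer=binary(alpha=1),
                                    bias_quantizer=quantized_bits(5, 2),
                                    activation_quantizer=binary(alpha=1),
                                    use_batchnorm=True, is_xnor=True)

# ternary weights and ternary activation
model, is_xnor, test_no = make_btnn(test_no=2, N=10,
                                    kernel_quantizer=ternary(alpha=1),
                                    bias_quantizer=quantized_bits(5, 2),
                                    activation_quantizer=ternary(alpha=1),
                                    use_batchnorm=False, is_xnor=False)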
Example #4
def test_single_dense_activation_exact(randX_100_16, bits, alpha):
    '''
    Test a single Dense -> Activation layer topology for
    bit exactness as the number of bits is varied.
    '''
    X = randX_100_16
    model = Sequential()
    model.add(
        QDense(16,
               input_shape=(16, ),
               name='fc1',
               kernel_quantizer=quantized_bits(bits, 0, alpha=alpha),
               bias_quantizer=quantized_bits(bits, 0, alpha=1),
               kernel_initializer='lecun_uniform'))
    model.add(QActivation(activation=quantized_relu(bits, 0), name='relu1'))
    model.compile()

    hls4ml.model.optimizer.get_optimizer(
        'output_rounding_saturation_mode').configure(
            layers=['relu1'],
            rounding_mode='AP_RND_CONV',
            saturation_mode='AP_SAT')
    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=config,
        output_dir=str(
            test_root_path /
            'hls4mlprj_qkeras_single_dense_activation_exact_{}_{}'.format(
                bits, alpha)),
        part='xcu250-figd2104-2L-e')
    hls4ml.model.optimizer.get_optimizer(
        'output_rounding_saturation_mode').configure(layers=[])
    hls_model.compile()

    y_qkeras = model.predict(X)
    y_hls4ml = hls_model.predict(X)
    # Goal is to get it passing with all equal
    #np.testing.assert_array_equal(y_qkeras, y_hls4ml)
    # For now allow matching within 1 bit
    np.testing.assert_allclose(y_qkeras.ravel(),
                               y_hls4ml.ravel(),
                               atol=2**-bits,
                               rtol=1.0)
Example #5
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, MaxPooling2D, Flatten
from qkeras.qlayers import QDense, QActivation
from qkeras.qconvolutional import QConv2D
from qkeras.quantizers import quantized_bits, quantized_relu

model = Sequential()

model.add(
    QConv2D(8, (4, 4),
            strides=(1, 1),
            input_shape=(32, 32, 1),
            kernel_quantizer=quantized_bits(14, 2),
            bias_quantizer=quantized_bits(14, 2),
            name="conv2d_0_m"))

model.add(QActivation(activation=quantized_relu(14, 2), name='relu1'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max1'))

model.add(
    QConv2D(16, (2, 2),
            strides=(1, 1),
            kernel_quantizer=quantized_bits(14, 2),
            bias_quantizer=quantized_bits(14, 2),
            name="conv2d_1_m"))

model.add(QActivation(activation=quantized_relu(14, 2), name='relu2'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='max2'))
model.add(Flatten())
model.add(
    QDense(120,
           name='fc1',
           # assumed arguments (the original snippet is truncated at this point);
           # they are taken to mirror the quantizers used in the layers above
           kernel_quantizer=quantized_bits(14, 2),
           bias_quantizer=quantized_bits(14, 2)))
Example #6
def Q_baseline_model(size, epochs, optimizer, X_training, y_training,
                     X_validation, y_validation, output_name):
    '''
    NN Model constructor with loss and accuracy plots.

    Parameters
    ----------
    size : int
        Batch size used in the training process.
    epochs : int
        Number of epochs for which the model is trained.
    optimizer : keras.optimizer
        Optimizer function.
    X_training : Numpy array
        Training data set.
    y_training : Numpy array
        True labels for the training set.
    X_validation : Numpy array
        Validation data set.
    y_validation : Numpy array
        True labels for the validation set.
    output_name : str
        Name used for saved plots.

    Returns
    -------
    model : keras.Sequential
        Trained QKeras model.
    w : list
        Per-layer weights of the trained model, kept for later inference.

    '''
    pruning = False
    # create model
    name = "RMSE validation"
    name2 = "RMSE training"
    history = History()
    model = Sequential()
    model.add(
        QDense(60,
               input_shape=(27, ),
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1),
               kernel_initializer='random_normal'))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu1'))
    model.add(
        QDense(50,
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu2'))
    # model.add(Dropout(rate=0.2))
    model.add(
        QDense(30,
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu3'))
    model.add(
        QDense(40,
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu4'))
    model.add(
        QDense(15,
               kernel_quantizer=quantized_bits(16, 1),
               bias_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu5'))

    # model.add(QDense(80,  input_shape=(27,),kernel_quantizer=quantized_bits(16,1),bias_quantizer=quantized_bits(16,1), kernel_initializer='random_normal'))
    # model.add(QActivation(activation=quantized_relu(16,1), name='relu1'))
    # model.add(QDense(50,kernel_quantizer=quantized_bits(16,1),bias_quantizer=quantized_bits(16,1)))
    # model.add(QActivation(activation=quantized_relu(16,1), name='relu2'))
    # model.add(QDense(35,kernel_quantizer=quantized_bits(16,1),bias_quantizer=quantized_bits(16,1)))
    # model.add(QActivation(activation=quantized_relu(16,1), name='relu3'))
    # # # model.add(Dropout(rate=0.2))
    model.add(QDense(1, kernel_quantizer=quantized_bits(16, 1)))
    model.add(QActivation(activation=quantized_relu(16, 1), name='relu6'))
    #model.add(Activation("sigmoid"))
    # model.add(QActivation(activation=quantized_tanh(16,1),name='tanh'))
    if pruning:
        print("////////////////////////Training Model with pruning")
        pruning_params = {
            "pruning_schedule":
            pruning_schedule.ConstantSparsity(0.75,
                                              begin_step=2000,
                                              frequency=100)
        }
        model = prune.prune_low_magnitude(model, **pruning_params)
        model.compile(loss='mean_squared_error', optimizer=optimizer)
        model.fit(X_training,
                  y_training,
                  batch_size=size,
                  epochs=epochs,
                  verbose=1,
                  validation_data=(X_validation, y_validation),
                  callbacks=[history,
                             pruning_callbacks.UpdatePruningStep()])

        model = strip_pruning(model)
        w = model.layers[0].weights[0].numpy()
        h, b = np.histogram(w, bins=100)
        plt.figure(figsize=(7, 7))
        plt.bar(b[:-1], h, width=b[1] - b[0])
        plt.semilogy()
        plt.savefig("Zeros' distribution", format='png')
        print('% of zeros = {}'.format(np.sum(w == 0) / np.size(w)))
    else:
        print("////////////////////////Training Model WITHOUT pruning")
        model.compile(loss='mean_squared_error', optimizer=optimizer)
        model.fit(X_training,
                  y_training,
                  batch_size=size,
                  epochs=epochs,
                  verbose=1,
                  validation_data=(X_validation, y_validation),
                  callbacks=[history])
    # Compile model
    # model.compile(loss='mean_squared_error', optimizer=optimizer)
    # model.fit(X_training, y_training,
    #       batch_size=size,
    #       epochs=epochs,
    #       verbose=1,
    #       validation_data=(X_validation, y_validation),callbacks=[history])

    w = []
    for layer in model.layers:
        print(layer)
        w.append(layer.get_weights())

    #print(w)
    train_predictions = model.predict(X_training)
    predictions = model.predict(X_validation)
    lin_mse = mean_squared_error(y_validation, predictions)
    lin_rmse = np.sqrt(lin_mse)
    lin_mse2 = mean_squared_error(y_training, train_predictions)
    lin_rmse2 = np.sqrt(lin_mse2)
    msg = "%s: %f" % (name, lin_rmse)
    msg2 = "%s: %f" % (name2, lin_rmse2)
    print(msg)
    print(msg2)
    fig, ax = plt.subplots()
    # xy=np.vstack([y_validation, predictions])
    #z=gaussian_kde(xy)
    ax.scatter(y_validation, predictions, edgecolors=(0, 0, 0))
    ax.set_title('Regression model predictions (validation set)')
    ax.set_xlabel('Measured $p_T$ (GeV/c)')
    ax.set_ylabel('Predicted $p_T$ (GeV/c)')
    ax.plot([y_validation.min(), y_validation.max()],
            [y_validation.min(), y_validation.max()], 'k--', lw=4)
    plt.rc('font', size=20)
    plt.rc('axes', titlesize=18)
    plt.rc('axes', labelsize=18)
    plt.rc('xtick', labelsize=18)
    plt.rc('ytick', labelsize=18)
    plt.rc('legend', fontsize=18)
    plt.rc('figure', titlesize=18)
    plt.tight_layout()
    plt.savefig(outrootname + '/' + '1' + output_name, format='png', dpi=800)
    fig2, ax2 = plt.subplots()
    ax2.plot(history.history['loss'], label='loss')
    ax2.plot(history.history['val_loss'], label='val_loss')
    ax2.set_title('Training and Validation loss per epoch')
    ax2.set_xlabel('# Epoch')
    ax2.set_ylabel('loss')
    plt.legend()
    plt.tight_layout()
    plt.savefig(outrootname + '/' + '2' + output_name, format='png', dpi=800)
    #plt.show()
    del ax, ax2

    return model, w
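A sketch of a call to Q_baseline_model (the optimizer, batch size, and data are placeholders; note that the function also relies on module-level names such as outrootname, plt, np, History, and the pruning utilities):

import numpy as np
from tensorflow.keras.optimizers import Adam

# hypothetical 27-feature regression data matching the input_shape above
X_train, y_train = np.random.rand(5000, 27), np.random.rand(5000)
X_val, y_val = np.random.rand(1000, 27), np.random.rand(1000)

model, w = Q_baseline_model(size=256, epochs=50,
                            optimizer=Adam(learning_rate=1e-3),
                            X_training=X_train, y_training=y_train,
                            X_validation=X_val, y_validation=y_val,
                            output_name='qkeras_regression.png')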
Example #7
def dense_embedding_quantized(n_features=6,
                              n_features_cat=2,
                              number_of_pupcandis=100,
                              embedding_input_dim={
                                  0: 13,
                                  1: 3
                              },
                              emb_out_dim=2,
                              with_bias=True,
                              t_mode=0,
                              logit_total_bits=7,
                              logit_int_bits=2,
                              activation_total_bits=7,
                              logit_quantizer='quantized_bits',
                              activation_quantizer='quantized_relu',
                              activation_int_bits=2,
                              alpha=1,
                              use_stochastic_rounding=False,
                              units=[64, 32, 16]):
    n_dense_layers = len(units)

    logit_quantizer = getattr(qkeras.quantizers, logit_quantizer)(
        logit_total_bits,
        logit_int_bits,
        alpha=alpha,
        use_stochastic_rounding=use_stochastic_rounding)
    activation_quantizer = getattr(qkeras.quantizers,
                                   activation_quantizer)(activation_total_bits,
                                                         activation_int_bits)

    inputs_cont = Input(shape=(number_of_pupcandis, n_features - 2),
                        name='input_cont')
    pxpy = Input(shape=(number_of_pupcandis, 2), name='input_pxpy')

    embeddings = []
    inputs = [inputs_cont, pxpy]
    for i_emb in range(n_features_cat):
        input_cat = Input(shape=(number_of_pupcandis, ),
                          name='input_cat{}'.format(i_emb))
        inputs.append(input_cat)
        embedding = Embedding(input_dim=embedding_input_dim[i_emb],
                              output_dim=emb_out_dim,
                              embeddings_initializer=initializers.RandomNormal(
                                  mean=0, stddev=0.4 / emb_out_dim),
                              name='embedding{}'.format(i_emb))(input_cat)
        embeddings.append(embedding)

    # could concatenate all three at once if supported in hls4ml; for now, do it pairwise
    # x = Concatenate()([inputs_cont] + embeddings)
    emb_concat = Concatenate()(embeddings)
    x = Concatenate()([inputs_cont, emb_concat])

    for i_dense in range(n_dense_layers):
        x = QDense(units[i_dense],
                   kernel_quantizer=logit_quantizer,
                   bias_quantizer=logit_quantizer,
                   kernel_initializer='lecun_uniform')(x)
        x = BatchNormalization(momentum=0.95)(x)
        x = QActivation(activation=activation_quantizer)(x)

    if t_mode == 0:
        x = qkeras.qpooling.QGlobalAveragePooling1D(
            name='pool', quantizer=logit_quantizer)(x)
        # pool size?
        outputs = QDense(2,
                         name='output',
                         bias_quantizer=logit_quantizer,
                         kernel_quantizer=logit_quantizer,
                         activation='linear')(x)

    if t_mode == 1:
        if with_bias:
            b = QDense(
                2,
                name='met_bias',
                kernel_quantizer=logit_quantizer,
                bias_quantizer=logit_quantizer,
                kernel_initializer=initializers.VarianceScaling(scale=0.02))(x)
            pxpy = Add()([pxpy, b])
        w = QDense(
            1,
            name='met_weight',
            kernel_quantizer=logit_quantizer,
            bias_quantizer=logit_quantizer,
            kernel_initializer=initializers.VarianceScaling(scale=0.02))(x)
        w = BatchNormalization(trainable=False,
                               name='met_weight_minus_one',
                               epsilon=False)(w)
        x = Multiply()([w, pxpy])

        x = GlobalAveragePooling1D(name='output')(x)
        outputs = x

    keras_model = Model(inputs=inputs, outputs=outputs)

    if t_mode == 1:
        # fix the frozen BatchNormalization so it computes (w - 1):
        # gamma=1, beta=-1, moving_mean=0, moving_variance=1
        keras_model.get_layer('met_weight_minus_one').set_weights(
            [np.array([1.]),
             np.array([-1.]),
             np.array([0.]),
             np.array([1.])])

    return keras_model
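A sketch of building this model and running inference (the batch size and random inputs are assumptions; the model expects its inputs in the order [continuous features, pxpy, categorical 0, categorical 1]):

import numpy as np

model = dense_embedding_quantized(n_features=6, n_features_cat=2,
                                  number_of_pupcandis=100, t_mode=1)
model.summary()

batch = 8
x_cont = np.random.rand(batch, 100, 4).astype('float32')  # n_features - 2 continuous features
x_pxpy = np.random.rand(batch, 100, 2).astype('float32')
x_cat0 = np.random.randint(0, 13, size=(batch, 100))       # embedding_input_dim[0] = 13
x_cat1 = np.random.randint(0, 3, size=(batch, 100))        # embedding_input_dim[1] = 3
y_pred = model.predict([x_cont, x_pxpy, x_cat0, x_cat1])   # shape (batch, 2)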
Example #8
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1
from qkeras.qlayers import QDense, QActivation
from qkeras.quantizers import quantized_bits, quantized_relu
from callbacks import all_callbacks

model = Sequential()
model.add(
    QDense(32,
           input_shape=(16, ),
           name='fc1',
           kernel_quantizer=quantized_bits(6, 0, alpha=1),
           bias_quantizer=quantized_bits(6, 0, alpha=1),
           kernel_initializer='lecun_uniform',
           kernel_regularizer=l1(0.0001)))
model.add(QActivation(activation=quantized_relu(6), name='relu1'))
model.add(
    QDense(5,
           name='output',
           kernel_quantizer=quantized_bits(6, 0, alpha=1),
           bias_quantizer=quantized_bits(6, 0, alpha=1),
           kernel_initializer='lecun_uniform',
           kernel_regularizer=l1(0.0001)))
model.add(Activation(activation='softmax', name='softmax'))

from tensorflow_model_optimization.python.core.sparsity.keras import prune, pruning_callbacks, pruning_schedule
from tensorflow_model_optimization.sparsity.keras import strip_pruning
pruning_params = {
    "pruning_schedule":
    pruning_schedule.ConstantSparsity(0.75, begin_step=2000, frequency=100)
}
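The snippet stops after defining pruning_params; a sketch of how it is typically applied, following the same pattern as Example #6 (the training data, optimizer settings, and epoch count here are placeholders):

# wrap the model so that 75% of the weights are pruned to zero during training
model = prune.prune_low_magnitude(model, **pruning_params)
model.compile(optimizer=Adam(learning_rate=1e-3),
              loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train,
          batch_size=1024, epochs=30, validation_split=0.25,
          callbacks=[pruning_callbacks.UpdatePruningStep()])
# remove the pruning wrappers before saving or converting the model
model = strip_pruning(model)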
Example #9
def resnet_v1_eembc_quantized(
        input_shape=[32, 32, 3],
        num_classes=10,
        l1p=0,
        l2p=1e-4,
        num_filters=[
            16,
            16,  # block 1
            32,
            32,  # block 2
            64,
            64  # block 3
        ],
        kernel_sizes=[
            3,
            3,
            3,  # block 1
            3,
            3,
            1,  # block 2
            3,
            3,
            1  # block 3
        ],
        strides=[
            '111',  # block 1
            '212',  # block 2
            '212',  # block 3
        ],
        logit_total_bits=7,
        logit_int_bits=2,
        activation_total_bits=7,
        activation_int_bits=2,
        alpha=1,
        use_stochastic_rounding=False,
        logit_quantizer='quantized_bits',
        activation_quantizer='quantized_relu',
        skip=True,
        avg_pooling=False):

    logit_quantizer = getattr(qkeras.quantizers, logit_quantizer)(
        logit_total_bits,
        logit_int_bits,
        alpha=alpha,
        use_stochastic_rounding=use_stochastic_rounding)
    activation_quantizer = getattr(qkeras.quantizers, activation_quantizer)(
        activation_total_bits,
        activation_int_bits,
        use_stochastic_rounding=use_stochastic_rounding)

    # Input layer, change kernel size to 7x7 and strides to 2 for an official resnet
    inputs = Input(shape=input_shape)
    x = QConv2DBatchnorm(num_filters[0],
                         kernel_size=kernel_sizes[0],
                         strides=int(strides[0][0]),
                         padding='same',
                         kernel_quantizer=logit_quantizer,
                         bias_quantizer=logit_quantizer,
                         kernel_initializer='he_normal',
                         kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(inputs)
    x = QActivation(activation=activation_quantizer)(x)

    # First stack
    # Weight layers
    y = QConv2DBatchnorm(num_filters[1],
                         kernel_size=kernel_sizes[1],
                         strides=int(strides[0][1]),
                         padding='same',
                         kernel_quantizer=logit_quantizer,
                         bias_quantizer=logit_quantizer,
                         kernel_initializer='he_normal',
                         kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
    y = QActivation(activation=activation_quantizer)(y)
    y = QConv2DBatchnorm(num_filters[0],
                         kernel_size=kernel_sizes[2],
                         strides=int(strides[0][2]),
                         padding='same',
                         kernel_quantizer=logit_quantizer,
                         bias_quantizer=logit_quantizer,
                         kernel_initializer='he_normal',
                         kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(y)

    # Overall residual, connect weight layer and identity paths
    if skip:
        y = QActivation(activation=logit_quantizer)(y)
        x = Add()([x, y])
    else:
        x = y
    x = QActivation(activation=activation_quantizer)(x)

    if len(num_filters) > 2 and num_filters[2] > 0 and strides[
            1] != '' and kernel_sizes[3] > 0:
        # Second stack
        # Weight layers
        y = QConv2DBatchnorm(num_filters[2],
                             kernel_size=kernel_sizes[3],
                             strides=int(strides[1][0]),
                             padding='same',
                             kernel_quantizer=logit_quantizer,
                             bias_quantizer=logit_quantizer,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
        y = QActivation(activation=activation_quantizer)(y)
        y = QConv2DBatchnorm(num_filters[3],
                             kernel_size=kernel_sizes[4],
                             strides=int(strides[1][1]),
                             padding='same',
                             kernel_quantizer=logit_quantizer,
                             bias_quantizer=logit_quantizer,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(y)

        # Adjust for change in dimension due to stride in identity
        x = QConv2D(num_filters[3],
                    kernel_size=kernel_sizes[5],
                    strides=int(strides[1][2]),
                    padding='same',
                    kernel_quantizer=logit_quantizer,
                    bias_quantizer=logit_quantizer,
                    kernel_initializer='he_normal',
                    kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
        x = QActivation(activation=logit_quantizer)(x)

        # Overall residual, connect weight layer and identity paths
        if skip:
            y = QActivation(activation=logit_quantizer)(y)
            x = Add()([x, y])
        else:
            x = y
        x = QActivation(activation=activation_quantizer)(x)

    if len(num_filters) > 4 and num_filters[4] > 0 and strides[
            2] != '' and kernel_sizes[6] > 0:
        # Third stack
        # Weight layers
        y = QConv2DBatchnorm(num_filters[4],
                             kernel_size=kernel_sizes[6],
                             strides=int(strides[2][0]),
                             padding='same',
                             kernel_quantizer=logit_quantizer,
                             bias_quantizer=logit_quantizer,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
        y = QActivation(activation=activation_quantizer)(y)
        y = QConv2DBatchnorm(num_filters[5],
                             kernel_size=kernel_sizes[7],
                             strides=int(strides[2][1]),
                             padding='same',
                             kernel_quantizer=logit_quantizer,
                             bias_quantizer=logit_quantizer,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(y)

        # Adjust for change in dimension due to stride in identity
        x = QConv2D(num_filters[5],
                    kernel_size=kernel_sizes[8],
                    strides=int(strides[2][2]),
                    padding='same',
                    kernel_quantizer=logit_quantizer,
                    bias_quantizer=logit_quantizer,
                    kernel_initializer='he_normal',
                    kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
        x = QActivation(activation=logit_quantizer)(x)

        # Overall residual, connect weight layer and identity paths
        if skip:
            y = QActivation(activation=logit_quantizer)(y)
            x = Add()([x, y])
        else:
            x = y
        x = QActivation(activation=activation_quantizer)(x)

    if len(num_filters) > 6 and num_filters[6] > 0 and strides[
            3] != '' and kernel_sizes[9] > 0:
        # Fourth stack (not complete stack)
        # Weight layers
        y = QConv2DBatchnorm(num_filters[6],
                             kernel_size=kernel_sizes[9],
                             strides=int(strides[3][0]),
                             padding='same',
                             kernel_quantizer=logit_quantizer,
                             bias_quantizer=logit_quantizer,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
        x = QActivation(activation=activation_quantizer)(y)

    if len(num_filters) > 7 and num_filters[7] > 0 and strides[
            3] != '' and kernel_sizes[10] > 0:
        y = x
        y = QConv2DBatchnorm(num_filters[7],
                             kernel_size=kernel_sizes[10],
                             strides=int(strides[3][1]),
                             padding='same',
                             kernel_quantizer=logit_quantizer,
                             bias_quantizer=logit_quantizer,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(y)
        x = QActivation(activation=logit_quantizer)(x)

        # Overall residual, connect weight layer and identity paths
        if skip:
            y = QActivation(activation=logit_quantizer)(y)
            x = Add()([x, y])
        else:
            x = y
        x = QActivation(activation=activation_quantizer)(x)

    # Final classification layer.
    pool_size = int(np.amin(x.shape[1:3]))
    if pool_size > 1 and avg_pooling:
        x = QAveragePooling2D(pool_size=pool_size,
                              quantizer=logit_quantizer)(x)

    y = Flatten()(x)
    # Changed output to separate QDense but did not quantize softmax as specified
    outputs = QDense(num_classes,
                     kernel_quantizer=logit_quantizer,
                     bias_quantizer=logit_quantizer,
                     kernel_initializer='he_normal')(y)
    outputs = Activation('softmax', name='softmax')(outputs)

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
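A sketch of instantiating the quantized ResNet (the defaults above correspond to a 32x32x3, 10-class model; the compile settings are assumptions):

model = resnet_v1_eembc_quantized(input_shape=[32, 32, 3], num_classes=10,
                                  logit_total_bits=7, logit_int_bits=2,
                                  activation_total_bits=7, activation_int_bits=2)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()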
Example #10
def resnet_v1_eembc(
        input_shape=[32, 32, 3],
        num_classes=10,
        l1p=0,
        l2p=1e-4,
        num_filters=[
            16,
            16,  # block 1
            32,
            32,  # block 2
            64,
            64  # block 3
        ],
        kernel_sizes=[
            3,
            3,
            3,  # block 1
            3,
            3,
            1,  # block 2
            3,
            3,
            1  # block 3
        ],
        strides=[
            '111',  # block 1
            '212',  # block 2
            '212',  # block 3
        ],
        skip=True,
        avg_pooling=False):

    # Input layer, change kernel size to 7x7 and strides to 2 for an official resnet
    inputs = Input(shape=input_shape)
    x = Conv2D(num_filters[0],
               kernel_size=kernel_sizes[0],
               strides=int(strides[0][0]),
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # First stack
    # Weight layers
    y = Conv2D(num_filters[1],
               kernel_size=kernel_sizes[1],
               strides=int(strides[0][1]),
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv2D(num_filters[0],
               kernel_size=kernel_sizes[2],
               strides=int(strides[0][2]),
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(y)
    y = BatchNormalization()(y)

    # Overall residual, connect weight layer and identity paths
    if skip:
        x = Add()([x, y])
    else:
        x = y
    x = Activation('relu')(x)

    if len(num_filters) > 2 and num_filters[2] > 0 and strides[
            1] != '' and kernel_sizes[3] > 0:
        # Second stack
        # Weight layers
        y = Conv2D(num_filters[2],
                   kernel_size=kernel_sizes[3],
                   strides=int(strides[1][0]),
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv2D(num_filters[3],
                   kernel_size=kernel_sizes[4],
                   strides=int(strides[1][1]),
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(y)
        y = BatchNormalization()(y)

        # Adjust for change in dimension due to stride in identity
        x = Conv2D(num_filters[3],
                   kernel_size=kernel_sizes[5],
                   strides=int(strides[1][2]),
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)

        # Overall residual, connect weight layer and identity paths
        if skip:
            x = Add()([x, y])
        else:
            x = y
        x = Activation('relu')(x)

    if len(num_filters) > 4 and num_filters[4] > 0 and strides[
            2] != '' and kernel_sizes[6] > 0:
        # Third stack
        # Weight layers
        y = Conv2D(num_filters[4],
                   kernel_size=kernel_sizes[6],
                   strides=int(strides[2][0]),
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
        y = Conv2D(num_filters[5],
                   kernel_size=kernel_sizes[7],
                   strides=int(strides[2][1]),
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(y)
        y = BatchNormalization()(y)

        # Adjust for change in dimension due to stride in identity
        x = Conv2D(num_filters[5],
                   kernel_size=kernel_sizes[8],
                   strides=int(strides[2][2]),
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)

        # Overall residual, connect weight layer and identity paths
        if skip:
            x = Add()([x, y])
        else:
            x = y
        x = Activation('relu')(x)

    if len(num_filters) > 6 and num_filters[6] > 0 and strides[
            3] != '' and kernel_sizes[9] > 0:
        # Fourth stack (not complete stack)
        # Weight layers
        y = Conv2D(num_filters[6],
                   kernel_size=kernel_sizes[9],
                   strides=int(strides[3][0]),
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(x)
        y = BatchNormalization()(y)
        x = Activation('relu')(y)

    if len(num_filters) > 7 and num_filters[7] > 0 and strides[
            3] != '' and kernel_sizes[10] > 0:
        y = x
        y = Conv2D(num_filters[7],
                   kernel_size=kernel_sizes[10],
                   strides=int(strides[3][1]),
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=l1_l2(l1=l1p, l2=l2p))(y)
        y = BatchNormalization()(y)
        x = Activation('relu')(y)

        # Overall residual, connect weight layer and identity paths
        if skip:
            x = Add()([x, y])
        else:
            x = y
        x = Activation('relu')(x)

    # Final classification layer.
    pool_size = int(np.amin(x.shape[1:3]))
    if pool_size > 1 and avg_pooling:
        x = AveragePooling2D(pool_size=pool_size)(x)
    y = Flatten()(x)
    y = Dense(num_classes, kernel_initializer='he_normal')(y)
    outputs = Activation('softmax', name='softmax')(y)

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
Example #11
def get_qkeras_model(inputDim,
                     hiddenDim=128,
                     encodeDim=8,
                     bits=7,
                     intBits=0,
                     reluBits=7,
                     reluIntBits=3,
                     lastBits=7,
                     lastIntBits=7,
                     l1reg=0,
                     batchNorm=True,
                     qBatchNorm=False,
                     input_batchNorm=False,
                     halfcode_layers=4,
                     fan_in_out=64,
                     **kwargs):
    """
    Define the QKeras model.
    The model is based on a simple dense autoencoder
    (128*128*128*128*8*128*128*128*128).
    """
    inputLayer = Input(shape=(inputDim, ))
    # shared quantizer/initializer/regularizer settings reused by every QDense layer
    # below (note that this dict replaces any **kwargs passed to the function)
    kwargs = {
        'kernel_quantizer': quantized_bits(bits, intBits, alpha=1),
        'bias_quantizer': quantized_bits(bits, intBits, alpha=1),
        'kernel_initializer': 'lecun_uniform',
        'kernel_regularizer': l1(l1reg)
    }

    # Declare encoder network
    for i in range(halfcode_layers):
        if i == 0:
            h = QDense(fan_in_out, **kwargs)(inputLayer)
        else:
            h = QDense(hiddenDim, **kwargs)(h)
        if batchNorm:
            if qBatchNorm:
                h = QBatchNormalization()(h)
            else:
                h = BatchNormalization()(h)
        h = QActivation(activation=quantized_relu(reluBits, reluIntBits))(h)

    # Declare latent space
    if halfcode_layers == 0:
        h = QDense(encodeDim, **kwargs)(inputLayer)
    else:
        h = QDense(encodeDim, **kwargs)(h)
    if batchNorm:
        if qBatchNorm:
            h = QBatchNormalization()(h)
        else:
            h = BatchNormalization()(h)
    h = QActivation(activation=quantized_relu(reluBits, reluIntBits))(h)

    # Declare decoder network
    for i in range(halfcode_layers):
        if i == halfcode_layers - 1:
            h = QDense(fan_in_out, **kwargs)(h)
        else:
            h = QDense(hiddenDim, **kwargs)(h)
        if batchNorm:
            if qBatchNorm:
                h = QBatchNormalization()(h)
            else:
                h = BatchNormalization()(h)
        h = QActivation(activation=quantized_relu(reluBits, reluIntBits))(h)

    kwargslast = {
        'kernel_quantizer': quantized_bits(lastBits, lastIntBits, alpha=1),
        'bias_quantizer': quantized_bits(lastBits, lastIntBits, alpha=1),
        'kernel_initializer': 'lecun_uniform',
        'kernel_regularizer': l1(l1reg)
    }
    h = QDense(inputDim, **kwargslast)(h)

    return Model(inputs=inputLayer, outputs=h)
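A sketch of building and training this autoencoder (the 640-dimensional input, batch size, and epoch count are assumptions; as an autoencoder it is trained to reconstruct its own input):

import numpy as np

model = get_qkeras_model(inputDim=640, hiddenDim=128, encodeDim=8,
                         bits=7, intBits=0, reluBits=7, reluIntBits=3)
model.compile(optimizer='adam', loss='mean_squared_error')

X = np.random.rand(1000, 640).astype('float32')
model.fit(X, X, batch_size=512, epochs=10, validation_split=0.1)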