Example #1
def main():
    #################################
    ##### build model
    #################################
    model = build_model()
    model.summary()
    #################################
    ##### compile & train
    #################################
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    model.fit(train_images, train_labels, epochs=2, shuffle=True)
    model.evaluate(test_images, test_labels)
    model.save("./float.h5")
    del model

    # Use a JSON-format custom quantize strategy.
    # If the custom layer is defined in the same *.py file as the quantize
    # tool, then "layer_type" is "__main__.PRelu"; if not in the same
    # *.py file, layer_type should be ${FILE_NAME}.PRelu
    custom_quantize_strategy = "./custom_quantize_strategy.json"
    my_quantize_strategy = load_json(custom_quantize_strategy)
    #################################
    ##### quantize model
    #################################
    loaded_model = tf.keras.models.load_model("./float.h5",
                                              custom_objects=custom_objects)
    loaded_model.summary()

    # The custom layer is quantized according to custom_quantize_strategy
    # first, and then wrapped by the custom layer wrapper
    quant_model = vitis_quantize.VitisQuantizer(
        loaded_model,
        'pof2s',
        custom_objects=custom_objects,
        custom_quantize_strategy=my_quantize_strategy).quantize_model(
            calib_dataset=test_images)
    quant_model.summary()
    quant_model.save('quantized.h5')

    with vitis_quantize.quantize_scope():
        quantized_model = tf.keras.models.load_model(
            "quantized.h5", custom_objects=custom_objects)
        quantized_model.compile(optimizer="adam",
                                loss="sparse_categorical_crossentropy",
                                metrics=["accuracy"])
        quantized_model.evaluate(test_images, test_labels)

    # Dump Quantized Model
    vitis_quantize.VitisQuantizer.dump_model(quant_model,
                                             test_images[0:1],
                                             "./dump_results",
                                             dump_float=True)
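
The snippet above assumes a load_json helper and a custom_objects mapping for the custom PRelu layer; a minimal sketch of what they could look like (illustrative only, not part of the Vitis AI API):

import json

def load_json(path):
    # read the custom quantize strategy file referenced above
    with open(path, 'r') as f:
        return json.load(f)

# custom_objects maps each saved custom layer name to its class, e.g. for
# a hypothetical PRelu layer defined in this file:
# custom_objects = {'PRelu': PRelu}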
Example #2
def quant_model(build_dir, batchsize, evaluate):
    '''
    Quantize the floating-point model
    Save to HDF5 file
    '''

    float_dir = build_dir + '/float_model'
    quant_dir = build_dir + '/quant_model'
    tfrec_dir = build_dir + '/tfrec_val'

    print(DIVIDER)
    print('Make & Save quantized model..')

    # load the floating point trained model
    float_model = load_model(float_dir + '/float_model.h5', compile=False)

    # get input dimensions of the floating-point model
    height = float_model.input_shape[1]
    width = float_model.input_shape[2]
    chans = float_model.input_shape[3]
    print(' Input dimensions: height:', height, ' width:', width, 'channels:',
          chans)

    # make TFRecord dataset and image processing pipeline
    test_dataset = input_fn(tfrec_dir, batchsize, False)

    # run quantization
    quantizer = vitis_quantize.VitisQuantizer(float_model)
    quant_model = quantizer.quantize_model(calib_dataset=test_dataset)

    # save quantized model
    os.makedirs(quant_dir, exist_ok=True)
    quant_model.save(quant_dir + '/quant_model.h5')
    print(' Saved quantized model to', quant_dir + '/quant_model.h5')

    if (evaluate):
        '''
        Evaluate the quantized model
        '''
        print(DIVIDER)
        print('Evaluating quantized model..')

        quant_model.compile(
            loss=SparseCategoricalCrossentropy(from_logits=True),
            metrics=['sparse_categorical_accuracy'])

        scores = quant_model.evaluate(test_dataset, steps=None, verbose=0)

        print(' Quantized model accuracy: {0:.4f}'.format(scores[1] * 100), '%')
        print(DIVIDER)

    return
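
The input_fn helper used above is not shown here; a possible sketch of the TFRecord pipeline it could correspond to (feature names and preprocessing are assumptions, not taken from the original code):

import tensorflow as tf

def input_fn(tfrec_dir, batchsize, is_training):
    # build a batched tf.data pipeline from TFRecord shards
    files = tf.data.Dataset.list_files(tfrec_dir + '/*.tfrecord',
                                       shuffle=is_training)
    dataset = tf.data.TFRecordDataset(files)

    def _parse(record):
        feats = tf.io.parse_single_example(
            record,
            {'image': tf.io.FixedLenFeature([], tf.string),
             'label': tf.io.FixedLenFeature([], tf.int64)})
        image = tf.image.convert_image_dtype(
            tf.io.decode_jpeg(feats['image'], channels=3), tf.float32)
        return image, feats['label']

    dataset = dataset.map(_parse, num_parallel_calls=tf.data.AUTOTUNE)
    if is_training:
        dataset = dataset.shuffle(1024)
    return dataset.batch(batchsize).prefetch(tf.data.AUTOTUNE)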
Example #3
def quant_model(float_model, quant_model, batchsize, tfrec_dir, evaluate):
    '''
    Quantize the floating-point model
    Save to HDF5 file
    '''

    # make folder for saving quantized model
    head_tail = os.path.split(quant_model)
    os.makedirs(head_tail[0], exist_ok=True)

    # load the floating point trained model
    float_model = load_model(float_model)

    # get input dimensions of the floating-point model
    height = float_model.input_shape[1]
    width = float_model.input_shape[2]

    # make TFRecord dataset and image processing pipeline
    quant_dataset = input_fn_quant(tfrec_dir, batchsize, height, width)

    # run quantization
    quantizer = vitis_quantize.VitisQuantizer(float_model)
    quantized_model = quantizer.quantize_model(calib_dataset=quant_dataset)

    # save quantized model
    quantized_model.save(quant_model)
    print('Saved quantized model to', quant_model)


    if (evaluate):
        '''
        Evaluate quantized model
        '''
        print('\n'+DIVIDER)
        print('Evaluating quantized model..')
        print(DIVIDER+'\n')

        test_dataset = input_fn_test(tfrec_dir, batchsize, height, width)

        quantized_model.compile(optimizer=Adam(),
                                loss='sparse_categorical_crossentropy',
                                metrics=['accuracy'])

        scores = quantized_model.evaluate(test_dataset,
                                          verbose=0)

        print('Quantized model accuracy: {0:.4f}'.format(scores[1] * 100), '%')
        print('\n'+DIVIDER)

    return
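
A hedged sketch of how a function like this is typically driven from the command line (argument names and defaults are illustrative only):

import argparse

if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument('--float_model', type=str, default='build/float_model/f_model.h5')
    ap.add_argument('--quant_model', type=str, default='build/quant_model/q_model.h5')
    ap.add_argument('--batchsize', type=int, default=50)
    ap.add_argument('--tfrec_dir', type=str, default='build/tfrecords')
    ap.add_argument('--evaluate', action='store_true')
    args = ap.parse_args()
    quant_model(args.float_model, args.quant_model, args.batchsize,
                args.tfrec_dir, args.evaluate)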
Example #4
def quant_model(float_model, quant_model, batchsize, predict, pred_dir):
    '''
    Quantize the floating-point model
    Save to HDF5 file
    '''

    # make folder for saving quantized model
    head_tail = os.path.split(quant_model)
    os.makedirs(head_tail[0], exist_ok=True)

    # make dataset and image processing pipeline
    _, x_test, _, x_test_noisy = mnist_download()
    calib_dataset = input_fn((x_test_noisy,x_test), batchsize, False)

    with custom_object_scope({'Sampling': Sampling}):
      # load trained floating-point model    
      float_model = load_model(float_model, compile=False, custom_objects={'Sampling': Sampling} )

      # quantizer
      quantizer = vitis_quantize.VitisQuantizer(float_model)
      quantized_model = quantizer.quantize_model(calib_dataset=calib_dataset)

    # save quantized model
    quantized_model.save(quant_model)
    print('Saved quantized model to', quant_model)


    '''
    Predictions
    '''
    if (predict):
      print('\n'+DIVIDER)
      print('Predicting with quantized model..')
      print(DIVIDER+'\n')

      # remake predictions folder
      shutil.rmtree(pred_dir, ignore_errors=True)
      os.makedirs(pred_dir)

      predict_dataset = input_fn((x_test_noisy), batchsize, False)
      predictions = quantized_model.predict(predict_dataset, verbose=0)

      # scale pixel values back up to range 0:255 then save as PNG
      for i in range(20):
        cv2.imwrite(pred_dir+'/pred_'+str(i)+'.png', predictions[i] * 255.0)
      print('Predictions saved as images in ./' + pred_dir)


    return
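
The custom Sampling layer loaded above is not shown; a minimal sketch of the usual VAE reparameterization layer it is assumed to be (the actual implementation may differ):

import tensorflow as tf

class Sampling(tf.keras.layers.Layer):
    '''Draw z from N(z_mean, exp(z_log_var)) via the reparameterization trick.'''
    def call(self, inputs):
        z_mean, z_log_var = inputs
        epsilon = tf.random.normal(shape=tf.shape(z_mean))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon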
Example #5
log_dir = "logs/float_fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                      histogram_freq=1)
model.fit(train_images,
          train_labels,
          epochs=1,
          validation_data=(test_images, test_labels))

model.save('float.h5')

# Post-Training Quantize
from tensorflow_model_optimization.quantization.keras import vitis_quantize
# model: the float model to be calibrated and quantized
# quantize_strategy: one of 'pof2s', 'fs', 'tqt'
quantizer = vitis_quantize.VitisQuantizer(model, quantize_strategy='fs')

# calib_dataset: image dataset used for calibration
# input_method: method used to calibrate the quantization ranges
#   MinMax (0)     - tracks the min/max values globally
#   MSE (1)        - minimizes the mean squared error
#   Entropy (2)    - minimizes the KL-divergence of the collected histogram
#   Percentile (3) - clips to a percentile fraction of the collected data
# input_method_percentile: percentile value when input_method=3,
#   default 99.9999 (typical values: 99.9, 99.99, 99.999, 99.9999)
quantized_model = quantizer.quantize_model(calib_dataset=train_images[0:10],
                                           input_method=3,
                                           input_method_percentile=99.9999)
quantized_model.save('quantized.h5')
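
After post-training quantization the saved model can be reloaded and checked in the same way as Example #1 (a sketch, assuming the same MNIST-style arrays used above):

with vitis_quantize.quantize_scope():
    reloaded = tf.keras.models.load_model('quantized.h5')
reloaded.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'])
reloaded.evaluate(test_images, test_labels)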
Example #6
log_dir = "logs/float_fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                      histogram_freq=1)
model.fit(train_images,
          train_labels,
          epochs=5,
          validation_data=(test_images, test_labels))

model.save('float.h5')

# Quantize Finetune
from tensorflow_model_optimization.quantization.keras import vitis_quantize

# qat stands for quantization-aware training.
quantizer = vitis_quantize.VitisQuantizer(model, '8bit_tqt')
qat_model = quantizer.get_qat_model(init_quant=True,
                                    calib_dataset=train_images[0:10],
                                    include_cle=True,
                                    freeze_bn_delay=1000)

qat_model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])

qat_model.fit(train_images,
              train_labels,
              epochs=3,
              validation_data=(test_images, test_labels))
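
After fine-tuning, the QAT model can be evaluated and saved like any Keras model (a sketch; the deployment conversion step depends on the Vitis AI release in use, so verify the API against its documentation):

qat_model.evaluate(test_images, test_labels)
qat_model.save('qat_model.h5')

# Some Vitis AI releases expose a get_deploy_model() helper to convert the
# fine-tuned model into a deployable quantized model; check your version:
# deploy_model = vitis_quantize.VitisQuantizer.get_deploy_model(qat_model)
# deploy_model.save('deploy_model.h5')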
Example #7
MODEL_DIR = './models'
FLOAT_MODEL = 'float_model.h5'
QUANT_MODEL = 'quantized_model.h5'

# Load the floating point trained model
print('Load float model..')
path = os.path.join(MODEL_DIR, FLOAT_MODEL)
try:
    float_model = models.load_model(path)
except Exception:
    print('\nError: failed to load the float model!')
    raise

# get input dimensions of the floating-point model
height = float_model.input_shape[1]
width = float_model.input_shape[2]

# get Mnist dataset
print("\nLoad Mnist dataset..")
(_, _, test_dataset) = get_mnist_dataset()

# Run quantization
print('\nRun quantization..')
quantizer = vitis_quantize.VitisQuantizer(float_model)
quantized_model = quantizer.quantize_model(calib_dataset=test_dataset)

# Save quantized model
path = os.path.join(MODEL_DIR, QUANT_MODEL)
quantized_model.save(path)
print('\nSaved quantized model as', path)
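
Optionally the quantized model can be evaluated on the same MNIST test data used for calibration (a sketch, assuming get_mnist_dataset() returns a labelled, batched tf.data.Dataset):

quantized_model.compile(loss='sparse_categorical_crossentropy',
                        metrics=['accuracy'])
scores = quantized_model.evaluate(test_dataset, verbose=0)
print('Quantized model accuracy: {0:.4f} %'.format(scores[1] * 100))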
Example #8
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['sparse_categorical_accuracy'])

log_dir = "logs/float_fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                      histogram_freq=1)
model.fit(train_images,
          train_labels,
          epochs=1,
          validation_data=(test_images, test_labels))
model.save('float.h5')

# Quantize
from tensorflow_model_optimization.quantization.keras import vitis_quantize

# Build Quantizer
quantizer = vitis_quantize.VitisQuantizer(model)

# Dump the quantize strategy configs
quantize_strategy = quantizer.dump_quantize_strategy(
    dump_file='./quantize_strategy_v2.json', verbose=2)

# Modify the configs in 'quantize_strategy_v2.json'

# Set the modified quantize strategy configs
quantizer.set_quantize_strategy('./quantize_strategy_v2.json')

# Quantize
quant_model = quantizer.quantize_model(calib_dataset=train_images[0:10])
quant_model.save('quantized_0.h5')
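
Rather than editing quantize_strategy_v2.json by hand, the dumped strategy can also be modified programmatically before it is set back on the quantizer (a sketch; the keys inside the file depend on what dump_quantize_strategy actually wrote):

import json

with open('./quantize_strategy_v2.json') as f:
    cfg = json.load(f)

# ... adjust entries in cfg here ...

with open('./quantize_strategy_v2.json', 'w') as f:
    json.dump(cfg, f, indent=2)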
Example #9
def main():
    ## run once to save h5 file (add model info)
    if FLAGS.save_whole_model:
        model = ResNet50(weights='imagenet')
        model.save(FLAGS.model)
        exit()

    if not FLAGS.eval_images:
        train_data, eval_data = get_input_data(FLAGS.epochs)

    if FLAGS.dump or FLAGS.quantize_eval:
        from tensorflow_model_optimization.quantization.keras import vitis_quantize
        with vitis_quantize.quantize_scope():
            model = keras.models.load_model(FLAGS.model)

    elif FLAGS.createnewmodel:
        #for training the model from scratch use the following:
        basemodel = ResNet50(weights='imagenet',
                             include_top=True,
                             input_tensor=Input(shape=(100, 100, 3)))
        base_output = basemodel.layers[175].output
        new_output = tf.keras.layers.Dense(activation="softmax",
                                           units=131)(base_output)
        model = tf.keras.models.Model(inputs=basemodel.inputs,
                                      outputs=new_output)
        print(model.summary())

    else:
        model = keras.models.load_model(FLAGS.model)
        print(model.summary())

    img_paths, labels = get_images_infor_from_file(FLAGS.eval_image_path,
                                                   FLAGS.eval_image_list,
                                                   FLAGS.label_offset)
    imagenet_seq = ImagenetSequence(img_paths[0:1000], labels[0:1000],
                                    FLAGS.eval_batch_size)

    if FLAGS.quantize:
        # do quantization
        from tensorflow_model_optimization.quantization.keras import vitis_quantize
        #model = vitis_quantize.VitisQuantizer(model).quantize_model(calib_dataset=imagenet_seq)
        model = vitis_quantize.VitisQuantizer(model).quantize_model(
            calib_dataset=eval_data)

        # save quantized model
        model.save(os.path.join(FLAGS.quantize_output_dir, 'quantized.h5'))
        print('Quantize finished, results in: {}'.format(
            FLAGS.quantize_output_dir))
        return

    img_paths, labels = get_images_infor_from_file(FLAGS.eval_image_path,
                                                   FLAGS.eval_image_list,
                                                   FLAGS.label_offset)
    imagenet_seq = ImagenetSequence(img_paths[0:1], labels[0:1],
                                    FLAGS.eval_batch_size)

    if FLAGS.dump:
        # do quantize dump
        quantizer = vitis_quantize.VitisQuantizer.dump_model(
            model, imagenet_seq, FLAGS.dump_output_dir)

        print('Dump finished, results in: {}'.format(FLAGS.dump_output_dir))
        return

    initial_learning_rate = FLAGS.learning_rate
    lr_schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate,
        decay_steps=FLAGS.decay_steps,
        decay_rate=0.96,
        staircase=True)
    opt = RMSprop(learning_rate=lr_schedule)

    loss = keras.losses.SparseCategoricalCrossentropy()
    metric_top_5 = keras.metrics.SparseTopKCategoricalAccuracy()
    accuracy = keras.metrics.SparseCategoricalAccuracy()
    model.compile(optimizer=opt, loss=loss, metrics=[accuracy, metric_top_5])
    if not FLAGS.eval_only:
        if not os.path.exists(FLAGS.save_path):
            os.makedirs(FLAGS.save_path)
        callbacks = [
            keras.callbacks.ModelCheckpoint(
                filepath=os.path.join(FLAGS.save_path, FLAGS.filename),
                save_best_only=True,
                monitor="sparse_categorical_accuracy",
                verbose=1,
            )
        ]
        steps_per_epoch = FLAGS.steps_per_epoch if FLAGS.steps_per_epoch else np.ceil(
            TRAIN_NUM / FLAGS.batch_size)
        model.fit(train_data,
                  epochs=FLAGS.epochs,
                  callbacks=callbacks,
                  steps_per_epoch=steps_per_epoch,
                  validation_freq=FLAGS.eval_every_epoch,
                  validation_steps=EVAL_NUM / FLAGS.eval_batch_size,
                  validation_data=eval_data)
    if not FLAGS.eval_images:
        print("evaluate model using tf_records data format")
        model.evaluate(eval_data, steps=EVAL_NUM / FLAGS.eval_batch_size)
    if FLAGS.eval_images and FLAGS.eval_only:
        img_paths, labels = get_images_infor_from_file(FLAGS.eval_image_path,
                                                       FLAGS.eval_image_list,
                                                       FLAGS.label_offset)
        imagenet_seq = ImagenetSequence(img_paths, labels,
                                        FLAGS.eval_batch_size)
        res = model.evaluate(imagenet_seq,
                             steps=EVAL_NUM / FLAGS.eval_batch_size,
                             verbose=1)
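
The FLAGS object used throughout this example is assumed to be defined elsewhere with absl-style flags; a partial, illustrative sketch (flag names match the code above, default values are placeholders):

from absl import flags

flags.DEFINE_string('model', './resnet50.h5', 'path to the float model')
flags.DEFINE_bool('quantize', False, 'run post-training quantization')
flags.DEFINE_string('quantize_output_dir', './quantized', 'where to save quantized.h5')
flags.DEFINE_bool('dump', False, 'dump quantized layer outputs for cross-checking')
flags.DEFINE_string('dump_output_dir', './dump_results', 'where to save dump results')
flags.DEFINE_bool('eval_only', False, 'skip training and only evaluate')
FLAGS = flags.FLAGS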
Example #10
def quant_ft(build_dir, batchsize, learnrate, epochs, max_classes):
    '''
    Quantize & fine-tune the floating-point model
    Save to HDF5 file
    '''
    def step_decay(epoch):
        '''
        Learning rate scheduler used by callback
        Reduces learning rate depending on number of epochs
        '''
        lr = learnrate
        if epoch > 65:
            lr /= 10
        return lr

    float_dir = build_dir + '/float_model'
    quant_ft_dir = build_dir + '/quant_ft_model'
    tfrec_train = build_dir + '/tfrec_train'
    tfrec_val = build_dir + '/tfrec_val'

    print('\n' + DIVIDER)
    print('Quantization & Fine-tune')
    print(DIVIDER + '\n')

    # load the floating point trained model
    print(' Loading floating-point model from', float_dir + '/float_model.h5')
    float_model = load_model(float_dir + '/float_model.h5', compile=False)

    # get input dimensions of the floating-point model
    height = float_model.input_shape[1]
    width = float_model.input_shape[2]
    chans = float_model.input_shape[3]
    print(' Input dimensions: height:', height, ' width:', width, 'channels:',
          chans)

    # Quantization-aware training model
    quantizer = vitis_quantize.VitisQuantizer(float_model)
    ft_model = quantizer.get_qat_model()
    '''
    tf.data pipelines
    '''
    train_dataset = input_fn(tfrec_train, batchsize, True)
    test_dataset = input_fn(tfrec_val, batchsize, False)
    '''
    Call backs
    '''
    chkpt_call = ModelCheckpoint(filepath=os.path.join(quant_ft_dir,
                                                       'quant_ft.h5'),
                                 monitor='val_accuracy',
                                 verbose=1,
                                 save_best_only=True)
    lr_scheduler_call = LearningRateScheduler(schedule=step_decay, verbose=1)
    callbacks_list = [chkpt_call, lr_scheduler_call]
    '''
    Compile model
    Adam optimizer to change weights & biases
    Loss function is sparse categorical crossentropy
    '''
    ft_model.compile(optimizer=Adam(learning_rate=learnrate),
                     loss=SparseCategoricalCrossentropy(from_logits=True),
                     metrics=['accuracy'])
    '''
    Training
    '''
    print('\n' + DIVIDER)
    print(' Training model with training set..')
    print(DIVIDER)

    # make folder for saving trained model checkpoint
    os.makedirs(quant_ft_dir, exist_ok=True)

    # run training
    train_hist = ft_model.fit(train_dataset,
                              epochs=epochs,
                              steps_per_epoch=(1300 * max_classes) //
                              batchsize,
                              validation_data=test_dataset,
                              validation_steps=None,
                              callbacks=callbacks_list,
                              verbose=1)
    '''
    Evaluate quantized model
    '''
    print('\n' + DIVIDER)
    print('Evaluating quantized model..')
    print(DIVIDER + '\n')

    # reload the best checkpoint and evaluate it
    with vitis_quantize.quantize_scope():
        eval_model = load_model(quant_ft_dir + '/quant_ft.h5', compile=False)

    eval_model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                       metrics=['accuracy'])

    scores = eval_model.evaluate(test_dataset, steps=None, verbose=0)

    print(' Quantized model accuracy: {0:.4f}'.format(scores[1] * 100), '%')
    print('\n' + DIVIDER)

    return
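
For reference, the imports and constants this function relies on would look roughly like the following (a sketch; DIVIDER is just a visual separator string):

import os
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow_model_optimization.quantization.keras import vitis_quantize

DIVIDER = '-' * 50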