Example #1
def quant_model(float_model, quant_model, batchsize, tfrec_dir, evaluate):
    '''
    Quantize the floating-point model
    Save to HDF5 file
    '''

    # make folder for saving quantized model
    head_tail = os.path.split(quant_model)
    os.makedirs(head_tail[0], exist_ok=True)

    # load the floating point trained model
    float_model = load_model(float_model)

    # get input dimensions of the floating-point model
    height = float_model.input_shape[1]
    width = float_model.input_shape[2]

    # make TFRecord dataset and image processing pipeline
    quant_dataset = input_fn_quant(tfrec_dir, batchsize, height, width)

    # run quantization
    quantizer = vitis_quantize.VitisQuantizer(float_model)
    quantized_model = quantizer.quantize_model(calib_dataset=quant_dataset)

    # save the quantized model
    quantized_model.save(quant_model)
    print('Saved quantized model to', quant_model)


    if evaluate:
        '''
        Evaluate quantized model
        '''
        print('\n'+DIVIDER)
        print('Evaluating quantized model..')
        print(DIVIDER+'\n')

        test_dataset = input_fn_test(tfrec_dir, batchsize, height, width)

        quantized_model.compile(optimizer=Adam(),
                                loss='sparse_categorical_crossentropy',
                                metrics=['accuracy'])

        scores = quantized_model.evaluate(test_dataset,
                                          verbose=0)

        print('Quantized model accuracy: {0:.4f} %'.format(scores[1]*100))
        print('\n'+DIVIDER)

    return
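
A minimal usage sketch for the function above; the paths, batch size and TFRecord directory are illustrative assumptions, not values from the original example.

if __name__ == '__main__':
    quant_model(float_model='build/float_model/f_model.h5',  # assumed path
                quant_model='build/quant_model/q_model.h5',  # assumed path
                batchsize=50,                                # assumed value
                tfrec_dir='build/tfrecords',                 # assumed path
                evaluate=True)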
Example #2
def step_number():
    '''
    Generator that yields 1, 2, 3, ... - used to number the evaluation runs.
    (The opening lines of this snippet were truncated; the body is
    reconstructed from how step_num is used below.)
    '''
    num = 0
    while True:
        num += 1
        yield num


step_num = step_number()

# model to be evaluated
eval_model = mobilenetv2(input_shape=(input_height, input_width, input_chan),
                         classes=2,
                         alpha=1.0,
                         incl_softmax=False)

eval_model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                   metrics=['accuracy'])

# test dataset
test_dataset = input_fn_test(tfrec_dir, batchsize)


# eval function
def evaluate(checkpoint_path=''):
    eval_model.load_weights(checkpoint_path)
    scores = eval_model.evaluate(test_dataset)
    eval_metric_ops = {'accuracy': scores[1]}
    print('*** Accuracy after', next(step_num), 'steps:', scores[-1], '***')
    return eval_metric_ops


if __name__ == '__main__':
    path = './tf_ckpt/tf_float.ckpt'
    evaluate(path)
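
Because step_num is a module-level generator, repeated calls to evaluate() number themselves automatically; a sketch with assumed checkpoint paths:

# Hypothetical fine-tuning loop: the printed step count advances on each call.
for ckpt in ['./tf_ckpt/ft_1.ckpt', './tf_ckpt/ft_2.ckpt']:  # assumed paths
    evaluate(ckpt)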
Example #3
def train(input_height, input_width, input_chan, tfrec_dir, batchsize,
          learnrate, epochs, chkpt_dir, tboard):

    def step_decay(epoch):
        """
        Learning rate scheduler used by callback
        Reduces learning rate depending on number of epochs
        """
        lr = learnrate
        if epoch > 200:
            lr /= 100
        elif epoch > 10:
            lr /= 10
        return lr
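
    # Worked example of the schedule above with an assumed learnrate of 0.001
    # (an illustrative value, not taken from the original example):
    #   epochs   0-10  -> 0.001
    #   epochs  11-200 -> 0.0001   (lr / 10)
    #   epochs  201+   -> 0.00001  (lr / 100)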

        

    '''
    Define the model
    '''
    model = customcnn(input_shape=(input_height, input_width, input_chan),
                      classes=2,
                      filters=[8, 16, 32, 64, 128])

    print('\n'+DIVIDER)
    print(' Model Summary')
    print(DIVIDER)
    model.summary()
    print("Model Inputs: {ips}".format(ips=model.inputs))
    print("Model Outputs: {ops}".format(ops=model.outputs))


    '''
    tf.data pipelines
    '''
    # train and test folders
    train_dataset = input_fn_trn(tfrec_dir, batchsize, input_height, input_width)
    test_dataset = input_fn_test(tfrec_dir, batchsize, input_height, input_width)


    '''
    Callbacks
    '''
    tb_call = TensorBoard(log_dir=tboard)

    chkpt_call = ModelCheckpoint(filepath=os.path.join(chkpt_dir,'f_model.h5'), 
                                 monitor='val_accuracy',
                                 verbose=1,
                                 save_best_only=True)

    lr_scheduler_call = LearningRateScheduler(schedule=step_decay,
                                              verbose=1)

    callbacks_list = [tb_call, chkpt_call, lr_scheduler_call]


    '''
    Compile model
    Adam optimizer to update weights & biases
    Loss function is sparse categorical crossentropy
    '''
    model.compile(optimizer=Adam(learning_rate=learnrate),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
 

    '''
    Training
    '''

    print('\n'+DIVIDER)
    print(' Training model with training set..')
    print(DIVIDER)

    # make folder for saving trained model checkpoint
    os.makedirs(chkpt_dir, exist_ok=True)


    # run training
    train_history = model.fit(train_dataset,
                            epochs=epochs,
                            steps_per_epoch=17500//batchsize,
                            validation_data=test_dataset,
                            validation_steps=None,
                            callbacks=callbacks_list,
                            verbose=1)

    print("\nTensorBoard can be opened with the command: tensorboard --logdir={dir} --host localhost --port 6006".format(dir=tboard))

    return
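
A usage sketch for the training function above; every argument value here is an illustrative assumption, not taken from the original example.

if __name__ == '__main__':
    train(input_height=224, input_width=224, input_chan=3,  # assumed dims
          tfrec_dir='build/tfrecords',                      # assumed path
          batchsize=50,                                     # assumed value
          learnrate=0.001,                                  # assumed value
          epochs=100,                                       # assumed value
          chkpt_dir='build/float_model',                    # assumed path
          tboard='build/tb_logs')                           # assumed path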
Example #4
def train(input_ckpt, output_ckpt, tfrec_dir, tboard_dir, input_height,
          input_width, input_chan, batchsize, epochs, learnrate, target_acc):
    '''
    tf.data pipelines
    '''
    # train and test folders
    train_dataset = input_fn_trn(tfrec_dir, batchsize)
    test_dataset = input_fn_test(tfrec_dir, batchsize)
    '''
    Callbacks
    '''
    tb_call = TensorBoard(log_dir=tboard_dir)

    chkpt_call = ModelCheckpoint(filepath=output_ckpt,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True)

    early_stop_call = EarlyStoponAcc(target_acc)

    callbacks_list = [tb_call, chkpt_call, early_stop_call]

    # if required, tf.set_pruning_mode must be set before defining the model
    if input_ckpt != '':
        tf.set_pruning_mode()
    '''
    Define the model
    '''
    model = mobilenetv2(input_shape=(input_height, input_width, input_chan),
                        classes=2,
                        alpha=1.0,
                        incl_softmax=False)
    '''
    Compile model
    Adam optimizer to update weights & biases
    Loss function is sparse categorical crossentropy (computed from logits)
    '''
    model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=learnrate),
                  loss=SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    '''
    If an input checkpoint is specified then assume we are fine-tuning a pruned model,
    so load the weights into the model, otherwise we are training from scratch
    '''
    if input_ckpt != '':
        print('Loading checkpoint - fine-tuning from', input_ckpt)
        model.load_weights(input_ckpt)
    else:
        print('Training from scratch..')

        print('\n' + DIVIDER)
        print(' Model Summary')
        print(DIVIDER)
        model.summary()
        print("Model Inputs: {ips}".format(ips=model.inputs))
        print("Model Outputs: {ops}".format(ops=model.outputs))
    '''
    Training
    '''
    print('\n' + DIVIDER)
    print(' Training model with training set..')
    print(DIVIDER)

    # make folder for saving trained model checkpoint
    os.makedirs(os.path.dirname(output_ckpt), exist_ok=True)

    # run training
    train_history = model.fit(train_dataset,
                              epochs=epochs,
                              steps_per_epoch=20000 // batchsize,
                              validation_data=test_dataset,
                              validation_steps=5000 // batchsize,
                              callbacks=callbacks_list,
                              verbose=1)
    '''
    save just the model architecture (no weights) to a JSON file
    '''
    with open(os.path.join(os.path.dirname(output_ckpt), 'baseline_arch.json'),
              'w') as f:
        f.write(model.to_json())

    print(
        "\nTensorBoard can be opened with the command: tensorboard --logdir={dir} --host localhost --port 6006"
        .format(dir=tboard_dir))

    return
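
EarlyStoponAcc is defined elsewhere in the project. A minimal sketch of what such a callback could look like, assuming it stops training once the monitored validation accuracy reaches the target; this is a reconstruction, not the original implementation.

from tensorflow.keras.callbacks import Callback

class EarlyStoponAcc(Callback):
    '''Sketch: stop training once validation accuracy reaches a target.'''
    def __init__(self, target_acc):
        super().__init__()
        self.target_acc = target_acc

    def on_epoch_end(self, epoch, logs=None):
        # 'val_acc' matches the key monitored by ModelCheckpoint above
        val_acc = (logs or {}).get('val_acc', 0.0)
        if val_acc >= self.target_acc:
            print('\nReached target accuracy of', self.target_acc,
                  '- stopping training')
            self.model.stop_training = True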