import importlib

import tensorflow as tf

# `register_tf_netbuilder_extensions` and `probe_model_singlenet` are
# project-specific helpers assumed to be importable in this script.


def main(weights, tflite_path, create_model_fn):
    register_tf_netbuilder_extensions()

    # load saved model

    module = importlib.import_module('models')
    create_model = getattr(module, create_model_fn)

    model = create_model()
    model.load_weights(weights)

    # first pass

    probe_model_singlenet(model, test_img_path="resources/ski_224.jpg")

    # export model to tflite

    export_to_tflite(model, tflite_path)

    print("Done !!!")


def export_to_tflite(model, output_path):
    # converter setup reconstructed here (an assumption); the original snippet
    # preserved only the final two lines of this helper
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()
    open(output_path, "wb").write(tflite_model)
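
# note (an assumption, not part of the original snippet): post-training
# quantization can be enabled on the converter before convert() is called, e.g.
#   converter.optimizations = [tf.lite.Optimize.DEFAULT]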


## convert saved model to tflite

register_tf_netbuilder_extensions()

# load saved model
module = importlib.import_module('models')
create_model_fn = 'create_openpose_singlenet'
create_model = getattr(module, create_model_fn)
path_weights = "output_singlenet/openpose_singlenet"
model = create_model()
model.load_weights(path_weights)

# first pass
probe_model_singlenet(model, test_img_path="resources/ski_224.jpg")

# export model to tflite

tflite_path = 'tf_lite/temp.tflite'
export_to_tflite(model, tflite_path)

print("Done !!!")

## use tflite to predict

import os
import tensorflow as tf
import matplotlib.pylab as plt
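
# A minimal sketch of loading the exported model with tf.lite.Interpreter and
# running it on the test image. The 224x224 input size matches the test image
# above; the [0, 1] scaling and the output ordering (pafs first, heatmaps
# last) are assumptions about this model, not confirmed by the original post.

interpreter = tf.lite.Interpreter(model_path='tf_lite/temp.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# load and preprocess the test image (scaling to [0, 1] is an assumption)
img = tf.io.read_file("resources/ski_224.jpg")
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.resize(img, (224, 224))
input_data = tf.expand_dims(tf.cast(img, tf.float32) / 255.0, axis=0)

interpreter.set_tensor(input_details[0]['index'], input_data.numpy())
interpreter.invoke()

heatmaps = interpreter.get_tensor(output_details[-1]['index'])
plt.matshow(heatmaps[0, :, :, 0])  # visualize the first heatmap channel
plt.show()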

import datetime
from datetime import timedelta
from timeit import default_timer as timer  # assumed source of `timer`


# `train_one_step`, `eucl_loss`, `plot_to_image`, `probe_model_singlenet`,
# `manager` (a tf.train.CheckpointManager) and `output_weights` are expected
# to be module-level names in the surrounding training script.
def train(ds_train, ds_val, model, optimizer, ckpt, last_epoch, last_step,
          max_epochs, steps_per_epoch):
    train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
    train_loss_heatmap = tf.keras.metrics.Mean('train_loss_heatmap',
                                               dtype=tf.float32)
    train_loss_paf = tf.keras.metrics.Mean('train_loss_paf', dtype=tf.float32)

    val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)
    val_loss_heatmap = tf.keras.metrics.Mean('val_loss_heatmap',
                                             dtype=tf.float32)
    val_loss_paf = tf.keras.metrics.Mean('val_loss_paf', dtype=tf.float32)

    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir = 'logs_singlenet/gradient_tape/' + current_time + '/train'
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    val_log_dir = 'logs_singlenet/gradient_tape/' + current_time + '/val'
    val_summary_writer = tf.summary.create_file_writer(val_log_dir)

    output_paf_idx = 2
    output_heatmap_idx = 3

    # determine start epoch in case the training has been stopped manually and resumed

    resume = last_step != 0 and (steps_per_epoch - last_step) != 0
    if resume:
        start_epoch = last_epoch
    else:
        start_epoch = last_epoch + 1
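    # e.g. last_epoch=3, last_step=250, steps_per_epoch=1000 means training
    # stopped mid-epoch, so epoch 3 is resumed from step 251; with last_step=0
    # the loop simply starts at epoch last_epoch + 1 (values illustrative)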

    # start processing

    for epoch in range(start_epoch, max_epochs + 1):

        start = timer()

        print("Start processing epoch {}".format(epoch))

        # set the initial step index depending on whether processing was resumed

        if resume:
            step = last_step + 1
            data_iter = ds_train.skip(last_step)
            print(f"Skipping {last_step} steps (May take a few minutes)...")
            resume = False
        else:
            step = 0
            data_iter = ds_train

        # process steps

        for x, y in data_iter:

            step += 1

            losses, total_loss = train_one_step(model, optimizer, x, y)

            train_loss(total_loss)
            train_loss_heatmap(losses[output_heatmap_idx])
            train_loss_paf(losses[output_paf_idx])

            print('step=', step)
            if step % 10 == 0:

                tf.print('Epoch', epoch, f'Step {step}/{steps_per_epoch}',
                         'Paf1', losses[0], 'Paf2', losses[1], 'Paf3',
                         losses[2], 'Heatmap', losses[3], 'Total loss',
                         total_loss)

                with train_summary_writer.as_default():
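                    # zero-based global step, continuous across epochs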
                    summary_step = (epoch - 1) * steps_per_epoch + step - 1
                    tf.summary.scalar('loss',
                                      train_loss.result(),
                                      step=summary_step)
                    tf.summary.scalar('loss_heatmap',
                                      train_loss_heatmap.result(),
                                      step=summary_step)
                    tf.summary.scalar('loss_paf',
                                      train_loss_paf.result(),
                                      step=summary_step)

            if step % 100 == 0:
                figure = probe_model_singlenet(
                    model, test_img_path="resources/ski_224.jpg")
                with train_summary_writer.as_default():
                    tf.summary.image("Test prediction",
                                     plot_to_image(figure),
                                     step=step)

            if step % 1000 == 0:
                ckpt.step.assign(step)
                ckpt.epoch.assign(epoch)
                save_path = manager.save()
                print("Saved checkpoint for step {}: {}".format(
                    step, save_path))

            if step >= steps_per_epoch:
                break

        print("Completed epoch {}. Saving weights...".format(epoch))
        model.save_weights(output_weights, overwrite=True)

        # save checkpoint at the end of an epoch

        ckpt.step.assign(step)
        ckpt.epoch.assign(epoch)
        manager.save()

        # reset metrics every epoch

        train_loss.reset_states()
        train_loss_heatmap.reset_states()
        train_loss_paf.reset_states()

        end = timer()

        print("Epoch training time: " + str(timedelta(seconds=end - start)))

        # calculate validation loss

        print("Calculating validation losses...")
        for val_step, (x_val, y_val_true) in enumerate(ds_val):

            if val_step % 1000 == 0:
                print(f"Validation step {val_step} ...")

            y_val_pred = model(x_val, training=False)
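            # outputs 0-2 are the paf stages; output 3 is the heatmap stage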
            losses = [
                eucl_loss(y_val_true[0], y_val_pred[0]),
                eucl_loss(y_val_true[0], y_val_pred[1]),
                eucl_loss(y_val_true[0], y_val_pred[2]),
                eucl_loss(y_val_true[1], y_val_pred[3])
            ]
            total_loss = tf.reduce_sum(losses)
            val_loss(total_loss)
            val_loss_heatmap(losses[output_heatmap_idx])
            val_loss_paf(losses[output_paf_idx])

        val_loss_res = val_loss.result()
        val_loss_heatmap_res = val_loss_heatmap.result()
        val_loss_paf_res = val_loss_paf.result()

        print(
            f'Validation losses for epoch {epoch}: Loss paf {val_loss_paf_res}, '
            f'Loss heatmap {val_loss_heatmap_res}, Total loss {val_loss_res}')

        with val_summary_writer.as_default():
            tf.summary.scalar('val_loss', val_loss_res, step=epoch)
            tf.summary.scalar('val_loss_heatmap',
                              val_loss_heatmap_res,
                              step=epoch)
            tf.summary.scalar('val_loss_paf', val_loss_paf_res, step=epoch)
        val_loss.reset_states()
        val_loss_heatmap.reset_states()
        val_loss_paf.reset_states()
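

# A hypothetical invocation sketch; the checkpoint layout and all values below
# are assumptions for illustration, not taken from the original script:
#
#   ckpt = tf.train.Checkpoint(step=tf.Variable(0), epoch=tf.Variable(0),
#                              optimizer=optimizer, model=model)
#   manager = tf.train.CheckpointManager(ckpt, './checkpoints', max_to_keep=3)
#   train(ds_train, ds_val, model, optimizer, ckpt,
#         last_epoch=int(ckpt.epoch), last_step=int(ckpt.step),
#         max_epochs=300, steps_per_epoch=1000)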