Code Example #1
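This snippet builds the ordinal pose network on GPU 0 inside an existing session, initializes all global variables, and then loops over the checkpoints listed in evaluation_models, creating fresh mEvaluatorPose3D accumulators and a data index for each checkpoint before restoring it for evaluation.
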
        with tf.device("/device:GPU:0"):
            ordinal_model.build_model(input_images)
            ordinal_model.build_evaluation(eval_batch_size=configs.batch_size,
                                           flip_array=preprocessor.flip_array)

        print("Network built!")
        # log_writer = tf.summary.FileWriter(logdir=log_dir, graph=sess.graph)

        model_saver = tf.train.Saver()
        net_init = tf.global_variables_initializer()
        sess.run([net_init])
        # reload the model

        # Evaluate every checkpoint listed in evaluation_models.
        for cur_model_iterations in evaluation_models:

            mean_coords_eval = evaluators.mEvaluatorPose3D(
                nJoints=configs.nJoints)
            opt_mean_coords_eval = evaluators.mEvaluatorPose3D(
                nJoints=configs.nJoints)
            raw_coords_eval = evaluators.mEvaluatorPose3D(
                nJoints=configs.nJoints)
            opt_raw_coords_eval = evaluators.mEvaluatorPose3D(
                nJoints=configs.nJoints)

            data_index = my_utils.mRangeVariable(min_val=data_from,
                                                 max_val=data_to - 1,
                                                 initial_val=data_from)

            ################# Restore the model ################
            if os.path.exists(
                    configs.restore_model_path_fn(cur_model_iterations) +
                    ".index"):
Code Example #2
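This snippet begins partway through an evaluation script: it finishes accumulating per-sample depth scales into cur_model_depth_scale, logs the running average, then uses that average as cur_depth_scale, creates mEvaluatorDepth and mEvaluatorPose3D evaluators, and iterates over the data range while allocating zeroed batch arrays for the input images and joint relation tables.
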
                    cur_model_depth_scale.add(cur_scale)

                    scale_for_show.append(cur_scale)

                print("Iter: {:07d} Loss : {:07f} Scales: {}\n\n".format(
                    scale_data_index.val, scale_loss, scale_for_show))
                print("Cur Scale: {:07f}\n\n".format(
                    cur_model_depth_scale.cur_average[0]))
            ################################################################################################

            ##### Then evaluate it #####
            cur_depth_scale = cur_model_depth_scale.cur_average[0]
            print("Scale used to evaluate: {:07f}".format(cur_depth_scale))

            depth_eval = evaluators.mEvaluatorDepth(nJoints=configs.nJoints)
            coords_eval = evaluators.mEvaluatorPose3D(nJoints=configs.nJoints)
            data_index = my_utils.mRangeVariable(min_val=data_from,
                                                 max_val=data_to - 1,
                                                 initial_val=data_from)

            # Iterate over the evaluation data range.
            while not data_index.isEnd():
                global_steps = sess.run(ordinal_model.global_steps)

                batch_images_np = np.zeros(
                    [configs.batch_size, configs.img_size, configs.img_size, 3],
                    dtype=np.float32)
                batch_relation_table_np = np.zeros(
                    [configs.batch_size, configs.nJoints, configs.nJoints],
                    dtype=np.float32)
                batch_loss_table_log_np = np.zeros(
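                    # Assumed arguments: the shape mirrors
                    # batch_relation_table_np above.
                    [configs.batch_size, configs.nJoints, configs.nJoints],
                    dtype=np.float32)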
Code Example #3
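This is a self-contained sanity check for the evaluator classes: it generates random ground-truth and predicted depths and 3D joint coordinates, computes reference errors directly with NumPy, feeds the same data through mEvaluatorDepth and mEvaluatorPose3D one frame at a time, and prints both results side by side for comparison.
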
import numpy as np
import os
import sys

sys.path.append("../")

from utils.evaluate_utils import evaluators

eval_iterations = 100000
nJoints = 17

if __name__ == "__main__":
    eval_depth = evaluators.mEvaluatorDepth(nJoints)
    eval_coords = evaluators.mEvaluatorPose3D(nJoints)

    # Random ground-truth and predicted per-joint depths.
    gt_depth = np.random.random([eval_iterations, nJoints]) * 100
    pd_depth = np.random.random([eval_iterations, nJoints]) * 100

    # Reference mean absolute depth error computed directly with NumPy.
    mean_depth = np.mean(np.abs(gt_depth - pd_depth))

    # Random ground-truth and predicted 3D joint coordinates.
    gt_coords = np.random.random([eval_iterations, nJoints, 3]) * 100
    pd_coords = np.random.random([eval_iterations, nJoints, 3]) * 100

    # Reference mean per-joint position error (Euclidean distance per joint).
    mean_coords = np.mean(np.sqrt(np.sum((gt_coords - pd_coords)**2, axis=2)))

    # Feed the same data through the evaluators frame by frame.
    for i in range(eval_iterations):
        eval_depth.add(gt_depth[i], pd_depth[i])
        eval_coords.add(gt_coords[i], pd_coords[i])

    print("mean_depth({} | {}), mean_coords({} | {})".format(mean_depth, eval_depth.mean(), mean_coords, eval_coords.mean()))