Example #1
def __init__(self):
    tf.reset_default_graph()
    pa.create_necessary_folders()
    # Placeholder; batch size is 1
    self.img_holder = tf.placeholder(tf.float32, [1, pa.PH, pa.PW, 3])
    # PAF inference network
    self.tensor_paf_pcm = PAF_network.PoseNet().inference_paf_pcm(
        self.img_holder)
    self.sess = tf.Session()
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state("logs/")
    if ckpt:
        saver.restore(self.sess, ckpt.model_checkpoint_path)
    else:
        raise FileNotFoundError("No PAF checkpoint")
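
For context, a minimal usage sketch of the wrapper above. The class name PafEstimator is a hypothetical stand-in (the snippet only shows __init__); the dummy input merely matches the placeholder shape.

import numpy as np

# `PafEstimator` is an assumed name for the class whose __init__ is shown above.
estimator = PafEstimator()  # builds the graph and restores the checkpoint
dummy_img = np.zeros([1, pa.PH, pa.PW, 3], np.float32)
paf_pcm = estimator.sess.run(estimator.tensor_paf_pcm,
                             feed_dict={estimator.img_holder: dummy_img})
print(paf_pcm.shape)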
Example #2
def main(argv=None):
    print("Training with batch size: " + str(BATCH_SIZE))
    # Placeholders
    PH, PW = pa.PH, pa.PW
    ipjc_holder = tf.placeholder(tf.float32, [BATCH_SIZE, 8, 6 + 2, 3])
    img_holder = tf.placeholder(tf.float32, [BATCH_SIZE, PH, PW, 3])
    # Entire network
    img_tensor, i_hv_tensor = gpu_pipeline.build_training_pipeline(
        ipjc_holder, img_holder)
    poseNet = gpu_network.PoseNet()
    loss_tensor = poseNet.build_paf_pcm_loss(img_tensor, i_hv_tensor)
    lgdts_tensor = build_training_ops(loss_tensor)

    # Session, saver, summary writer
    sess = tf.Session()
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state("logs/")
    if ckpt:
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        # Global initializer
        sess.run(tf.global_variables_initializer())
        pa.create_necessary_folders()

    summary_writer = tf.summary.FileWriter("logs/summary", sess.graph)

    # Finalize the graph so no new ops can be added
    tf.get_default_graph().finalize()
    # Load samples from disk
    samples_gen = gpu_pipeline.training_samples_gen(BATCH_SIZE)
    for itr in range(1, int(1e7)):
        batch_img, batch_ipjc = next(samples_gen)
        feed_dict = {img_holder: batch_img, ipjc_holder: batch_ipjc}
        loss_num, g_step_num, lr_num, _ = sess.run(lgdts_tensor[0:4],
                                                   feed_dict=feed_dict)
        print_log(loss_num, g_step_num, lr_num, itr)

        # Summary
        if itr % 100 == 0:
            summary_str = sess.run(lgdts_tensor[4], feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, g_step_num)

            saver.save(sess, "logs/ckpt")
            print('Model Saved.')

    sess.close()
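
This example calls build_training_ops and print_log, whose definitions are not shown. Below is a plausible sketch of each, consistent with the five-element return list documented in Example #3; the optimizer choice and schedule constants are assumptions, not the original code.

import tensorflow as tf  # TF 1.x API, as in the snippets

def build_training_ops(loss_tensor):
    # Sketch only: exponential decay and Adam are assumptions.
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.exponential_decay(
        1e-4, global_step, decay_steps=10000, decay_rate=0.9)
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(
        loss_tensor, global_step=global_step)
    tf.summary.scalar("loss", loss_tensor)
    summary_op = tf.summary.merge_all()
    return [loss_tensor, global_step, learning_rate, train_op, summary_op]

def print_log(loss_num, g_step_num, lr_num, itr):
    # Sketch: the exact format is an assumption; the call sites only fix the arguments.
    print("itr %d  step %d  lr %.2e  loss %.4f" % (itr, g_step_num, lr_num, loss_num))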
Example #3
def main(argv=None):
    print("Training with batch size: " + str(BATCH_SIZE))
    # Placeholders
    PH, PW = pa.PH, pa.PW
    HEAT_H, HEAT_W = pa.HEAT_H, pa.HEAT_W
    PCM_nhwc_holder = tf.placeholder(tf.float32, [BATCH_SIZE, HEAT_H, HEAT_W, 14])
    PAF_nhwc_holder = tf.placeholder(tf.float32, [BATCH_SIZE, HEAT_H, HEAT_W, 11 * 2])
    img_holder = tf.placeholder(tf.float32, [BATCH_SIZE, PH, PW, 3])
    # Entire network
    poseNet = PAF_network.PoseNet()
    loss_tensor = poseNet.build_paf_pcm_loss(img_holder, PCM_nhwc_holder, PAF_nhwc_holder)
    # lgdts: [loss_tensor, global_step, decaying_learning_rate, train_op, summary_op]
    lgdts_tensor = build_training_ops(loss_tensor)
    
    # Session, saver, summary writer
    sess = tf.Session()
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state("logs/")
    if ckpt:
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        # Global initializer
        sess.run(tf.global_variables_initializer())
        pa.create_necessary_folders()

    summary_writer = tf.summary.FileWriter("logs/summary", sess.graph)
    
    # Finalize the graph so no new ops can be added
    tf.get_default_graph().finalize()
    # Load samples from disk
    gen_PCM_PAF_IMG = label_loader.generator_PCM_PAF_IMG(BATCH_SIZE, (512, 512), 8)

    for itr in range(1, int(1e7)):
        BC, BA, BI = next(gen_PCM_PAF_IMG)
        feed_dict = {img_holder: BI, PCM_nhwc_holder: BC, PAF_nhwc_holder: BA}
        loss_num, g_step_num, lr_num, _ = sess.run(lgdts_tensor[0:4], feed_dict=feed_dict)
        print_log(loss_num, g_step_num, lr_num, itr)
        
        # Summary
        if itr % 100 == 0:
            summary_str = sess.run(lgdts_tensor[4], feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, g_step_num)

            saver.save(sess, "logs/ckpt")
            print('Model Saved.')

    sess.close()
Example #4
                if visible(12):  # Head top
                    set_ipjc(6, 12)
                if visible(13):  # Neck
                    set_ipjc(7, 13)

        iname_arr = np.asarray(iname_list)
        return [ipjc_arr, iname_arr]

    target_ratio = PW / PH

    files = [open(l) for l in label_collection]
    json_labels_list = [json.load(f) for f in files]
    for f in files:
        f.close()

    la_im_list = list(zip(json_labels_list, img_folder_collection))
    ipjcs_inames_list = [resize_by_json(*la_im)
                         for la_im in la_im_list]  # [3][ipjc, iname]
    ipjc3, iname3 = list(zip(*ipjcs_inames_list))
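    # Flatten the per-dataset results, then join them into single arrays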
    ipjc3 = np.reshape(np.asarray(ipjc3), [-1])
    iname3 = np.reshape(np.asarray(iname3), [-1])

    ipjc_con = np.concatenate(ipjc3, 0)
    iname_con = np.concatenate(iname3, 0)
    np.save(AI_IPJC_FILE, ipjc_con)
    np.save(AI_INAME_FILE, iname_con)


assert sys.version_info >= (3, 5)
pa.create_necessary_folders()
resize_keep_ratio(save_img=False)
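
The two arrays written above can be read back with np.load; a small sketch (the file-name constants come from the snippet, the shapes are assumptions):

import numpy as np

ipjc = np.load(AI_IPJC_FILE)    # per-image joint records; shape assumed [N, joints, 3]
iname = np.load(AI_INAME_FILE)  # matching image file names; shape [N]
assert len(ipjc) == len(iname)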
Example #5
def save_joints_position(v_name=None):
    """
    Save joint positions from a video to a file.
    v_name: name of the video, without file extension
    :return:
    """
    tf.reset_default_graph()
    pa.create_necessary_folders()
    batch_size = 15
    # Default to the first video; this also keeps v_name valid for the save path below
    if v_name is None:
        v_name = pa.VIDEO_LIST[0]
    video_path = os.path.join(pa.VIDEO_FOLDER_PATH, v_name + ".mp4")

    metadata = skvideo.io.ffprobe(video_path)
    total_frames = int(metadata["video"]["@nb_frames"])

    v_width = int(metadata["video"]["@width"])
    v_height = int(metadata["video"]["@height"])
    assert (v_height == pa.PH and v_width == pa.PW)
    v_gen = skvideo.io.vreader(video_path)

    # Placeholder
    img_holder = tf.placeholder(tf.float32, [batch_size, v_height, v_width, 3])
    # Entire network
    paf_pcm_tensor = gpu_network.PoseNet().inference_paf_pcm(img_holder)

    # Storage for per-frame argmax results
    joint_ixy = list()  # [frame][joint 0..7][x, y]
    # Session and saver
    with tf.Session() as sess:
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state("logs/")
        if ckpt:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise FileNotFoundError("TensorFlow ckpt not found")

        for i in range(0, total_frames - batch_size + 1, batch_size):
            frames = [next(v_gen) / 255. for _ in range(batch_size)]
            feed_dict = {img_holder: frames}
            paf_pcm = sess.run(paf_pcm_tensor, feed_dict=feed_dict)
            pcm = paf_pcm[:, :, :, 14:]
            pcm = np.clip(pcm, 0., 1.)
            for idx_img in range(batch_size):
                # 8 joints in this image
                img_j6 = []
                for idx_joint in range(8):
                    heat = pcm[idx_img, :, :, idx_joint]
                    c_coor_1d = np.argmax(heat)
                    c_coor_2d = np.unravel_index(
                        c_coor_1d, [pa.HEAT_SIZE[1], pa.HEAT_SIZE[0]])
                    c_value = heat[c_coor_2d]
                    j_xy = []  # x,y
                    if c_value > 0.15:
                        percent_h = c_coor_2d[0] / pa.HEAT_H
                        percent_w = c_coor_2d[1] / pa.HEAT_W
                        j_xy.append(percent_w)
                        j_xy.append(percent_h)
                    else:
                        j_xy.append(-1.)
                        j_xy.append(-1.)
                    img_j6.append(j_xy)
                joint_ixy.append(img_j6)
            print("Image: " + str(i))
    # sess closed
    save_path = os.path.join(pa.RNN_SAVED_JOINTS_PATH, v_name + ".npy")
    np.save(save_path, joint_ixy)
    print(save_path)
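
Reading the saved joints back is symmetric; a short sketch (the video name "demo" is a hypothetical stand-in):

import os
import numpy as np

joints = np.load(os.path.join(pa.RNN_SAVED_JOINTS_PATH, "demo.npy"))
# joints has shape [frames, 8, 2]; joints below the 0.15 confidence threshold are (-1., -1.)
detected = joints[..., 0] >= 0.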