Example #1
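# Assumed imports for these excerpts (the import block is not part of the listing;
# RHD, get_loss_and_output and average_gradients are project-specific, so their
# module paths below are only guesses):
#
#   import os
#   import platform
#   import configparser
#   import numpy as np
#   import scipy.misc
#   import cv2
#   import matplotlib.pyplot as plt
#   import tensorflow as tf          # TensorFlow 1.x API
#   from tqdm import tqdm
#   # from <project>.dataset_interface import RHD
#   # from <project>.network import get_loss_and_output, average_gradients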
def main(argv=None):
    # load config file and setup
    params = {}
    config = configparser.ConfigParser()
    config_file = "../experiments/mv2_cpm.cfg"
    if len(argv) != 1:
        config_file = argv[1]
    config.read(config_file)
    for _ in config.options("Train"):
        params[_] = eval(config.get("Train", _))
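    # Every option in the [Train] section is eval'd into a Python value, so the
    # .cfg file is expected to contain Python literals.  A hypothetical example
    # (key names match the lookups below, values are made up):
    #
    #   [Train]
    #   model = 'mv2_hourglass'
    #   batchsize = 32
    #   lr = '0.001'
    #   decay_rate = '0.95'
    #   visible_devices = '0'
    #   modelpath = '../experiments/trained/models'
    #   logpath = '../experiments/trained/logs'
    #   num_train_samples = 41258
    #   max_epoch = 10
    #   per_update_tensorboard_step = 100
    #   per_saved_model_step = 5000
    #   input_width = 32
    #   input_height = 32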

    os.environ['CUDA_VISIBLE_DEVICES'] = params['visible_devices']

    gpus_index = params['visible_devices'].split(",")
    params['gpus'] = len(gpus_index)

    if not os.path.exists(params['modelpath']):
        os.makedirs(params['modelpath'])
    if not os.path.exists(params['logpath']):
        os.makedirs(params['logpath'])

    gpus = 'gpus'
    if platform.system() == 'Darwin':
        gpus = 'cpu'
    training_name = '{}_batch-{}_lr-{}_{}-{}_{}x{}_{}'.format(
        params['model'], params['batchsize'], params['lr'], gpus,
        params['gpus'], params['input_width'], params['input_height'],
        config_file.replace("/", "-").replace(".cfg", ""))

    with tf.Graph().as_default(), tf.device("/cpu:0"):
        dataset_RHD = RHD(batchnum=params['batchsize'])

        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(float(params['lr']),
                                                   global_step,
                                                   decay_steps=10000,
                                                   decay_rate=float(
                                                       params['decay_rate']),
                                                   staircase=True)
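        # With staircase=True the schedule is
        #   lr(step) = lr * decay_rate ** (global_step // 10000),
        # i.e. the learning rate is multiplied by decay_rate every 10000 steps.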
        opt = tf.train.AdamOptimizer(learning_rate, epsilon=1e-8)
        tower_grads = []
        reuse_variable = False

        for i in range(params['gpus']):
            with tf.device("/gpu:%d" % i):
                with tf.name_scope("GPU_%d" % i):
                    #input_image, keypoint_xyz, keypoint_uv, input_heat, keypoint_vis, k, num_px_left_hand, num_px_right_hand \
                    batch_data_all = dataset_RHD.get_batch_data
                    input_image1 = batch_data_all[8]
                    input_image2 = batch_data_all[10]
                    hand_motion = batch_data_all[9]
                    scoremap1 = batch_data_all[11]
                    scoremap2 = batch_data_all[12]
                    is_loss1 = batch_data_all[13]
                    is_loss2 = batch_data_all[14]

                    batch_data_all_back = dataset_RHD.coco_get_batch_back_data
                    input_image1_back = batch_data_all_back[8]
                    input_image2_back = batch_data_all_back[10]
                    hand_motion_back = batch_data_all_back[9]
                    scoremap1_back = batch_data_all_back[11]
                    scoremap2_back = batch_data_all_back[12]
                    is_loss1_back = batch_data_all_back[13]
                    is_loss2_back = batch_data_all_back[14]

                    input_image1 = tf.concat([input_image1, input_image1_back],
                                             0)  # along the batch dimension: hand1 then back1
                    input_image2 = tf.concat([input_image2, input_image2_back],
                                             0)
                    hand_motion = tf.concat([hand_motion, hand_motion_back],
                                            0)  # along the batch dimension: hand12 then back12
                    scoremap1 = tf.concat([scoremap1, scoremap1_back], 0)
                    scoremap2 = tf.concat([scoremap2, scoremap2_back], 0)
                    is_loss1 = tf.concat([is_loss1, is_loss1_back], 0)
                    is_loss2 = tf.concat([is_loss2, is_loss2_back], 0)
                    """
                    total_loss, motion_loss*0.00001, loss_scoremap*0.001, loss_is_loss,\
                               ur, ux, uy, uz, ufxuz, pred_heatmaps_tmp, pre_is_loss, is_loss12
                    """
                    loss, motion_loss, loss_scoremap, loss_is_loss,\
                    ur, ux, uy, uz, ufxuz, preheat, preheat_, pre_is_loss, is_loss12\
                        = get_loss_and_output(params['model'], params['batchsize'],
                                    input_image1, input_image2, hand_motion, scoremap1,scoremap2,is_loss1,is_loss2,reuse_variable)

                    loss_all = loss
                    grads = opt.compute_gradients(loss_all)
                    tower_grads.append(grads)

        grads = average_gradients(tower_grads)
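        # average_gradients is a project helper that is expected to average each
        # variable's gradient across the per-GPU towers; a minimal sketch of it is
        # given after this example.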
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram("gradients_on_average/%s" % var.op.name,
                                     grad)

        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

        MOVING_AVERAGE_DECAY = 0.99
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variable_to_average = (tf.trainable_variables() +
                               tf.moving_average_variables())
        variables_averages_op = variable_averages.apply(variable_to_average)

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = tf.group(apply_gradient_op, variables_averages_op)
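        # A single training step therefore applies the averaged gradients, runs any
        # ops collected in UPDATE_OPS (e.g. batch-norm statistics), and updates the
        # exponential moving averages of the variables.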

        saver = tf.train.Saver(max_to_keep=10)

        tf.summary.scalar("learning_rate", learning_rate)
        tf.summary.scalar("loss", loss)

        summary_merge_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        # let GPU memory allocation grow on demand instead of reserving it all up front
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            init.run()
            checkpoint_path = os.path.join(params['modelpath'], training_name)
            model_name = '/model-54100'
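            # NOTE: training always resumes from this hard-coded checkpoint; the
            # restore below fails unless model-54100 already exists under
            # modelpath/training_name.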
            if checkpoint_path:
                saver.restore(sess, checkpoint_path + model_name)
                print("restore from " + checkpoint_path + model_name)

            summary_writer = tf.summary.FileWriter(
                os.path.join(params['logpath'], training_name), sess.graph)
            total_step_num = params['num_train_samples'] * params[
                'max_epoch'] // (params['batchsize'] * params['gpus'])
            print("Start training...")
            for step in tqdm(range(total_step_num)):
                _, loss_value = sess.run([train_op, loss])
                if step % params['per_update_tensorboard_step'] == 0:
                    valid_loss_value,valid_motion_loss, valid_scoremap_loss,loss_is_loss_v, valid_input_image1, valid_input_image2, valid_hand_motion, \
                    ur_v, ux_v, uy_v, uz_v, preheat_v,preheat_m_v, scoremap1_v, scoremap2_v, pre_is_loss_v,  is_loss12_v = sess.run(
                        [loss,motion_loss, loss_scoremap,loss_is_loss, input_image1, input_image2, hand_motion,
                         ur, ux, uy, uz, preheat,preheat_, scoremap1, scoremap2, pre_is_loss, is_loss12])

                    valid_input_image1 = (valid_input_image1 + 0.5) * 255
                    valid_input_image1 = valid_input_image1.astype(np.int16)

                    valid_input_image2 = (valid_input_image2 + 0.5) * 255
                    valid_input_image2 = valid_input_image2.astype(np.int16)

                    fig = plt.figure(1)
                    plt.clf()
                    ax1 = fig.add_subplot(3, 4, 1)
                    ax1.imshow(valid_input_image1[
                        0, :, :, :])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    ax1.axis('off')
                    ax1.set_title(str(
                        is_loss12_v[0]))  #hand1 back1 hand2 back2
                    ax2 = fig.add_subplot(3, 4, 2)
                    ax2.imshow(valid_input_image2[
                        0, :, :, :])  # along the batch dimension: hand2 then back2
                    ax2.axis('off')
                    ax2.set_title(str(is_loss12_v[64]))
                    ax3 = fig.add_subplot(3, 4, 3)
                    ax3.imshow(valid_input_image1[
                        32, :, :, :])  # along the batch dimension: hand1 then back1
                    ax3.axis('off')
                    ax3.set_title(str(is_loss12_v[32]))
                    ax4 = fig.add_subplot(3, 4, 4)
                    ax4.imshow(valid_input_image2[
                        32, :, :, :])  # along the batch dimension: hand2 then back2
                    ax4.axis('off')
                    ax4.set_title(str(is_loss12_v[96]))

                    ax5 = fig.add_subplot(3, 4, 5)
                    ax5.imshow(
                        np.sum(scoremap1_v[0],
                               axis=-1))  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    ax5.axis('off')
                    ax5.set_title(str(
                        pre_is_loss_v[0]))  # hand1 back1 hand2 back2
                    ax6 = fig.add_subplot(3, 4, 6)
                    ax6.imshow(np.sum(scoremap2_v[0],
                                      axis=-1))  # along the batch dimension: hand2 then back2
                    ax6.axis('off')
                    ax6.set_title(str(pre_is_loss_v[64]))
                    ax7 = fig.add_subplot(3, 4, 7)
                    ax7.imshow(np.sum(scoremap1_v[32],
                                      axis=-1))  # along the batch dimension: hand1 then back1
                    ax7.axis('off')
                    ax7.set_title(str(pre_is_loss_v[32]))
                    ax8 = fig.add_subplot(3, 4, 8)
                    ax8.imshow(np.sum(scoremap2_v[32],
                                      axis=-1))  # along the batch dimension: hand2 then back2
                    ax8.axis('off')
                    ax8.set_title(str(pre_is_loss_v[96]))

                    ax9 = fig.add_subplot(3, 4, 9)
                    ax9.imshow(np.sum(preheat_v[0],
                                      axis=-1))  #hand1 back1 hand2 back2
                    ax9.axis('off')
                    ax10 = fig.add_subplot(3, 4, 10)
                    ax10.imshow(np.sum(preheat_v[64], axis=-1))
                    ax10.axis('off')
                    ax11 = fig.add_subplot(3, 4, 11)
                    ax11.imshow(np.sum(preheat_v[32], axis=-1))
                    ax11.axis('off')
                    ax12 = fig.add_subplot(3, 4, 12)
                    ax12.imshow(np.sum(preheat_v[96], axis=-1))
                    ax12.axis('off')
                    plt.savefig(
                        os.path.join(params['logpath'], training_name) + "/" +
                        str(step).zfill(10) + ".png")

                    fig2 = plt.figure(2)
                    plt.clf()
                    ax13 = fig2.add_subplot(2, 4, 2)  #hand12 back12
                    ax13.plot([0, valid_hand_motion[0, 1]],
                              [0, valid_hand_motion[0, 2]],
                              label="label",
                              color='red')
                    ax13.plot([0, ux_v[0]], [0, uy_v[0]],
                              label="predict",
                              color='blue')
                    ax13.set_xlim((-1, 1))
                    ax13.set_ylim((1, -1))
                    ax13.grid(True)

                    ax15 = fig2.add_subplot(2, 4, 3)
                    ax15.plot([0, valid_hand_motion[32, 1]],
                              [0, valid_hand_motion[32, 2]],
                              label="label",
                              color='red')
                    ax15.plot([0, ux_v[32]], [0, uy_v[32]],
                              label="predict",
                              color='blue')
                    ax15.set_xlim((-1, 1))
                    ax15.set_ylim((-1, 1))
                    ax15.grid(True)
                    ax16 = fig2.add_subplot(2, 4, 5)
                    ax16.imshow(np.sum(preheat_m_v[0],
                                       axis=-1))  #hand1 back1 hand2 back2
                    ax16.axis('off')
                    ax17 = fig2.add_subplot(2, 4, 6)
                    ax17.imshow(np.sum(preheat_m_v[64], axis=-1))
                    ax17.axis('off')
                    ax18 = fig2.add_subplot(2, 4, 7)
                    ax18.imshow(np.sum(preheat_m_v[32], axis=-1))
                    ax18.axis('off')
                    ax19 = fig2.add_subplot(2, 4, 8)
                    ax19.imshow(np.sum(preheat_m_v[96], axis=-1))
                    ax19.axis('off')

                    plt.savefig(
                        os.path.join(params['logpath'], training_name) + "/" +
                        str(step).zfill(10) + "_.png")

                    print(
                        "loss:" + str(valid_loss_value),
                        " motion_loss:" + str(valid_motion_loss) +
                        " scoremap_loss:" + str(valid_scoremap_loss),
                        " is_loss:" + str(loss_is_loss_v))

                    # save model
                if step % params['per_saved_model_step'] == 0:
                    saver.save(sess,
                               os.path.join(checkpoint_path, 'model'),
                               global_step=step)
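# A minimal sketch of the assumed average_gradients helper (the standard TF 1.x
# multi-tower pattern; the project's actual implementation may differ):
def average_gradients(tower_grads):
    """Average each variable's gradient across the per-GPU towers."""
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars is ((grad_gpu0, var), (grad_gpu1, var), ...)
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars if g is not None]
        if not grads:
            continue
        grad = tf.reduce_mean(tf.concat(grads, 0), axis=0)
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads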
Example #2
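# NOTE: this example is an excerpt from a graph-freezing script; the argparse
# parser and the model_name variable used below are defined earlier in the
# original file and are not shown in this listing.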
    parser.add_argument(
        '--output_node_names', type=str, default='GPU_0/final_fxuz_Variable'
    )  # ['GPU_0/final_r_Variable','GPU_0/final_x_Variable','GPU_0/final_y_Variable','GPU_0/final_z_Variable'])
    parser.add_argument(
        '--output_graph',
        type=str,
        default=
        '/home/chen/Documents/Mobile_hand/experiments/trained/mv2_hourglass_deep/models/mv2_hourglass_batch-32_lr-0.001_gpus-1_32x32_..-experiments-mv2_hourglass/'
        + model_name + '.pb',
        help='output_freeze_path')

    args = parser.parse_args()
    i = 0
    batchsize = 1
    with tf.Graph().as_default(), tf.device("/cpu:0"):
        dataset_RHD = RHD(batchnum=batchsize)

        with tf.device("/gpu:%d" % i):
            with tf.name_scope("GPU_%d" % i):
                #input_node = tf.placeholder(tf.float32, shape=[2, args.size, args.size, 3], name="input_image")

                # input_image, keypoint_xyz, keypoint_uv, input_heat, keypoint_vis, k, num_px_left_hand, num_px_right_hand \
                batch_data_all = dataset_RHD.get_batch_data
                input_image1 = batch_data_all[8]
                input_image2 = batch_data_all[10]
                hand_motion = batch_data_all[9]
                scoremap1 = batch_data_all[11]
                scoremap2 = batch_data_all[12]
                is_loss1 = batch_data_all[13]
                is_loss2 = batch_data_all[14]
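                # The rest of the original script is not part of this excerpt.  With
                # the TF 1.x API, flags such as --output_node_names and
                # --output_graph are typically consumed roughly as follows (a hedged
                # sketch, not the project's actual code):
                #
                #   with tf.Session() as sess:
                #       saver.restore(sess, checkpoint_file)
                #       frozen = tf.graph_util.convert_variables_to_constants(
                #           sess, sess.graph_def, args.output_node_names.split(","))
                #       with tf.gfile.GFile(args.output_graph, "wb") as f:
                #           f.write(frozen.SerializeToString())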
Example #3
def main(argv=None):
    # load config file and setup
    params = {}
    config = configparser.ConfigParser()
    config_file = "../experiments/mv2_cpm.cfg"
    if len(argv) != 1:
        config_file = argv[1]
    config.read(config_file)
    for _ in config.options("Train"):
        params[_] = eval(config.get("Train", _))

    os.environ['CUDA_VISIBLE_DEVICES'] = params['visible_devices']

    gpus_index = params['visible_devices'].split(",")
    params['gpus'] = len(gpus_index)

    if not os.path.exists(params['modelpath']):
        os.makedirs(params['modelpath'])
    if not os.path.exists(params['logpath']):
        os.makedirs(params['logpath'])

    gpus = 'gpus'
    if platform.system() == 'Darwin':
        gpus = 'cpu'
    training_name = '{}_batch-{}_lr-{}_{}-{}_{}x{}_{}'.format(
        params['model'], params['batchsize'], params['lr'], gpus,
        params['gpus'], params['input_width'], params['input_height'],
        config_file.replace("/", "-").replace(".cfg", ""))

    with tf.Graph().as_default(), tf.device("/cpu:0"):
        dataset_RHD = RHD(batchnum=params['batchsize'])

        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(float(params['lr']),
                                                   global_step,
                                                   decay_steps=10000,
                                                   decay_rate=float(
                                                       params['decay_rate']),
                                                   staircase=True)
        opt = tf.train.AdamOptimizer(learning_rate, epsilon=1e-8)
        tower_grads = []
        reuse_variable = False

        for i in range(params['gpus']):
            with tf.device("/gpu:%d" % i):
                with tf.name_scope("GPU_%d" % i):
                    #input_image, keypoint_xyz, keypoint_uv, input_heat, keypoint_vis, k, num_px_left_hand, num_px_right_hand \
                    batch_data_all = dataset_RHD.get_batch_data

                    hand_motion = batch_data_all[9]
                    scoremap1 = batch_data_all[11]
                    scoremap2 = batch_data_all[12]
                    one_scoremap = tf.ones_like(scoremap1)
                    scoremap1 = tf.where(scoremap1 > 1,
                                         x=one_scoremap,
                                         y=scoremap1)
                    scoremap2 = tf.where(scoremap2 > 1,
                                         x=one_scoremap,
                                         y=scoremap2)

                    scoremap = scoremap1 - scoremap2

                    # compute the loss over a single scoremap
                    scoremap = tf.reduce_sum(scoremap, axis=-1)
                    scoremap = tf.expand_dims(scoremap, axis=-1)  # hand back
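                    # scoremap is now the per-pixel difference between the two
                    # frames' summed keypoint heatmaps, collapsed to one channel;
                    # presumably it serves as a coarse motion cue for the network.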
                    """
                    total_loss, motion_loss*0.00001, loss_scoremap*0.001, loss_is_loss,\
                               ur, ux, uy, uz, ufxuz, pred_heatmaps_tmp, pre_is_loss, is_loss12
                    """
                    loss, ufxuz\
                        = get_loss_and_output(params['model'], params['batchsize'],
                                              scoremap, hand_motion, reuse_variable)

                    loss_all = loss
                    grads = opt.compute_gradients(loss_all)
                    tower_grads.append(grads)

        grads = average_gradients(tower_grads)
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram("gradients_on_average/%s" % var.op.name,
                                     grad)

        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

        MOVING_AVERAGE_DECAY = 0.99
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variable_to_average = (tf.trainable_variables() +
                               tf.moving_average_variables())
        variables_averages_op = variable_averages.apply(variable_to_average)

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = tf.group(apply_gradient_op, variables_averages_op)

        saver = tf.train.Saver(max_to_keep=10)

        tf.summary.scalar("learning_rate", learning_rate)
        tf.summary.scalar("loss", loss)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        # let GPU memory allocation grow on demand instead of reserving it all up front
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            init.run()
            checkpoint_path = os.path.join(params['modelpath'], training_name)
            model_name = '/model-65000'
            if checkpoint_path:
                saver.restore(sess, checkpoint_path + model_name)
                print("restore from " + checkpoint_path + model_name)

            summary_writer = tf.summary.FileWriter(
                os.path.join(params['logpath'], training_name), sess.graph)
            total_step_num = params['num_train_samples'] * params[
                'max_epoch'] // (params['batchsize'] * params['gpus'])
            print("Start training...")
            for step in tqdm(range(total_step_num)):
                _, loss_value = sess.run([train_op, loss])
                if step % params['per_update_tensorboard_step'] == 0:
                    """
                                        loss, ufxuz\
                        = get_loss_and_output(params['model'], params['batchsize'],
                                              scoremap, hand_motion, reuse_variable)
                    """
                    loss_v, ufxuz_v, scoremap_v, hand_motion_v = sess.run(
                        [loss, ufxuz, scoremap, hand_motion])

                    fig = plt.figure(1)
                    plt.clf()
                    ax1 = fig.add_subplot(1, 2, 1)
                    ax1.imshow(
                        scoremap_v[0, :, :,
                                   0])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    """
                            loss_l2r = tf.nn.l2_loss(hand_motion[:, 0] - ur[:, 0], name='lossr_heatmap_stage%d' % idx)
                            loss_l2x = tf.nn.l2_loss(hand_motion[:, 1] - ux[:, 0], name='lossx_heatmap_stage%d' % idx)
                            loss_l2y = tf.nn.l2_loss(hand_motion[:, 2] - uy[:, 0], name='lossy_heatmap_stage%d' % idx)
                            loss_l2z = tf.nn.l2_loss(hand_motion[:, 3] - uz[:, 0], name='lossz_heatmap_stage%d' % idx)
                            losses.append(loss_l2x+loss_l2y+loss_l2r*0.001+loss_l2z*0.001)
                    
                        ufxuz = tf.concat(values=[ur, ux, uy, uz], axis=1, name='fxuz')
                    """
                    ax2 = fig.add_subplot(1, 2, 2)
                    ax2.plot([0, hand_motion_v[0, 1]],
                             [0, hand_motion_v[0, 2]],
                             label="label",
                             color='red')
                    ax2.plot([0, ufxuz_v[0, 1]], [0, ufxuz_v[0, 2]],
                             label="predict",
                             color='blue')
                    ax2.set_xlim((-1, 1))
                    ax2.set_ylim((1, -1))
                    ax2.grid(True)

                    plt.savefig(
                        os.path.join(params['logpath']) + "/" +
                        str(step).zfill(10) + "_.png")

                    print("loss:" + str(loss_value))

                    # save model
                if step % params['per_saved_model_step'] == 0:
                    saver.save(sess,
                               os.path.join(checkpoint_path, 'model'),
                               global_step=step)
Example #4
def main(argv=None):
    # load config file and setup
    params = {}
    config = configparser.ConfigParser()
    config_file = "../experiments/mv2_cpm.cfg"
    if len(argv) != 1:
        config_file = argv[1]
    config.read(config_file)
    for _ in config.options("Train"):
        params[_] = eval(config.get("Train", _))

    os.environ['CUDA_VISIBLE_DEVICES'] = params['visible_devices']

    gpus_index = params['visible_devices'].split(",")
    params['gpus'] = len(gpus_index)

    if not os.path.exists(params['modelpath']):
        os.makedirs(params['modelpath'])
    if not os.path.exists(params['logpath']):
        os.makedirs(params['logpath'])

    gpus = 'gpus'
    if platform.system() == 'Darwin':
        gpus = 'cpu'
    training_name = '{}_batch-{}_lr-{}_{}-{}_{}x{}_{}'.format(
        params['model'], params['batchsize'], params['lr'], gpus,
        params['gpus'], params['input_width'], params['input_height'],
        config_file.replace("/", "-").replace(".cfg", ""))

    with tf.Graph().as_default(), tf.device("/cpu:0"):
        dataset_RHD = RHD(batchnum=test_num)

        global_step = tf.Variable(0, trainable=False)

        reuse_variable = False

        for i in range(params['gpus']):
            with tf.device("/gpu:%d" % i):
                with tf.name_scope("GPU_%d" % i):
                    input_node = tf.placeholder(tf.float32,
                                                shape=[test_num, 32, 32, 3],
                                                name="input_image")

                    batch_data_all = dataset_RHD.get_batch_data
                    input_image1 = batch_data_all[8]
                    input_image2 = batch_data_all[10]
                    hand_motion = batch_data_all[9]
                    scoremap1 = batch_data_all[11]
                    scoremap2 = batch_data_all[12]
                    is_loss1 = batch_data_all[13]
                    is_loss2 = batch_data_all[14]

                    # input_image = tf.concat([input_image1, input_image1_back], 0)  # along the batch dimension: hand1 then back1
                    # scoremap = tf.concat([scoremap1, scoremap1_back], 0)
                    # is_loss = tf.concat([is_loss1, is_loss1_back], 0)

                    input_image = input_node
                    scoremap = scoremap1
                    is_loss = is_loss1

                    # compute the loss over a single scoremap
                    scoremap = tf.reduce_sum(scoremap, axis=-1)
                    one_scoremap = tf.ones_like(scoremap)
                    scoremap = tf.where(scoremap > 1,
                                        x=one_scoremap,
                                        y=scoremap)
                    scoremap = tf.expand_dims(scoremap, axis=-1)  # hand back
                    is_loss = tf.expand_dims(is_loss, axis=-1)
                    """
                    model, batchsize, input_image, scoremap, is_loss, reuse_variables=None
                    total_loss, loss_is_loss, loss_scoremap, pred_heatmaps_tmp, pre_is_loss, pred_heatmaps_tmp_01_modi
                    """
                    preheat, pre_is_loss, pred_heatmaps_tmp_01_modi\
                        = get_loss_and_output(params['model'], params['batchsize'],
                                                input_image, scoremap, is_loss, reuse_variable)

        saver = tf.train.Saver(max_to_keep=10)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        # let GPU memory allocation grow on demand instead of reserving it all up front
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            init.run()
            checkpoint_path = os.path.join(params['modelpath'], training_name)
            model_name = 'model-15350'
            if checkpoint_path:
                saver.restore(sess, checkpoint_path + '/' + model_name)
                print("restore from " + checkpoint_path + '/' + model_name)
            total_step_num = params['num_train_samples'] * params[
                'max_epoch'] // (params['batchsize'] * 2 * params['gpus'])

            print("Start testing...")
            path = "/home/chen/Documents/Mobile_hand/experiments/varify/image/set1/"
            import matplotlib.image
            for step in tqdm(range(286)):
                image_raw12_crop = matplotlib.image.imread(
                    path + str(int(step / 2)).zfill(5) + '_' +
                    str(step % 2 + 1) + '.jpg')
                image_raw12_crop = image_raw12_crop.astype(
                    'float') / 255.0 - 0.5
                scoremap_v, is_loss_v,\
                preheat_v, pre_is_loss_v, pred_heatmaps_tmp_01_modi_v\
                    = sess.run(
                    [scoremap, is_loss,
                     preheat, pre_is_loss, pred_heatmaps_tmp_01_modi],
                    feed_dict={input_node: np.repeat(image_raw12_crop[np.newaxis, :],test_num,axis=0)})
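                # The single crop is tiled test_num times along the batch axis so it
                # matches the fixed batch size expected by input_node.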

                input_image_v = (image_raw12_crop + 0.5) * 255
                input_image_v = input_image_v.astype(np.int16)

                fig = plt.figure(1)
                plt.clf()
                ax1 = fig.add_subplot(2, 3, 1)
                ax1.imshow(
                    input_image_v)  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax1.axis('off')

                ax3 = fig.add_subplot(2, 3, 2)
                ax3.imshow(
                    preheat_v[0, :, :,
                              0])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax3.axis('off')
                ax3.set_title(str(pre_is_loss_v[0, 0]))  # hand1 back1

                ax7 = fig.add_subplot(2, 3, 5)
                ax7.imshow(
                    preheat_v[0, :, :,
                              1])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax7.axis('off')
                ax7.set_title(str(pre_is_loss_v[0, 1]))  # hand1 back1

                ax4 = fig.add_subplot(2, 3, 3)
                ax4.imshow(pred_heatmaps_tmp_01_modi_v[
                    0, :, :, 0])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax4.axis('off')
                ax8 = fig.add_subplot(2, 3, 6)
                ax8.imshow(pred_heatmaps_tmp_01_modi_v[
                    0, :, :, 1])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax8.axis('off')

                ax2 = fig.add_subplot(2, 3, 4)
                ax2.imshow(pred_heatmaps_tmp_01_modi_v[0, :, :, 0] -
                           pred_heatmaps_tmp_01_modi_v[0, :, :, 1]
                           )  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax2.axis('off')

                plt.savefig(
                    "/home/chen/Documents/Mobile_hand/experiments/varify/image/valid_on_cam/softmax/"
                    + str(step).zfill(10) + model_name + "_.png")
Example #5
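# This example only restores a trained checkpoint and runs the forward pass to dump
# visualization figures; no optimizer or train_op is built.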
def main(argv=None):
    # load config file and setup
    params = {}
    config = configparser.ConfigParser()
    config_file = "../experiments/mv2_cpm.cfg"
    if len(argv) != 1:
        config_file = argv[1]
    config.read(config_file)
    for _ in config.options("Train"):
        params[_] = eval(config.get("Train", _))

    os.environ['CUDA_VISIBLE_DEVICES'] = params['visible_devices']

    gpus_index = params['visible_devices'].split(",")
    params['gpus'] = len(gpus_index)

    if not os.path.exists(params['modelpath']):
        os.makedirs(params['modelpath'])
    if not os.path.exists(params['logpath']):
        os.makedirs(params['logpath'])

    gpus = 'gpus'
    if platform.system() == 'Darwin':
        gpus = 'cpu'
    training_name = '{}_batch-{}_lr-{}_{}-{}_{}x{}_{}'.format(
        params['model'], '32', params['lr'], gpus, params['gpus'],
        params['input_width'], params['input_height'],
        "..-experiments-mv2_hourglass")

    with tf.Graph().as_default(), tf.device("/cpu:0"):
        dataset_RHD = RHD(batchnum=params['batchsize'])

        # global_step = tf.Variable(0, trainable=False)
        # learning_rate = tf.train.exponential_decay(float(params['lr']), global_step,
        #                                            decay_steps=10000, decay_rate=float(params['decay_rate']),
        #                                            staircase=True)
        # opt = tf.train.AdamOptimizer(learning_rate, epsilon=1e-8)
        # tower_grads = []
        reuse_variable = False

        for i in range(params['gpus']):
            with tf.device("/gpu:%d" % i):
                with tf.name_scope("GPU_%d" % i):
                    #input_image, keypoint_xyz, keypoint_uv, input_heat, keypoint_vis, k, num_px_left_hand, num_px_right_hand \
                    batch_data_all = dataset_RHD.get_batch_data
                    input_image1 = batch_data_all[8]
                    input_image2 = batch_data_all[10]
                    # hand_motion = batch_data_all[9]
                    # scoremap1 = batch_data_all[11]
                    # scoremap2 = batch_data_all[12]
                    # is_loss1 = batch_data_all[13]
                    # is_loss2 = batch_data_all[14]

                    batch_data_all_back = dataset_RHD.coco_get_batch_back_data
                    input_image1_back = batch_data_all_back[8]
                    input_image2_back = batch_data_all_back[10]
                    # hand_motion_back = batch_data_all_back[9]
                    # scoremap1_back = batch_data_all_back[11]
                    # scoremap2_back = batch_data_all_back[12]
                    # is_loss1_back = batch_data_all_back[13]
                    # is_loss2_back = batch_data_all_back[14]

                    input_image1 = tf.concat([input_image1, input_image1_back],
                                             0)  # along the batch dimension: hand1 then back1
                    input_image2 = tf.concat([input_image2, input_image2_back],
                                             0)
                    # hand_motion = tf.concat([hand_motion, hand_motion_back], 0)  # along the batch dimension: hand12 then back12
                    # scoremap1 = tf.concat([scoremap1, scoremap1_back], 0)
                    # scoremap2 = tf.concat([scoremap2, scoremap2_back], 0)
                    # is_loss1 = tf.concat([is_loss1, is_loss1_back], 0)
                    # is_loss2 = tf.concat([is_loss2, is_loss2_back], 0)
                    """
                    total_loss, motion_loss*0.00001, loss_scoremap*0.001, loss_is_loss,\
                               ur, ux, uy, uz, ufxuz, pred_heatmaps_tmp, pre_is_loss, is_loss12
                    """
                    ur, ux, uy, uz, ufxuz, preheat, preheat_, pre_is_loss\
                        = get_loss_and_output(params['model'], params['batchsize'], input_image1, input_image2, reuse_variable)

        saver = tf.train.Saver(max_to_keep=10)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        # let GPU memory allocation grow on demand instead of reserving it all up front
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            init.run()
            checkpoint_path = os.path.join(params['modelpath'], training_name)
            model_name = '/model-29500'
            if checkpoint_path:
                saver.restore(sess, checkpoint_path + model_name)
                print("restore from " + checkpoint_path + model_name)

            summary_writer = tf.summary.FileWriter(
                os.path.join(params['logpath'], training_name), sess.graph)
            total_step_num = params['num_train_samples'] * params[
                'max_epoch'] // (params['batchsize'] * params['gpus'])
            print("Start training...")
            for step in tqdm(range(total_step_num)):
                valid_input_image1, valid_input_image2, \
                ur_v, ux_v, uy_v, uz_v, preheat_v, preheat_m_v, pre_is_loss_v = sess.run(
                    [input_image1, input_image2, ur, ux, uy, uz, preheat, preheat_, pre_is_loss])
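                # The indices below step in multiples of batchsize: each fetched
                # tensor presumably stacks blocks of batchsize samples along the
                # batch axis in the order hand1, back1, hand2, back2.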

                valid_input_image1 = (valid_input_image1 + 0.5) * 255
                valid_input_image1 = valid_input_image1.astype(np.int16)

                valid_input_image2 = (valid_input_image2 + 0.5) * 255
                valid_input_image2 = valid_input_image2.astype(np.int16)

                fig = plt.figure(1)
                plt.clf()
                ax1 = fig.add_subplot(3, 4, 1)
                ax1.imshow(valid_input_image1[
                    0, :, :, :])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax1.axis('off')
                ax2 = fig.add_subplot(3, 4, 2)
                ax2.imshow(
                    valid_input_image2[0, :, :, :])  # along the batch dimension: hand2 then back2
                ax2.axis('off')
                ax3 = fig.add_subplot(3, 4, 3)
                ax3.imshow(valid_input_image1[
                    1 *
                    params['batchsize'], :, :, :])  # along the batch dimension: hand1 then back1
                ax3.axis('off')
                ax4 = fig.add_subplot(3, 4, 4)
                ax4.imshow(valid_input_image2[
                    1 *
                    params['batchsize'], :, :, :])  # along the batch dimension: hand2 then back2
                ax4.axis('off')

                ax5 = fig.add_subplot(3, 4, 5)
                ax5.imshow(
                    np.sum(preheat_v[0],
                           axis=-1))  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax5.axis('off')
                ax5.set_title(str(pre_is_loss_v[0]))  # hand1 back1 hand2 back2
                ax6 = fig.add_subplot(3, 4, 6)
                ax6.imshow(np.sum(preheat_v[2 * params['batchsize']],
                                  axis=-1))  # along the batch dimension: hand2 then back2
                ax6.axis('off')
                ax6.set_title(str(pre_is_loss_v[2 * params['batchsize']]))
                ax7 = fig.add_subplot(3, 4, 7)
                ax7.imshow(np.sum(preheat_v[1 * params['batchsize']],
                                  axis=-1))  # along the batch dimension: hand1 then back1
                ax7.axis('off')
                ax7.set_title(str(pre_is_loss_v[1 * params['batchsize']]))
                ax8 = fig.add_subplot(3, 4, 8)
                ax8.imshow(np.sum(preheat_v[3 * params['batchsize']],
                                  axis=-1))  # along the batch dimension: hand2 then back2
                ax8.axis('off')
                ax8.set_title(str(pre_is_loss_v[3 * params['batchsize']]))

                ax9 = fig.add_subplot(3, 4, 9)
                ax9.imshow(
                    np.sum(preheat_m_v[0 * params['batchsize']],
                           axis=-1))  #hand1 back1 hand2 back2
                ax9.axis('off')
                ax10 = fig.add_subplot(3, 4, 10)
                ax10.imshow(
                    np.sum(preheat_m_v[2 * params['batchsize']], axis=-1))
                ax10.axis('off')
                ax11 = fig.add_subplot(3, 4, 11)
                ax11.imshow(
                    np.sum(preheat_m_v[1 * params['batchsize']], axis=-1))
                ax11.axis('off')
                ax12 = fig.add_subplot(3, 4, 12)
                ax12.imshow(
                    np.sum(preheat_m_v[3 * params['batchsize']], axis=-1))
                ax12.axis('off')
                plt.savefig(
                    os.path.join(params['logpath'], training_name) + "/" +
                    str(step).zfill(10) + ".png")

                fig2 = plt.figure(2)
                plt.clf()
                ax13 = fig2.add_subplot(2, 4, 2)  #hand12 back12
                ax13.plot([0, ux_v[0]], [0, uy_v[0]],
                          label="predict",
                          color='blue')
                ax13.set_xlim((-1, 1))
                ax13.set_ylim((1, -1))
                ax13.grid(True)

                ax15 = fig2.add_subplot(2, 4, 3)
                ax15.plot([0, ux_v[1 * params['batchsize']]],
                          [0, uy_v[1 * params['batchsize']]],
                          label="predict",
                          color='blue')
                ax15.set_xlim((-1, 1))
                ax15.set_ylim((-1, 1))
                ax15.grid(True)
                ax16 = fig2.add_subplot(2, 4, 5)
                ax16.imshow(np.sum(preheat_m_v[0],
                                   axis=-1))  #hand1 back1 hand2 back2
                ax16.axis('off')
                ax17 = fig2.add_subplot(2, 4, 6)
                ax17.imshow(
                    np.sum(preheat_m_v[2 * params['batchsize']], axis=-1))
                ax17.axis('off')
                ax18 = fig2.add_subplot(2, 4, 7)
                ax18.imshow(
                    np.sum(preheat_m_v[1 * params['batchsize']], axis=-1))
                ax18.axis('off')
                ax19 = fig2.add_subplot(2, 4, 8)
                ax19.imshow(
                    np.sum(preheat_m_v[3 * params['batchsize']], axis=-1))
                ax19.axis('off')

                plt.savefig(
                    os.path.join(params['logpath'], training_name) + "/" +
                    str(step).zfill(10) + "_.png")
Example #6
def main(argv=None):
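    # This example tracks a fingertip from a webcam: each frame it crops a box
    # around the current center, runs the network on the crop, and moves the box
    # toward the detected fingertip.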
    # load config file and setup
    params = {}
    config = configparser.ConfigParser()
    config_file = "../experiments/mv2_cpm.cfg"
    if len(argv) != 1:
        config_file = argv[1]
    config.read(config_file)
    for _ in config.options("Train"):
        params[_] = eval(config.get("Train", _))

    os.environ['CUDA_VISIBLE_DEVICES'] = params['visible_devices']

    gpus_index = params['visible_devices'].split(",")
    params['gpus'] = len(gpus_index)

    if not os.path.exists(params['modelpath']):
        os.makedirs(params['modelpath'])
    if not os.path.exists(params['logpath']):
        os.makedirs(params['logpath'])

    gpus = 'gpus'
    if platform.system() == 'Darwin':
        gpus = 'cpu'
    training_name = '{}_batch-{}_lr-{}_{}-{}_{}x{}_{}'.format(
        params['model'], params['batchsize'], params['lr'], gpus,
        params['gpus'], params['input_width'], params['input_height'],
        config_file.replace("/", "-").replace(".cfg", ""))

    with tf.Graph().as_default(), tf.device("/cpu:0"):
        dataset_RHD = RHD(batchnum=test_num)

        global_step = tf.Variable(0, trainable=False)

        reuse_variable = False

        for i in range(params['gpus']):
            with tf.device("/gpu:%d" % i):
                with tf.name_scope("GPU_%d" % i):
                    input_node = tf.placeholder(tf.float32,
                                                shape=[test_num, 32, 32, 3],
                                                name="input_image")

                    batch_data_all = dataset_RHD.get_batch_data
                    input_image1 = batch_data_all[8]
                    input_image2 = batch_data_all[10]
                    hand_motion = batch_data_all[9]
                    scoremap1 = batch_data_all[11]
                    scoremap2 = batch_data_all[12]
                    is_loss1 = batch_data_all[13]
                    is_loss2 = batch_data_all[14]

                    # input_image = tf.concat([input_image1, input_image1_back], 0)  # along the batch dimension: hand1 then back1
                    # scoremap = tf.concat([scoremap1, scoremap1_back], 0)
                    # is_loss = tf.concat([is_loss1, is_loss1_back], 0)

                    input_image = input_node
                    scoremap = scoremap1
                    is_loss = is_loss1

                    # compute the loss over a single scoremap
                    scoremap = tf.reduce_sum(scoremap, axis=-1)
                    one_scoremap = tf.ones_like(scoremap)
                    scoremap = tf.where(scoremap > 1,
                                        x=one_scoremap,
                                        y=scoremap)
                    scoremap = tf.expand_dims(scoremap, axis=-1)  # hand back
                    is_loss = tf.expand_dims(is_loss, axis=-1)
                    """
                    model, batchsize, input_image, scoremap, is_loss, reuse_variables=None
                    total_loss, loss_is_loss, loss_scoremap, pred_heatmaps_tmp, pre_is_loss, pred_heatmaps_tmp_01_modi
                    """
                    preheat, pre_is_loss, pred_heatmaps_tmp_01_modi\
                        = get_loss_and_output(params['model'], params['batchsize'],
                                                input_image, scoremap, is_loss, reuse_variable)

        saver = tf.train.Saver(max_to_keep=10)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        # let GPU memory allocation grow on demand instead of reserving it all up front
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            init.run()
            cap = cv2.VideoCapture(0)
            centerx = 160
            centery = 120
            size = 24
            color_ = (0, 255, 0)
            checkpoint_path = os.path.join(params['modelpath'], training_name)
            model_name = 'model-4200'
            if checkpoint_path:
                saver.restore(sess, checkpoint_path + '/' + model_name)
                print("restore from " + checkpoint_path + '/' + model_name)
            total_step_num = params['num_train_samples'] * params[
                'max_epoch'] // (params['batchsize'] * 2 * params['gpus'])

            print("Start testing...")
            path = "/home/chen/Documents/Mobile_hand/experiments/varify/image/set1/"
            import matplotlib.image
            for step in tqdm(range(100000)):
                _, image_raw1 = cap.read()
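                # scipy.misc.imresize was removed in SciPy >= 1.3; cv2.resize is the
                # usual replacement if this snippet is run against a newer SciPy.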
                image_raw1 = scipy.misc.imresize(image_raw1, (240, 320))
                image_raw1_crop = np.array(
                    image_raw1[centery - size:centery + size,
                               centerx - size:centerx + size])
                image_raw1_crop = cv2.resize(image_raw1_crop,
                                             (int(32), int(32)),
                                             interpolation=cv2.INTER_AREA)
                first_point = (centerx - size, centery - size)
                last_point = (centerx + size, centery + size)
                cv2.rectangle(image_raw1,
                              first_point,
                              last_point,
                              color=color_,
                              thickness=2)

                image_raw12_crop = image_raw1_crop.astype(
                    'float') / 255.0 - 0.5
                scoremap_v, is_loss_v,\
                preheat_v, pre_is_loss_v, pred_heatmaps_tmp_01_modi_v\
                    = sess.run(
                    [scoremap, is_loss,
                     preheat, pre_is_loss, pred_heatmaps_tmp_01_modi],
                    feed_dict={input_node: np.repeat(image_raw12_crop[np.newaxis, :],test_num,axis=0)})

                # use preheat_v to find the most likely fingertip location; update centerx and centery only when a fingertip is detected
                if pre_is_loss_v[0, 0] > pre_is_loss_v[0, 1]:
                    color_ = (0, 255, 0)
                    motion = preheat_v[0, :, :, 0] - preheat_v[0, :, :, 1]
                    row, column = motion.shape
                    # index of the maximum response in the flattened motion map
                    _position = np.argmax(motion)
                    m, n = divmod(_position, column)
                else:
                    color_ = (255, 0, 0)
                    m = 15.5
                    n = 15.5
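                # Map the 32x32 heatmap argmax (center index 15.5) back into the
                # 48x48-pixel crop (2 * size) to shift the tracking box.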

                right_move = int((n - 15.5) / 32 * 48)
                down_move = int((m - 15.5) / 32 * 48)
                centery = centery + down_move
                centerx = centerx + right_move
                input_image_v = (image_raw12_crop + 0.5) * 255
                input_image_v = input_image_v.astype(np.int16)

                if centery < 0 or centery > 240:
                    centery = 120

                if centerx < 0 or centerx > 320:
                    centerx = 160

                fig = plt.figure(1)
                plt.clf()
                ax1 = fig.add_subplot(2, 3, 1)
                ax1.imshow(
                    input_image_v)  # along the batch dimension: hand1 (0~31), back1 (32~63)

                ax3 = fig.add_subplot(2, 3, 2)
                ax3.imshow(
                    preheat_v[0, :, :,
                              0])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax3.set_title(str(pre_is_loss_v[0, 0]))  # hand1 back1

                ax7 = fig.add_subplot(2, 3, 5)
                ax7.imshow(
                    preheat_v[0, :, :,
                              1])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax7.set_title(str(pre_is_loss_v[0, 1]))  # hand1 back1

                ax4 = fig.add_subplot(2, 3, 3)
                ax4.imshow(pred_heatmaps_tmp_01_modi_v[
                    0, :, :, 0])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                ax4.set_title('m:' + str(m) + ' n:' + str(n))

                ax8 = fig.add_subplot(2, 3, 6)
                ax8.imshow(pred_heatmaps_tmp_01_modi_v[
                    0, :, :, 1])  # along the batch dimension: hand1 (0~31), back1 (32~63)

                ax2 = fig.add_subplot(2, 3, 4)
                ax2.imshow(image_raw1)  # full webcam frame with the tracking rectangle drawn

                plt.savefig(
                    "/home/chen/Documents/Mobile_hand/experiments/varify/image/valid_on_cam/softmax/"
                    + str(step).zfill(10) + model_name + "_.png")
                plt.pause(0.01)
Example #7
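# This example trains a single-image scoremap network: the target is a two-channel
# foreground/background map built from the dataset scoremap, and finger_mask_sum1
# is passed into the loss (presumably for the z-rate term named in the docstring
# further down).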
def main(argv=None):
    # load config file and setup
    params = {}
    config = configparser.ConfigParser()
    config_file = "../experiments/mv2_cpm.cfg"
    if len(argv) != 1:
        config_file = argv[1]
    config.read(config_file)
    for _ in config.options("Train"):
        params[_] = eval(config.get("Train", _))

    os.environ['CUDA_VISIBLE_DEVICES'] = params['visible_devices']

    gpus_index = params['visible_devices'].split(",")
    params['gpus'] = len(gpus_index)

    if not os.path.exists(params['modelpath']):
        os.makedirs(params['modelpath'])
    if not os.path.exists(params['logpath']):
        os.makedirs(params['logpath'])

    gpus = 'gpus'
    if platform.system() == 'Darwin':
        gpus = 'cpu'
    training_name = '{}_batch-{}_lr-{}_{}-{}_{}x{}_{}'.format(
        params['model'], params['batchsize'], params['lr'], gpus,
        params['gpus'], params['input_width'], params['input_height'],
        config_file.replace("/", "-").replace(".cfg", ""))

    with tf.Graph().as_default(), tf.device("/cpu:0"):
        dataset_RHD = RHD(batchnum=params['batchsize'])

        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(float(params['lr']),
                                                   global_step,
                                                   decay_steps=10000,
                                                   decay_rate=float(
                                                       params['decay_rate']),
                                                   staircase=True)
        opt = tf.train.AdamOptimizer(learning_rate, epsilon=1e-8)
        tower_grads = []
        reuse_variable = False

        for i in range(params['gpus']):
            with tf.device("/gpu:%d" % i):
                with tf.name_scope("GPU_%d" % i):
                    #input_image, keypoint_xyz, keypoint_uv, input_heat, keypoint_vis, k, num_px_left_hand, num_px_right_hand \
                    batch_data_all = dataset_RHD.get_batch_data
                    finger_mask_sum1 = batch_data_all[1]
                    input_image1 = batch_data_all[2]
                    scoremap1 = batch_data_all[4]

                    # input_image = tf.concat([input_image1, input_image1_back], 0)  # along the batch dimension: hand1 then back1
                    # scoremap = tf.concat([scoremap1, scoremap1_back], 0)
                    # is_loss = tf.concat([is_loss1, is_loss1_back], 0)

                    input_image = input_image1
                    scoremap = scoremap1
                    # is_loss = is_loss1

                    # compute the loss over a single scoremap
                    #scoremap = tf.reduce_sum(scoremap, axis=-1)
                    one_scoremap = tf.ones_like(scoremap)
                    scoremap = tf.where(scoremap > 1,
                                        x=one_scoremap,
                                        y=scoremap)
                    scoremap = tf.expand_dims(scoremap, axis=-1)
                    scoremap = tf.concat([scoremap, 1 - scoremap], axis=-1)
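                    # The training target now has two channels: the clamped keypoint
                    # scoremap and its complement, i.e. a foreground/background pair.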

                    # is_loss = tf.expand_dims(is_loss, axis=-1)
                    # is_loss = tf.concat([is_loss, 1 - is_loss], axis=-1)
                    """
                    [total_loss, loss_scoremap, loss_zrate, z_rate_pre]
                     , [total_loss, loss_scoremap, loss_zrate, z_rate_pre, finger_mask_sum]
                    """
                    loss, preheat = get_loss_and_output(
                        params['model'], params['batchsize'], input_image,
                        scoremap, finger_mask_sum1)

                    grads = opt.compute_gradients(loss)
                    tower_grads.append(grads)

        grads = average_gradients(tower_grads)
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram("gradients_on_average/%s" % var.op.name,
                                     grad)

        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

        MOVING_AVERAGE_DECAY = 0.99
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variable_to_average = (tf.trainable_variables() +
                               tf.moving_average_variables())
        variables_averages_op = variable_averages.apply(variable_to_average)

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = tf.group(apply_gradient_op, variables_averages_op)

        saver = tf.train.Saver(max_to_keep=10)

        tf.summary.scalar("learning_rate", learning_rate)
        tf.summary.scalar("loss", loss)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        # let GPU memory allocation grow on demand instead of reserving it all up front
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            init.run()
            checkpoint_path = os.path.join(params['modelpath'], training_name)
            model_name = '/model-700'
            if checkpoint_path:
                saver.restore(sess, checkpoint_path + model_name)
                print("restore from " + checkpoint_path + model_name)
            total_step_num = params['num_train_samples'] * params[
                'max_epoch'] // (params['batchsize'] * 2 * params['gpus'])

            print("Start training...")
            for step in tqdm(range(total_step_num)):
                _, loss_value = sess.run([train_op, loss])
                if step % params['per_update_tensorboard_step'] == 0:
                    # [total_loss, loss_scoremap, loss_zrate, z_rate_pre]
                    loss_v, input_image_v, scoremap_v, preheat_v = sess.run(
                        [loss, input_image, scoremap, preheat])
                    #loss_scoremap, loss_zrate, z_rate_pre, finger_mask_sum])
                    input_image_v_r = np.reshape(input_image_v,
                                                 [params['batchsize'], -1])
                    #np.savetxt(checkpoint_path+"/test_image.txt", input_image_v_r, fmt='%f', delimiter=',')

                    input_image_v = (input_image_v + 0.5) * 255
                    input_image_v = input_image_v.astype(np.int16)

                    # fig = plt.figure(1)
                    # plt.clf()
                    # ax1 = fig.add_subplot(2, 4, 1)
                    # ax1.imshow(input_image_v[0, :, :, :])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    # ax1.axis('off')
                    #
                    #
                    # ax2 = fig.add_subplot(2, 4, 2)
                    # ax2.imshow(scoremap_v[0, :, :, 0])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    # ax2.axis('off')
                    # ax2.set_title(str(is_loss_v[0, 0]))  # hand1 back1
                    #
                    # ax6 = fig.add_subplot(2, 4, 6)
                    # ax6.imshow(scoremap_v[0, :, :, 1])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    # if np.sum(scoremap_v[0, :, :, 1])==0:
                    #     print("zero")
                    # ax6.axis('off')
                    # ax6.set_title(str(is_loss_v[0, 1]))  # hand1 back1
                    #
                    #
                    # ax3 = fig.add_subplot(2, 4, 3)
                    # ax3.imshow(preheat_v[0, :, :, 0])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    # ax3.axis('off')
                    # ax3.set_title(str(pre_is_loss_v[0, 0]))  # hand1 back1
                    #
                    # ax7 = fig.add_subplot(2, 4, 7)
                    # ax7.imshow(preheat_v[0, :, :, 1])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    # ax7.axis('off')
                    # ax7.set_title(str(pre_is_loss_v[0, 1]))  # hand1 back1
                    #
                    #
                    # ax4 = fig.add_subplot(2, 4, 4)
                    # ax4.imshow(pred_heatmaps_tmp_01_modi_v[0, :, :, 0])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    # ax4.axis('off')
                    # ax8 = fig.add_subplot(2, 4, 8)
                    # ax8.imshow(pred_heatmaps_tmp_01_modi_v[0, :, :, 1])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    # ax8.axis('off')
                    #
                    # ax5 = fig.add_subplot(2, 4, 5)
                    # ax5.imshow(pred_heatmaps_tmp_01_modi_v[0, :, :, 0] - pred_heatmaps_tmp_01_modi_v[0, :, :, 1])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    # ax5.axis('off')
                    #
                    # plt.savefig(os.path.join(params['logpath']) + "/" + str(step).zfill(10) + "_.png")
                    #
                    # print("loss:"+str(loss_v), " is_loss_loss:"+str(loss_is_loss_v)+" scoremap_loss:"
                    #       +str(loss_scoremap_v)+" scoremap_loss:"+str(loss_scoremap_m_v))

                    fig = plt.figure(1)
                    plt.clf()
                    ax1 = fig.add_subplot(3, 1, 1)
                    ax1.imshow(input_image_v[
                        0, :, :, :])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    ax1.axis('off')

                    ax2 = fig.add_subplot(3, 1, 2)
                    ax2.imshow(
                        scoremap_v[0, :, :,
                                   0])  # along the batch dimension: hand1 (0~31), back1 (32~63)

                    ax6 = fig.add_subplot(3, 1, 3)
                    ax6.imshow(
                        preheat_v[0, :, :,
                                  0])  # along the batch dimension: hand1 (0~31), back1 (32~63)
                    # ax2.set_title(str([round(i,4) for i in finger_mask_sum_v[0]]),fontsize=10)
                    # ax6.set_title(str([round(i,4) for i in z_rate_pre_v[0]]),fontsize=10)
                    ax2.axis('off')
                    ax6.axis('off')
                    plt.savefig(
                        os.path.join(params['logpath']) + "/" +
                        str(step).zfill(10) + "_.png")

                    # print("loss:"+str(loss_v), " is_loss_loss:"+str(loss_is_loss_v)+" scoremap_loss:"
                    #       +str(loss_scoremap_v)+" scoremap_loss:"+str(loss_scoremap_m_v))
                    print(
                        "loss:" + str(loss_v)
                    )  #+"loss_scoremap_v: "+str(loss_scoremap_v)+"loss_zrate_v: "+str(loss_zrate_v))

                    # save model
                if step % params['per_saved_model_step'] == 0:
                    saver.save(sess,
                               os.path.join(checkpoint_path, 'model'),
                               global_step=step)