Example #1
if USE_RETRAINED:
    # retrained version: PoseNet
    last_cpt = tf.train.latest_checkpoint(PATH_TO_POSENET_SNAPSHOTS)
    assert last_cpt is not None, "Could not locate snapshot to load. Did you already train the network and set the path accordingly?"
    load_weights_from_snapshot(sess,
                               last_cpt,
                               discard_list=['Adam', 'global_step', 'beta'])
else:
    # load weights used in the paper
    net.init(sess,
             weight_files=[
                 './weights/handsegnet-rhd.pickle',
                 './weights/posenet-rhd-stb.pickle'
             ],
             exclude_var_list=['PosePrior', 'ViewpointNet'])

util = EvalUtil()
# iterate dataset
for i in range(dataset.num_samples):
    # get prediction
    keypoints_scoremap_v, scale_crop_v, center_v, kp_uv21_gt, kp_vis = sess.run(
        [keypoints_scoremap, scale_crop, center,
         data['keypoint_uv21'], data['keypoint_vis21']])

    keypoints_scoremap_v = np.squeeze(keypoints_scoremap_v)
    kp_uv21_gt = np.squeeze(kp_uv21_gt)
    kp_vis = np.squeeze(kp_vis)

    # detect keypoints
    coord_hw_pred_crop = detect_keypoints(keypoints_scoremap_v)
    coord_hw_pred = trafo_coords(coord_hw_pred_crop, center_v, scale_crop_v, 256)
    # swap (row, col) into (u, v) image coordinates
    coord_uv_pred = np.stack([coord_hw_pred[:, 1], coord_hw_pred[:, 0]], 1)
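
This example calls two helpers from the repository's utilities, detect_keypoints and trafo_coords. As a rough sketch of what they compute (signatures inferred from the call sites above, not the repository's exact code):

import numpy as np

# Sketch of detect_keypoints: per-channel argmax over the scoremaps,
# returning one (row, col) coordinate per keypoint channel.
def detect_keypoints(scoremaps):
    h, w, num_kp = scoremaps.shape
    coords_hw = np.zeros((num_kp, 2))
    for k in range(num_kp):
        coords_hw[k] = np.unravel_index(np.argmax(scoremaps[:, :, k]), (h, w))
    return coords_hw

# Sketch of trafo_coords: undo the crop by rescaling coordinates from the
# crop_size x crop_size patch and shifting them back to the crop center.
def trafo_coords(coords_hw_crop, center, scale, crop_size):
    return (coords_hw_crop - crop_size // 2) / scale + center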
Example #2

# initialize network weights
if USE_RETRAINED:
    # retrained version
    last_cpt = tf.train.latest_checkpoint(PATH_TO_SNAPSHOTS)
    assert last_cpt is not None, "Could not locate snapshot to load. Did you already train the network and set the path accordingly?"
    load_weights_from_snapshot(sess,
                               last_cpt,
                               discard_list=['Adam', 'global_step', 'beta'])
else:
    # load weights used in the paper
    net.init(sess,
             weight_files=['./weights/posenet-rhd-stb.pickle'],
             exclude_var_list=['PosePrior', 'ViewpointNet'])

util = EvalUtil()
# iterate dataset
for i in range(dataset.num_samples):
    # get prediction
    crop_scale, keypoints_scoremap_v, kp_uv21_gt, kp_vis = sess.run([
        data['crop_scale'], keypoints_scoremap, data['keypoint_uv21'],
        data['keypoint_vis21']
    ])

    keypoints_scoremap_v = np.squeeze(keypoints_scoremap_v)
    kp_uv21_gt = np.squeeze(kp_uv21_gt)
    kp_vis = np.squeeze(kp_vis)
    crop_scale = np.squeeze(crop_scale)

    # detect keypoints
    coord_hw_pred_crop = detect_keypoints(keypoints_scoremap_v)
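
Examples #1 and #2 both restore snapshots through load_weights_from_snapshot, discarding optimizer state ('Adam', 'global_step', 'beta'). A minimal TF1 sketch of such a helper (the repository's implementation may differ, e.g. by assigning variable values directly):

import tensorflow as tf

def load_weights_from_snapshot(session, checkpoint_path, discard_list=None):
    """Restore only the graph variables present in the checkpoint, skipping
    any whose name contains a term from discard_list."""
    discard_list = discard_list or []
    reader = tf.train.NewCheckpointReader(checkpoint_path)
    ckpt_vars = reader.get_variable_to_shape_map()
    to_restore = {}
    for v in tf.global_variables():
        name = v.name.split(':')[0]
        if name in ckpt_vars and not any(term in name for term in discard_list):
            to_restore[name] = v
    tf.train.Saver(var_list=to_restore).restore(session, checkpoint_path)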
Example #3
# Start TF
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8,
                            allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.Session(config=config)
tf.train.start_queue_runners(sess=sess)

# initialize network with weights used in the paper
net.init(sess,
         weight_files=[
             './weights/handsegnet-rhd.pickle',
             './weights/posenet3d-rhd-stb.pickle'
         ])

util = EvalUtil()
# iterate dataset
for i in range(dataset.num_samples):
    # get prediction
    keypoint_xyz21, keypoint_vis21, keypoint_scale, coord3d_pred_v = sess.run([
        data['keypoint_xyz21'], data['keypoint_vis21'], data['keypoint_scale'],
        coord3d_pred
    ])

    keypoint_xyz21 = np.squeeze(keypoint_xyz21)
    keypoint_vis21 = np.squeeze(keypoint_vis21)
    coord3d_pred_v = np.squeeze(coord3d_pred_v)
    keypoint_scale = np.squeeze(keypoint_scale)
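
The fetched tensors are presumably consumed as in Example #7 further down: rescale the prediction to meters, make both sets of keypoints root-relative, and feed the evaluator. A sketch of that assumed continuation (inside the same loop):

    # assumed continuation: metric rescaling and root-relative alignment
    coord3d_pred_v *= keypoint_scale                  # back to meters
    kp_gt_rel = keypoint_xyz21 - keypoint_xyz21[0]    # root-relative ground truth
    kp_pred_rel = coord3d_pred_v - coord3d_pred_v[0]  # root-relative prediction
    util.feed(kp_gt_rel, keypoint_vis21, kp_pred_rel)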
Example #4
# build network
net = CPM(out_chan=22)

# feed through network
scoremap, _ = net.inference(image_crop)[-1]

# Start TF
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)

weight_path = './weights/pose_model.npy'
net.init(weight_path, sess)

util = EvalUtil()
# iterate dataset
for i in range(dataset.num_samples):
    # get prediction
    keypoint_xyz21, keypoint_vis21, keypoint_scale, keypoint_uv21_v, image_crop_v, scoremap_v = \
        sess.run([data['keypoint_xyz21'], data['keypoint_vis21'], data['keypoint_scale'], data['keypoint_uv21'], image_crop, scoremap])

    keypoint_xyz21 = np.squeeze(keypoint_xyz21)
    keypoint_vis21 = np.squeeze(keypoint_vis21)
    keypoint_scale = np.squeeze(keypoint_scale)
    keypoint_uv21_v = np.squeeze(keypoint_uv21_v)
    image_crop_v = np.squeeze((image_crop_v + 0.5) * 256).astype(np.uint8)
    scoremap_v = np.squeeze(scoremap_v)
    # reverse each finger's four channels (keypoint order within a finger
    # differs between the CPM weights and the dataset convention)
    for ik in (1, 5, 9, 13, 17):
        scoremap_v[:, :, ik:ik + 4] = scoremap_v[:, :, ik + 3:ik - 1:-1]
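
The reversed-slice assignment above reads and writes overlapping memory. An explicit channel permutation expresses the same reordering without that subtlety (treating channel 0 as the wrist and channel 21 as the extra, non-finger channel of the 22-channel CPM output is an assumption):

    # build [0, 4, 3, 2, 1, 8, 7, 6, 5, ..., 20, 19, 18, 17, 21]
    # (could be hoisted outside the sample loop)
    order = [0]
    for base in (1, 5, 9, 13, 17):
        order.extend(range(base + 3, base - 1, -1))
    order.append(21)
    scoremap_v = scoremap_v[:, :, order]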
Example #5
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
tf.train.start_queue_runners(sess=sess)

colorlist_pred = ['#660000', '#b30000', '#ff0000', '#ff4d4d', '#ff9999']
colorlist_gt = ['#000066', '#0000b3', '#0000ff', '#4d4dff', '#9999ff']

# plot all five fingers, each chain closed back to the wrist (point 0)
def plot_fingers(points, plt_specs, c, ax):
    for i in range(5):
        start, end = i * 4 + 1, (i + 1) * 4 + 1
        to_plot = np.concatenate((points[start:end], points[0:1]), axis=0)
        ax.plot(to_plot[:, 0], to_plot[:, 1], to_plot[:, 2], plt_specs, color=c[i])

fig = None
util_vae = EvalUtil()
# iterate dataset
for i in range(dataset.num_samples):
    # Get required data
    keypoint_xyz21, keypoint_vis21, keypoint_scale, img_crop, hand_side = \
        sess.run([data['keypoint_xyz21'], data['keypoint_vis21'], data['keypoint_scale'], data['image_crop'], data['hand_side']])

    keypoint_xyz21 = np.squeeze(keypoint_xyz21)
    keypoint_vis21 = np.squeeze(keypoint_vis21)
    keypoint_scale = np.squeeze(keypoint_scale)
    img_crop = np.squeeze(img_crop)

    # Get VAE prediction (Variable(volatile=True) is the legacy pre-0.4
    # PyTorch API for inference without gradient tracking)
    img_res = cv.resize(img_crop, end_size)
    img_pyt = img_res.transpose(2, 0, 1).reshape((1, 3, end_size[0], end_size[1]))
    img_crop_var = Variable(np2pyt(img_pyt), volatile=True).cuda()
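
plot_fingers is defined above, but its call site falls outside the excerpt. A plausible use, comparing ground truth and a prediction in one 3D figure (coord3d_pred_v as the prediction name is an assumption, borrowed from the other examples):

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the 3d projection

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plot_fingers(keypoint_xyz21, '.-', colorlist_gt, ax)    # ground truth in blues
plot_fingers(coord3d_pred_v, '.-', colorlist_pred, ax)  # prediction in reds (assumed)
plt.show()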
Example #6
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)

# initialize network weights
if USE_RETRAINED:
    # retrained version
    last_cpt = tf.train.latest_checkpoint(PATH_TO_SNAPSHOTS)
    assert last_cpt is not None, "Could not locate snapshot to load. Did you already train the network and set the path accordingly?"
    load_weights_from_snapshot(sess, last_cpt, discard_list=['Adam', 'global_step', 'beta'])
    print('loading weights from {}'.format(last_cpt))
else:
    # load weights used in the paper
    net.init('./weights/pose_model.npy', sess)

util = EvalUtil()
# iterate dataset

results = []
for i in range(dataset.num_samples):
    # get prediction
    crop_scale, keypoints_scoremap_v, kp_uv21_gt, kp_vis, image_crop, crop_center, img_dir, hand_side = sess.run(
        [data['crop_scale'], keypoints_scoremap, data['keypoint_uv21'],
         data['keypoint_vis21'], data['image_crop'], data['crop_center'],
         data['img_dir'], data['hand_side']])

    keypoints_scoremap_v = np.squeeze(keypoints_scoremap_v)
    kp_uv21_gt = np.squeeze(kp_uv21_gt)
    kp_vis = np.squeeze(kp_vis)
    crop_scale = np.squeeze(crop_scale)
    crop_center = np.squeeze(crop_center)
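
Every example funnels predictions into EvalUtil. A minimal stand-in with the same feed/summary pattern (method names and signatures assumed, not the repository's exact implementation):

import numpy as np

class EvalUtil:
    """Collect Euclidean errors for visible keypoints; report mean/median/PCK."""
    def __init__(self, num_kp=21):
        self.data = [[] for _ in range(num_kp)]

    def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
        for k in range(len(self.data)):
            if keypoint_vis[k]:
                self.data[k].append(np.linalg.norm(keypoint_gt[k] - keypoint_pred[k]))

    def get_measures(self, val_min, val_max, steps):
        errs = np.array([e for kp in self.data for e in kp])
        thresholds = np.linspace(val_min, val_max, steps)
        pck = np.array([np.mean(errs <= t) for t in thresholds])
        return errs.mean(), np.median(errs), pck, thresholds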
Example #7
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
tf.train.start_queue_runners(sess=sess)

# initialize network with weights used in the paper
if USE_RETRAINED:
    # retrained version: 3D lifting network
    last_cpt = tf.train.latest_checkpoint('./snapshots_lifting_%s_drop/' %
                                          VARIANT)
    assert last_cpt is not None, "Could not locate snapshot to load. Did you already train the network?"
    load_weights_from_snapshot(sess,
                               last_cpt,
                               discard_list=['Adam', 'global_step', 'beta'])
else:
    net.init(sess, weight_files=['./weights/lifting-%s.pickle' % VARIANT])

util = EvalUtil()
# iterate dataset
for i in range(dataset.num_samples):
    # get prediction
    keypoint_xyz21, keypoint_scale, coord3d_pred_v = sess.run([
        data['keypoint_xyz21'],
        data['keypoint_scale'],
        coord3d_pred,
    ])

    keypoint_xyz21 = np.squeeze(keypoint_xyz21)
    keypoint_scale = np.squeeze(keypoint_scale)
    coord3d_pred_v = np.squeeze(coord3d_pred_v)

    # rescale to meters
    coord3d_pred_v *= keypoint_scale
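
After the loop, the accumulated errors are typically summarized as mean/median end-point error plus the area under the PCK curve. Assuming the get_measures interface sketched under Example #6 (thresholds here in meters):

mean, median, pck, thresholds = util.get_measures(0.0, 0.050, 20)
auc = np.trapz(pck, thresholds) / (thresholds[-1] - thresholds[0])
print('mean EPE: {:.4f} m, median: {:.4f} m, AUC: {:.3f}'.format(mean, median, auc))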
Example #8
    hand_side_tf = tf.constant([[1.0, 0.0]])  # left hand (true for all samples provided)
    evaluation = tf.placeholder_with_default(True, shape=())

    # build network
    net = ColorHandPose3DNetwork()
    hand_scoremap_tf, image_crop_tf, scale_tf, center_tf, \
    keypoints_scoremap_tf, keypoint_coord3d_tf = net.inference_crop(image_tf, hand_side_tf, evaluation)
    # Start TF
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

    # initialize network
    net.init(sess)

    # Feed image list through network
    eval2d = EvalUtil()
    eval3d = EvalUtil()
    import tqdm
    import cv2

    for i in tqdm.tqdm(range(len(data))):
        sample = data[i]
        image_v = sample['image']

        hand_scoremap_v, image_crop_v, scale_v, center_v, \
        keypoints_scoremap_v, keypoint_coord3d_v = sess.run([hand_scoremap_tf, image_crop_tf, scale_tf, center_tf,
                                                             keypoints_scoremap_tf, keypoint_coord3d_tf],
                                                            feed_dict={image_tf: image_v})

        hand_scoremap_v = np.squeeze(hand_scoremap_v)
        image_crop_v = np.squeeze(image_crop_v)
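
The excerpt cuts off mid-loop. A plausible continuation, reusing the helper sketches from Example #1 (the sample dict keys below are assumptions, not the repository's actual field names):

        keypoint_coord3d_v = np.squeeze(keypoint_coord3d_v)

        # 2D keypoints: argmax on the crop scoremaps, mapped back to the image
        coord_hw_crop = detect_keypoints(keypoints_scoremap_v)
        coord_hw = trafo_coords(coord_hw_crop, center_v, scale_v, 256)
        coord_uv = np.stack([coord_hw[:, 1], coord_hw[:, 0]], 1)

        eval2d.feed(sample['uv21'], sample['vis21'], coord_uv)            # keys assumed
        eval3d.feed(sample['xyz21'], sample['vis21'], keypoint_coord3d_v)  # keys assumed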