Code example #1
def evaluate_batches( sess, model,
  data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,
  data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,
  current_step, encoder_inputs, decoder_outputs, current_epoch=0 ):

  n_joints = 17 if not(FLAGS.predict_14) else 14
  nbatches = len( encoder_inputs )

  all_dists, start_time, loss = [], time.time(), 0.
  log_every_n_batches = 100
  for i in range(nbatches):

    if current_epoch > 0 and (i+1) % log_every_n_batches == 0:
      print("Working on test epoch {0}, batch {1} / {2}".format( current_epoch, i+1, nbatches) )

    enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]
    dp = 1.0  # dropout keep probability: 1.0 means no dropout at test time
    step_loss, loss_summary, poses3d = model.step( sess, enc_in, dec_out, dp, isTraining=False )
    loss += step_loss

    enc_in  = data_utils.unNormalizeData( enc_in,  data_mean_2d, data_std_2d, dim_to_ignore_2d )
    dec_out = data_utils.unNormalizeData( dec_out, data_mean_3d, data_std_3d, dim_to_ignore_3d )
    poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )

    dtu3d = np.hstack( (np.arange(3), dim_to_use_3d) ) if not(FLAGS.predict_14) else  dim_to_use_3d

    dec_out = dec_out[:, dtu3d]
    poses3d = poses3d[:, dtu3d]

    assert dec_out.shape[0] == FLAGS.batch_size
    assert poses3d.shape[0] == FLAGS.batch_size

    if FLAGS.procrustes:
      for j in range(FLAGS.batch_size):
        gt  = np.reshape(dec_out[j,:],[-1,3])
        out = np.reshape(poses3d[j,:],[-1,3])
        _, Z, T, b, c = compute_similarity_transform(gt,out,compute_optimal_scale=True)
        out = (b*out.dot(T))+c

        poses3d[j,:] = np.reshape(out,[-1,17*3] ) if not(FLAGS.predict_14) else np.reshape(out,[-1,14*3] )

    # Compute Euclidean distance error per joint
    sqerr = (poses3d - dec_out)**2 # squared error per dimension
    dists = np.zeros( (sqerr.shape[0], n_joints) ) # array with per-joint errors
    dist_idx = 0
    for k in np.arange(0, n_joints*3, 3):
      # take the Euclidean norm over each joint's (x, y, z) dimensions
      dists[:,dist_idx] = np.sqrt( np.sum( sqerr[:, k:k+3], axis=1 ))
      dist_idx = dist_idx + 1

    all_dists.append(dists)
    assert sqerr.shape[0] == FLAGS.batch_size

  step_time = (time.time() - start_time) / nbatches
  loss      = loss / nbatches

  all_dists = np.vstack( all_dists )

  joint_err = np.mean( all_dists, axis=0 )
  total_err = np.mean( all_dists )

  return total_err, joint_err, step_time, loss
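
A note on the Procrustes branch above: compute_similarity_transform is not shown on this page. The following is a minimal sketch of the standard SVD-based similarity alignment, written to match the call site (_, Z, T, b, c = compute_similarity_transform(gt, out, compute_optimal_scale=True) followed by out = (b*out.dot(T))+c); the repository's own version may differ in details such as reflection handling.

import numpy as np

# Sketch consistent with the call site above; not necessarily the repo's exact code.
def compute_similarity_transform(X, Y, compute_optimal_scale=False):
    """Find rotation T, scale b and translation c so that b*Y.dot(T) + c best matches X."""
    muX, muY = X.mean(0), Y.mean(0)
    X0, Y0 = X - muX, Y - muY                      # center both point sets
    normX = np.sqrt((X0 ** 2).sum())
    normY = np.sqrt((Y0 ** 2).sum())
    X0, Y0 = X0 / normX, Y0 / normY                # scale to unit Frobenius norm
    U, s, Vt = np.linalg.svd(X0.T.dot(Y0), full_matrices=False)
    T = Vt.T.dot(U.T)                              # optimal rotation
    traceTA = s.sum()
    if compute_optimal_scale:
        b = traceTA * normX / normY                # optimal scale
        d = 1 - traceTA ** 2                       # standardized residual
        Z = normX * traceTA * Y0.dot(T) + muX      # Y aligned to X
    else:
        b = 1
        d = 1 + (normY / normX) ** 2 - 2 * traceTA * normY / normX
        Z = normY * Y0.dot(T) + muX
    c = muX - b * muY.dot(T)                       # translation
    return d, Z, T, b, c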
Code example #2
def get_srnn_gts(actions,
                 model,
                 test_set,
                 data_mean,
                 data_std,
                 dim_to_ignore,
                 to_euler=True):
    srnn_gts_euler = {}
    # print ("entering here")
    for action in actions:

        srnn_gt_euler = []
        _, _, srnn_expmap = model.get_batch_srnn(test_set,
                                                 action,
                                                 noise_rate=FLAGS.n_r)

        # expmap -> rotmat -> euler
        for i in np.arange(srnn_expmap.shape[0]):
            denormed = data_utils.unNormalizeData(srnn_expmap[i, :, :],
                                                  data_mean, data_std,
                                                  dim_to_ignore, actions)

            if to_euler:
                for j in np.arange(denormed.shape[0]):
                    # print (denormed.shape)
                    for k in np.arange(3, 97, 3):
                        denormed[j, k:k + 3] = data_utils.rotmat2euler(
                            data_utils.expmap2rotmat(denormed[j, k:k + 3]))

            srnn_gt_euler.append(denormed)

        # Put back in the dictionary; every action will have 8 sequences in Euler space
        srnn_gts_euler[action] = srnn_gt_euler
    # print (np.array(srnn_gts_euler[action]).shape)
    return srnn_gts_euler
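
Every example on this page round-trips data through data_utils.unNormalizeData, whose body is not shown. The sketch below captures what the call sites imply it does: undo the z-score normalization and scatter the used dimensions back into the full d-dimensional layout. The motion-prediction snippets pass extra actions/one_hot arguments for action labels appended to the pose vector; this sketch omits that variant.

import numpy as np

# Minimal sketch inferred from the call sites; the real data_utils version may differ.
def unNormalizeData(normalized_data, data_mean, data_std, dim_to_ignore):
    """Map (n_frames, n_used_dims) z-scored data back to (n_frames, d) original units."""
    T, D = normalized_data.shape[0], data_mean.shape[0]
    orig = np.zeros((T, D), dtype=np.float32)
    dim_to_use = np.array([d for d in range(D) if d not in dim_to_ignore])
    orig[:, dim_to_use] = normalized_data          # ignored dims stay zero...
    return orig * data_std.reshape(1, D) + data_mean.reshape(1, D)  # ...until the mean is added back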
Code example #3
def denormalize_and_convert_to_euler(data, data_mean, data_std, dim_to_ignore,
                                     actions, one_hot):
    """
  Denormalizes data and converts to Euler angles
  (all losses are computed on Euler angles).

  Args
    data: dictionary with human poses.
    data_mean: d-long vector with the mean of the training data.
    data_std: d-long vector with the standard deviation of the training data.
    dim_to_ignore: dimensions to ignore because the std is too small or for other reasons.
    actions: list of strings with the actions in the data dictionary.
    one_hot: whether the data comes with one-hot encoding.

  Returns
    all_denormed: a list with nbatch entries. Each entry is an n-by-d matrix
                  that corresponds to a denormalized sequence in Euler angles
  """

    all_denormed = []

    # expmap -> rotmat -> euler
    for i in np.arange(data.shape[0]):
        denormed = data_utils.unNormalizeData(data[i, :, :], data_mean,
                                              data_std, dim_to_ignore, actions,
                                              one_hot)

        for j in np.arange(denormed.shape[0]):
            for k in np.arange(3, 97, 3):
                denormed[j, k:k + 3] = data_utils.rotmat2euler(
                    data_utils.expmap2rotmat(denormed[j, k:k + 3]))

        all_denormed.append(denormed)

    return all_denormed
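
The expmap -> rotmat -> euler loops in examples #2 and #3 (and most snippets below) lean on data_utils.expmap2rotmat. A sketch of that step, which is just Rodrigues' formula for a 3-vector exponential map:

import numpy as np

# Standard Rodrigues formula; assumed to match data_utils.expmap2rotmat in spirit.
def expmap2rotmat(r):
    """Axis-angle 3-vector -> 3x3 rotation matrix."""
    theta = np.linalg.norm(r)                      # rotation angle
    r0 = r / (theta + np.finfo(np.float32).eps)    # unit rotation axis
    r0x = np.array([[0, -r0[2], r0[1]],            # skew-symmetric cross-product matrix
                    [r0[2], 0, -r0[0]],
                    [-r0[1], r0[0], 0]])
    return np.eye(3) + np.sin(theta) * r0x + (1 - np.cos(theta)) * r0x.dot(r0x)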
Code example #4
def draw_seqpose(pose,
                 pose_dir,
                 name,
                 data_mean_2d,
                 data_std_2d,
                 dim_to_ignore_2d,
                 verbose=0,
                 unnorm=False):
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.gridspec as gridspec
    import matplotlib.pyplot as plt

    if unnorm:
        pose = data_utils.fillData(pose, data_mean_2d, data_std_2d,
                                   dim_to_ignore_2d)
    else:
        pose = data_utils.unNormalizeData(pose, data_mean_2d, data_std_2d,
                                          dim_to_ignore_2d)

    #plt.figure()
    gs1 = gridspec.GridSpec(1, 1)
    plt.axis('off')

    # draw for sequence[t]
    plt.clf()
    ax2 = plt.subplot(gs1[0])
    viz.show2Dpose_seq(pose, ax2)
    ax2.invert_yaxis()
    if not os.path.exists(pose_dir):
        os.system('mkdir -p "{}"'.format(pose_dir))
    plt.savefig(os.path.join(pose_dir, name),
                bbox_inches='tight',
                pad_inches=0,
                dpi=200)  #transparent=True,
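
draw_seqpose above (and video in example #6) switches between data_utils.fillData and data_utils.unNormalizeData depending on the unnorm flag. fillData is not shown anywhere on this page, so its semantics are an assumption: judging by how it is used, it plausibly only pads already-denormalized poses back to the full dimensionality without touching the mean/std. A hypothetical sketch:

import numpy as np

# Hypothetical implementation; the real fillData may behave differently.
def fillData(data, data_mean, data_std, dim_to_ignore):
    """Assumed: scatter (n_frames, n_used_dims) data into the full (n_frames, d) layout,
    leaving ignored dimensions at zero and applying no un-normalization."""
    T, D = data.shape[0], data_mean.shape[0]
    full = np.zeros((T, D), dtype=np.float32)
    dim_to_use = np.array([d for d in range(D) if d not in dim_to_ignore])
    full[:, dim_to_use] = data
    return full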
Code example #5
def get_srnn_gts(actions,
                 model,
                 test_set,
                 data_mean,
                 data_std,
                 dim_to_ignore,
                 one_hot,
                 from_exp=True,
                 to_euler=True):
    """
  Get the ground truths for srnn's sequences, and convert to Euler angles.
  (the error is always computed in Euler angles).

  Args
    actions: a list of actions to get ground truths for.
    model: training model we are using (we only use the "get_batch" method).
    test_set: dictionary with normalized training data.
    data_mean: d-long vector with the mean of the training data.
    data_std: d-long vector with the standard deviation of the training data.
    dim_to_ignore: dimensions that we are not using to train/predict.
    one_hot: whether the data comes with one-hot encoding indicating action.
    from_exp: whether the input angles are in exponential map format (otherwise Euler).
    to_euler: whether to convert the angles to Euler format or keep them in exponential map

  Returns
    srnn_gts_euler: a dictionary where the keys are actions, and the values
      are the ground-truth, denormalized expected outputs of srnn's seeds.
  """
    srnn_gts = {}

    for action in actions:

        srnn_gt = []
        _, _, srnn = model.get_batch_srnn(test_set, action, False)

        for i in np.arange(srnn.shape[0]):
            denormed = data_utils.unNormalizeData(srnn[i, :, :], data_mean,
                                                  data_std, dim_to_ignore,
                                                  actions, one_hot)

            if from_exp and to_euler:
                for j in np.arange(denormed.shape[0]):
                    for k in np.arange(3, 97, 3):
                        denormed[j, k:k + 3] = data_utils.rotmat2euler(
                            data_utils.expmap2rotmat(denormed[j, k:k + 3]))

            if not from_exp and not to_euler:  # from euler to exp
                for j in np.arange(denormed.shape[0]):
                    for k in np.arange(3, 97, 3):
                        denormed[j, k:k + 3] = data_utils.rotmat2expmap(
                            data_utils.euler2rotmat(denormed[j, k:k + 3]))

            srnn_gt.append(denormed)

        # Put back in the dictionary
        srnn_gts[action] = srnn_gt

    return srnn_gts
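
The other half of the conversion chain is data_utils.rotmat2euler. A sketch following the widely used Python port of the MATLAB RotMat2Euler routine, including its gimbal-lock special case:

import numpy as np

# Sketch of the common RotMat2Euler port; assumed equivalent to the repo's version.
def rotmat2euler(R):
    """3x3 rotation matrix -> Euler angles (radians)."""
    if R[0, 2] == 1 or R[0, 2] == -1:
        E3 = 0                                     # set arbitrarily in the degenerate case
        dlta = np.arctan2(R[0, 1], R[0, 2])
        if R[0, 2] == -1:
            E2 = np.pi / 2
            E1 = E3 + dlta
        else:
            E2 = -np.pi / 2
            E1 = -E3 + dlta
    else:
        E2 = -np.arcsin(R[0, 2])
        E1 = np.arctan2(R[1, 2] / np.cos(E2), R[2, 2] / np.cos(E2))
        E3 = np.arctan2(R[0, 1] / np.cos(E2), R[0, 0] / np.cos(E2))
    return np.array([E1, E2, E3])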
Code example #6
def video(sequence,
          tmp_dir,
          video_dir,
          name,
          data_mean_2d,
          data_std_2d,
          dim_to_ignore_2d,
          verbose=0,
          unnorm=True):
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.gridspec as gridspec
    import matplotlib.pyplot as plt
    if unnorm:
        sequence = data_utils.unNormalizeData(sequence, data_mean_2d,
                                              data_std_2d, dim_to_ignore_2d)
    else:
        sequence = data_utils.fillData(sequence, data_mean_2d, data_std_2d,
                                       dim_to_ignore_2d)

    gs1 = gridspec.GridSpec(1, 1)
    plt.axis('off')
    for t in range(sequence.shape[0]):
        # draw for sequence[t]
        plt.clf()

        ax2 = plt.subplot(gs1[0])
        p2d = sequence[t, :]
        viz.show2Dpose(p2d, ax2)
        ax2.invert_yaxis()
        if not os.path.exists(tmp_dir):
            os.system('mkdir -p "{}"'.format(tmp_dir))
        plt.savefig(os.path.join(tmp_dir, '%04d.jpg' % (t)))

    if not os.path.exists(video_dir):
        os.system('mkdir -p "{}"'.format(video_dir))
    np.save(os.path.join(video_dir, name), sequence)

    if verbose:
        os.system('ffmpeg -framerate 16 -y -i "' +
                  os.path.join(tmp_dir, "%04d.jpg") + '" "' +
                  os.path.join(video_dir, name + '.mp4"'))
    else:
        subprocess.call([
            'ffmpeg', '-framerate', '16', '-y', '-i',
            os.path.join(tmp_dir, "%04d.jpg"),
            os.path.join(video_dir, name + '.mp4')
        ],
                        stdout=open(os.devnull, "w"),
                        stderr=subprocess.STDOUT)

    os.system('rm ' + os.path.join(tmp_dir, '*'))
Code example #7
    def get_srnn_gts(self,
                     actions,
                     model,
                     test_set,
                     data_mean,
                     data_std,
                     dim_to_ignore,
                     one_hot,
                     to_euler=True):
        # """
        # Get the ground truths for srnn's sequences, and convert to Euler angles.
        # (the error is always computed in Euler angles).

        # Args
        #   actions: a list of actions to get ground truths for.
        #   model: training model we are using (we only use the "get_batch" method).
        #   test_set: dictionary with normalized training data.
        #   data_mean: d-long vector with the mean of the training data.
        #   data_std: d-long vector with the standard deviation of the training data.
        #   dim_to_ignore: dimensions that we are not using to train/predict.
        #   one_hot: whether the data comes with one-hot encoding indicating action.
        #   to_euler: whether to convert the angles to Euler format or keep thm in exponential map

        # Returns
        #   srnn_gts_euler: a dictionary where the keys are actions, and the values
        #     are the ground_truth, denormalized expected outputs of srnns's seeds.
        # """
        srnn_gts_euler = {}

        for action in actions:

            srnn_gt_euler = []
            _, _, srnn_expmap = model.get_batch_srnn(test_set, action)

            # expmap -> rotmat -> euler
            for i in np.arange(srnn_expmap.shape[0]):
                denormed = data_utils.unNormalizeData(srnn_expmap[i, :, :],
                                                      data_mean, data_std,
                                                      dim_to_ignore, actions,
                                                      one_hot)

                # if to_euler:
                #   for j in np.arange( denormed.shape[0] ):
                #     for k in np.arange(3,97,3):
                #       denormed[j,k:k+3] = data_utils.rotmat2euler( data_utils.expmap2rotmat( denormed[j,k:k+3] ))

                srnn_gt_euler.append(denormed)

            # Put back in the dictionary
            srnn_gts_euler[action] = srnn_gt_euler

        return srnn_gts_euler
Code example #8
def get_srnn_gts_sample(actions, model, test_set, data_mean, data_std, dim_to_ignore, one_hot, to_euler=True):
  """
  Get the ground truths for srnn's sequences, and convert to Euler angles. (sampling)
  (the error is always computed in Euler angles).

  Args
    actions: a list of actions to get ground truths for.
    model: training model we are using (we only use the "get_batch" method).
    test_set: dictionary with normalized training data.
    data_mean: d-long vector with the mean of the training data.
    data_std: d-long vector with the standard deviation of the training data.
    dim_to_ignore: dimensions that we are not using to train/predict.
    one_hot: whether the data comes with one-hot encoding indicating action.
    to_euler: whether to convert the angles to Euler format or keep them in exponential map

  Returns
    srnn_gts_euler: a dictionary where the keys are actions, and the values
      are the ground-truth, denormalized expected outputs of srnn's seeds.
  """
  srnn_gts_euler = {}

  for action in actions:

    srnn_gt_euler = []
    encoder_input, decoder_input, decoder_output = model.get_batch_srnn(test_set, action)

    if FLAGS.omit_one_hot:
      srnn_expmap  = np.zeros((8, 100+8, 54), dtype=float)
    else:
      srnn_expmap  = np.zeros((8, 100+8, 54+len(actions)), dtype=float)

    for i in np.arange(decoder_output.shape[0]):
      sequence = np.concatenate((encoder_input[i][-7:],decoder_input[i][0:1], decoder_output[i][:]),axis=0)
      srnn_expmap[i, :, :] = sequence[:,:]
    # print(srnn_expmap.shape)

    # expmap -> rotmat -> euler
    for i in np.arange(srnn_expmap.shape[0]):
      denormed = data_utils.unNormalizeData(srnn_expmap[i,:,:], data_mean, data_std, dim_to_ignore, actions, one_hot)

      if to_euler:
        for j in np.arange(denormed.shape[0]):
          for k in np.arange(3,97,3):
            denormed[j,k:k+3] = data_utils.rotmat2euler( data_utils.expmap2rotmat(denormed[j,k:k+3]))

      srnn_gt_euler.append(denormed)

    # Put back in the dictionary
    srnn_gts_euler[action] = srnn_gt_euler

  return srnn_gts_euler
Code example #9
File: translate.py  Project: ytixu/DNNdumps
def get_srnn_gts(actions,
                 test_set,
                 data_mean,
                 data_std,
                 dim_to_ignore,
                 one_hot,
                 subject,
                 subsequence,
                 to_euler=True):
    """
  Get the ground truths for srnn's sequences, and convert to Euler angles.
  (the error is always computed in Euler angles).

  Args
    actions: a list of actions to get ground truths for.
    test_set: dictionary with normalized training data.
    data_mean: d-long vector with the mean of the training data.
    data_std: d-long vector with the standard deviation of the training data.
    dim_to_ignore: dimensions that we are not using to train/predict.
    one_hot: whether the data comes with one-hot encoding indicating action.
    to_euler: whether to convert the angles to Euler format or keep them in exponential map

  Returns
    srnn_gts_euler: a dictionary where the keys are actions, and the values
      are the ground-truth, denormalized expected outputs of srnn's seeds.
  """
    srnn_gts_euler = {}
    for action in actions:

        srnn_gt_euler = []
        # _, _, srnn_expmap = get_batch_srnn( test_set, action )
        srnn_expmap = get_batch_srnn(test_set, action, subject, subsequence)

        # expmap -> rotmat -> euler
        for i in np.arange(srnn_expmap.shape[0]):
            denormed = data_utils.unNormalizeData(srnn_expmap[i, :, :],
                                                  data_mean, data_std,
                                                  dim_to_ignore, actions,
                                                  one_hot)

            if to_euler:
                for j in np.arange(denormed.shape[0]):
                    for k in np.arange(3, 97, 3):
                        denormed[j, k:k + 3] = data_utils.rotmat2euler(
                            data_utils.expmap2rotmat(denormed[j, k:k + 3]))

            srnn_gt_euler.append(denormed)
        # Put back in the dictionary
        srnn_gts_euler[action] = np.array(srnn_gt_euler)

    return srnn_gts_euler
Code example #10
def smooth(n2ds, poses3d, data_mean_3d, data_std_3d, dim_to_use_3d,
           dim_to_ignore_3d):
    # print(poses3d.shape) # (498, 64, 48)
    n_joints = 17 if not (FLAGS.predict_14) else 14
    n_batches = len(poses3d)
    total_poses3d = np.asarray(poses3d[0])
    for i in range(1, n_batches):
        total_poses3d = np.concatenate((total_poses3d, np.asarray(poses3d[i])),
                                       axis=0)
    dp = 1.0
    # print(total_poses3d.shape)
    total_poses3d = data_utils.unNormalizeData(total_poses3d, data_mean_3d,
                                               data_std_3d, dim_to_ignore_3d)
    # print(total_poses3d.shape)
    # Keep only the relevant dimensions
    dtu3d = np.hstack(
        (np.arange(3),
         dim_to_use_3d)) if not (FLAGS.predict_14) else dim_to_use_3d
    total_poses3d = total_poses3d[:, dtu3d]

    # Smoothing
    rate = 0.7
    n = total_poses3d.shape[0]
    n_lines, _ = total_poses3d.shape
    indice = np.cumsum([0] + n2ds)  # indices of the first frame of each clip
    smoothed_poses3d = []
    for j in range(len(indice) - 1):
        if indice[j] != n - 1:
            smoothed_poses3d.append(total_poses3d[indice[j]] * rate +
                                    (1 - rate) * total_poses3d[indice[j] + 1])
        else:
            smoothed_poses3d.append(total_poses3d[indice[j]])
        for i in range(indice[j] + 1, indice[j + 1] - 1):
            if i == n - 1:
                break
            smoothed_poses3d.append(total_poses3d[i - 1] * ((1 - rate) / 2.) +
                                    total_poses3d[i] * rate +
                                    total_poses3d[i + 1] * ((1 - rate) / 2.))
        smoothed_poses3d.append(total_poses3d[i - 1] * (1 - rate) +
                                total_poses3d[i] * rate)
    smoothed_poses3d = np.asarray(smoothed_poses3d)  # (31872, 51)
    smoothed_poses3d = np.split(smoothed_poses3d, n_batches)
    return smoothed_poses3d
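
For interior frames, smooth blends each frame with its neighbors using the 3-tap kernel ((1-rate)/2, rate, (1-rate)/2); the first and last frames of each clip get special-case weights. A vectorized sketch of the interior rule for a single (n_frames, d) clip, using edge padding in place of the original's boundary handling:

import numpy as np

# Hypothetical vectorized helper; boundary handling differs slightly from the loops above.
def smooth_clip(seq, rate=0.7):
    """out[t] = a*seq[t-1] + rate*seq[t] + a*seq[t+1], with a = (1-rate)/2."""
    a = (1.0 - rate) / 2.0
    padded = np.pad(seq, ((1, 1), (0, 0)), mode='edge')  # repeat the boundary frames
    return a * padded[:-2] + rate * padded[1:-1] + a * padded[2:]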
Code example #11
def predict_batch(data, center, scale, batch_size=128):
  """
  Input:
    data: matrix with shape (#frames, 32)
    center: length-2 array with center coordinate
    scale: stacked hourglass scale parameter
  """

  fig = plt.figure()
  ax_2d = fig.add_subplot(121)
  ax_3d = fig.add_subplot(122, projection='3d')

  data = np.array(data)

  # Wrap in another matrix if there's only a single clip
  if len(data.shape) == 1:
    data = np.array([data])

  if data.shape[1] != 32:
    raise ValueError("Expected data shape to be (?, 32), got " + str(data.shape))

  data, destination_indices = data_utils.process_stacked_hourglass(data)
  normalized_data, data_mean_3d, data_std_3d, dim_to_ignore_3d = normalize_batch(data)
  viz.show2Dpose(np.reshape(data[30], (64, 1)), ax_2d)
  ax_2d.invert_yaxis()

  with tf.Session() as sess:
    model = load_model(sess, batch_size)
    dp = 1.0
    dec_out = np.zeros((normalized_data.shape[0], 48))
    _, _, points = model.step(sess, normalized_data, dec_out, dp, isTraining=False)

    points = data_utils.unNormalizeData(points, data_mean_3d, data_std_3d, dim_to_ignore_3d)
    viz.show3Dpose(points[30,:], ax_3d)
    plt.show()
    points = np.reshape(points, (-1, 32, 3))

    return points
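
A hypothetical call to predict_batch, assuming 16 stacked-hourglass joints flattened into 32 (x, y) values per frame; the detections below are random placeholders, and 128 frames are used to match the default batch_size:

import numpy as np

detections = np.random.uniform(0, 255, size=(128, 32))  # fake 2D detections, one row per frame
center = np.array([128.0, 128.0])                       # person center in the image
points = predict_batch(detections, center, scale=1.0)
print(points.shape)                                     # expected: (128, 32, 3)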
Code example #12
def sample():
  """Get samples from a model and visualize them"""

  actions = data_utils.define_actions( FLAGS.action )

  # Load camera parameters
  SUBJECT_IDS = [1,5,6,7,8,9,11]
  rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

  # Load 3d data and load (or create) 2d projections
  train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
    actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14 )

  if FLAGS.use_sh:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
  else:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data( actions, FLAGS.data_dir, rcams )
  print( "done reading and normalizing data." )

  device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
  with tf.Session(config=tf.ConfigProto( device_count = device_count )) as sess:
    # === Create the model ===
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.linear_size))
    batch_size = 128
    model = create_model(sess, actions, batch_size)
    print("Model loaded")

    for key2d in test_set_2d.keys():

      (subj, b, fname) = key2d
      print( "Subject: {}, action: {}, fname: {}".format(subj, b, fname) )

      # keys should be the same if 3d is in camera coordinates
      key3d = key2d if FLAGS.camera_frame else (subj, b, '{0}.h5'.format(fname.split('.')[0]))
      key3d = (subj, b, fname[:-3]) if (fname.endswith('-sh')) and FLAGS.camera_frame else key3d

      enc_in  = test_set_2d[ key2d ]
      n2d, _ = enc_in.shape
      dec_out = test_set_3d[ key3d ]
      n3d, _ = dec_out.shape
      assert n2d == n3d

      # Split into about-same-size batches
      enc_in   = np.array_split( enc_in,  n2d // batch_size )
      dec_out  = np.array_split( dec_out, n3d // batch_size )
      all_poses_3d = []

      for bidx in range( len(enc_in) ):

        # Dropout probability 0 (keep probability 1) for sampling
        dp = 1.0
        _, _, poses3d = model.step(sess, enc_in[bidx], dec_out[bidx], dp, isTraining=False)

        # denormalize
        enc_in[bidx]  = data_utils.unNormalizeData(  enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d )
        dec_out[bidx] = data_utils.unNormalizeData( dec_out[bidx], data_mean_3d, data_std_3d, dim_to_ignore_3d )
        poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )
        all_poses_3d.append( poses3d )

      # Put all the poses together
      enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, all_poses_3d] )

      # Convert back to world coordinates
      if FLAGS.camera_frame:
        N_CAMERAS = 4
        N_JOINTS_H36M = 32

        # Add global position back
        dec_out = dec_out + np.tile( test_root_positions[ key3d ], [1,N_JOINTS_H36M] )

        # Load the appropriate camera
        subj, _, sname = key3d

        cname = sname.split('.')[1] # <-- camera name
        scams = {(subj,c+1): rcams[(subj,c+1)] for c in range(N_CAMERAS)} # cams of this subject
        scam_idx = [scams[(subj,c+1)][-1] for c in range(N_CAMERAS)].index( cname ) # index of camera used
        the_cam  = scams[(subj, scam_idx+1)] # <-- the camera used
        R, T, f, c, k, p, name = the_cam
        assert name == cname

        def cam2world_centered(data_3d_camframe):
          data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
          data_3d_worldframe = data_3d_worldframe.reshape((-1, N_JOINTS_H36M*3))
          # subtract root translation
          return data_3d_worldframe - np.tile( data_3d_worldframe[:,:3], (1,N_JOINTS_H36M) )

        # Apply inverse rotation and translation
        dec_out = cam2world_centered(dec_out)
        poses3d = cam2world_centered(poses3d)

  # Grab a random batch to visualize
  enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, poses3d] )
  idx = np.random.permutation( enc_in.shape[0] )
  enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

  # Visualize random samples
  import matplotlib.gridspec as gridspec

  # 1080p	= 1,920 x 1,080
  fig = plt.figure( figsize=(19.2, 10.8) )

  gs1 = gridspec.GridSpec(5, 9) # 5 rows, 9 columns
  gs1.update(wspace=-0.00, hspace=0.05) # set the spacing between axes.
  plt.axis('off')

  subplot_idx, exidx = 1, 1
  nsamples = 15
  for i in np.arange( nsamples ):

    # Plot 2d pose
    ax1 = plt.subplot(gs1[subplot_idx-1])
    p2d = enc_in[exidx,:]
    viz.show2Dpose( p2d, ax1 )
    ax1.invert_yaxis()

    # Plot 3d gt
    ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
    p3d = dec_out[exidx,:]
    viz.show3Dpose( p3d, ax2 )

    # Plot 3d predictions
    ax3 = plt.subplot(gs1[subplot_idx+1], projection='3d')
    p3d = poses3d[exidx,:]
    viz.show3Dpose( p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71" )

    exidx = exidx + 1
    subplot_idx = subplot_idx + 3

  plt.show()
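
sample() maps predictions back to world coordinates with cameras.camera_to_world_frame. A sketch of the assumed inverse extrinsics, given the forward convention X_cam = R.dot(X_world.T - T) with T a 3x1 column vector (verify against the repo's cameras module before relying on this):

import numpy as np

# Assumed convention; check the repo's cameras.py.
def camera_to_world_frame(P, R, T):
    """P: (N, 3) points in camera coordinates; R: 3x3 rotation; T: 3x1 translation."""
    return (R.T.dot(P.T) + T).T                    # undo the rotation, then the translation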
Code example #13
def main(_):
    
    smoothed = read_openpose_json()
    plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    #return
    pngName = 'gif_output/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)
    logger.info('writing gif_output/smooth_plot.png')
    
    if FLAGS.interpolation:
        logger.info("start interpolation")

        framerange = len( smoothed.keys() )
        joint_rows = 36
        array = np.concatenate(list(smoothed.values()))
        array_reshaped = np.reshape(array, (framerange, joint_rows) )
    
        multiplier = FLAGS.multiplier
        multiplier_inv = 1/multiplier

        out_array = np.array([])
        for row in range(joint_rows):
            x = []
            for frame in range(framerange):
                x.append( array_reshaped[frame, row] )
            
            frame = range( framerange )
            frame_resampled = np.arange(0, framerange, multiplier)
            spl = UnivariateSpline(frame, x, k=3)
            #relative smooth factor based on jnt anim curve
            min_x, max_x = min(x), max(x)
            smooth_fac = max_x - min_x
            smooth_resamp = 125
            smooth_fac = smooth_fac * smooth_resamp
            spl.set_smoothing_factor( float(smooth_fac) )
            xnew = spl(frame_resampled)
            
            out_array = np.append(out_array, xnew)
    
        logger.info("done interpolating. reshaping {0} frames,  please wait!!".format(framerange))
    
        a = np.array([])
        for frame in range( int( framerange * multiplier_inv ) ):
            jnt_array = []
            for jnt in range(joint_rows):
                jnt_array.append( out_array[ jnt * int(framerange * multiplier_inv) + frame] )
            a = np.append(a, jnt_array)
        
        a = np.reshape(a, (int(framerange * multiplier_inv), joint_rows))
        out_array = a
    
        interpolate_smoothed = {}
        for frame in range( int(framerange * multiplier_inv) ):
            interpolate_smoothed[frame] = list( out_array[frame] )
        
        plt.figure(3)
        smoothed = interpolate_smoothed
        interpolate_curves_plot = show_anim_curves(smoothed, plt)
        pngName = 'gif_output/interpolate_{0}.png'.format(smooth_resamp)
        interpolate_curves_plot.savefig(pngName)
        logger.info('writing gif_output/interpolate_plot.png')

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 1}
    png_lib = []
    before_pose = None
    with tf.Session(config=tf.ConfigProto(
            device_count=device_count,
            allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        iter_range = len(smoothed.keys())
        export_units = {}
        twod_export_units = {}
        for n, (frame, xy) in enumerate(smoothed.items()):
            logger.info("calc frame {0}/{1}".format(frame, iter_range))
            # map list into np array  
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]

            twod_export_units[frame]={}
            for abs_b, __n in enumerate(range(0, len(xy),2)):
                twod_export_units[frame][abs_b] = {"translate": [xy[__n],xy[__n+1]]}

            _data = joints_array[0]
            # mapping all body parts or 3d-pose-baseline format
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] + enc_in[0][6 * 2 + j]) / 2
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] + enc_in[0][12 * 2 + j]) / 2
                # Thorax
                enc_in[0][13 * 2 + j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 + j]

            # set spine
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]
            _, _, poses3d = model.step(sess, enc_in, dec_out, dp, isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d, data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append( poses3d )
            enc_in, poses3d = map( np.vstack, [enc_in, all_poses_3d] )
            subplot_idx, exidx = 1, 1
            _max = 0
            _min = 10000

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    tmp = poses3d[i][j * 3 + 2]
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                    poses3d[i][j * 3] += (spine_x - 630)
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, -70)    

            if FLAGS.cache_on_fail:
                if np.min(poses3d) < -1000:
                    poses3d = before_pose

            p3d = poses3d
            to_export = poses3d.tolist()[0]
            x,y,z = [[] for _ in range(3)]
            for o in range(0, len(to_export), 3):
                x.append(to_export[o])
                y.append(to_export[o+1])
                z.append(to_export[o+2])
            export_units[frame]={}
            for jnt_index, (_x, _y, _z) in enumerate(zip(x,y,z)):
                export_units[frame][jnt_index] = {"translate": [_x, _y, _z]}


            viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")

            pngName = 'png/pose_frame_{0}.png'.format(str(frame).zfill(12))
            plt.savefig(pngName)
            if FLAGS.write_gif:
                png_lib.append(imageio.imread(pngName))

            if FLAGS.cache_on_fail:
                before_pose = poses3d

    if FLAGS.write_gif:
        if FLAGS.interpolation:
            #take every frame on gif_fps * multiplier_inv
            png_lib = np.array([png_lib[png_image] for png_image in range(0,len(png_lib), int(multiplier_inv)) ])
        logger.info("creating Gif gif_output/animation.gif, please Wait!")
        imageio.mimsave('gif_output/animation.gif', png_lib, fps=FLAGS.gif_fps)

    _out_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'maya/3d_data.json')
    twod_out_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'maya/2d_data.json')
    with open(_out_file, 'w') as outfile:
        logger.info("exported maya json to {0}".format(_out_file))
        json.dump(export_units, outfile)
    with open(twod_out_file, 'w') as outfile:
        logger.info("exported maya json to {0}".format(twod_out_file))
        json.dump(twod_export_units, outfile)

    logger.info("Done!".format(pngName))
Code example #14
def hankgogo(gogodata, gogodatafake):
    """Get samples from a model and visualize them"""

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    #if FLAGS.use_sh:
    #  train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
    #else:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data(
        actions, FLAGS.data_dir, rcams)
    print("done reading and normalizing data.")

    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.linear_size))
        batch_size = 1
        model = create_model_my(sess, actions, batch_size)
        print("Model loaded")

        # Dropout probability 0 (keep probability 1) for sampling
        dp = 1.0
        poses3d = model.step(sess, gogodata, isTraining=False)
        tesmp = poses3d
        poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                             data_std_3d, dim_to_ignore_3d)
        model.saver.save(sess, os.path.join(mysave_dir, "gogo"))

    # Grab a random batch to visualize

# enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, poses3d] )
# idx = np.random.permutation( enc_in.shape[0] )
# enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

# Visualize random samples
    import matplotlib.gridspec as gridspec

    # 1080p	= 1,920 x 1,080
    fig = plt.figure(figsize=(19.2, 10.8))

    gs1 = gridspec.GridSpec(5, 9)  # 5 rows, 9 columns
    gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
    plt.axis('off')

    subplot_idx, exidx = 1, 1
    nsamples = 1
    # Plot 2d pose
    #ax1 = plt.subplot(gs1[subplot_idx-1])
    #p2d = enc_in[exidx,:]
    #viz.show2Dpose( p2d, ax1 )
    #ax1.invert_yaxis()

    # Plot 3d gt
    #ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
    #p3d = dec_out[exidx,:]
    #viz.show3Dpose( p3d, ax2 )

    # Plot 3d predictions
    ax3 = plt.subplot(gs1[subplot_idx + 1], projection='3d')
    p3d = poses3d
    viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")

    exidx = exidx + 1
    subplot_idx = subplot_idx + 3

    plt.show()
Code example #15
def main(_):
    # date string for output filenames
    now_str = "{0:%Y%m%d_%H%M%S}".format(datetime.datetime.now())

    logger.debug("FLAGS.person_idx={0}".format(FLAGS.person_idx))

    # create the date + index output directory
    subdir = '{0}/{1}_3d_{2}_idx{3:02d}'.format(
        os.path.dirname(openpose_output_dir),
        os.path.basename(openpose_output_dir), now_str, FLAGS.person_idx)
    os.makedirs(subdir)

    frame3d_dir = "{0}/frame3d".format(subdir)
    os.makedirs(frame3d_dir)

    # joint position data file
    posf = open(subdir + '/pos.txt', 'w')

    # normalized OpenPose position data file
    smoothedf = open(subdir + '/smoothed.txt', 'w')

    idx = FLAGS.person_idx - 1
    smoothed = openpose_utils.read_openpose_json(openpose_output_dir, idx,
                                                 level[FLAGS.verbose] == 3)
    logger.info("reading and smoothing done. start feeding 3d-pose-baseline")
    logger.debug(smoothed)
    plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    pngName = subdir + '/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    before_pose = None
    device_count = {"GPU": 1}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        for n, (frame, xy) in enumerate(smoothed.items()):
            logger.info("calc idx {0}, frame {1}".format(idx, frame))

            # map list into np array
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]
            _data = joints_array[0]

            smoothedf.write(' '.join(map(str, _data)))
            smoothedf.write("\n")

            # mapping all body parts or 3d-pose-baseline format
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                        enc_in[0][6 * 2 + j]) / 2
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                         enc_in[0][12 * 2 + j]) / 2
                # Thorax
                enc_in[0][13 * 2 +
                          j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 +
                                                                     j]

            # set spine
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            # logger.debug("enc_in - 1")
            # logger.debug(enc_in)

            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]
            _, _, poses3d = model.step(sess,
                                       enc_in,
                                       dec_out,
                                       dp,
                                       isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00,
                       hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append(poses3d)
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
            subplot_idx, exidx = 1, 1
            _max = 0  # renamed to avoid shadowing the max/min builtins
            _min = 10000

            # logger.debug("enc_in - 2")
            # logger.debug(enc_in)

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    tmp = poses3d[i][j * 3 + 2]
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                    poses3d[i][j * 3] += (spine_x - 630)
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, 280)
            logger.debug(np.min(poses3d))
            if np.min(poses3d) < -1000 and before_pose is not None:
                poses3d = before_pose

            p3d = poses3d
            # logger.debug("poses3d")
            # logger.debug(poses3d)

            if level[FLAGS.verbose] == logging.INFO:
                viz.show3Dpose(p3d,
                               ax,
                               lcolor="#9b59b6",
                               rcolor="#2ecc71",
                               add_labels=True)

                # single-viewpoint frame images only at INFO level
                pngName = frame3d_dir + '/tmp_{0:012d}.png'.format(frame)
                plt.savefig(pngName)
                png_lib.append(imageio.imread(pngName))
                before_pose = poses3d

            # per-azimuth frame output only at DEBUG level
            if level[FLAGS.verbose] == logging.DEBUG:

                for azim in [0, 45, 90, 135, 180, 225, 270, 315, 360]:
                    ax2 = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                    ax2.view_init(18, azim)
                    viz.show3Dpose(p3d,
                                   ax2,
                                   lcolor="#FF0000",
                                   rcolor="#0000FF",
                                   add_labels=True)

                    pngName2 = frame3d_dir + '/tmp_{0:012d}_{1:03d}.png'.format(
                        frame, azim)
                    plt.savefig(pngName2)

            # write joint position data
            write_pos_data(poses3d, ax, posf)

        posf.close()

        # at INFO level, generate the animation GIF
        if level[FLAGS.verbose] == logging.INFO:
            logger.info(
                "creating Gif {0}/movie_smoothing.gif, please Wait!".format(
                    subdir))
            imageio.mimsave('{0}/movie_smoothing.gif'.format(subdir),
                            png_lib,
                            fps=FLAGS.gif_fps)

        logger.info("Done!".format(pngName))
Code example #16
def main(_):
    smoothed = read_openpose_json()
    logger.info("reading and smoothing done. start feeding 3d-pose-baseline")
    plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    pngName = 'png/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 1}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        for n, (frame, xy) in enumerate(smoothed.items()):
            logger.info("calc frame {0}".format(frame))
            # map list into np array
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]
            _data = joints_array[0]
            # mapping all body parts or 3d-pose-baseline format
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                        enc_in[0][6 * 2 + j]) / 2
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                         enc_in[0][12 * 2 + j]) / 2
                # Thorax
                enc_in[0][13 * 2 +
                          j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 +
                                                                     j]

            # set spine
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]
            _, _, poses3d = model.step(sess,
                                       enc_in,
                                       dec_out,
                                       dp,
                                       isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00,
                       hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append(poses3d)
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
            subplot_idx, exidx = 1, 1
            _max = 0  # renamed to avoid shadowing the max/min builtins
            _min = 10000

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    tmp = poses3d[i][j * 3 + 2]
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                    poses3d[i][j * 3] += (spine_x - 630)
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, -70)
            logger.debug(np.min(poses3d))
            if np.min(poses3d) < -1000 and before_pose is not None:
                poses3d = before_pose

            p3d = poses3d
            logger.debug(poses3d)
            viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")

            pngName = 'png/test_{0}.png'.format(str(frame))
            plt.savefig(pngName)
            png_lib.append(imageio.imread(pngName))
            before_pose = poses3d

    logger.info("creating Gif png/movie_smoothing.gif, please Wait!")
    imageio.mimsave('png/movie_smoothing.gif', png_lib, fps=FLAGS.gif_fps)
    logger.info("Done!".format(pngName))
Code example #17
def video():
    """Get samples from a model and visualize them"""

    actions_all = data_utils.define_actions("All")

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions_all, FLAGS.data_dir, FLAGS.camera_frame, rcams,
        FLAGS.predict_14)
    train_set_3d = data_utils.remove_first_frame(train_set_3d)
    test_set_3d = data_utils.remove_first_frame(test_set_3d)
    train_root_positions = data_utils.remove_first_frame(train_root_positions)
    test_root_positions = data_utils.remove_first_frame(test_root_positions)
    print("Finished Read 3D Data")

    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions_all, FLAGS.data_dir)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.transform_to_2d_biframe_prediction(
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d,
        dim_to_use_2d)
    print("Finished Read 2D Data")
    print(test_set_2d)

    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.linear_size))
        batch_size = FLAGS.batch_size  # initial code used 64*2
        model = predict_3dpose_biframe.create_model(sess, actions_all,
                                                    batch_size)
        print("Model loaded")

        for key2d in test_set_2d.keys():

            (subj, b, fname) = key2d
            # if subj != 11:
            #   continue
            # #if fname != 'Discussion 1.55011271.h5-sh':
            if (fname, subj) not in [("Greeting 1.60457274.h5-sh", 9),
                                     ("Photo.58860488.h5-sh", 9),
                                     ("Directions 1.54138969.h5-sh", 9),
                                     ("Purchases 1.55011271.h5-sh", 9),
                                     ("Greeting.54138969.h5-sh", 11),
                                     ("Discussion 1.55011271.h5-sh", 11),
                                     ("Eating 1.55011271.h5-sh", 11),
                                     ("Purchases 1.55011271.h5-sh", 11)]:
                continue
            print("Subject: {}, action: {}, fname: {}".format(subj, b, fname))

            enc_in = test_set_2d[key2d]
            n2d, _ = enc_in.shape
            print("Model Input has size : ", enc_in.shape)

            # Split into about-same-size batches
            enc_in = np.array_split(enc_in, n2d // batch_size)
            all_poses_3d = []

            for bidx in range(len(enc_in)):

                # Dropout probability 0 (keep probability 1) for sampling
                dp = 1.0
                anything = np.zeros((enc_in[bidx].shape[0], 48))
                _, _, poses3d = model.step(sess,
                                           enc_in[bidx],
                                           anything,
                                           dp,
                                           isTraining=False)

                # denormalize
                enc_in[bidx] = data_utils.unNormalizeData(
                    enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                     data_std_3d,
                                                     dim_to_ignore_3d)
                all_poses_3d.append(poses3d)

            # Put all the poses together
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])

            # Convert back to world coordinates
            if FLAGS.camera_frame:
                N_CAMERAS = 4
                N_JOINTS_H36M = 32

                cname = fname.split(
                    '.'
                )[1]  #camera_mapping[fname.split('.')[0][-1]] # <-- camera name "55011271"
                scams = {(subj, c + 1): rcams[(subj, c + 1)]
                         for c in range(N_CAMERAS)}  # cams of this subject
                scam_idx = [
                    scams[(subj, c + 1)][-1] for c in range(N_CAMERAS)
                ].index(cname)  # index of camera used
                the_cam = scams[(subj, scam_idx + 1)]  # <-- the camera used
                R, T, f, c, k, p, name = the_cam
                assert name == cname

                def cam2world_centered(data_3d_camframe):
                    data_3d_worldframe = cameras.camera_to_world_frame(
                        data_3d_camframe.reshape((-1, 3)), R, T)
                    data_3d_worldframe = data_3d_worldframe.reshape(
                        (-1, N_JOINTS_H36M * 3))
                    # subtract root translation
                    return data_3d_worldframe - np.tile(
                        data_3d_worldframe[:, :3], (1, N_JOINTS_H36M))

                # Apply inverse rotation and translation
                poses3d = cam2world_centered(poses3d)

            # Grab a random batch to visualize
            enc_in, poses3d = map(np.vstack, [enc_in, poses3d])

            #1080p	= 1,920 x 1,080
            fig = plt.figure(figsize=(7, 7))
            gs1 = gridspec.GridSpec(1, 1)
            plt.axis('on')

            # dir_2d_poses = FLAGS.data_dir + 'S' + str(subj) + '/VideoBiframe/' + fname + '/2Destimate/'
            # if not os.path.isdir(dir_2d_poses):
            #   os.makedirs(dir_2d_poses)

            dir_3d_estimates = FLAGS.data_dir + 'S' + str(
                subj) + '/VideoBiframe/' + fname + '/3Destimate/'
            if not os.path.isdir(dir_3d_estimates):
                os.makedirs(dir_3d_estimates)
Code example #18
def main(_):
    actions_all = data_utils.define_actions("All")
    actions = data_utils.define_actions("Discussion")

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)
    train_set_3d = data_utils.remove_first_frame(train_set_3d)
    test_set_3d = data_utils.remove_first_frame(test_set_3d)
    train_root_positions = data_utils.remove_first_frame(train_root_positions)
    test_root_positions = data_utils.remove_first_frame(test_root_positions)
    print("Finished Read 3D Data")

    # train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions_all, FLAGS.data_dir)
    # train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.transform_to_2d_biframe_prediction(train_set_2d,
    #                                                                                                                                        test_set_2d,
    #                                                                                                                                        data_mean_2d,
    #                                                                                                                                        data_std_2d,
    #                                                                                                                                        dim_to_ignore_2d,
    #                                                                                                                                        dim_to_use_2d)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data(
        actions_all, FLAGS.data_dir, rcams)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.transform_to_2d_biframe_prediction(
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d,
        dim_to_use_2d)

    SH_TO_GT_PERM = np.array(
        [SH_NAMES.index(h) for h in H36M_NAMES if h != '' and h in SH_NAMES])
    assert np.all(SH_TO_GT_PERM == np.array(
        [6, 2, 1, 0, 3, 4, 5, 7, 8, 9, 13, 14, 15, 12, 11, 10]))

    test_set = {}

    manipulation_dir = os.path.dirname(FLAGS.data_dir)
    manipulation_dir = os.path.dirname(manipulation_dir)
    manipulation_dir += '/manipulation_video/'
    manipulation_folders = glob.glob(manipulation_dir + '*')

    subj = 1
    action = 'manipulation-video'
    for folder in manipulation_folders:
        seqname = os.path.basename(folder)
        with h5py.File(folder + '/' + seqname + '.h5', 'r') as h5f:
            poses = h5f['poses'][:]

            # Permute the loaded data to make it compatible with H36M
            poses = poses[:, SH_TO_GT_PERM, :]

            # Reshape into n x (32*2) matrix
            poses = np.reshape(poses, [poses.shape[0], -1])
            poses_final = np.zeros([poses.shape[0], len(H36M_NAMES) * 2])

            dim_to_use_x = np.where(
                np.array([x != '' and x != 'Neck/Nose'
                          for x in H36M_NAMES]))[0] * 2
            dim_to_use_y = dim_to_use_x + 1

            dim_to_use = np.zeros(len(SH_NAMES) * 2, dtype=np.int32)
            dim_to_use[0::2] = dim_to_use_x
            dim_to_use[1::2] = dim_to_use_y
            poses_final[:, dim_to_use] = poses

            print(seqname, poses_final.shape)
            poses_final[poses_final == 0.] = 0.1
            test_set[(subj, action, seqname)] = poses_final

    test_set = data_utils.uni_frame_to_bi_frame(test_set)
    test_set_2d = data_utils.normalize_data(test_set, data_mean_2d,
                                            data_std_2d, dim_to_use_2d)
    for key in test_set.keys():
        test_set[key] = test_set[key][0::2, :]

    dim_to_use_12_manipulation_joints = np.array([
        3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 19, 20, 21, 22, 23, 24, 25, 26, 51,
        52, 53, 54, 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 80, 81, 82, 83
    ])

    print("Finished Normalize Manipualtion Videos")
    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.linear_size))
        batch_size = FLAGS.batch_size  # initial code used 64*2
        model = predict_3dpose_biframe.create_model(sess, actions_all,
                                                    batch_size)
        print("Model loaded")

        j = 0
        for key2d in test_set_2d.keys():

            (subj, b, fname) = key2d
            # if fname !=  specific_seqname + '.h5':
            #     continue
            print("Subject: {}, action: {}, fname: {}".format(subj, b, fname))

            enc_in = test_set_2d[key2d]
            n2d, _ = enc_in.shape

            # Split into single-frame batches (n2d // 1 batches of one frame each)
            enc_in = np.array_split(enc_in, n2d // 1)
            all_poses_3d = []

            for bidx in range(len(enc_in)):

                # Dropout probability 0 (keep probability 1) for sampling
                dp = 1.0
                anything = np.zeros((enc_in[bidx].shape[0], 48))
                _, _, poses3d = model.step(sess,
                                           enc_in[bidx],
                                           anything,
                                           dp,
                                           isTraining=False)

                # Denormalize
                enc_in[bidx] = data_utils.unNormalizeData(
                    enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                     data_std_3d,
                                                     dim_to_ignore_3d)
                all_poses_3d.append(poses3d)

            # Put all the poses together
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])

            poses3d_12_manipulation = poses3d[:,
                                              dim_to_use_12_manipulation_joints]

            annotated_images = glob.glob(manipulation_dir + fname +
                                         '/info/*.xml')
            annotated_images = sorted(annotated_images)

            # 1080p	= 1,920 x 1,080
            fig = plt.figure(j, figsize=(10, 10))
            gs1 = gridspec.GridSpec(3, 3)
            gs1.update(wspace=-0, hspace=0.1)  # set the spacing between axes.
            plt.axis('off')

            subplot_idx = 1
            nsamples = 3
            for i in np.arange(nsamples):
                # Plot 2d Detection
                ax1 = plt.subplot(gs1[subplot_idx - 1])
                img = mpimg.imread(
                    manipulation_dir + fname + '/skeleton_cropped/' +
                    os.path.basename(annotated_images[i]).split('_')[0] +
                    '.jpg')
                ax1.imshow(img)

                # Plot 2d pose
                ax2 = plt.subplot(gs1[subplot_idx])
                # p2d = enc_in[i,:]
                # viz.show2Dpose( p2d, ax2 )
                # ax2.invert_yaxis()
                ax2.imshow(img)

                # Plot 3d predictions
                # Compute the Procrustes alignment first and print the error
                gt = getJ3dPosFromXML(annotated_images[i])
                A = poses3d_12_manipulation[i, :].reshape(gt.shape)
                _, Z, T, b, c = procrustes.compute_similarity_transform(
                    gt, A, compute_optimal_scale=True)
                sqerr = np.sqrt(np.sum((gt - (b * A.dot(T)) - c)**2, axis=1))
                print("{0} - {1} - Mean Error (mm) : {2}".format(
                    fname, os.path.basename(annotated_images[i]),
                    np.mean(sqerr)))

                ax3 = plt.subplot(gs1[subplot_idx + 1], projection='3d')
                temp = poses3d[i, :].reshape((32, 3))
                temp = c + temp.dot(T)  #Do not scale
                # p3d = temp.reshape((1, 96))
                p3d = poses3d[i, :]
                viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")
                ax3.invert_zaxis()
                ax3.invert_yaxis()

                subplot_idx = subplot_idx + 3

            plt.show()
            j += 1
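
A minimal sketch of the Procrustes alignment pattern used above and in the evaluate_batches examples below, reusing the repo's procrustes module (assuming it is importable; gt and out are dummy data, and the (_, Z, T, b, c) return convention is taken from the call sites themselves):

import numpy as np
import procrustes

gt  = np.random.rand(12, 3)   # ground-truth joints (dummy data)
out = np.random.rand(12, 3)   # predicted joints (dummy data)
_, Z, T, b, c = procrustes.compute_similarity_transform(
    gt, out, compute_optimal_scale=True)
aligned = b * out.dot(T) + c  # apply rotation T, scale b, translation c
per_joint_err = np.sqrt(np.sum((gt - aligned)**2, axis=1))  # per-joint error (mm)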
Code example #19
def sample():
    """Get samples from a model and visualize them"""
    path = '{}/samples_sh'.format(FLAGS.train_dir)
    if not os.path.exists(path):
        os.makedirs(path)
    actions = data_utils.define_actions(FLAGS.action)

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    n_joints = 17 if not (FLAGS.predict_14) else 14

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    if FLAGS.use_sh:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
            actions, FLAGS.data_dir)
    else:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d, _ = data_utils.create_2d_data(
            actions, FLAGS.data_dir, rcams)

    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===

        batch_size = 128
        model = create_model(sess, actions, batch_size)
        print("Model loaded")

        for key2d in test_set_2d.keys():

            (subj, b, fname) = key2d

            # choose SittingDown action to visualize
            if b == 'SittingDown':
                print("Subject: {}, action: {}, fname: {}".format(
                    subj, b, fname))

                # keys should be the same if 3d is in camera coordinates
                key3d = key2d if FLAGS.camera_frame else (
                    subj, b, '{0}.h5'.format(fname.split('.')[0]))
                key3d = (subj, b, fname[:-3]) if (
                    fname.endswith('-sh')) and FLAGS.camera_frame else key3d

                enc_in = test_set_2d[key2d]
                n2d, _ = enc_in.shape
                dec_out = test_set_3d[key3d]
                n3d, _ = dec_out.shape
                assert n2d == n3d

                # Split into about-same-size batches

                enc_in = np.array_split(enc_in, n2d // batch_size)
                dec_out = np.array_split(dec_out, n3d // batch_size)

                # store all pose hypotheses in a list
                pose_3d_mdm = [[], [], [], [], []]

                for bidx in range(len(enc_in)):

                    # Dropout probability 0 (keep probability 1) for sampling
                    dp = 1.0
                    loss, _, out_all_components = model.step(sess,
                                                             enc_in[bidx],
                                                             dec_out[bidx],
                                                             dp,
                                                             isTraining=False)

                    # denormalize the input 2d pose, ground truth 3d pose as well as 3d pose hypotheses from mdm
                    out_all_components = np.reshape(
                        out_all_components,
                        [-1, model.HUMAN_3D_SIZE + 2, model.num_models])
                    out_mean = out_all_components[:, :model.HUMAN_3D_SIZE, :]

                    enc_in[bidx] = data_utils.unNormalizeData(
                        enc_in[bidx], data_mean_2d, data_std_2d,
                        dim_to_ignore_2d)
                    dec_out[bidx] = data_utils.unNormalizeData(
                        dec_out[bidx], data_mean_3d, data_std_3d,
                        dim_to_ignore_3d)
                    poses3d = np.zeros(
                        (out_mean.shape[0], 96, out_mean.shape[-1]))
                    for j in range(out_mean.shape[-1]):
                        poses3d[:, :, j] = data_utils.unNormalizeData(
                            out_mean[:, :, j], data_mean_3d, data_std_3d,
                            dim_to_ignore_3d)

                    # extract the 17 joints
                    dtu3d = np.hstack(
                        (np.arange(3), dim_to_use_3d
                         )) if not (FLAGS.predict_14) else dim_to_use_3d
                    dec_out_17 = dec_out[bidx][:, dtu3d]
                    pose_3d_17 = poses3d[:, dtu3d, :]
                    sqerr = (pose_3d_17 -
                             np.expand_dims(dec_out_17, axis=2))**2
                    dists = np.zeros(
                        (sqerr.shape[0], n_joints, sqerr.shape[2]))
                    for m in range(dists.shape[-1]):
                        dist_idx = 0
                        for k in np.arange(0, n_joints * 3, 3):
                            dists[:, dist_idx, m] = np.sqrt(
                                np.sum(sqerr[:, k:k + 3, m], axis=1))
                            dist_idx = dist_idx + 1

                    # append each hypothesis to its own list
                    for h in range(poses3d.shape[-1]):
                        pose_3d_mdm[h].append(poses3d[:, :, h])

                # Put all the poses together
                enc_in, dec_out = map(np.vstack, [enc_in, dec_out])
                for i in range(poses3d.shape[-1]):
                    pose_3d_mdm[i] = np.vstack(pose_3d_mdm[i])

                # Convert back to world coordinates
                if FLAGS.camera_frame:
                    N_CAMERAS = 4
                    N_JOINTS_H36M = 32

                    # Add global position back
                    dec_out = dec_out + np.tile(test_root_positions[key3d],
                                                [1, N_JOINTS_H36M])
                    for i in range(poses3d.shape[-1]):
                        pose_3d_mdm[i] = pose_3d_mdm[i] + np.tile(
                            test_root_positions[key3d], [1, N_JOINTS_H36M])

                    # Load the appropriate camera
                    subj, action, sname = key3d

                    cname = sname.split('.')[1]  # <-- camera name
                    scams = {(subj, c + 1): rcams[(subj, c + 1)]
                             for c in range(N_CAMERAS)}  # cams of this subject
                    scam_idx = [
                        scams[(subj, c + 1)][-1] for c in range(N_CAMERAS)
                    ].index(cname)  # index of camera used
                    the_cam = scams[(subj,
                                     scam_idx + 1)]  # <-- the camera used
                    R, T, f, c, k, p, name = the_cam
                    assert name == cname

                    def cam2world_centered(data_3d_camframe):
                        data_3d_worldframe = cameras.camera_to_world_frame(
                            data_3d_camframe.reshape((-1, 3)), R, T)
                        data_3d_worldframe = data_3d_worldframe.reshape(
                            (-1, N_JOINTS_H36M * 3))
                        # subtract root translation
                        return data_3d_worldframe - np.tile(
                            data_3d_worldframe[:, :3], (1, N_JOINTS_H36M))

                    # Apply inverse rotation and translation
                    dec_out = cam2world_centered(dec_out)
                    for i in range(poses3d.shape[-1]):
                        pose_3d_mdm[i] = cam2world_centered(pose_3d_mdm[i])

                # sample some results to visualize
                np.random.seed(42)
                idx = np.random.permutation(enc_in.shape[0])
                enc_in, dec_out = enc_in[idx, :], dec_out[idx, :]
                for i in range(poses3d.shape[-1]):
                    pose_3d_mdm[i] = pose_3d_mdm[i][idx, :]

                exidx = 1
                nsamples = 20

                for i in np.arange(nsamples):
                    fig = plt.figure(figsize=(20, 5))

                    subplot_idx = 1
                    gs1 = gridspec.GridSpec(1, 7)  # 1 row, 7 columns
                    gs1.update(wspace=-0.00,
                               hspace=0.05)  # set the spacing between axes.
                    plt.axis('off')

                    # Plot 2d pose
                    ax1 = plt.subplot(gs1[subplot_idx - 1])
                    p2d = enc_in[exidx, :]
                    viz.show2Dpose(p2d, ax1)
                    ax1.invert_yaxis()

                    # Plot 3d gt
                    ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
                    p3d = dec_out[exidx, :]
                    viz.show3Dpose(p3d, ax2)

                    # Plot 3d pose hypotheses

                    for h in range(poses3d.shape[-1]):
                        ax3 = plt.subplot(gs1[subplot_idx + h + 1],
                                          projection='3d')
                        p3d = pose_3d_mdm[h][exidx]
                        viz.show3Dpose(p3d,
                                       ax3,
                                       lcolor="#9b59b6",
                                       rcolor="#2ecc71")
                    # plt.show()
                    plt.savefig('{}/sample_{}_{}_{}_{}.png'.format(
                        path, subj, action, scam_idx, exidx))
                    plt.close(fig)
                    exidx = exidx + 1
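
A sketch of how the flat mixture-density output is unpacked in the loop above. HUMAN_3D_SIZE = 96 (32 joints x 3) and num_models = 5 match the shapes used in the example; the two extra rows per hypothesis presumably carry the mixture parameters and are not used for visualization:

import numpy as np

HUMAN_3D_SIZE, num_models = 96, 5          # values taken from the example above
flat = np.random.rand(4, (HUMAN_3D_SIZE + 2) * num_models)  # dummy network output
out = np.reshape(flat, [-1, HUMAN_3D_SIZE + 2, num_models])
means  = out[:, :HUMAN_3D_SIZE, :]         # one 3d pose mean per hypothesis
extras = out[:, HUMAN_3D_SIZE:, :]         # 2 extra rows (mixture parameters)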
Code example #20
def evaluate_batches(sess,
                     model,
                     data_mean_3d,
                     data_std_3d,
                     dim_to_use_3d,
                     dim_to_ignore_3d,
                     data_mean_2d,
                     data_std_2d,
                     dim_to_use_2d,
                     dim_to_ignore_2d,
                     current_step,
                     encoder_inputs,
                     decoder_outputs,
                     current_epoch=0):
    """
  Generic method that evaluates performance of a list of batches.
  May be used to evaluate all actions or a single action.

  Args
    sess
    model
    data_mean_3d
    data_std_3d
    dim_to_use_3d
    dim_to_ignore_3d
    data_mean_2d
    data_std_2d
    dim_to_use_2d
    dim_to_ignore_2d
    current_step
    encoder_inputs
    decoder_outputs
    current_epoch
  Returns

    aver_minerr
    step_time
    loss
  """

    n_joints = 17 if not (FLAGS.predict_14) else 14
    nbatches = len(encoder_inputs)

    # Loop through test examples
    all_dists, start_time, loss = [], time.time(), 0.
    log_every_n_batches = 100
    all_poses_3d = []
    all_enc_in = []

    for i in range(nbatches):

        if current_epoch > 0 and (i + 1) % log_every_n_batches == 0:
            print("Working on test epoch {0}, batch {1} / {2}".format(
                current_epoch, i + 1, nbatches))

        enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]
        # enc_in = data_utils.generage_missing_data(enc_in, FLAGS.miss_num)
        dp = 1.0  # dropout keep probability is always 1 at test time
        step_loss, loss_summary, out_all_components_ori = model.step(
            sess, enc_in, dec_out, dp, isTraining=False)
        loss += step_loss

        out_all_components = np.reshape(
            out_all_components_ori,
            [-1, model.HUMAN_3D_SIZE + 2, model.num_models])
        out_mean = out_all_components[:, :model.HUMAN_3D_SIZE, :]

        # denormalize
        enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d, data_std_2d,
                                            dim_to_ignore_2d)
        enc_in_ = copy.deepcopy(enc_in)
        all_enc_in.append(enc_in_)
        dec_out = data_utils.unNormalizeData(dec_out, data_mean_3d,
                                             data_std_3d, dim_to_ignore_3d)
        pose_3d = np.zeros((enc_in.shape[0], 96, out_mean.shape[-1]))

        for j in range(out_mean.shape[-1]):
            pose_3d[:, :,
                    j] = data_utils.unNormalizeData(out_mean[:, :, j],
                                                    data_mean_3d, data_std_3d,
                                                    dim_to_ignore_3d)

        pose_3d_ = copy.deepcopy(pose_3d)
        all_poses_3d.append(pose_3d_)

        # Keep only the relevant dimensions
        dtu3d = np.hstack(
            (np.arange(3),
             dim_to_use_3d)) if not (FLAGS.predict_14) else dim_to_use_3d

        dec_out = dec_out[:, dtu3d]
        pose_3d = pose_3d[:, dtu3d, :]

        assert dec_out.shape[0] == FLAGS.batch_size
        assert pose_3d.shape[0] == FLAGS.batch_size

        if FLAGS.procrustes:
            # Apply per-frame procrustes alignment if asked to do so
            for j in range(FLAGS.batch_size):
                for k in range(model.num_models):
                    gt = np.reshape(dec_out[j, :], [-1, 3])
                    out = np.reshape(pose_3d[j, :, k], [-1, 3])
                    _, Z, T, b, c = procrustes.compute_similarity_transform(
                        gt, out, compute_optimal_scale=True)
                    out = (b * out.dot(T)) + c

                    pose_3d[j, :, k] = np.reshape(
                        out,
                        [-1, 17 * 3]) if not (FLAGS.predict_14) else np.reshape(
                            out, [-1, 14 * 3])

        # Compute Euclidean distance error per joint
        sqerr = (pose_3d - np.expand_dims(dec_out, axis=2)
                 )**2  # Squared error between prediction and expected output
        dists = np.zeros(
            (sqerr.shape[0], n_joints,
             sqerr.shape[2]))  # Array with L2 error per joint in mm

        for m in range(dists.shape[-1]):
            dist_idx = 0
            for k in np.arange(0, n_joints * 3, 3):
                # Sum across X, Y, and Z dimensions to obtain L2 distance
                dists[:, dist_idx,
                      m] = np.sqrt(np.sum(sqerr[:, k:k + 3, m], axis=1))

                dist_idx = dist_idx + 1

        all_dists.append(dists)
        assert sqerr.shape[0] == FLAGS.batch_size

    step_time = (time.time() - start_time) / nbatches
    loss = loss / nbatches

    all_dists = np.vstack(all_dists)
    aver_minerr = np.mean(np.min(np.sum(all_dists, axis=1), axis=1)) / n_joints

    return aver_minerr, step_time, loss
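
A worked sketch of the final metric above: for every frame, take the hypothesis whose summed joint error is smallest, then average over frames and divide by the joint count (the shapes are assumptions for illustration):

import numpy as np

n_frames, n_joints, n_hyp = 10, 17, 5        # shapes assumed for illustration
all_dists = np.random.rand(n_frames, n_joints, n_hyp)
per_hyp = np.sum(all_dists, axis=1)          # summed joint error per hypothesis
aver_minerr = np.mean(np.min(per_hyp, axis=1)) / n_joints  # best-hypothesis error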
Code example #21
def evaluate_batches( sess, model,
  data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,
  data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,
  current_step, encoder_inputs, decoder_outputs, current_epoch=0 ):
  """
  Generic method that evaluates performance of a list of batches.
  May be used to evaluate all actions or a single action.

  Args
    sess
    model
    data_mean_3d
    data_std_3d
    dim_to_use_3d
    dim_to_ignore_3d
    data_mean_2d
    data_std_2d
    dim_to_use_2d
    dim_to_ignore_2d
    current_step
    encoder_inputs
    decoder_outputs
    current_epoch
  Returns

    total_err
    joint_err
    step_time
    loss
  """

  n_joints = 17 if not(FLAGS.predict_14) else 14
  nbatches = len( encoder_inputs )

  # Loop through test examples
  all_dists, start_time, loss = [], time.time(), 0.
  log_every_n_batches = 100
  for i in range(nbatches):

    if current_epoch > 0 and (i+1) % log_every_n_batches == 0:
      print("Working on test epoch {0}, batch {1} / {2}".format( current_epoch, i+1, nbatches) )

    enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]
    dp = 1.0 # dropout keep probability is always 1 at test time
    step_loss, loss_summary, poses3d = model.step( sess, enc_in, dec_out, dp, isTraining=False )
    loss += step_loss

    # denormalize
    enc_in  = data_utils.unNormalizeData( enc_in,  data_mean_2d, data_std_2d, dim_to_ignore_2d )
    dec_out = data_utils.unNormalizeData( dec_out, data_mean_3d, data_std_3d, dim_to_ignore_3d )
    poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )

    # Keep only the relevant dimensions
    dtu3d = np.hstack( (np.arange(3), dim_to_use_3d) ) if not(FLAGS.predict_14) else  dim_to_use_3d

    dec_out = dec_out[:, dtu3d]
    poses3d = poses3d[:, dtu3d]

    assert dec_out.shape[0] == FLAGS.batch_size
    assert poses3d.shape[0] == FLAGS.batch_size

    if FLAGS.procrustes:
      # Apply per-frame procrustes alignment if asked to do so
      for j in range(FLAGS.batch_size):
        gt  = np.reshape(dec_out[j,:],[-1,3])
        out = np.reshape(poses3d[j,:],[-1,3])
        _, Z, T, b, c = procrustes.compute_similarity_transform(gt,out)
        out = out.dot(T)+c

        poses3d[j,:] = np.reshape(out,[-1,17*3] ) if not(FLAGS.predict_14) else np.reshape(out,[-1,14*3] )

    # Compute Euclidean distance error per joint
    sqerr = (poses3d - dec_out)**2 # Squared error between prediction and expected output
    dists = np.zeros( (sqerr.shape[0], n_joints) ) # Array with L2 error per joint in mm
    dist_idx = 0
    for k in np.arange(0, n_joints*3, 3):
      # Sum across X, Y, and Z dimensions to obtain L2 distance
      dists[:,dist_idx] = np.sqrt( np.sum( sqerr[:, k:k+3], axis=1 ))
      dist_idx = dist_idx + 1

    all_dists.append(dists)
    assert sqerr.shape[0] == FLAGS.batch_size

  step_time = (time.time() - start_time) / nbatches
  loss      = loss / nbatches

  all_dists = np.vstack( all_dists )

  # Error per joint and total for all passed batches
  joint_err = np.mean( all_dists, axis=0 )
  total_err = np.mean( all_dists )

  return total_err, joint_err, step_time, loss
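
The per-joint loop above computes the standard MPJPE (mean per-joint position error). An equivalent vectorized sketch:

import numpy as np

def mpjpe(pred, gt):
    # pred, gt: n_frames x (n_joints*3) arrays in mm
    diff = (pred - gt).reshape(pred.shape[0], -1, 3)
    return np.mean(np.linalg.norm(diff, axis=2))  # mean over frames and joints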
Code example #22
def main(_):
    global framenum

    #clear out all old frames
    os.system("rm png/*")

    #set done to empty array, it will hold the json files from openpose that we've already processed
    done = []

    #initialize input tensor to 1x64 array of zeroes [[0. 0. 0. ...]]
    #this is list of numpy vectors to feed as encoder inputs (32 2d coordinates)
    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    #actions to run on, default is all
    actions = data_utils.define_actions(FLAGS.action)

    #the list of Human3.6m subjects to look at
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]

    #load camera parameters from the h36m dataset
    rcams = cameras2.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

    #loads 2d data from precomputed Stacked Hourglass detections
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)

    #loads 3d poses, zero-centres and normalizes them
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 0}

    png_lib = []

    #run a tensorflow inference session
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)

        #load pre-trained model
        batch_size = 128
        model = create_model(sess, actions, batch_size)

        #open the webcam once; re-opening it on every frame is slow
        cap = cv2.VideoCapture(0)

        #infinitely show 3d pose visualization
        while True:
            #wait for key to be pressed
            key = cv2.waitKey(1) & 0xFF

            _, frame = cap.read()  #ignore the returned success flag

            #resize and rotate the incoming image frame
            frame, W, H = resize_img(frame)
            frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)

            start = time.time()
            #run posenet inference on the frame
            joints_2d = estimate_pose(frame)

            #throw out confidence score and flatten
            _data = joints_2d[..., :2].flatten()

            #open pop-up and draw the keypoints found
            img2D = draw_2Dimg(frame, joints_2d, 1)

            #fake the thorax point by finding midpt between left and right shoulder
            lt_should_x = _data[10]
            lt_should_y = _data[11]
            rt_should_x = _data[12]
            rt_should_y = _data[13]

            thorax = midpoint(lt_should_x, lt_should_y, rt_should_x,
                              rt_should_y)

            #print("testing thorax pt at ", thorax)

            #insert thorax into data where it should be, at index 1
            _data = np.insert(_data, 2, thorax[0])
            _data = np.insert(_data, 3, thorax[1])

            #print("new _data is ", _data)
            _data = np.around(_data)

            #set xy to the array of 2d joint data
            xy = _data

            #create new 1x36 array of zeroes, which will store the 18 2d keypoints
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]

            #index into our data array
            index = 0

            #iterates 18 times
            for o in range(int(len(joints_array[0]) / 2)):
                #feed array with xy array (the 18 keypoints), but switch ordering: posenet to openpose
                for j in range(2):
                    #print("o is", o, "j is", j, "index is ", index)
                    index_into_posenet_data = order_pnet_to_openpose[o] * 2 + j
                    #print("putting posenet[", index_into_posenet_data, "], value ", xy[index_into_posenet_data], " , into joints_array[0][", index, "]")

                    joints_array[0][index] = xy[index_into_posenet_data]
                    index += 1

            #set _data to the array containing the 36 coordinates of the 2d keypts
            _data = joints_array[0]

            #print("_data is ", _data)

            #mapping all body parts for 3d-pose-baseline format (32 2d coordinates)
            for i in range(len(order)):  #iterates 14 times
                #select which coordinate of this point: x or y
                for j in range(2):
                    #create encoder input, switching around the order of the joint points
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]

            #now enc_in contains 14 points (28 total coordinates)

            #at this pt enc_in should be array of 64 vals

            for j in range(2):
                #place hip at index 0
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                        enc_in[0][6 * 2 + j]) / 2
                #place neck/nose at index 14
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                         enc_in[0][12 * 2 + j]) / 2
                #place thorax at index 13
                enc_in[0][13 * 2 +
                          j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 +
                                                                     j]

            #set spine found by openpose
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            #dim_to_use_2d is always [0  1  2  3  4  5  6  7 12 13 14 15 16 17 24 25 26 27 30 31 34 35 36 37 38 39 50 51 52 53 54 55]

            #take 32 entries of enc_in
            enc_in = enc_in[:, dim_to_use_2d]

            #find mean of 2d data
            mu = data_mean_2d[dim_to_use_2d]

            #find stdev of 2d data
            stddev = data_std_2d[dim_to_use_2d]

            #subtract mean and divide std for all
            enc_in = np.divide((enc_in - mu), stddev)

            #dropout keep probability
            dp = 1.0

            #output tensor, initialize it to zeroes. We'll get 16 joints with 3d coordinates
            #this is list of numpy vectors that are the expected decoder outputs
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]

            #get the 3d poses by running the 3d-pose-baseline inference. Model operates on 32 points
            _, _, poses3d = model.step(sess,
                                       enc_in,
                                       dec_out,
                                       dp,
                                       isTraining=False)
            #poses3d comes back as a 1x96 array (32 joints x 3 coordinates)

            end = time.time()
            #print("ELAPSED: ", end-start)

            #hold our 3d poses while we're doing some post-processing
            all_poses_3d = []

            #un-normalize the input and output data using the means and stdevs
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)

            #create a grid for drawing
            gs1 = gridspec.GridSpec(1, 1)

            #set spacing between axes
            gs1.update(wspace=-0.00, hspace=0.05)
            plt.axis('off')

            #fill all_poses_3d with the 3d poses predicted by the model step fxn
            all_poses_3d.append(poses3d)

            #vstack stacks arrays in sequence vertically (row wise);
            #with a single batch this is effectively a no-op
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])

            subplot_idx, exidx = 1, 1
            _max = 0
            _min = 10000

            #iterates once
            for i in range(poses3d.shape[0]):
                #iterate over all 32 points in poses3d
                for j in range(32):
                    #save the last coordinate of this point into tmp
                    tmp = poses3d[i][j * 3 + 2]

                    #swap the second and third coordinates of this pt
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp

                    #keep track of max of last coordinate
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            #iterates once
            for i in range(poses3d.shape[0]):
                #iterate over all 32 points in poses3d (2nd and 3rd coords have all been swapped at this pt)
                for j in range(32):
                    #change the third coord of this pt, subtracting it from sum of max and min third coord to get new value
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min

                    #modify first coord of this pt by adding the x coord of the spine found by 2d model
                    poses3d[i][j * 3] += (spine_x - 630)

                    #modify third coord of this pt by adding 500 minus y coord of spine found by 2d model
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            #Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, -70)
            logger.debug(np.min(poses3d))

            #TODO: if something happened with the data, reuse data from last frame (before_pose)

            p3d = poses3d

            #plot the 3d skeleton
            viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")

            #keep track of this poses3d in case we need to reuse it for next frame
            before_pose = poses3d

            #save this frame as a png in the ./png/ folder
            pngName = 'png/test_{0}.png'.format(str(framenum))
            #print("pngName is ", pngName)

            plt.savefig(pngName)

            #plt.show()

            #read this frame which was just saved as png
            img = cv2.imread(pngName, 0)

            rect_cpy = img.copy()

            #show this frame
            cv2.imshow('3d-pose-baseline', rect_cpy)

            framenum += 1

            #quit if q is pressed
            if key == ord('q'):
                break

        sess.close()
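
The example above calls a midpoint helper that is not shown; a minimal sketch consistent with how it is used (the name and signature are taken from the call site):

def midpoint(x1, y1, x2, y2):
    # average two 2d points; used above to synthesize the thorax keypoint
    return ((x1 + x2) / 2.0, (y1 + y2) / 2.0)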
Code example #23
def main(_):
    done = []

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 0}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        while True:
            key = cv2.waitKey(1) & 0xFF
            #logger.info("start reading data")
            # check for other file types
            list_of_files = glob.iglob("{0}/*".format(
                openpose_output_dir))  # iglob returns a lazy iterator
            latest_file = ""
            try:
                latest_file = max(list_of_files, key=os.path.getctime)
            except ValueError:
                #empty dir
                pass
            if not latest_file:
                continue
            try:
                _file = file_name = latest_file
                print(latest_file)
                if not os.path.isfile(_file):
                    raise Exception("No file found!!, {0}".format(_file))
                data = json.load(open(_file))
                #take first person
                _data = data["people"][0]["pose_keypoints"]
                xy = []
                #ignore confidence score
                for o in range(0, len(_data), 3):
                    xy.append(_data[o])
                    xy.append(_data[o + 1])

                frame_indx = re.findall(r"(\d+)", file_name)
                frame = int(frame_indx[0])

                joints_array = np.zeros((1, 36))
                joints_array[0] = [0 for i in range(36)]
                for o in range(len(joints_array[0])):
                    #feed array with xy array
                    joints_array[0][o] = xy[o]
                _data = joints_array[0]
                # mapping all body parts or 3d-pose-baseline format
                for i in range(len(order)):
                    for j in range(2):
                        # create encoder input
                        enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
                for j in range(2):
                    # Hip
                    enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                            enc_in[0][6 * 2 + j]) / 2
                    # Neck/Nose
                    enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                             enc_in[0][12 * 2 + j]) / 2
                    # Thorax
                    enc_in[0][
                        13 * 2 +
                        j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 + j]

                # set spine
                spine_x = enc_in[0][24]
                spine_y = enc_in[0][25]

                enc_in = enc_in[:, dim_to_use_2d]
                mu = data_mean_2d[dim_to_use_2d]
                stddev = data_std_2d[dim_to_use_2d]
                enc_in = np.divide((enc_in - mu), stddev)

                dp = 1.0
                dec_out = np.zeros((1, 48))
                dec_out[0] = [0 for i in range(48)]
                _, _, poses3d = model.step(sess,
                                           enc_in,
                                           dec_out,
                                           dp,
                                           isTraining=False)
                all_poses_3d = []
                enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                    data_std_2d,
                                                    dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                     data_std_3d,
                                                     dim_to_ignore_3d)
                gs1 = gridspec.GridSpec(1, 1)
                gs1.update(wspace=-0.00,
                           hspace=0.05)  # set the spacing between axes.
                plt.axis('off')
                all_poses_3d.append(poses3d)
                enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
                subplot_idx, exidx = 1, 1
                _max = 0
                _min = 10000

                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        tmp = poses3d[i][j * 3 + 2]
                        poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                        poses3d[i][j * 3 + 1] = tmp
                        if poses3d[i][j * 3 + 2] > _max:
                            _max = poses3d[i][j * 3 + 2]
                        if poses3d[i][j * 3 + 2] < _min:
                            _min = poses3d[i][j * 3 + 2]

                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        poses3d[i][j * 3 +
                                   2] = _max - poses3d[i][j * 3 + 2] + _min
                        poses3d[i][j * 3] += (spine_x - 630)
                        poses3d[i][j * 3 + 2] += (500 - spine_y)

                # Plot 3d predictions
                ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                ax.view_init(18, -70)
                logger.debug(np.min(poses3d))
                if np.min(poses3d) < -1000 and frame != 0:
                    poses3d = before_pose

                p3d = poses3d

                viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")
                before_pose = poses3d
                pngName = 'png/test_{0}.png'.format(str(frame))
                plt.savefig(pngName)

                #plt.show()
                img = cv2.imread(pngName, 0)
                rect_cpy = img.copy()
                cv2.imshow('3d-pose-baseline', rect_cpy)
                done.append(file_name)
                if key == ord('q'):
                    break
            except Exception as e:
                print(e)

        sess.close()
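
A minimal sketch of the OpenPose JSON layout the loop above assumes: each detected person carries a flat [x, y, confidence, x, y, confidence, ...] keypoint list, and the confidence entries are dropped:

import json

raw = '{"people": [{"pose_keypoints": [100.0, 200.0, 0.9, 110.0, 190.0, 0.8]}]}'
kps = json.loads(raw)["people"][0]["pose_keypoints"]
xy = [v for i, v in enumerate(kps) if i % 3 != 2]  # -> [100.0, 200.0, 110.0, 190.0]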
Code example #24
def sample():

  actions = data_utils.define_actions( FLAGS.action )

  SUBJECT_IDS = [1,5,6,7,8,9,11]
  rcams = load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

  train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
    actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14 )

  if FLAGS.use_sh:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
  else:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data( actions, FLAGS.data_dir, rcams )
  print( "done reading and normalizing data." )

  device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
  with tf.Session(config=tf.ConfigProto( device_count = device_count )) as sess:
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.linear_size))
    batch_size = 128
    model = create_model(sess, actions, batch_size)
    print("Model loaded")

    for key2d in test_set_2d.keys():

      (subj, b, fname) = key2d
      print( "Subject: {}, action: {}, fname: {}".format(subj, b, fname) )

      key3d = key2d if FLAGS.camera_frame else (subj, b, '{0}.h5'.format(fname.split('.')[0]))
      key3d = (subj, b, fname[:-3]) if (fname.endswith('-sh')) and FLAGS.camera_frame else key3d

      enc_in  = test_set_2d[ key2d ]
      n2d, _ = enc_in.shape
      dec_out = test_set_3d[ key3d ]
      n3d, _ = dec_out.shape
      assert n2d == n3d

      enc_in   = np.array_split( enc_in,  n2d // batch_size )
      dec_out  = np.array_split( dec_out, n3d // batch_size )
      all_poses_3d = []

      for bidx in range( len(enc_in) ):

        dp = 1.0
        _, _, poses3d = model.step(sess, enc_in[bidx], dec_out[bidx], dp, isTraining=False)

        enc_in[bidx]  = data_utils.unNormalizeData(  enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d )
        dec_out[bidx] = data_utils.unNormalizeData( dec_out[bidx], data_mean_3d, data_std_3d, dim_to_ignore_3d )
        poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )
        all_poses_3d.append( poses3d )

      enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, all_poses_3d] )

      if FLAGS.camera_frame:
        N_CAMERAS = 4
        N_JOINTS_H36M = 32

        dec_out = dec_out + np.tile( test_root_positions[ key3d ], [1,N_JOINTS_H36M] )
        subj, _, sname = key3d

        cname = sname.split('.')[1] 
        scams = {(subj,c+1): rcams[(subj,c+1)] for c in range(N_CAMERAS)} 
        scam_idx = [scams[(subj,c+1)][-1] for c in range(N_CAMERAS)].index( cname )
        the_cam  = scams[(subj, scam_idx+1)]
        R, T, f, c, k, p, name = the_cam
        assert name == cname

        def cam2world_centered(data_3d_camframe):
          data_3d_worldframe = camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
          data_3d_worldframe = data_3d_worldframe.reshape((-1, N_JOINTS_H36M*3))
          return data_3d_worldframe - np.tile( data_3d_worldframe[:,:3], (1,N_JOINTS_H36M) )

        dec_out = cam2world_centered(dec_out)
        poses3d = cam2world_centered(poses3d)

  enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, poses3d] )
  idx = np.random.permutation( enc_in.shape[0] )
  enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

  import matplotlib.gridspec as gridspec

  fig = plt.figure( figsize=(19.2, 10.8) )

  gs1 = gridspec.GridSpec(5, 9)
  gs1.update(wspace=-0.00, hspace=0.05)
  plt.axis('off')

  subplot_idx, exidx = 1, 1
  nsamples = 15
  for i in np.arange( nsamples ):

    ax1 = plt.subplot(gs1[subplot_idx-1])
    p2d = enc_in[exidx,:]
    viz.show2Dpose( p2d, ax1 )
    ax1.invert_yaxis()

    ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
    p3d = dec_out[exidx,:]
    viz.show3Dpose( p3d, ax2 )

    ax3 = plt.subplot(gs1[subplot_idx+1], projection='3d')
    p3d = poses3d[exidx,:]
    viz.show3Dpose( p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71" )

    exidx = exidx + 1
    subplot_idx = subplot_idx + 3

  plt.show()
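
Note on the batching above: np.array_split keeps every frame even when the frame count is not a multiple of the batch size, producing near-equal batches instead of dropping the remainder:

import numpy as np

batches = np.array_split(np.arange(10), 10 // 3)  # 10 frames, batch size 3
print([len(b) for b in batches])                  # -> [4, 3, 3]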
Code example #25
def evaluate_batches( sess, model,
  data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,
  data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,
  current_step, encoder_inputs, decoder_outputs, current_epoch=0 ):
  """
  Generic method that evaluates performance of a list of batches.
  May be used to evaluate all actions or a single action.

  Args
    sess
    model
    data_mean_3d
    data_std_3d
    dim_to_use_3d
    dim_to_ignore_3d
    data_mean_2d
    data_std_2d
    dim_to_use_2d
    dim_to_ignore_2d
    current_step
    encoder_inputs
    decoder_outputs
    current_epoch
  Returns

    total_err
    joint_err
    step_time
    loss
  """

  n_joints = 17 if not(FLAGS.predict_14) else 14
  nbatches = len( encoder_inputs )

  # Loop through test examples
  all_dists, start_time, loss = [], time.time(), 0.
  log_every_n_batches = 100
  for i in range(nbatches):

    if current_epoch > 0 and (i+1) % log_every_n_batches == 0:
      print("Working on test epoch {0}, batch {1} / {2}".format( current_epoch, i+1, nbatches) )

    enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]
    dp = 1.0 # dropout keep probability is always 1 at test time
    step_loss, loss_summary, poses3d = model.step( sess, enc_in, dec_out, dp, isTraining=False )
    loss += step_loss

    # denormalize
    enc_in  = data_utils.unNormalizeData( enc_in,  data_mean_2d, data_std_2d, dim_to_ignore_2d )
    dec_out = data_utils.unNormalizeData( dec_out, data_mean_3d, data_std_3d, dim_to_ignore_3d )
    poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )

    # Keep only the relevant dimensions
    dtu3d = np.hstack( (np.arange(3), dim_to_use_3d) ) if not(FLAGS.predict_14) else  dim_to_use_3d

    dec_out = dec_out[:, dtu3d]
    poses3d = poses3d[:, dtu3d]

    assert dec_out.shape[0] == FLAGS.batch_size
    assert poses3d.shape[0] == FLAGS.batch_size

    if FLAGS.procrustes:
      # Apply per-frame procrustes alignment if asked to do so
      for j in range(FLAGS.batch_size):
        gt  = np.reshape(dec_out[j,:],[-1,3])
        out = np.reshape(poses3d[j,:],[-1,3])
        _, Z, T, b, c = procrustes.compute_similarity_transform(gt,out,compute_optimal_scale=True)
        out = (b*out.dot(T))+c

        poses3d[j,:] = np.reshape(out,[-1,17*3] ) if not(FLAGS.predict_14) else np.reshape(out,[-1,14*3] )

    # Compute Euclidean distance error per joint
    sqerr = (poses3d - dec_out)**2 # Squared error between prediction and expected output
    dists = np.zeros( (sqerr.shape[0], n_joints) ) # Array with L2 error per joint in mm
    dist_idx = 0
    for k in np.arange(0, n_joints*3, 3):
      # Sum across X, Y, and Z dimensions to obtain L2 distance
      dists[:,dist_idx] = np.sqrt( np.sum( sqerr[:, k:k+3], axis=1 ))
      dist_idx = dist_idx + 1

    all_dists.append(dists)
    assert sqerr.shape[0] == FLAGS.batch_size

  step_time = (time.time() - start_time) / nbatches
  loss      = loss / nbatches

  all_dists = np.vstack( all_dists )

  # Error per joint and total for all passed batches
  joint_err = np.mean( all_dists, axis=0 )
  total_err = np.mean( all_dists )

  return total_err, joint_err, step_time, loss
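
All of these evaluate_batches variants lean on data_utils.unNormalizeData. A minimal sketch of the assumed behavior, based on how it is called here (the real implementation lives in the repo's data_utils): re-insert the ignored dimensions as zeros, then undo the z-score normalization.

import numpy as np

def unNormalizeData_sketch(normed, data_mean, data_std, dim_to_ignore):
    T, D = normed.shape[0], data_mean.shape[0]
    orig = np.zeros((T, D))                          # full-dimensional output
    dim_to_use = np.setdiff1d(np.arange(D), dim_to_ignore)
    orig[:, dim_to_use] = normed                     # re-insert used dimensions
    return orig * data_std + data_mean               # undo (x - mu) / sigma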
Code example #26
def sample():
    """Sample predictions for srnn's seeds"""

    if FLAGS.load <= 0:
        raise ValueError("Must give an iteration to read parameters from")

    actions = define_actions(FLAGS.action)

    # Use the CPU if asked to
    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:

        # Load all the data
        train_set, test_set, train_seq_len, test_seq_len, rp_stats, ch_stats, max_seq_len = read_all_data(
            actions, FLAGS.data_dir, not FLAGS.omit_one_hot)

        # === Create the model ===
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.size))
        model = create_model(sess, actions, max_seq_len)
        print("Model created")

        # Clean and create a new h5 file of samples
        SAMPLES_DNAME = 'samples'
        try:
            import shutil
            shutil.rmtree(SAMPLES_DNAME)
        except OSError:
            pass

        # Predict and save for each action
        for action in actions:

            inputs = test_set[:, 0]
            inputs = np.pad(inputs, [[0, 0], [0, 1], [0, 0]], 'constant')
            gts = test_set[:, 1]
            seq_length = test_seq_len

            forward_only = True
            sample = True
            pred_loss, pred_poses, _ = model.step(sess, inputs, gts,
                                                  seq_length, forward_only,
                                                  sample)

            pred_poses = np.array(pred_poses)

            shape = test_set.shape

            unNormalized_test_set = np.zeros(
                [shape[0], shape[1], shape[2], 63])
            unNormalized_pred_poses = np.zeros([shape[0], shape[2], 63])
            # denormalizes too
            #unNormalized_test_set[:,0] = data_utils.unNormalizeData(test_set[:,0], rp_stats, actions, False)
            #unNormalized_test_set[:,1] = data_utils.unNormalizeData(test_set[:,1], ch_stats, actions, False)
            unNormalized_pred_poses = data_utils.unNormalizeData(
                np.reshape(pred_poses,
                           [pred_poses.shape[1], pred_poses.shape[0], -1]),
                ch_stats, actions, False)
            unNormalized_test_set = read_all_data(actions,
                                                  FLAGS.data_dir,
                                                  not FLAGS.omit_one_hot,
                                                  GT=True)
            # Report the prediction loss
            print("loss: {}".format(np.mean(pred_loss)))
            # Save the samples (exist_ok so later actions don't crash on mkdir)
            os.makedirs(SAMPLES_DNAME, exist_ok=True)
            for i in range(len(unNormalized_pred_poses)):
                np.savez(SAMPLES_DNAME + '/sample{}.npz'.format(i),
                         real_person=unNormalized_test_set[i, 0],
                         character=unNormalized_pred_poses[i],
                         ground_truth=unNormalized_test_set[i, 1],
                         loss=pred_loss[i])
            '''
      # Compute and save the errors here
      mean_errors = np.zeros( (len(srnn_pred_expmap), srnn_pred_expmap[0].shape[0]) )

      for i in np.arange(8):

        eulerchannels_pred = srnn_pred_expmap[i]

        for j in np.arange( eulerchannels_pred.shape[0] ):
          for k in np.arange(0,eulerchannels_pred.shape[1],3):
            eulerchannels_pred[j,k:k+3] = data_utils.rotmat2euler(
              data_utils.expmap2rotmat( eulerchannels_pred[j,k:k+3] ))

        eulerchannels_pred[:,0:6] = 0

        # Pick only the dimensions with sufficient standard deviation. Others are ignored.
        idx_to_use = np.where( np.std( eulerchannels_pred, 0 ) > 1e-4 )[0]

        euc_error = np.power( srnn_gts_euler[action][i][:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
        euc_error = np.sum(euc_error, 1)
        euc_error = np.sqrt( euc_error )
        mean_errors[i,:] = euc_error

      mean_mean_errors = np.mean( mean_errors, 0 )
      print( action )
      print( ','.join(map(str, mean_mean_errors.tolist() )) )

      with h5py.File( SAMPLES_FNAME, 'a' ) as hf:
        node_name = 'mean_{0}_error'.format( action )
        hf.create_dataset( node_name, data=mean_mean_errors )
      '''
    return
Code example #27
def main(_):
    #ABS_DIR = os.path.abspath('.')
    posf = open(pose_output_dir, 'w')
    #smoothedf = open(ABS_DIR + '/tmp/smoothed.txt', 'w')

    smoothed = read_openpose_json()
    plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    pngName = 'gif_output/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)
    logger.info('writing gif_output/smooth_plot.png')

    if FLAGS.interpolation:
        logger.info("start interpolation")

        framerange = len( smoothed.keys() )
        joint_rows = 36
        array = np.concatenate(list(smoothed.values()))
        array_reshaped = np.reshape(array, (framerange, joint_rows) )
        print(array_reshaped[0,:])

        arm = [4,5,6,7,8,9,10,11]
        multiplier = FLAGS.multiplier
        multiplier_inv = 1/multiplier

        out_array = np.array([])
        for row in range(joint_rows):
            x = []
            for frame in range(framerange):
                x.append( array_reshaped[frame, row] )

            frames = range( framerange )
            frame_resampled = np.arange(0, framerange, multiplier)
            spl = UnivariateSpline(frames, x, k=3)
            #relative smooth factor based on jnt anim curve
            min_x, max_x = min(x), max(x)
            smooth_fac = max_x - min_x
            if row in arm:
                smooth_resamp = 1
            else:
                smooth_resamp = 75
            smooth_fac = smooth_fac * smooth_resamp
            spl.set_smoothing_factor( float(smooth_fac) )
            xnew = spl(frame_resampled)

            out_array = np.append(out_array, xnew)

        logger.info("done interpolating. reshaping {0} frames,  please wait!!".format(framerange))

        a = np.array([])
        for frame in range( int( framerange * multiplier_inv ) ):
            jnt_array = []
            for jnt in range(joint_rows):
                jnt_array.append( out_array[ jnt * int(framerange * multiplier_inv) + frame] )
            a = np.append(a, jnt_array)

        a = np.reshape(a, (int(framerange * multiplier_inv), joint_rows))
        out_array = a

        interpolate_smoothed = {}
        for frame in range( int(framerange * multiplier_inv) ):
            interpolate_smoothed[frame] = list( out_array[frame] )

        plt.figure(3)
        smoothed = interpolate_smoothed
        interpolate_curves_plot = show_anim_curves(smoothed, plt)
        pngName = 'gif_output/interpolate_{0}.png'.format(smooth_resamp)
        interpolate_curves_plot.savefig(pngName)
        logger.info('writing {0}'.format(pngName))
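
        # Note on the resampling above: frame_resampled holds framerange /
        # multiplier samples, so a multiplier below 1 upsamples the clip; the
        # spline smoothing factor scales with each joint's animation range,
        # and arm joints keep a tiny factor (1 vs 75) to preserve fast motion.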

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 1}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(
            device_count=device_count,
            allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        iter_range = len(smoothed.keys())
        for n, (frame, xy) in enumerate(smoothed.items()):
            logger.info("calc frame {0}/{1}".format(frame, iter_range))
            # map list into np array
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]
            _data = joints_array[0]
            # map all body parts to the 3d-pose-baseline joint order
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] + enc_in[0][6 * 2 + j]) / 2
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] + enc_in[0][12 * 2 + j]) / 2
                # Thorax
                enc_in[0][13 * 2 + j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 + j]

            # remember the spine position (x at index 24, y at index 25) to
            # re-center the 3d pose later
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

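            # keep only the 2d dimensions the model was trained on, then
            # z-score normalize with the training-set statistics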
            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            _, _, poses3d = model.step(sess, enc_in, dec_out, dp, isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d, data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append( poses3d )
            enc_in, poses3d = map( np.vstack, [enc_in, all_poses_3d] )
            subplot_idx, exidx = 1, 1
            _max = 0
            _min = 10000

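            # swap each joint's y and z so the vertical axis matches the 3d
            # plot's z-up convention, tracking the vertical extremes as we go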
            for i in range(poses3d.shape[0]):
                for j in range(32):
                    tmp = poses3d[i][j * 3 + 2]
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

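            # mirror the vertical axis and re-center the pose on the stored spine
            # position (the 630/500 offsets appear tuned to the source frame size)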
            for i in range(poses3d.shape[0]):
                for j in range(32):
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                    poses3d[i][j * 3] += (spine_x - 630)
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, -70)
            if np.min(poses3d) < -1000:
                try:
                    poses3d = before_pose  # fall back to the previous frame's pose
                except NameError:
                    pass  # no previous pose on the very first frame

            p3d = poses3d
            #viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")

            # pngName = 'png/pose_frame_{0}.png'.format(str(frame).zfill(12))
            # plt.savefig(pngName)
            # if FLAGS.write_gif:
            #     png_lib.append(imageio.imread(pngName))
            before_pose = poses3d
            write_pos_data(poses3d, ax, posf)
        posf.close()
Code example #28
0
def generate_3dpose_eccv18():

    # Generate directory for CSV prediction files
    if FLAGS.for_submission:
        predict_step_dir = FLAGS.prediction_dir + "_" + str(FLAGS.load)
    else:
        predict_step_dir = FLAGS.prediction_dir + "_Val_" + str(FLAGS.load)

    if not os.path.isdir(predict_step_dir):
        os.makedirs(predict_step_dir)
    actions = data_utils.define_actions(FLAGS.action)

    # Load 3d & 2d data
    _, _, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d = data_utils.read_data_eccv18(
        FLAGS.data_dir,
        FLAGS.centering_2d,
        FLAGS.detector_2d,
        FLAGS.idx_split,
        dim=3,
        for_submission=FLAGS.for_submission)

    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_data_eccv18(
        FLAGS.data_dir,
        FLAGS.centering_2d,
        FLAGS.detector_2d,
        FLAGS.idx_split,
        dim=2,
        for_submission=FLAGS.for_submission)

    # Load test filename_list (Unshuffled)
    file_list = []
    if not FLAGS.for_submission:
        split_path = os.path.join(FLAGS.data_dir, "split",
                                  'Val_list_' + FLAGS.detector_2d + '.csv')
    else:
        split_path = os.path.join(FLAGS.data_dir, "split",
                                  'Test_list_' + FLAGS.detector_2d + '.csv')
    with open(split_path, 'r') as f:
        csvReader = csv.reader(f)
        for row in csvReader:
            # file_list.append(row[0].split('.jp')[0])
            file_list.append(row)

    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    idx_file = 0
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.linear_size))
        model = create_model(sess,
                             actions,
                             FLAGS.batch_size,
                             FLAGS.centering_2d,
                             for_eccv18=True)
        print("Model loaded")

        n_joints = 17 if not (FLAGS.predict_14) else 14
        encoder_inputs = model.get_all_batches_2D_eccv18(test_set_2d)
        nbatches = len(encoder_inputs)

        print("Model (%d step) created" % FLAGS.load)
        g_step = model.global_step.eval()
        print("g_step: ", g_step)

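        # run the encoder-only forward pass batch by batch and write one CSV
        # prediction file per sample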
        for i in range(nbatches):

            enc_in = encoder_inputs[i]
            dp = 1.0  # dropout keep probability is always 1 at test time
            poses3d = model.step_only_enc(sess, enc_in, dp, isTraining=False)

            # denormalize
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)

            # Keep only the relevant dimensions
            dtu3d = np.hstack(
                (np.arange(3),
                 dim_to_use_3d)) if not (FLAGS.predict_14) else dim_to_use_3d
            poses3d = poses3d[:, dtu3d]

            batch_size = poses3d.shape[0]

            for j in range(batch_size):
                pose3d_sample = poses3d[j].reshape(n_joints, -1)
                csv_path = os.path.join(predict_step_dir,
                                        file_list[idx_file][0] + ".csv")
                print(csv_path)
                np.savetxt(csv_path, pose3d_sample, delimiter=",", fmt='%.3f')
                idx_file += 1

    # convert csv files to a json file
    src_path = predict_step_dir
    out_path = src_path + "_json"

    csv2json(src_path, out_path)
Code example #29
0
def predict_3d(poses, estimator):
    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

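    # reuse the estimator's persistent session and graph, presumably so the
    # model is not rebuilt or reloaded on every call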
    with estimator.persistent_sess.as_default():
        with estimator.graph.as_default():
            _3d_predictions = []
            for n, xy in enumerate(poses):
                joints_array = np.zeros((1, 36))
                joints_array[0] = [float(xy[o]) for o in range(36)]

                _data = joints_array[0]
                # map all body parts to the 3d-pose-baseline joint order
                for i in range(len(order)):
                    for j in range(2):
                        # create encoder input
                        enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
                for j in range(2):
                    # Hip
                    enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                            enc_in[0][6 * 2 + j]) / 2
                    # Neck/Nose
                    enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                             enc_in[0][12 * 2 + j]) / 2
                    # Thorax
                    enc_in[0][13 * 2 + j] = (2 * enc_in[0][12 * 2 + j] -
                                             enc_in[0][14 * 2 + j])

                # remember the spine position (x at index 24, y at index 25) to
                # re-center the pose later
                spine_x = enc_in[0][24]
                spine_y = enc_in[0][25]

                enc_in = enc_in[:, estimator.dim_to_use_2d]
                mu = estimator.data_mean_2d[estimator.dim_to_use_2d]
                stddev = estimator.data_std_2d[estimator.dim_to_use_2d]
                enc_in = np.divide((enc_in - mu), stddev)

                # dropout keep probability is 1.0 at inference; the decoder
                # target is a dummy zero vector
                dp = 1.0
                dec_out = np.zeros((1, 48))
                _, _, poses3d = estimator.model.step(estimator.persistent_sess,
                                                     enc_in,
                                                     dec_out,
                                                     dp,
                                                     isTraining=False)
                all_poses_3d = []
                enc_in = data_utils.unNormalizeData(enc_in,
                                                    estimator.data_mean_2d,
                                                    estimator.data_std_2d,
                                                    estimator.dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(
                    poses3d, estimator.data_mean_3d, estimator.data_std_3d,
                    estimator.dim_to_ignore_3d)
                all_poses_3d.append(poses3d)
                enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
                _max = 0
                _min = 10000

                # swap each joint's y and z axes and track the vertical extremes
                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        tmp = poses3d[i][j * 3 + 2]
                        poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                        poses3d[i][j * 3 + 1] = tmp
                        if poses3d[i][j * 3 + 2] > _max:
                            _max = poses3d[i][j * 3 + 2]
                        if poses3d[i][j * 3 + 2] < _min:
                            _min = poses3d[i][j * 3 + 2]

                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                        poses3d[i][j * 3] += (spine_x - 630)
                        poses3d[i][j * 3 + 2] += (500 - spine_y)

                # np.min(poses3d) acts as the frame's score
                if False:  # FLAGS.cache_on_fail ;; TODO: add a rule to avoid inserting the keypoint
                    if np.min(poses3d) < -1000:
                        poses3d = before_pose  # NOTE: before_pose is never set in this function

                x, y, z = [[] for _ in range(3)]
                if poses3d is not None:
                    to_export = poses3d.tolist()[0]
                else:
                    to_export = [0.0 for _ in range(96)]
                for o in range(0, len(to_export), 3):
                    x.append(to_export[o])
                    y.append(to_export[o + 1])
                    z.append(to_export[o + 2])

                export_units = {}
                for jnt_index, (_x, _y, _z) in enumerate(zip(x, y, z)):
                    export_units[jnt_index] = [_x, _y, _z]
                _3d_predictions.append(export_units)
            return _3d_predictions
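
# A minimal usage sketch (hypothetical, not part of the original snippet):
# `estimator` is assumed to bundle the trained model, a persistent TF
# session/graph, and the 2d/3d normalization statistics referenced above.
import numpy as np

poses = [np.random.uniform(0, 640, size=36) for _ in range(5)]  # fake 2d keypoints, 18 joints * (x, y)
predictions = predict_3d(poses, estimator)  # -> list of {joint_index: [x, y, z]} per frame
print(len(predictions), "frames predicted")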
Code example #30
0
def main(_):
    done = []

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 0}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        rows = 0
        filename = "Realtimedata.xlsx"
        workbook = xlsxwriter.Workbook(filename)
        worksheet = workbook.add_worksheet()
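        # each processed frame appends one row of 3d joint coordinates to the worksheet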
        while True:
            key = cv2.waitKey(1) & 0xFF
            #logger.info("start reading data")
            # check for other file types
            list_of_files = glob.iglob("{0}/*".format(
                openpose_output_dir))  # lazy iterator over the OpenPose output dir
            latest_file = ""
            try:
                latest_file = max(list_of_files, key=os.path.getctime)
            except ValueError:
                # empty dir; keep polling until a file appears
                pass
            if not latest_file:
                continue
            try:
                _file = file_name = latest_file
                print(latest_file)
                if not os.path.isfile(_file):
                    raise Exception("No file found!!, {0}".format(_file))
                data = json.load(open(_file))
                #take first person
                _data = data["people"][0]["pose_keypoints_2d"]
                xy = []
                if len(_data) >= 53:
                    #openpose incl. confidence score
                    #ignore confidence score
                    for o in range(0, len(_data), 3):
                        xy.append(_data[o])
                        xy.append(_data[o + 1])
                else:
                    #tf-pose-estimation
                    xy = _data

                frame_indx = re.findall(r"(\d+)", file_name)
                frame = int(frame_indx[0])

                joints_array = np.zeros((1, 36))
                for o in range(len(joints_array[0])):
                    #feed array with xy array
                    joints_array[0][o] = xy[o]
                _data = joints_array[0]
                # map all body parts to the 3d-pose-baseline joint order
                for i in range(len(order)):
                    for j in range(2):
                        # create encoder input
                        enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
                for j in range(2):
                    # Hip
                    enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                            enc_in[0][6 * 2 + j]) / 2
                    # Neck/Nose
                    enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                             enc_in[0][12 * 2 + j]) / 2
                    # Thorax
                    enc_in[0][13 * 2 + j] = (2 * enc_in[0][12 * 2 + j] -
                                             enc_in[0][14 * 2 + j])

                # remember the spine position (x at index 24, y at index 25) to
                # re-center the pose later
                spine_x = enc_in[0][24]
                spine_y = enc_in[0][25]

                enc_in = enc_in[:, dim_to_use_2d]
                mu = data_mean_2d[dim_to_use_2d]
                stddev = data_std_2d[dim_to_use_2d]
                enc_in = np.divide((enc_in - mu), stddev)

                dp = 1.0
                dec_out = np.zeros((1, 48))
                _, _, poses3d = model.step(sess,
                                           enc_in,
                                           dec_out,
                                           dp,
                                           isTraining=False)
                all_poses_3d = []
                enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                    data_std_2d,
                                                    dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                     data_std_3d,
                                                     dim_to_ignore_3d)
                gs1 = gridspec.GridSpec(1, 1)
                gs1.update(wspace=-0.00,
                           hspace=0.05)  # set the spacing between axes.
                plt.axis('off')
                all_poses_3d.append(poses3d)
                enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
                subplot_idx, exidx = 1, 1
                _max = 0
                _min = 10000

                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        tmp = poses3d[i][j * 3 + 2]
                        poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                        poses3d[i][j * 3 + 1] = tmp
                        if poses3d[i][j * 3 + 2] > _max:
                            _max = poses3d[i][j * 3 + 2]
                        if poses3d[i][j * 3 + 2] < _min:
                            _min = poses3d[i][j * 3 + 2]

                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                        poses3d[i][j * 3] += (spine_x - 630)
                        poses3d[i][j * 3 + 2] += (500 - spine_y)

                # gait/point-cloud export: min_vex, p_vex, points, gait_list1, x,
                # y and z are produced elsewhere in the original script and are
                # not defined in this snippet; skip the export when they are
                # missing so the plotting below still runs
                if 'min_vex' in globals():
                    i = 0
                    for val in min_vex:
                        gait_list1.append(val[0])
                        gait_list1.append(val[1])
                        gait_list1.append(p_vex[i][0])
                        gait_list1.append(p_vex[i][1])
                        gait_list1.append(p_vex[i][2])

                        points.append(" %f %f %f %d %d %d 0\n" %
                                      (p_vex[i][0], p_vex[i][1], p_vex[i][2], 0, 255, 0))
                        x.append(p_vex[i][0])
                        y.append(p_vex[i][1])
                        z.append(p_vex[i][2])
                        i = i + 1

                # Plot 3d predictions
                ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                ax.view_init(18, -70)
                logger.debug(np.min(poses3d))
                if np.min(poses3d) < -1000 and frame != 0:
                    poses3d = before_pose

                p3d = poses3d

                viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")
                col = 0
                for val in p3d[0]:
                    worksheet.write(rows, col, val)
                    col += 1
                rows += 1
                before_pose = poses3d
                pngName = '{}_keypoints.png'.format(str(frame))
                plt.savefig(pngName)

                #plt.show()
                img = cv2.imread(pngName, 0)
                rect_cpy = img.copy()
                cv2.imshow('3d-pose-realtime', rect_cpy)
                done.append(file_name)
                if key == ord('q'):
                    break
            except Exception as e:
                print(e)

        sess.close()
Code example #31
0
def sample():
  """Get samples from a model and visualize them"""

  actions = data_utils.define_actions( FLAGS.action )

  # Load camera parameters
  SUBJECT_IDS = [1,5,6,7,8,9,11]
  rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

  # Load 3d data and load (or create) 2d projections
  train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
    actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14 )

  if FLAGS.use_sh:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
  else:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data( actions, FLAGS.data_dir, rcams )
  print( "done reading and normalizing data." )

  device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
  with tf.Session(config=tf.ConfigProto( device_count = device_count )) as sess:
    # === Create the model ===
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.linear_size))
    batch_size = 128
    model = create_model(sess, actions, batch_size)
    print("Model loaded")

    for key2d in test_set_2d.keys():

      (subj, b, fname) = key2d
      print( "Subject: {}, action: {}, fname: {}".format(subj, b, fname) )

      # keys should be the same if 3d is in camera coordinates
      key3d = key2d if FLAGS.camera_frame else (subj, b, '{0}.h5'.format(fname.split('.')[0]))
      key3d = (subj, b, fname[:-3]) if (fname.endswith('-sh')) and FLAGS.camera_frame else key3d

      enc_in  = test_set_2d[ key2d ]
      n2d, _ = enc_in.shape
      dec_out = test_set_3d[ key3d ]
      n3d, _ = dec_out.shape
      assert n2d == n3d

      # Split into about-same-size batches
      enc_in   = np.array_split( enc_in,  n2d // batch_size )
      dec_out  = np.array_split( dec_out, n3d // batch_size )
      all_poses_3d = []

      for bidx in range( len(enc_in) ):

        # Dropout probability 0 (keep probability 1) for sampling
        dp = 1.0
        _, _, poses3d = model.step(sess, enc_in[bidx], dec_out[bidx], dp, isTraining=False)

        # denormalize
        enc_in[bidx]  = data_utils.unNormalizeData(  enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d )
        dec_out[bidx] = data_utils.unNormalizeData( dec_out[bidx], data_mean_3d, data_std_3d, dim_to_ignore_3d )
        poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )
        all_poses_3d.append( poses3d )

      # Put all the poses together
      enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, all_poses_3d] )

      # Convert back to world coordinates
      if FLAGS.camera_frame:
        N_CAMERAS = 4
        N_JOINTS_H36M = 32

        # Add global position back
        dec_out = dec_out + np.tile( test_root_positions[ key3d ], [1,N_JOINTS_H36M] )

        # Load the appropriate camera
        subj, _, sname = key3d

        cname = sname.split('.')[1] # <-- camera name
        scams = {(subj,c+1): rcams[(subj,c+1)] for c in range(N_CAMERAS)} # cams of this subject
        scam_idx = [scams[(subj,c+1)][-1] for c in range(N_CAMERAS)].index( cname ) # index of camera used
        the_cam  = scams[(subj, scam_idx+1)] # <-- the camera used
        R, T, f, c, k, p, name = the_cam
        assert name == cname

        def cam2world_centered(data_3d_camframe):
          data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
          data_3d_worldframe = data_3d_worldframe.reshape((-1, N_JOINTS_H36M*3))
          # subtract root translation
          return data_3d_worldframe - np.tile( data_3d_worldframe[:,:3], (1,N_JOINTS_H36M) )

        # Apply inverse rotation and translation
        dec_out = cam2world_centered(dec_out)
        poses3d = cam2world_centered(poses3d)

  # Grab a random batch to visualize
  enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, poses3d] )
  idx = np.random.permutation( enc_in.shape[0] )
  enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

  # Visualize random samples
  import matplotlib.gridspec as gridspec

  # 1080p = 1920 x 1080
  fig = plt.figure( figsize=(19.2, 10.8) )

  gs1 = gridspec.GridSpec(5, 9) # 5 rows, 9 columns
  gs1.update(wspace=-0.00, hspace=0.05) # set the spacing between axes.
  plt.axis('off')

  subplot_idx, exidx = 1, 1
  nsamples = 15
  for i in np.arange( nsamples ):

    # Plot 2d pose
    ax1 = plt.subplot(gs1[subplot_idx-1])
    p2d = enc_in[exidx,:]
    viz.show2Dpose( p2d, ax1 )
    ax1.invert_yaxis()

    # Plot 3d gt
    ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
    p3d = dec_out[exidx,:]
    viz.show3Dpose( p3d, ax2 )

    # Plot 3d predictions
    ax3 = plt.subplot(gs1[subplot_idx+1], projection='3d')
    p3d = poses3d[exidx,:]
    viz.show3Dpose( p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71" )

    exidx = exidx + 1
    subplot_idx = subplot_idx + 3

  plt.show()
Code example #32
0
def predict(convert_to_world):
    """
  Run the model and predict pose data

  convert_to_world is a flag indicating whether to convert the data back to 
  world coordinates from the camera frame.
  """

    actions = data_utils.define_actions(FLAGS.action)

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    if FLAGS.use_sh:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
            actions, FLAGS.data_dir)
    else:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data(
            actions, FLAGS.data_dir, rcams)
    print("done reading and normalizing data.")

    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.linear_size))
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        print("Model loaded")

        for key2d in test_set_2d.keys():

            (subj, b, fname) = key2d
            print("Subject: {}, action: {}, fname: {}".format(subj, b, fname))

            # keys should be the same if 3d is in camera coordinates
            key3d = key2d if FLAGS.camera_frame else (subj, b, '{0}.h5'.format(
                fname.split('.')[0]))
            key3d = (subj, b, fname[:-3]) if (
                fname.endswith('-sh')) and FLAGS.camera_frame else key3d

            enc_in = test_set_2d[key2d]
            n2d, _ = enc_in.shape
            dec_out = test_set_3d[key3d]
            n3d, _ = dec_out.shape
            assert n2d == n3d

            # Generate the loss pairs
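            # (model.num_loss_pairs random joint-index pairs are sampled per
            # frame, presumably feeding a pairwise-distance term in the loss)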
            loss_pairs = None
            if model.num_loss_pairs:
                num_pts = int(model.HUMAN_3D_SIZE / 3)
                pairs = np.asarray([(i, j) for i in range(num_pts)
                                    for j in range(num_pts) if i < j])
                pair_idxs = [
                    np.random.choice(len(pairs),
                                     model.num_loss_pairs,
                                     replace=False) for _ in range(n3d)
                ]
                loss_pairs = np.take(pairs, pair_idxs, axis=0)
                loss_pairs = np.array_split(loss_pairs, n2d // batch_size)

            # Split into about-same-size batches
            enc_in = np.array_split(enc_in, n2d // batch_size)
            dec_out = np.array_split(dec_out, n3d // batch_size)
            all_poses_3d = []

            for bidx in range(len(enc_in)):

                # Dropout probability 0 (keep probability 1) for sampling
                dp = 1.0
                if model.num_loss_pairs:
                    _, _, poses3d = model.step(sess,
                                               enc_in[bidx],
                                               dec_out[bidx],
                                               dp,
                                               loss_pairs=loss_pairs[bidx],
                                               isTraining=False)
                else:
                    _, _, poses3d = model.step(sess,
                                               enc_in[bidx],
                                               dec_out[bidx],
                                               dp,
                                               isTraining=False)

                # denormalize
                enc_in[bidx] = data_utils.unNormalizeData(
                    enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d)
                dec_out[bidx] = data_utils.unNormalizeData(
                    dec_out[bidx], data_mean_3d, data_std_3d, dim_to_ignore_3d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                     data_std_3d,
                                                     dim_to_ignore_3d)
                all_poses_3d.append(poses3d)

            # Put all the poses together
            enc_in, dec_out, poses3d = map(np.vstack,
                                           [enc_in, dec_out, all_poses_3d])

            if convert_to_world:
                # Convert back to world coordinates
                if FLAGS.camera_frame:
                    N_CAMERAS = 4
                    N_JOINTS_H36M = 32

                    # Add global position back
                    dec_out = dec_out + np.tile(test_root_positions[key3d],
                                                [1, N_JOINTS_H36M])

                    # Load the appropriate camera
                    subj, _, sname = key3d

                    cname = sname.split('.')[1]  # <-- camera name
                    scams = {(subj, c + 1): rcams[(subj, c + 1)]
                             for c in range(N_CAMERAS)}  # cams of this subject
                    scam_idx = [
                        scams[(subj, c + 1)][-1] for c in range(N_CAMERAS)
                    ].index(cname)  # index of camera used
                    the_cam = scams[(subj,
                                     scam_idx + 1)]  # <-- the camera used
                    R, T, f, c, k, p, name = the_cam
                    assert name == cname

                    def cam2world_centered(data_3d_camframe):
                        data_3d_worldframe = cameras.camera_to_world_frame(
                            data_3d_camframe.reshape((-1, 3)), R, T)
                        data_3d_worldframe = data_3d_worldframe.reshape(
                            (-1, N_JOINTS_H36M * 3))
                        # subtract root translation
                        return data_3d_worldframe - np.tile(
                            data_3d_worldframe[:, :3], (1, N_JOINTS_H36M))

                    # Apply inverse rotation and translation
                    dec_out = cam2world_centered(dec_out)
                    poses3d = cam2world_centered(poses3d)

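    # splice: start from the ground-truth poses and overwrite only each joint's
    # second coordinate (columns 1::3) with the predicted value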
    poses3dnew = dec_out.copy()
    poses3dnew[:, 1::3] = poses3d[:, 1::3]
    poses3d = poses3dnew
    return enc_in, dec_out, poses3d
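
# Hedged usage sketch, assuming the FLAGS configuration, data files, and model
# checkpoint referenced above are in place:
if __name__ == "__main__":
    # convert_to_world=True maps predictions from the camera frame back to
    # root-centered world coordinates
    enc_in, dec_out, poses3d = predict(convert_to_world=True)
    print("2d in:", enc_in.shape, "gt 3d:", dec_out.shape, "pred 3d:", poses3d.shape)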