Code example #1
    def update(frame_enumerated):
        img_i, frame = frame_enumerated
        ax_img.set_data(img.imread(image_paths[img_i]))

        ax_2d.clear()
        viz.show2Dpose(np.reshape(points_2d[0][img_i], (64, 1)), ax_2d)
        ax_2d.invert_yaxis()

        ax.clear()
        viz.show3Dpose(frame, ax)
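This updater matches matplotlib's FuncAnimation callback contract, with each frame delivered as an (index, pose) pair. A minimal driver sketch (not part of the original source), assuming fig, image_paths, and a poses_3d sequence exist in the enclosing script:

# Sketch only: fig, poses_3d, and image_paths are assumed names from the
# surrounding script; saving to mp4 requires ffmpeg.
from matplotlib.animation import FuncAnimation

anim = FuncAnimation(fig,
                     update,
                     frames=enumerate(poses_3d),  # yields (img_i, frame) pairs
                     save_count=len(image_paths),
                     interval=1000 / 30)  # ~30 fps
anim.save('pose_animation.mp4')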
Code example #2
def sample():
    """Get samples from a model and visualize them"""
    enc_in, dec_out, poses3d = predict(True)

    # poses3dnew = []
    # for p, e in zip(poses3d, enc_in):
    #   poses3dnew.append(np.insert(e, range(1, len(e)+1, 2), p[1::3]))
    # poses3d = poses3dnew

    # poses3dnew = dec_out.copy()
    # poses3dnew[:,1::3] = poses3d[:,1::3]
    # poses3d = poses3dnew

    # Grab a random batch to visualize
    enc_in, dec_out, poses3d = map(np.vstack, [enc_in, dec_out, poses3d])
    idx = np.random.permutation(enc_in.shape[0])
    enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

    # Visualize random samples
    import matplotlib.gridspec as gridspec

    # 1080p	= 1,920 x 1,080
    fig = plt.figure(figsize=(19.2, 10.8))

    gs1 = gridspec.GridSpec(5, 9)  # 5 rows, 9 columns
    gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
    plt.axis('off')

    subplot_idx, exidx = 1, 1
    nsamples = 15
    for i in np.arange(nsamples):

        # Plot 2d pose
        ax1 = plt.subplot(gs1[subplot_idx - 1])
        p2d = enc_in[exidx, :]
        viz.show2Dpose(p2d, ax1)
        ax1.invert_yaxis()

        # Plot 3d gt
        ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
        p3d = dec_out[exidx, :]
        viz.show3Dpose(p3d, ax2)

        # Plot 3d predictions
        ax3 = plt.subplot(gs1[subplot_idx + 1], projection='3d')
        p3d = poses3d[exidx, :]
        viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")

        exidx = exidx + 1
        subplot_idx = subplot_idx + 3

    plt.savefig('batch_poses')
    plt.show()

    code.interact(local=locals())  # requires "import code" (stdlib) at module scope
Code example #3
def plot_baseline_viz(pts, dataset='baseline'):
    '''
    A small wrapper around the visualizer from the baseline model.
    It draws just the skeleton sticks and a ground plane.

    Enables different datasets to be visualized: pass in a skeleton
    representation via the I and J index arrays and the left/right
    distinguishing list LR.
    '''
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    if dataset == 'baseline':
        show3Dpose(np.array(pts), ax)
    elif dataset == 'original':
        show3Dpose(np.array(pts), ax, I=I_ORIGINAL, J=J_ORIGINAL, LR=LR_ORIGINAL)
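A hedged usage sketch: pose_vector is an illustrative name for a flattened pose array as accepted by show3Dpose, and I_ORIGINAL / J_ORIGINAL / LR_ORIGINAL are assumed module-level arrays as referenced above:

import matplotlib.pyplot as plt

pts = pose_vector                           # illustrative input, not from the source
plot_baseline_viz(pts)                      # default baseline skeleton
plot_baseline_viz(pts, dataset='original')  # custom I/J/LR skeleton
plt.show()                                  # the wrapper draws but does not show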
Code example #4
def predict_batch(data, center, scale, batch_size=128):
  """
  Input:
    data: matrix with shape (#frames, 32)
    center: length-2 array with center coordinate
    scale: stacked hourglass scale parameter
  Returns:
    points: array with shape (#frames, 32, 3)
  """

  fig = plt.figure()
  ax_2d = fig.add_subplot(121)
  ax_3d = fig.add_subplot(122, projection='3d')

  data = np.array(data)

  # Wrap in another matrix if there's only a single clip
  if len(data.shape) == 1:
    data = np.array([data])

  if data.shape[1] != 32:
    raise ValueError("Expected data shape to be (?, 32), got " + str(data.shape))

  data, destination_indices = data_utils.process_stacked_hourglass(data)
  normalized_data, data_mean_3d, data_std_3d, dim_to_ignore_3d = normalize_batch(data)
  viz.show2Dpose(np.reshape(data[30], (64, 1)), ax_2d)  # preview a hard-coded frame (assumes at least 31 frames)
  ax_2d.invert_yaxis()

  with tf.Session() as sess:
    model = load_model(sess, batch_size)
    dp = 1.0
    dec_out = np.zeros((normalized_data.shape[0], 48))
    _, _, points = model.step(sess, normalized_data, dec_out, dp, isTraining=False)

    points = data_utils.unNormalizeData(points, data_mean_3d, data_std_3d, dim_to_ignore_3d)
    viz.show3Dpose(points[30,:], ax_3d)
    plt.show()
    points = np.reshape(points, (-1, 32, 3))

    return points
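A usage sketch under the docstring's input contract; the file name and the center/scale values are illustrative, not from the source (the body above previews index 30, so at least 31 frames are assumed):

detections_2d = np.load('hourglass_keypoints.npy')  # illustrative; shape (n_frames, 32)
points_3d = predict_batch(detections_2d,
                          center=np.array([960, 540]),  # image center in pixels
                          scale=1.0)
print(points_3d.shape)  # (n_frames, 32, 3)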
Code example #5
def make_3dpoints(path):
    i = 0
    path = Path(path)
    files = glob(str(path / '*.npy'))

    for name in files:
        t = np.load(name)
        if i == 0:  # for the first frame of the video alone
            t, ref = normalise(t, i, 0)
            t = scale(t)
            i = i + 1
        else:  # for the rest of the frames, since there is a reference now
            t, ref = normalise(t, i, ref)
            t = scale(t)

        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        viz.show3Dpose(t.flatten(), ax)
        plt.savefig(os.path.splitext(name)[0] + '.png')
        plt.close()
        os.remove(name)
        np.save(
            name, t
        )  # Overwrite the 3d pose with the normalized (rotation + scale) 3d pose
Code example #6
def main(_):
    # Timestamp for output
    now_str = "{0:%Y%m%d_%H%M%S}".format(datetime.datetime.now())

    logger.debug("FLAGS.person_idx={0}".format(FLAGS.person_idx))

    # The directory layout changed, so write output to the same level as the JSON output (2/9)
    if FLAGS.output is None:
        subdir = openpose_output_dir
    else:
        subdir = FLAGS.output

    os.makedirs(subdir, exist_ok=True)

    frame3d_dir = "{0}/frame3d".format(subdir)
    if os.path.exists(frame3d_dir):
        # If the directory already exists, remove it first
        shutil.rmtree(frame3d_dir)
    os.makedirs(frame3d_dir)

    # Joint position data file
    posf = open(subdir + '/pos.txt', 'w')

    # Normalized OpenPose position data file
    smoothedf = open(subdir + '/smoothed.txt', 'w')

    # Start frame index file
    start_frame_f = open(subdir + '/start_frame.txt', 'w')

    idx = FLAGS.person_idx - 1
    start_frame_index, smoothed = openpose_utils.read_openpose_json(
        "{0}/json".format(openpose_output_dir), idx, FLAGS.verbose == 3)

    # Save the start frame index
    start_frame_f.write(str(start_frame_index))
    start_frame_f.close()

    logger.info("reading and smoothing done. start feeding 3d-pose-baseline")
    logger.debug(smoothed)
    plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    pngName = subdir + '/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    # before_pose = None
    device_count = {"GPU": 1}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)

        # Measure the Neck-to-Hip distance to scale the input image
        length_neck2hip_mean = get_length_neck2hip_mean(smoothed)

        # Lists for storing the 2D and 3D results
        poses3d_list = []
        poses2d_list = []

        # Lists for computing the 2D-to-3D scale ratio
        length_2d_list = []
        length_3d_list = []

        for n, (frame, xy) in enumerate(smoothed.items()):
            if frame % 200 == 0:
                logger.info("calc idx {0}, frame {1}".format(idx, frame))
            #if frame % 300 == 0:
            #    print(frame)

            # map list into np array
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]
            _data = joints_array[0]

            smoothedf.write(' '.join(map(str, _data)))
            smoothedf.write("\n")

            # mapping all body parts to the 3d-pose-baseline format
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                        enc_in[0][6 * 2 + j]) / 2
                # Thorax
                # 3d-pose-baseline's Thorax sits slightly above OpenPose's Neck, so adjust it upward
                enc_in[0][13 * 2 +
                          j] = 1.1 * enc_in[0][13 * 2 +
                                               j] - 0.1 * enc_in[0][0 * 2 + j]
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                         enc_in[0][13 * 2 + j]) / 2
                # Spine
                enc_in[0][12 * 2 + j] = (enc_in[0][0 * 2 + j] +
                                         enc_in[0][13 * 2 + j]) / 2

            # set spine
            # spine_x = enc_in[0][24]
            # spine_y = enc_in[0][25]

            # logger.debug("enc_in - 1")
            # logger.debug(enc_in)

            poses2d = enc_in

            # Scale up the input data so the Neck-to-Hip distance is about 110 pixels
            # (accuracy drops when the scale differs greatly from the training data)
            input_scaling_factor = 110 / length_neck2hip_mean
            enc_in = enc_in * input_scaling_factor

            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]
            _, _, poses3d = model.step(sess,
                                       enc_in,
                                       dec_out,
                                       dp,
                                       isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00,
                       hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append(poses3d)
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
            subplot_idx, exidx = 1, 1

            poses3d_list.append(poses3d[0])
            poses2d_list.append(poses2d[0])

            length_2d_list.append(sum_length_xy(poses2d[0], 2))
            length_3d_list.append(sum_length_xy(poses3d[0], 3))

        # Compute the 3D positions from the OpenPose (x, y) and the Baseline z

        # Scale ratio between the OpenPose and Baseline outputs,
        # estimated by comparing the total skeleton lengths
        # A 91-frame moving average stabilizes the result
        move_ave_length_2d = calc_move_average(length_2d_list, 91)
        move_ave_length_3d = calc_move_average(length_3d_list, 91)
        move_ave_length_2d[move_ave_length_2d == 0] = 1  # avoid division by zero
        xy_scale = move_ave_length_3d / move_ave_length_2d

        # The following four values are provisional; small deviations should not hurt accuracy much
        center_2d_x, center_2d_y = camera_center(
            openpose_output_dir)  # center of the video (half its resolution)
        logger.info("center_2d_x {0}".format(center_2d_x))
        z_distance = 4000  # distance from camera to body (mm), used in the perspective calculation
        camera_incline = 0  # downward tilt of the camera from horizontal (degrees)

        teacher_camera_incline = 13  # camera tilt of the training data (Human3.6M), about 13 degrees downward on average

        for frame, (poses3d,
                    poses2d) in enumerate(zip(poses3d_list, poses2d_list)):

            # To reduce error, compute the 3D position from the OpenPose (x, y) and the 3d-pose-baseline z

            poses3d_op_xy = np.zeros(96)
            for i in [0, 1, 2, 3, 6, 7, 8, 13, 15, 17, 18, 19, 25, 26, 27]:
                # Offset from the Hip
                dy = poses3d[i * 3 + 1] - poses3d[0 * 3 + 1]
                dz = poses3d[i * 3 + 2] - poses3d[0 * 3 + 2]
                # Correct for the camera tilt of the training data
                dz = dz - dy * math.tan(
                    math.radians(teacher_camera_incline - camera_incline))
                # x/y magnification due to perspective
                z_ratio = (z_distance + dz) / z_distance
                # x and y come from the OpenPose values
                poses3d_op_xy[i *
                              3] = (poses2d[i * 2] -
                                    center_2d_x) * xy_scale[frame] * z_ratio
                poses3d_op_xy[i * 3 +
                              1] = (poses2d[i * 2 + 1] -
                                    center_2d_y) * xy_scale[frame] * z_ratio
                # z comes from the Baseline value
                poses3d_op_xy[i * 3 + 2] = dz

            # 12 (Spine), 14 (Neck/Nose), and 15 (Head) are missing from the OpenPose output, so derive them from the baseline (poses3d)
            for i in [12, 14, 15]:

                # Use 13 (Thorax) as the reference, since it is usually detected
                # Offsets
                dx = poses3d[i * 3] - poses3d[13 * 3]
                dy = poses3d[i * 3 + 1] - poses3d[13 * 3 + 1]
                dz = poses3d[i * 3 + 2] - poses3d[13 * 3 + 2]
                # Correct for the camera tilt of the training data
                dz = dz - dy * math.tan(
                    math.radians(teacher_camera_incline - camera_incline))
                # Derive x, y, z as offsets from 13 (Thorax)
                poses3d_op_xy[i * 3] = poses3d_op_xy[13 * 3] + dx
                poses3d_op_xy[i * 3 + 1] = poses3d_op_xy[13 * 3 + 1] + dy
                poses3d_op_xy[i * 3 + 2] = poses3d_op_xy[13 * 3 + 2] + dz

            # Tuck the chin in slightly for MMD
            poses3d_op_xy[15 * 3] += 0.5 * (poses3d_op_xy[14 * 3] -
                                            poses3d_op_xy[13 * 3])
            poses3d_op_xy[15 * 3 + 1] += 0.5 * (poses3d_op_xy[14 * 3 + 1] -
                                                poses3d_op_xy[13 * 3 + 1])
            poses3d_op_xy[15 * 3 + 2] += 0.5 * (poses3d_op_xy[14 * 3 + 2] -
                                                poses3d_op_xy[13 * 3 + 2])

            poses3d_list[frame] = poses3d_op_xy

        logger.info("calc ground y")
        # Get the Y of the lowest foot joint (in this coordinate system, larger Y means lower)
        foot_joint_no = [1, 2, 3, 6, 7, 8]
        max_pos = []
        for frame, poses3d in enumerate(poses3d_list):
            max_pos.append(np.max([poses3d[i * 3 + 1] for i in foot_joint_no]))

        # Shift so that the grounded joint (usually the ankle) sits at Y = 0
        for frame, poses3d in enumerate(poses3d_list):
            # Take the positions over a 120-frame window
            max_pos_slice = max_pos[int(np.max([0, frame - 60])):frame + 60]
            # Assume the foot is grounded in more than half the frames; take the median as the grounded foot position
            ankle_pos = np.median(max_pos_slice)

            poses3d_ground = np.zeros(96)
            for i in range(len(data_utils.H36M_NAMES)):
                poses3d_ground[i * 3] = poses3d[i * 3]
                poses3d_ground[i * 3 + 1] = poses3d[i * 3 + 1] - ankle_pos
                poses3d_ground[i * 3 + 2] = poses3d[i * 3 + 2]

            poses3d_list[frame] = poses3d_ground

        for frame, (poses3d,
                    poses2d) in enumerate(zip(poses3d_list, poses2d_list)):
            if frame % 200 == 0:
                logger.info("output frame {}".format(frame))

            # max = 0
            # min = 10000

            # logger.debug("enc_in - 2")
            # logger.debug(enc_in)

            for j in range(32):
                tmp = poses3d[j * 3 + 2]
                poses3d[j * 3 + 2] = -poses3d[j * 3 + 1]
                poses3d[j * 3 + 1] = tmp
            #         if poses3d[i][j * 3 + 2] > max:
            #             max = poses3d[i][j * 3 + 2]
            #         if poses3d[i][j * 3 + 2] < min:
            #             min = poses3d[i][j * 3 + 2]

            # for i in range(poses3d.shape[0]):
            #     for j in range(32):
            #         poses3d[i][j * 3 + 2] = max - poses3d[i][j * 3 + 2] + min
            #         poses3d[i][j * 3] += (spine_x - 630)
            #         poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, 280)
            # logger.debug(np.min(poses3d))
            # if np.min(poses3d) < -1000 and before_pose is not None:
            #    poses3d = before_pose

            p3d = poses3d
            # logger.debug("poses3d")
            # logger.debug(poses3d)
            if frame == 0:
                first_xyz = [0, 0, 0]
                first_xyz[0], first_xyz[1], first_xyz[2] = p3d[0], p3d[1], p3d[
                    2]

            if level[FLAGS.verbose] <= logging.INFO:
                viz.show3Dpose(p3d,
                               ax,
                               lcolor="#9b59b6",
                               rcolor="#2ecc71",
                               add_labels=True,
                               root_xyz=first_xyz)

                # Per-frame single-view output only at INFO level
                pngName = frame3d_dir + '/tmp_{0:012d}.png'.format(frame)
                plt.savefig(pngName)
                png_lib.append(imageio.imread(pngName))
                # before_pose = poses3d

            # Per-frame multi-angle output only at DEBUG level
            if level[FLAGS.verbose] == logging.DEBUG:

                for azim in [0, 45, 90, 135, 180, 225, 270, 315, 360]:
                    ax2 = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                    ax2.view_init(18, azim)
                    viz.show3Dpose(p3d,
                                   ax2,
                                   lcolor="#FF0000",
                                   rcolor="#0000FF",
                                   add_labels=True,
                                   root_xyz=first_xyz)

                    pngName2 = frame3d_dir + '/tmp_{0:012d}_{1:03d}.png'.format(
                        frame, azim)
                    plt.savefig(pngName2)

            # Output the joint position data
            write_pos_data(poses3d, ax, posf)

        posf.close()
        smoothedf.close()

        # At INFO level, generate an animated GIF
        if level[FLAGS.verbose] <= logging.INFO:
            logger.info(
                "creating Gif {0}/movie_smoothing.gif, please Wait!".format(
                    subdir))
            imageio.mimsave('{0}/movie_smoothing.gif'.format(subdir),
                            png_lib,
                            fps=FLAGS.gif_fps)

        logger.info("Done!")
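The per-joint x/y reconstruction above combines OpenPose pixel coordinates with the baseline's depth. A self-contained sketch (not a function from the source) that restates the arithmetic from the loop, using the same default constants:

import math

def project_joint(x_px, y_px, dy, dz, xy_scale, center_x, center_y,
                  z_distance=4000, camera_incline=0, teacher_camera_incline=13):
    # Correct for the camera tilt of the training data (Human3.6M)
    dz = dz - dy * math.tan(math.radians(teacher_camera_incline - camera_incline))
    # Magnification due to perspective: points farther from the camera shrink
    z_ratio = (z_distance + dz) / z_distance
    # x and y come from the OpenPose pixels; z from the corrected baseline depth
    x = (x_px - center_x) * xy_scale * z_ratio
    y = (y_px - center_y) * xy_scale * z_ratio
    return x, y, dz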
Code example #7
def main(_):

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 1}
    png_lib = []
    sess = tf.Session(config=tf.ConfigProto(
        device_count=device_count,
        allow_soft_placement=True))
    # plt.figure(3)
    batch_size = 128
    model = create_model(sess, actions, batch_size)



    openpose_dirs = sorted([ name for name in os.listdir(openpose_output_dir) if os.path.isdir(os.path.join(openpose_output_dir, name)) ])
    openpose_dirs = [os.path.join(openpose_output_dir, name) for name in openpose_dirs]
    error_dirs = []
    for dir in openpose_dirs:
        # try:
        file_name = os.path.basename(dir)
        file_to_save = '../data/3d-pose-baseline/' + file_name + '.npz'
        if not os.path.isfile(file_to_save): # If haven't already processed
            smoothed = read_openpose_json(dir)
            plt.figure(2)
            # smooth_curves_plot = show_anim_curves(smoothed, plt)
            # return
            # pngName = 'gif_output/smooth_plot.png'
            # smooth_curves_plot.savefig(pngName)
            # logger.info('writing gif_output/smooth_plot.png')

            if FLAGS.interpolation:
                logger.info("start interpolation")

                framerange = len(smoothed.keys())
                joint_rows = 36
                array = np.concatenate(list(smoothed.values()))
                array_reshaped = np.reshape(array, (framerange, joint_rows))

                multiplier = FLAGS.multiplier
                multiplier_inv = 1 / multiplier

                out_array = np.array([])
                for row in range(joint_rows):
                    x = []
                    for frame in range(framerange):
                        x.append(array_reshaped[frame, row])

                    frame = range(framerange)
                    frame_resampled = np.arange(0, framerange, multiplier)
                    spl = UnivariateSpline(frame, x, k=3)
                    # relative smooth factor based on jnt anim curve
                    min_x, max_x = min(x), max(x)
                    smooth_fac = max_x - min_x
                    smooth_resamp = 125
                    smooth_fac = smooth_fac * smooth_resamp
                    spl.set_smoothing_factor(float(smooth_fac))
                    xnew = spl(frame_resampled)

                    out_array = np.append(out_array, xnew)

                logger.info("done interpolating. reshaping {0} frames, please wait!!".format(framerange))

                a = np.array([])
                for frame in range(int(framerange * multiplier_inv)):
                    jnt_array = []
                    for jnt in range(joint_rows):
                        jnt_array.append(out_array[jnt * int(framerange * multiplier_inv) + frame])
                    a = np.append(a, jnt_array)

                a = np.reshape(a, (int(framerange * multiplier_inv), joint_rows))
                out_array = a

                interpolate_smoothed = {}
                for frame in range(int(framerange * multiplier_inv)):
                    interpolate_smoothed[frame] = list(out_array[frame])

                # plt.figure(3)
                smoothed = interpolate_smoothed
                # interpolate_curves_plot = show_anim_curves(smoothed, plt)
                # pngName = 'gif_output/interpolate_{0}.png'.format(smooth_resamp)
                # interpolate_curves_plot.savefig(pngName)
                # logger.info('writing gif_output/interpolate_plot.png')

            iter_range = len(smoothed.keys())
            positions_3d = []
            for n, (frame, xy) in enumerate(smoothed.items()):
                logger.info("calc frame {0}/{1}".format(frame, iter_range))
                # map list into np array
                joints_array = np.zeros((1, 36))
                joints_array[0] = [0 for i in range(36)]
                for o in range(len(joints_array[0])):
                    #feed array with xy array
                    joints_array[0][o] = xy[o]
                _data = joints_array[0]
                # mapping all body parts to the 3d-pose-baseline format
                for i in range(len(order)):
                    for j in range(2):
                        # create encoder input
                        enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
                for j in range(2):
                    # Hip
                    enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] + enc_in[0][6 * 2 + j]) / 2
                    # Neck/Nose
                    enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] + enc_in[0][12 * 2 + j]) / 2
                    # Thorax
                    enc_in[0][13 * 2 + j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 + j]

                # set spine
                spine_x = enc_in[0][24]
                spine_y = enc_in[0][25]

                enc_in = enc_in[:, dim_to_use_2d]
                mu = data_mean_2d[dim_to_use_2d]
                stddev = data_std_2d[dim_to_use_2d]
                enc_in = np.divide((enc_in - mu), stddev)

                dp = 1.0
                dec_out = np.zeros((1, 48))
                dec_out[0] = [0 for i in range(48)]
                _, _, poses3d = model.step(sess, enc_in, dec_out, dp, isTraining=False)
                all_poses_3d = []
                enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d, data_std_2d, dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d)
                gs1 = gridspec.GridSpec(1, 1)
                gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
                plt.axis('off')
                all_poses_3d.append( poses3d )
                enc_in, poses3d = map( np.vstack, [enc_in, all_poses_3d] )
                subplot_idx, exidx = 1, 1
                _max = 0
                _min = 10000

                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        tmp = poses3d[i][j * 3 + 2]
                        poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                        poses3d[i][j * 3 + 1] = tmp
                        if poses3d[i][j * 3 + 2] > _max:
                            _max = poses3d[i][j * 3 + 2]
                        if poses3d[i][j * 3 + 2] < _min:
                            _min = poses3d[i][j * 3 + 2]

                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                        poses3d[i][j * 3] += (spine_x - 630)
                        poses3d[i][j * 3 + 2] += (500 - spine_y)

                # Plot 3d predictions
                ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                ax.view_init(18, -70)
                logger.debug(np.min(poses3d))
                if np.min(poses3d) < -1000 and frame != 0:
                    poses3d = before_pose

                p3d = poses3d
                logger.debug(poses3d)
                viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")
                positions_3d.append(p3d)

                pngName = 'png/pose_frame_{0}.png'.format(str(frame).zfill(12))
                plt.savefig(pngName)
                if FLAGS.write_gif:
                    png_lib.append(imageio.imread(pngName))
                before_pose = poses3d

            np.savez_compressed(file_to_save, positions_3d=positions_3d)

            if FLAGS.write_gif:
                if FLAGS.interpolation:
                    # keep the gif at gif_fps: take every int(multiplier_inv)-th frame
                    png_lib = np.array([png_lib[png_image] for png_image in range(0,len(png_lib), int(multiplier_inv)) ])
                logger.info("creating Gif gif_output/animation_" + file_name + ".gif, please Wait!")
                imageio.mimsave("gif_output/animation_" + file_name + ".gif", png_lib, fps=FLAGS.gif_fps)
            logger.info("Done!")
        # except:
        #     error_dirs.append(dir)

        with open('error_op_3d_dirs.txt', 'w') as f:
            for item in error_dirs:
                f.write("%s\n" % item)
Code example #8
def sample():
    """Get samples from a model and visualize them"""
    path = '{}/samples_sh'.format(FLAGS.train_dir)
    if not os.path.exists(path):
        os.makedirs(path)
    actions = data_utils.define_actions(FLAGS.action)

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    n_joints = 17 if not (FLAGS.predict_14) else 14

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    if FLAGS.use_sh:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
            actions, FLAGS.data_dir)
    else:
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d, _ = data_utils.create_2d_data(
            actions, FLAGS.data_dir, rcams)

    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===

        batch_size = 128
        model = create_model(sess, actions, batch_size)
        print("Model loaded")

        for key2d in test_set_2d.keys():

            (subj, b, fname) = key2d

            # choose SittingDown action to visualize
            if b == 'SittingDown':
                print("Subject: {}, action: {}, fname: {}".format(
                    subj, b, fname))

                # keys should be the same if 3d is in camera coordinates
                key3d = key2d if FLAGS.camera_frame else (
                    subj, b, '{0}.h5'.format(fname.split('.')[0]))
                key3d = (subj, b, fname[:-3]) if (
                    fname.endswith('-sh')) and FLAGS.camera_frame else key3d

                enc_in = test_set_2d[key2d]
                n2d, _ = enc_in.shape
                dec_out = test_set_3d[key3d]
                n3d, _ = dec_out.shape
                assert n2d == n3d

                # Split into about-same-size batches

                enc_in = np.array_split(enc_in, n2d // batch_size)
                dec_out = np.array_split(dec_out, n3d // batch_size)

                # store all pose hypotheses in a list
                pose_3d_mdm = [[], [], [], [], []]

                for bidx in range(len(enc_in)):

                    # Dropout probability 0 (keep probability 1) for sampling
                    dp = 1.0
                    loss, _, out_all_components = model.step(sess,
                                                             enc_in[bidx],
                                                             dec_out[bidx],
                                                             dp,
                                                             isTraining=False)

                    # denormalize the input 2d pose, ground truth 3d pose as well as 3d pose hypotheses from mdm
                    out_all_components = np.reshape(
                        out_all_components,
                        [-1, model.HUMAN_3D_SIZE + 2, model.num_models])
                    out_mean = out_all_components[:, :model.HUMAN_3D_SIZE, :]

                    enc_in[bidx] = data_utils.unNormalizeData(
                        enc_in[bidx], data_mean_2d, data_std_2d,
                        dim_to_ignore_2d)
                    dec_out[bidx] = data_utils.unNormalizeData(
                        dec_out[bidx], data_mean_3d, data_std_3d,
                        dim_to_ignore_3d)
                    poses3d = np.zeros(
                        (out_mean.shape[0], 96, out_mean.shape[-1]))
                    for j in range(out_mean.shape[-1]):
                        poses3d[:, :, j] = data_utils.unNormalizeData(
                            out_mean[:, :, j], data_mean_3d, data_std_3d,
                            dim_to_ignore_3d)

                    # extract the 17 joints
                    dtu3d = np.hstack(
                        (np.arange(3), dim_to_use_3d
                         )) if not (FLAGS.predict_14) else dim_to_use_3d
                    dec_out_17 = dec_out[bidx][:, dtu3d]
                    pose_3d_17 = poses3d[:, dtu3d, :]
                    sqerr = (pose_3d_17 -
                             np.expand_dims(dec_out_17, axis=2))**2
                    dists = np.zeros(
                        (sqerr.shape[0], n_joints, sqerr.shape[2]))
                    for m in range(dists.shape[-1]):
                        dist_idx = 0
                        for k in np.arange(0, n_joints * 3, 3):
                            dists[:, dist_idx, m] = np.sqrt(
                                np.sum(sqerr[:, k:k + 3, m], axis=1))
                            dist_idx = dist_idx + 1

                    for i in range(poses3d.shape[-1]):
                        pose_3d_mdm[i].append(poses3d[:, :, i])

                # Put all the poses together
                enc_in, dec_out = map(np.vstack, [enc_in, dec_out])
                for i in range(poses3d.shape[-1]):
                    pose_3d_mdm[i] = np.vstack(pose_3d_mdm[i])

                # Convert back to world coordinates
                if FLAGS.camera_frame:
                    N_CAMERAS = 4
                    N_JOINTS_H36M = 32

                    # Add global position back
                    dec_out = dec_out + np.tile(test_root_positions[key3d],
                                                [1, N_JOINTS_H36M])
                    for i in range(poses3d.shape[-1]):
                        pose_3d_mdm[i] = pose_3d_mdm[i] + np.tile(
                            test_root_positions[key3d], [1, N_JOINTS_H36M])

                    # Load the appropriate camera
                    subj, action, sname = key3d

                    cname = sname.split('.')[1]  # <-- camera name
                    scams = {(subj, c + 1): rcams[(subj, c + 1)]
                             for c in range(N_CAMERAS)}  # cams of this subject
                    scam_idx = [
                        scams[(subj, c + 1)][-1] for c in range(N_CAMERAS)
                    ].index(cname)  # index of camera used
                    the_cam = scams[(subj,
                                     scam_idx + 1)]  # <-- the camera used
                    R, T, f, c, k, p, name = the_cam
                    assert name == cname

                    def cam2world_centered(data_3d_camframe):
                        data_3d_worldframe = cameras.camera_to_world_frame(
                            data_3d_camframe.reshape((-1, 3)), R, T)
                        data_3d_worldframe = data_3d_worldframe.reshape(
                            (-1, N_JOINTS_H36M * 3))
                        # subtract root translation
                        return data_3d_worldframe - np.tile(
                            data_3d_worldframe[:, :3], (1, N_JOINTS_H36M))

                    # Apply inverse rotation and translation
                    dec_out = cam2world_centered(dec_out)
                    for i in range(poses3d.shape[-1]):
                        pose_3d_mdm[i] = cam2world_centered(pose_3d_mdm[i])

                # sample some results to visualize
                np.random.seed(42)
                idx = np.random.permutation(enc_in.shape[0])
                enc_in, dec_out = enc_in[idx, :], dec_out[idx, :]
                for i in range(poses3d.shape[-1]):
                    pose_3d_mdm[i] = pose_3d_mdm[i][idx, :]

                exidx = 1
                nsamples = 20

                for i in np.arange(nsamples):
                    fig = plt.figure(figsize=(20, 5))

                    subplot_idx = 1
                    gs1 = gridspec.GridSpec(1, 7)  # 1 row, 7 columns
                    gs1.update(wspace=-0.00,
                               hspace=0.05)  # set the spacing between axes.
                    plt.axis('off')

                    # Plot 2d pose
                    ax1 = plt.subplot(gs1[subplot_idx - 1])
                    p2d = enc_in[exidx, :]
                    viz.show2Dpose(p2d, ax1)
                    ax1.invert_yaxis()

                    # Plot 3d gt
                    ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
                    p3d = dec_out[exidx, :]
                    viz.show3Dpose(p3d, ax2)

                    # Plot 3d pose hypotheses

                    for h in range(poses3d.shape[-1]):
                        ax3 = plt.subplot(gs1[subplot_idx + h + 1],
                                          projection='3d')
                        p3d = pose_3d_mdm[h][exidx]
                        viz.show3Dpose(p3d,
                                       ax3,
                                       lcolor="#9b59b6",
                                       rcolor="#2ecc71")
                    # plt.show()
                    plt.savefig('{}/sample_{}_{}_{}_{}.png'.format(
                        path, subj, action, scam_idx, exidx))
                    plt.close(fig)
                    exidx = exidx + 1
Code example #9

            p3d = poses3d
            if poses3d is not None:
                to_export = poses3d.tolist()[0]
                x, y, z = [], [], []
                for o in range(0, len(to_export), 3):
                    x.append(to_export[o])
                    y.append(to_export[o+1])
                    z.append(to_export[o+2])
                export_units[frame]={}
                for jnt_index, (_x, _y, _z) in enumerate(zip(x,y,z)):
                    export_units[frame][jnt_index] = {"translate": [_x, _y, _z]}


                viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")

            pngName = 'png/pose_frame_{0}.png'.format(str(frame).zfill(12))
            plt.savefig(pngName)
            if FLAGS.write_gif:
                png_lib.append(imageio.imread(pngName))

            if FLAGS.cache_on_fail:
                before_pose = poses3d

    if FLAGS.write_gif:
        if FLAGS.interpolation:
            # keep the gif at gif_fps: take every int(multiplier_inv)-th frame
            png_lib = np.array([png_lib[png_image] for png_image in range(0,len(png_lib), int(multiplier_inv)) ])
        logger.info("creating Gif gif_output/animation.gif, please Wait!")
        imageio.mimsave('gif_output/animation.gif', png_lib, fps=FLAGS.gif_fps)
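export_units above accumulates a {frame: {joint_index: {"translate": [x, y, z]}}} mapping. A hedged sketch of serializing it for downstream tools; the file name is illustrative:

import json

with open('export_units.json', 'w') as f:
    json.dump(export_units, f, indent=2)  # frame keys become JSON strings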
Code example #10
def main(_):
    global framenum

    #clear out all old frames
    os.system("rm png/*")

    #set done to empty array, it will hold the json files from openpose that we've already processed
    done = []

    #initialize input tensor to 1x64 array of zeroes [[0. 0. 0. ...]]
    #this is list of numpy vectors to feed as encoder inputs (32 2d coordinates)
    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    #actions to run on, default is all
    actions = data_utils.define_actions(FLAGS.action)

    #the list of Human3.6m subjects to look at
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]

    #load camera parameters from the h36m dataset
    rcams = cameras2.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

    #loads 2d data from precomputed Stacked Hourglass detections
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)

    #loads 3d poses, zero-centres and normalizes them
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 0}

    png_lib = []

    #run a tensorflow inference session
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)

        #load pre-trained model
        batch_size = 128
        model = create_model(sess, actions, batch_size)

        #create the video capture once, so we don't reopen the camera every frame
        cap = cv2.VideoCapture(0)

        #infinitely show 3d pose visualization
        while True:
            #wait for key to be pressed
            key = cv2.waitKey(1) & 0xFF

            _, frame = cap.read()  #ignore the other returned value

            #resize and rotate the incoming image frame
            frame, W, H = resize_img(frame)
            frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)

            start = time.time()
            #run posenet inference on the frame
            joints_2d = estimate_pose(frame)

            #throw out confidence score and flatten
            _data = joints_2d[..., :2].flatten()

            #open pop-up and draw the keypoints found
            img2D = draw_2Dimg(frame, joints_2d, 1)

            #fake the thorax point by finding midpt between left and right shoulder
            lt_should_x = _data[10]
            lt_should_y = _data[11]
            rt_should_x = _data[12]
            rt_should_y = _data[13]

            thorax = midpoint(lt_should_x, lt_should_y, rt_should_x,
                              rt_should_y)
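            # (sketch) midpoint() is not shown in this example; it is assumed
            # to be a small helper along the lines of:
            #   def midpoint(x1, y1, x2, y2):
            #       return ((x1 + x2) / 2, (y1 + y2) / 2)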

            #print("testing thorax pt at ", thorax)

            #insert thorax into data where it should be, at index 1
            _data = np.insert(_data, 2, thorax[0])
            _data = np.insert(_data, 3, thorax[1])

            #print("new _data is ", _data)
            _data = np.around(_data)

            #set xy to the array of 2d joint data
            xy = _data

            #create new 1x36 array of zeroes, which will store the 18 2d keypoints
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]

            #index into our data array
            index = 0

            #iterates 18 times
            for o in range(int(len(joints_array[0]) / 2)):
                #feed array with xy array (the 18 keypoints), but switch ordering: posenet to openpose
                for j in range(2):
                    #print("o is", o, "j is", j, "index is ", index)
                    index_into_posenet_data = order_pnet_to_openpose[o] * 2 + j
                    #print("putting posenet[", index_into_posenet_data, "], value ", xy[index_into_posenet_data], " , into joints_array[0][", index, "]")

                    joints_array[0][index] = xy[index_into_posenet_data]
                    index += 1

            #set _data to the array containing the 36 coordinates of the 2d keypts
            _data = joints_array[0]

            #print("_data is ", _data)

            #mapping all body parts for 3d-pose-baseline format (32 2d coordinates)
            for i in range(len(order)):  #iterates 14 times
                #select which coordinate of this point: x or y
                for j in range(2):
                    #create encoder input, switching around the order of the joint points
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]

            #now enc_in contains 14 points (28 total coordinates)

            #at this pt enc_in should be array of 64 vals

            for j in range(2):
                #place hip at index 0
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                        enc_in[0][6 * 2 + j]) / 2
                #place neck/nose at index 14
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                         enc_in[0][12 * 2 + j]) / 2
                #place thorax at index 13
                enc_in[0][13 * 2 +
                          j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 +
                                                                     j]

            #set spine found by openpose
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            #dim_to_use_2d is always [0  1  2  3  4  5  6  7 12 13 14 15 16 17 24 25 26 27 30 31 34 35 36 37 38 39 50 51 52 53 54 55]

            #take 32 entries of enc_in
            enc_in = enc_in[:, dim_to_use_2d]

            #find mean of 2d data
            mu = data_mean_2d[dim_to_use_2d]

            #find stdev of 2d data
            stddev = data_std_2d[dim_to_use_2d]

            #subtract mean and divide std for all
            enc_in = np.divide((enc_in - mu), stddev)

            #dropout keep probability
            dp = 1.0

            #output tensor, initialize it to zeroes. We'll get 16 joints with 3d coordinates
            #this is list of numpy vectors that are the expected decoder outputs
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]

            #get the 3d poses by running the 3d-pose-baseline inference. Model operates on 32 points
            _, _, poses3d = model.step(sess,
                                       enc_in,
                                       dec_out,
                                       dp,
                                       isTraining=False)
            #poses3d comes back as a 1x96 array (i.e., 32 3D points)

            end = time.time()
            #print("ELAPSED: ", end-start)

            #hold our 3d poses while we're doing some post-processing
            all_poses_3d = []

            #un-normalize the input and output data using the means and stdevs
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)

            #create a grid for drawing
            gs1 = gridspec.GridSpec(1, 1)

            #set spacing between axes
            gs1.update(wspace=-0.00, hspace=0.05)
            plt.axis('off')

            #fill all_poses_3d with the 3d poses predicted by the model step fxn
            all_poses_3d.append(poses3d)

            #vstack stacks arrays in sequence vertically (row wise)
            #this doesn't do anything in this case, as far as I can tell
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])

            subplot_idx, exidx = 1, 1
            _max = 0
            _min = 10000

            #iterates once
            for i in range(poses3d.shape[0]):
                #iterate over all 32 points in poses3d
                for j in range(32):
                    #save the last coordinate of this point into tmp
                    tmp = poses3d[i][j * 3 + 2]

                    #swap the second and third coordinates of this pt
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp

                    #keep track of max of last coordinate
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            #iterates once
            for i in range(poses3d.shape[0]):
                #iterate over all 32 points in poses3d (2nd and 3rd coords have all been swapped at this pt)
                for j in range(32):
                    #change the third coord of this pt, subtracting it from sum of max and min third coord to get new value
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min

                    #modify first coord of this pt by adding the x coord of the spine found by 2d model
                    poses3d[i][j * 3] += (spine_x - 630)

                    #modify third coord of this pt by adding 500 minus y coord of spine found by 2d model
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            #Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, -70)
            logger.debug(np.min(poses3d))

            #TODO: if something happened with the data, reuse data from last frame (before_pose)

            p3d = poses3d

            #plot the 3d skeleton
            viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")

            #keep track of this poses3d in case we need to reuse it for next frame
            before_pose = poses3d

            #save this frame as a png in the ./png/ folder
            pngName = 'png/test_{0}.png'.format(str(framenum))
            #print("pngName is ", pngName)

            plt.savefig(pngName)

            #plt.show()

            #read this frame which was just saved as png
            img = cv2.imread(pngName, 0)

            rect_cpy = img.copy()

            #show this frame
            cv2.imshow('3d-pose-baseline', rect_cpy)

            framenum += 1

            #quit if q is pressed
            if key == ord('q'):
                break

        sess.close()
Code example #11
def main(args):

    model = evaluation_util.load_model(vars(args))
    chainer.serializers.load_npz(args.lift_model, model)
#    cap = cv.VideoCapture(args.input if args.input else 0)

    # Depth estimation result directory ({video name}_json_{run datetime}_idx00)
    subdir = args.base_target

    frame3d_dir = "{0}/frame3d_gan".format(subdir)
    os.makedirs(frame3d_dir)

    # Joint position data file
    posf = open(subdir +'/pos_gan.txt', 'w')

    # Normalized OpenPose position data file
    smoothedf = open(subdir +'/smoothed_gan.txt', 'w')

    start_frame_index, smoothed = openpose_utils.read_openpose_json("{0}/json".format(subdir), 0)

    before_pose = None
    png_lib = []
    for n, (frame, xy) in enumerate(smoothed.items()):
        if frame % 100 == 0:
            logger.info("calc idx {0}, frame {1}".format(0, frame))

        logger.debug("xy")
        logger.debug(xy)

        points = []
        for o in range(0,len(xy),2):
            points.append(np.array( [xy[o], xy[o+1]] ))

        logger.debug("points pre 36m")
        logger.debug(points)

        BODY_PARTS, POSE_PAIRS = parts(args)

        # Dump the OpenPose position data as-is for now
        for poi in points:
#            logger.debug(poi)
#            logger.debug(poi.dtype)
#            logger.debug(poi.dtype == 'object')
            if poi.dtype == 'object':
                # logger.debug('poi is None')
                pass
            else:
#                logger.debug(' ' + str(poi[0]) + ' ' + str(poi[1]))
                smoothedf.write(' ' + str(poi[0]) + ' ' + str(poi[1]))

        smoothedf.write("\n")

        # Convert 2D to 3D
        points = to36M(points, BODY_PARTS)
        logger.debug("points after 36m")
        logger.debug(points)
        points = np.reshape(points, [1, -1]).astype('f')
        logger.debug("points reshape 36m")
        logger.debug(points)
        points_norm = projection_gan.pose.dataset.pose_dataset.pose_dataset_base.Normalization.normalize_2d(points)
        logger.debug("points_norm")
        logger.debug(points_norm)
        poses3d = create_pose(model, points_norm)
        logger.debug("poses3d")
        logger.debug(poses3d)

        # Plot 3d predictions
        subplot_idx, exidx = 1, 1
        gs1 = gridspec.GridSpec(1, 1)
        gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
        ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
        ax.view_init(18, 280)

        logger.debug(np.min(poses3d))

        if np.min(poses3d) < -1000 and before_pose is not None:
            poses3d = before_pose

        p3d = poses3d

        xs = p3d[:, 0::3]
        ys = p3d[:, 1::3]
        zs = p3d[:, 2::3]

        # Scale up
        xs *= 600
        ys *= 600
        zs *= 600

        # Flip Y and Z only for the image output
        p3d_copy = copy.deepcopy(p3d)
        p3d_copy[:, 1::3] *= -1
        p3d_copy[:, 2::3] *= -1

        # Output the 3D image
        if level[args.verbose] <= logging.INFO:
            # d = 30
            # img = evaluation_util.create_img_xyz(xs, ys, zs, np.pi * d / 180.)
            # cv.imwrite(os.path.join(frame3d_dir, "out_{0:012d}_{0:03d}_degree.png".format(n, d)), img)
            
            viz.show3Dpose(p3d_copy, ax, lcolor="#9b59b6", rcolor="#2ecc71", add_labels=True)

            # Per-frame single-view output only at INFO level
            pngName = os.path.join(frame3d_dir, "3d_gan_{0:012d}.png".format(n))
            plt.savefig(pngName)
            png_lib.append(imageio.imread(pngName))            
            before_pose = poses3d

        # Per-frame multi-angle output only at DEBUG level
        if level[args.verbose] == logging.DEBUG:

            for azim in [0, 45, 90, 135, 180, 225, 270, 315, 360]:
                ax2 = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                ax2.view_init(18, azim)
                viz.show3Dpose(p3d, ax2, lcolor="#FF0000", rcolor="#0000FF", add_labels=True)

                pngName2 = os.path.join(frame3d_dir, "debug_{0:012d}_{1:03d}.png".format(n, azim))
                plt.savefig(pngName2)

        # Output the 3D joint position data
        for o in range(0,len(p3d[0]),3):
            logger.debug(str(o) + " "+ str(p3d[0][o]) +" "+ str(p3d[0][o+2]) +" "+ str(p3d[0][o+1] * -1) + ", ")
            posf.write(str(o) + " "+ str(p3d[0][o]) +" "+ str(p3d[0][o+2]) +" "+ str(p3d[0][o+1] * -1) + ", ")
            
        posf.write("\n")

        # # Per-angle output only at DEBUG level
        # if level[args.verbose] == logging.DEBUG:
        #     deg = 15
        #     for d in range(0, 360 + deg, deg):
        #         img = evaluation_util.create_projection_img(pose, np.pi * d / 180.)
        #         cv.imwrite(os.path.join(out_sub_dir, "rot_{0:03d}_degree.png".format(d)), img)

        n += 1

    smoothedf.close()
    posf.close()

    # At INFO level, generate an animated GIF
    if level[args.verbose] <= logging.INFO:
        logger.info("creating Gif {0}/movie_smoothing_gan.gif, please Wait!".format(subdir))
        imageio.mimsave('{0}/movie_smoothing_gan.gif'.format(subdir), png_lib, fps=30)
Code example #12
def main(args):
    human36m_data_path = os.path.join('data', 'data_3d_' + "h36m" + '.npz')
    MUCO3DHP_path = "/home/lgh/data1/multi3Dpose/muco-3dhp/output/unaugmented_set_001"
    hid_dim = 128
    num_layers = 4
    non_local = True
    lr = 1.0e-3
    epochs = 30
    _lr_decay = 100000
    lr_gamma = 0.96
    max_norm = True
    num_workers = 8
    snapshot = 5
    batch_size = 64
    print('==> Loading multi-person dataset...')
    #human36m_dataset_path = path.join(human36m_data_path)
    data_2d, data_3d, img_name, feature_mutual = get_MUCO3DHP_data(
        MUCO3DHP_path, args)  ## N * (M*17) * 2    N * (M*17) * 3 numpy

    print(img_name[1])
    ax = plt.subplot(111, projection='3d')
    #ax.scatter(data_3d[0, 0::4, 0], data_3d[0, 0::4, 1], data_3d[0, 0::4, 2])
    viz.show3Dpose(data_3d[1, :, :], ax)
    plt.show()
    person_num = data_2d.shape[1] // 17  # integer division: joints come in groups of 17
    dataset = CMUPanoDataset(human36m_data_path, person_num)
    ### split into train and test sets: 4/5 train, 1/5 test
    num = len(data_2d)
    train_num = num * 4 // 5  # integer division so train_num can be used as a slice index

    cudnn.benchmark = True
    device = torch.device("cuda")

    adj, adj_mutual = adj_mx_from_skeleton(dataset.skeleton(), person_num)  #ok
    model_pos = MultiSemGCN(adj,
                            adj_mutual,
                            person_num,
                            hid_dim,
                            num_layers=num_layers,
                            nodes_group=dataset.skeleton().joints_group()
                            if non_local else None).to(device)  #ok
    criterion = nn.MSELoss(reduction='mean').to(device)
    optimizer = torch.optim.Adam(model_pos.parameters(), lr=lr)

    start_epoch = 0
    error_best = None
    glob_step = 0
    lr_now = lr
    ckpt_dir_path = os.path.join(
        'checkpoint_multi',
        datetime.datetime.now().isoformat() +
        "_l_%04d_hid_%04d_e_%04d_non_local_%d" %
        (num_layers, hid_dim, epochs, non_local))

    if not os.path.exists(ckpt_dir_path):
        os.makedirs(ckpt_dir_path)
        print('=> Making checkpoint dir: {}'.format(ckpt_dir_path))

    logger = Logger(os.path.join(ckpt_dir_path, 'log.txt'))
    logger.set_names(
        ['epoch', 'lr', 'loss_train', 'error_eval_p1', 'error_eval_p2'])

    train_loader = DataLoader(PoseGenerator(data_3d[:train_num],
                                            data_2d[:train_num],
                                            feature_mutual[:train_num]),
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers,
                              pin_memory=True)

    valid_loader = DataLoader(PoseGenerator(data_3d[train_num:],
                                            data_2d[train_num:],
                                            feature_mutual[train_num:]),
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=num_workers,
                              pin_memory=True)

    writer = SummaryWriter()
    for epoch in range(start_epoch, epochs):
        # Train for one epoch
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr_now))
        epoch_loss, lr_now, glob_step = train(train_loader,
                                              model_pos,
                                              criterion,
                                              optimizer,
                                              device,
                                              lr,
                                              lr_now,
                                              glob_step,
                                              _lr_decay,
                                              lr_gamma,
                                              max_norm=max_norm)
        writer.add_scalar('epoch_loss', epoch_loss, epoch)
        # Evaluate
        error_eval_p1, error_eval_p2 = evaluate(valid_loader, model_pos,
                                                device)

        # Update log file
        logger.append(
            [epoch + 1, lr_now, epoch_loss, error_eval_p1, error_eval_p2])

        # Save checkpoint
        if error_best is None or error_best > error_eval_p1:
            error_best = error_eval_p1
            save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'state_dict': model_pos.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'error': error_eval_p1
                },
                ckpt_dir_path,
                suffix='best')

        if (epoch + 1) % snapshot == 0:
            save_ckpt(
                {
                    'epoch': epoch + 1,
                    'lr': lr_now,
                    'step': glob_step,
                    'state_dict': model_pos.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'error': error_eval_p1
                }, ckpt_dir_path)

    logger.close()
    writer.close()
    logger.plot(['loss_train', 'error_eval_p1'])
    savefig(os.path.join(ckpt_dir_path, 'log.eps'))
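
The 4/5 : 1/5 split above only works as a slice index because of the integer division; a minimal self-contained sketch (the array shape is hypothetical):

import numpy as np

data_3d = np.zeros((1000, 17, 3))   # hypothetical dataset: N frames, 17 joints, xyz
train_num = len(data_3d) * 4 // 5   # 800; '//' keeps the slice index an int
train_3d, test_3d = data_3d[:train_num], data_3d[train_num:]
assert train_3d.shape[0] == 800 and test_3d.shape[0] == 200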
Code example #13
def hankgogo(gogodata, gogodatafake):
    """Get samples from a model and visualize them"""

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    #if FLAGS.use_sh:
    #  train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
    #else:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data(
        actions, FLAGS.data_dir, rcams)
    print("done reading and normalizing data.")

    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.linear_size))
        batch_size = 1
        model = create_model_my(sess, actions, batch_size)
        print("Model loaded")

        # Dropout probability 0 (keep probability 1) for sampling
        dp = 1.0
        poses3d = model.step(sess, gogodata, isTraining=False)
        poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                             data_std_3d, dim_to_ignore_3d)
        model.saver.save(sess, os.path.join(mysave_dir, "gogo"))

    # Grab a random batch to visualize
    # enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, poses3d] )
    # idx = np.random.permutation( enc_in.shape[0] )
    # enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

    # Visualize random samples
    import matplotlib.gridspec as gridspec

    # 1080p	= 1,920 x 1,080
    fig = plt.figure(figsize=(19.2, 10.8))

    gs1 = gridspec.GridSpec(5, 9)  # 5 rows, 9 columns
    gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
    plt.axis('off')

    subplot_idx, exidx = 1, 1
    nsamples = 1
    # Plot 2d pose
    #ax1 = plt.subplot(gs1[subplot_idx-1])
    #p2d = enc_in[exidx,:]
    #viz.show2Dpose( p2d, ax1 )
    #ax1.invert_yaxis()

    # Plot 3d gt
    #ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
    #p3d = dec_out[exidx,:]
    #viz.show3Dpose( p3d, ax2 )

    # Plot 3d predictions
    ax3 = plt.subplot(gs1[subplot_idx + 1], projection='3d')
    p3d = poses3d
    viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")

    exidx = exidx + 1
    subplot_idx = subplot_idx + 3

    plt.show()
Code example #14
def main(_):
    smoothed = read_openpose_json()
    logger.info("reading and smoothing done. start feeding 3d-pose-baseline")
    plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    pngName = 'png/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 1}
    png_lib = []
    before_pose = None  # cache of the last good pose, reused when a prediction blows up
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        for n, (frame, xy) in enumerate(smoothed.items()):
            logger.info("calc frame {0}".format(frame))
            # map list into np array
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]
            _data = joints_array[0]
            # mapping all body parts or 3d-pose-baseline format
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                        enc_in[0][6 * 2 + j]) / 2
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                         enc_in[0][12 * 2 + j]) / 2
                # Thorax
                enc_in[0][13 * 2 + j] = (2 * enc_in[0][12 * 2 + j] -
                                         enc_in[0][14 * 2 + j])

            # set spine
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]
            _, _, poses3d = model.step(sess,
                                       enc_in,
                                       dec_out,
                                       dp,
                                       isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00,
                       hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append(poses3d)
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
            subplot_idx, exidx = 1, 1
            _max = 0      # '_' prefix avoids shadowing the built-in max()/min()
            _min = 10000

            # swap Y/Z so the pose stands upright, tracking the new z-range
            for i in range(poses3d.shape[0]):
                for j in range(32):
                    tmp = poses3d[i][j * 3 + 2]
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            # flip Z and re-center on the 2D spine position
            for i in range(poses3d.shape[0]):
                for j in range(32):
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                    poses3d[i][j * 3] += (spine_x - 630)
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, -70)
            logger.debug(np.min(poses3d))
            if np.min(poses3d) < -1000 and before_pose is not None:
                poses3d = before_pose

            p3d = poses3d
            logger.debug(poses3d)
            viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")

            pngName = 'png/test_{0}.png'.format(str(frame))
            plt.savefig(pngName)
            png_lib.append(imageio.imread(pngName))
            before_pose = poses3d

    logger.info("creating Gif png/movie_smoothing.gif, please Wait!")
    imageio.mimsave('png/movie_smoothing.gif', png_lib, fps=FLAGS.gif_fps)
    logger.info("Done!".format(pngName))
Code example #15
def sample():
  """Get samples from a model and visualize them"""

  actions = data_utils.define_actions( FLAGS.action )

  # Load camera parameters
  SUBJECT_IDS = [1,5,6,7,8,9,11]
  rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

  # Load 3d data and load (or create) 2d projections
  train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
    actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14 )

  if FLAGS.use_sh:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
  else:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data( actions, FLAGS.data_dir, rcams )
  print( "done reading and normalizing data." )

  device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
  with tf.Session(config=tf.ConfigProto( device_count = device_count )) as sess:
    # === Create the model ===
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.linear_size))
    batch_size = 128
    model = create_model(sess, actions, batch_size)
    print("Model loaded")

    for key2d in test_set_2d.keys():

      (subj, b, fname) = key2d
      print( "Subject: {}, action: {}, fname: {}".format(subj, b, fname) )

      # keys should be the same if 3d is in camera coordinates
      key3d = key2d if FLAGS.camera_frame else (subj, b, '{0}.h5'.format(fname.split('.')[0]))
      key3d = (subj, b, fname[:-3]) if (fname.endswith('-sh')) and FLAGS.camera_frame else key3d

      enc_in  = test_set_2d[ key2d ]
      n2d, _ = enc_in.shape
      dec_out = test_set_3d[ key3d ]
      n3d, _ = dec_out.shape
      assert n2d == n3d

      # Split into about-same-size batches
      enc_in   = np.array_split( enc_in,  n2d // batch_size )
      dec_out  = np.array_split( dec_out, n3d // batch_size )
      all_poses_3d = []

      for bidx in range( len(enc_in) ):

        # Dropout probability 0 (keep probability 1) for sampling
        dp = 1.0
        _, _, poses3d = model.step(sess, enc_in[bidx], dec_out[bidx], dp, isTraining=False)

        # denormalize
        enc_in[bidx]  = data_utils.unNormalizeData(  enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d )
        dec_out[bidx] = data_utils.unNormalizeData( dec_out[bidx], data_mean_3d, data_std_3d, dim_to_ignore_3d )
        poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )
        all_poses_3d.append( poses3d )

      # Put all the poses together
      enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, all_poses_3d] )

      # Convert back to world coordinates
      if FLAGS.camera_frame:
        N_CAMERAS = 4
        N_JOINTS_H36M = 32

        # Add global position back
        dec_out = dec_out + np.tile( test_root_positions[ key3d ], [1,N_JOINTS_H36M] )

        # Load the appropriate camera
        subj, _, sname = key3d

        cname = sname.split('.')[1] # <-- camera name
        scams = {(subj,c+1): rcams[(subj,c+1)] for c in range(N_CAMERAS)} # cams of this subject
        scam_idx = [scams[(subj,c+1)][-1] for c in range(N_CAMERAS)].index( cname ) # index of camera used
        the_cam  = scams[(subj, scam_idx+1)] # <-- the camera used
        R, T, f, c, k, p, name = the_cam
        assert name == cname

        def cam2world_centered(data_3d_camframe):
          data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
          data_3d_worldframe = data_3d_worldframe.reshape((-1, N_JOINTS_H36M*3))
          # subtract root translation
          return data_3d_worldframe - np.tile( data_3d_worldframe[:,:3], (1,N_JOINTS_H36M) )

        # Apply inverse rotation and translation
        dec_out = cam2world_centered(dec_out)
        poses3d = cam2world_centered(poses3d)

  # Grab a random batch to visualize
  enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, poses3d] )
  idx = np.random.permutation( enc_in.shape[0] )
  enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

  # Visualize random samples
  import matplotlib.gridspec as gridspec

  # 1080p	= 1,920 x 1,080
  fig = plt.figure( figsize=(19.2, 10.8) )

  gs1 = gridspec.GridSpec(5, 9) # 5 rows, 9 columns
  gs1.update(wspace=-0.00, hspace=0.05) # set the spacing between axes.
  plt.axis('off')

  subplot_idx, exidx = 1, 1
  nsamples = 15
  for i in np.arange( nsamples ):

    # Plot 2d pose
    ax1 = plt.subplot(gs1[subplot_idx-1])
    p2d = enc_in[exidx,:]
    viz.show2Dpose( p2d, ax1 )
    ax1.invert_yaxis()

    # Plot 3d gt
    ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
    p3d = dec_out[exidx,:]
    viz.show3Dpose( p3d, ax2 )

    # Plot 3d predictions
    ax3 = plt.subplot(gs1[subplot_idx+1], projection='3d')
    p3d = poses3d[exidx,:]
    viz.show3Dpose( p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71" )

    exidx = exidx + 1
    subplot_idx = subplot_idx + 3

  plt.show()
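
np.array_split is used above instead of np.split because it tolerates a remainder, so no frame is dropped when n2d is not an exact multiple of batch_size; a quick illustration:

import numpy as np

enc_in = np.arange(20).reshape(10, 2)      # 10 frames with a batch size of 4
batches = np.array_split(enc_in, 10 // 4)  # 2 near-equal batches of 5 rows each
print([b.shape for b in batches])          # [(5, 2), (5, 2)]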
Code example #16
def main(_):

    smoothed = read_openpose_json()
    logger.info("reading and smoothing done. start feeding 3d-pose-baseline")
    plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    pngName = FLAGS.output_dirname + '/supplemental' + '/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)

    if FLAGS.interpolation:
        framerange = len(smoothed.keys())
        joint_rows = 36
        array = np.concatenate(list(smoothed.values()))
        array_reshaped = np.reshape(array, (framerange, joint_rows))

        multiplier = 0.1
        multiplier_inv = 1 / multiplier
        out_array = np.array([])
        for row in range(joint_rows):
            x = []
            for frame in range(framerange):
                x.append(array_reshaped[frame, row])

            frame = range(framerange)
            frame_resampled = np.arange(0, framerange, multiplier)
            spl = UnivariateSpline(frame, x, k=3)
            min_x, max_x = min(x), max(x)
            smooth_fac = max_x - min_x
            smooth_fac = smooth_fac * 125
            spl.set_smoothing_factor(float(smooth_fac))
            xnew = spl(frame_resampled)

            out_array = np.append(out_array, xnew)

        logger.info("done interpolating")

        a = np.array([])
        for frame in range(int(framerange * multiplier_inv)):
            jnt_array = []
            #print(frame)
            for jnt in range(joint_rows):
                jnt_array.append(
                    out_array[jnt * int(framerange * multiplier_inv) + frame])
            a = np.append(a, jnt_array)

        a = np.reshape(a, (int(framerange * multiplier_inv), joint_rows))
        out_array = a

        interpolate_smoothed = {}
        for frame in range(int(framerange * multiplier_inv)):
            interpolate_smoothed[frame] = list(out_array[frame])

        plt.figure(3)
        smoothed = interpolate_smoothed
        interpolate_curves_plot = show_anim_curves(smoothed, plt)
        pngName = FLAGS.output_dirname + '/supplemental' + '/interpolate_plot.png'
        interpolate_curves_plot.savefig(pngName)

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 1}
    png_lib = []
    before_pose = None  # cache of the last good pose, reused when a prediction blows up
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        plt.figure(4)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        for n, (frame, xy) in enumerate(smoothed.items()):
            logger.info("calc frame {0}".format(frame))
            # map list into np array
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]
            _data = joints_array[0]
            # mapping all body parts or 3d-pose-baseline format
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                        enc_in[0][6 * 2 + j]) / 2
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                         enc_in[0][12 * 2 + j]) / 2
                # Thorax
                enc_in[0][13 * 2 + j] = (2 * enc_in[0][12 * 2 + j] -
                                         enc_in[0][14 * 2 + j])

            # set spine
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]
            _, _, poses3d = model.step(sess,
                                       enc_in,
                                       dec_out,
                                       dp,
                                       isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00,
                       hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append(poses3d)
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
            subplot_idx, exidx = 1, 1
            _max = 0
            _min = 10000

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    tmp = poses3d[i][j * 3 + 2]
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                    poses3d[i][j * 3] += (spine_x - 630)
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, -70)
            logger.debug(np.min(poses3d))
            if np.min(poses3d) < -1000 and before_pose is not None:
                poses3d = before_pose

            p3d = poses3d
            logger.debug(poses3d)
            viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")

            pngName = FLAGS.output_dirname + '/' + FLAGS.output_filename + '_{0}.png'.format(
                str(frame))
            if FLAGS.write_output_img:
                plt.savefig(pngName)
                if FLAGS.write_gif:
                    # collect frames for the GIF (requires the saved png)
                    png_lib.append(imageio.imread(pngName))
            before_pose = poses3d

    if FLAGS.write_gif and png_lib:
        logger.info("creating gif movie_smoothing.gif, please wait!")
        imageio.mimsave(FLAGS.output_dirname + '/supplemental' +
                        '/movie_smoothing.gif',
                        png_lib,
                        fps=FLAGS.gif_fps)
    logger.info("Done!")
Code example #17
def main(_):

    smoothed = read_openpose_json()
    '''plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    #return
    pngName = 'gif_output/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)
    logger.info('writing gif_output/smooth_plot.png')'''

    if FLAGS.interpolation:
        logger.info("start interpolation")

        framerange = len(smoothed.keys())
        joint_rows = 36
        array = np.concatenate(list(smoothed.values()))
        array_reshaped = np.reshape(array, (framerange, joint_rows))

        multiplier = FLAGS.multiplier
        multiplier_inv = 1 / multiplier

        out_array = np.array([])
        for row in range(joint_rows):
            x = []
            for frame in range(framerange):
                x.append(array_reshaped[frame, row])

            frame = range(framerange)
            frame_resampled = np.arange(0, framerange, multiplier)
            spl = UnivariateSpline(frame, x, k=3)
            #relative smooth factor based on jnt anim curve
            min_x, max_x = min(x), max(x)
            smooth_fac = max_x - min_x
            smooth_resamp = 125
            smooth_fac = smooth_fac * smooth_resamp
            spl.set_smoothing_factor(float(smooth_fac))
            xnew = spl(frame_resampled)

            out_array = np.append(out_array, xnew)

        logger.info(
            "done interpolating. reshaping {0} frames,  please wait!!".format(
                framerange))

        a = np.array([])
        for frame in range(int(framerange * multiplier_inv)):
            jnt_array = []
            for jnt in range(joint_rows):
                jnt_array.append(
                    out_array[jnt * int(framerange * multiplier_inv) + frame])
            a = np.append(a, jnt_array)

        a = np.reshape(a, (int(framerange * multiplier_inv), joint_rows))
        out_array = a

        interpolate_smoothed = {}
        for frame in range(int(framerange * multiplier_inv)):
            interpolate_smoothed[frame] = list(out_array[frame])
        '''plt.figure(3)
        smoothed = interpolate_smoothed
        interpolate_curves_plot = show_anim_curves(smoothed, plt)
        pngName = 'gif_output/interpolate_{0}.png'.format(smooth_resamp)
        interpolate_curves_plot.savefig(pngName)
        logger.info('writing gif_output/interpolate_plot.png')'''

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 1}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        iter_range = len(smoothed.keys())
        rows = 0
        before_pose = None  # cache of the last good pose
        filename = "keyjoints_proband_222_01.xlsx"
        workbook = xlsxwriter.Workbook(filename)
        worksheet = workbook.add_worksheet()
        for n, (frame, xy) in enumerate(smoothed.items()):
            logger.info("calc frame {0}/{1}".format(frame, iter_range))
            # map list into np array
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]
            _data = joints_array[0]
            # mapping all body parts or 3d-pose-baseline format
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                        enc_in[0][6 * 2 + j]) / 2
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                         enc_in[0][12 * 2 + j]) / 2
                # Thorax
                enc_in[0][13 * 2 + j] = (2 * enc_in[0][12 * 2 + j] -
                                         enc_in[0][14 * 2 + j])

            # set spine
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]
            _, _, poses3d = model.step(sess,
                                       enc_in,
                                       dec_out,
                                       dp,
                                       isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00,
                       hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append(poses3d)
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
            subplot_idx, exidx = 1, 1
            _max = 0
            _min = 10000

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    tmp = poses3d[i][j * 3 + 2]
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                    poses3d[i][j * 3] += (spine_x - 630)
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            #fig = plt.figure( figsize=(12.8, 7.2) )
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            #ax.view_init(18, -70)
            logger.debug(np.min(poses3d))
            if np.min(poses3d) < -1000 and before_pose is not None:
                poses3d = before_pose

            p3d = poses3d
            logger.debug(poses3d)
            #viz.Ax3DPose(ax, lcolor="#9b59b6", rcolor="#2ecc71")
            viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")
            #ax.scatter(p3d, c='b', marker='o')
            #result = viz.show3Dpose(p3d,ax)
            #print(p3d)
            ax.set_xlabel('X')
            ax.set_ylabel('Y')
            ax.set_zlabel('Z')

            # note: Matplotlib 3.1-3.5 raises on set_aspect('equal') for 3D axes;
            # ax.set_box_aspect((1, 1, 1)) is the newer equivalent there
            ax.set_aspect('equal')
            #print(p3d)
            #plt.show()
            col = 0
            for i in p3d[0]:
                worksheet.write(rows, col, i)
                col += 1
                #.append(i)
            rows += 1
            pngName = '{}_keypoints.png'.format(str(frame))
            plt.savefig(pngName)
            if FLAGS.write_gif:
                png_lib.append(imageio.imread(pngName))
            before_pose = poses3d
    workbook.close()

    if FLAGS.write_gif:
        if FLAGS.interpolation:
            # keep every multiplier_inv-th frame so the gif plays at the original speed
            png_lib = np.array([
                png_lib[png_image]
                for png_image in range(0, len(png_lib), int(multiplier_inv))
            ])
        logger.info("creating Gif gif_output/animation.gif, please Wait!")
        imageio.mimsave('gif_output/animation.gif', png_lib, fps=FLAGS.gif_fps)
    logger.info("Done!".format(pngName))
Code example #18
def main(_):
    # timestamp used in output file and directory names
    now_str = "{0:%Y%m%d_%H%M%S}".format(datetime.datetime.now())

    logger.debug("FLAGS.person_idx={0}".format(FLAGS.person_idx))

    # create the date + person-index output directory
    subdir = '{0}/{1}_3d_{2}_idx{3:02d}'.format(
        os.path.dirname(openpose_output_dir),
        os.path.basename(openpose_output_dir), now_str, FLAGS.person_idx)
    os.makedirs(subdir)

    frame3d_dir = "{0}/frame3d".format(subdir)
    os.makedirs(frame3d_dir)

    # joint position output file
    posf = open(subdir + '/pos.txt', 'w')

    # normalized OpenPose position file
    smoothedf = open(subdir + '/smoothed.txt', 'w')

    idx = FLAGS.person_idx - 1
    smoothed = openpose_utils.read_openpose_json(openpose_output_dir, idx,
                                                 level[FLAGS.verbose] == 3)
    logger.info("reading and smoothing done. start feeding 3d-pose-baseline")
    logger.debug(smoothed)
    plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    pngName = subdir + '/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    before_pose = None
    device_count = {"GPU": 1}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        for n, (frame, xy) in enumerate(smoothed.items()):
            logger.info("calc idx {0}, frame {1}".format(idx, frame))

            # map list into np array
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]
            _data = joints_array[0]

            smoothedf.write(' '.join(map(str, _data)))
            smoothedf.write("\n")

            # mapping all body parts or 3d-pose-baseline format
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                        enc_in[0][6 * 2 + j]) / 2
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                         enc_in[0][12 * 2 + j]) / 2
                # Thorax
                enc_in[0][13 * 2 + j] = (2 * enc_in[0][12 * 2 + j] -
                                         enc_in[0][14 * 2 + j])

            # set spine
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            # logger.debug("enc_in - 1")
            # logger.debug(enc_in)

            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]
            _, _, poses3d = model.step(sess,
                                       enc_in,
                                       dec_out,
                                       dp,
                                       isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                 data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00,
                       hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append(poses3d)
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
            subplot_idx, exidx = 1, 1
            _max = 0      # '_' prefix avoids shadowing the built-in max()/min()
            _min = 10000

            # logger.debug("enc_in - 2")
            # logger.debug(enc_in)

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    tmp = poses3d[i][j * 3 + 2]
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                    poses3d[i][j * 3] += (spine_x - 630)
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, 280)
            logger.debug(np.min(poses3d))
            if np.min(poses3d) < -1000 and before_pose is not None:
                poses3d = before_pose

            p3d = poses3d
            # logger.debug("poses3d")
            # logger.debug(poses3d)

            if level[FLAGS.verbose] == logging.INFO:
                viz.show3Dpose(p3d,
                               ax,
                               lcolor="#9b59b6",
                               rcolor="#2ecc71",
                               add_labels=True)

                # per-frame single-viewpoint render only at INFO level
                pngName = frame3d_dir + '/tmp_{0:012d}.png'.format(frame)
                plt.savefig(pngName)
                png_lib.append(imageio.imread(pngName))
                before_pose = poses3d

            # per-frame output from multiple angles only at DEBUG level
            if level[FLAGS.verbose] == logging.DEBUG:

                for azim in [0, 45, 90, 135, 180, 225, 270, 315, 360]:
                    ax2 = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                    ax2.view_init(18, azim)
                    viz.show3Dpose(p3d,
                                   ax2,
                                   lcolor="#FF0000",
                                   rcolor="#0000FF",
                                   add_labels=True)

                    pngName2 = frame3d_dir + '/tmp_{0:012d}_{1:03d}.png'.format(
                        frame, azim)
                    plt.savefig(pngName2)

            # write out the joint position data
            write_pos_data(poses3d, ax, posf)

        posf.close()

        # at INFO level, build the animation GIF
        if level[FLAGS.verbose] == logging.INFO:
            logger.info(
                "creating gif {0}/movie_smoothing.gif, please wait!".format(
                    subdir))
            imageio.mimsave('{0}/movie_smoothing.gif'.format(subdir),
                            png_lib,
                            fps=FLAGS.gif_fps)

        logger.info("Done!")
Code example #19
def main(_):
    done = []

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 0}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        rows = 0
        before_pose = None  # cache of the last good pose
        filename = "Realtimedata.xlsx"
        workbook = xlsxwriter.Workbook(filename)
        worksheet = workbook.add_worksheet()
        while True:
            key = cv2.waitKey(1) & 0xFF
            #logger.info("start reading data")
            # check for other file types
            list_of_files = glob.iglob("{0}/*".format(
                openpose_output_dir))  # iglob returns a lazy iterator
            latest_file = ""
            try:
                latest_file = max(list_of_files, key=os.path.getctime)
            except ValueError:
                #empty dir
                pass
            if not latest_file:
                continue
            try:
                _file = file_name = latest_file
                print(latest_file)
                if not os.path.isfile(_file):
                    raise Exception("No file found!!, {0}".format(_file))
                data = json.load(open(_file))
                #take first person
                _data = data["people"][0]["pose_keypoints_2d"]
                xy = []
                #ignore confidence score
                """for o in range(0,len(_data),3):
                    xy.append(_data[o])
                    xy.append(_data[o+1])"""
                if len(_data) >= 53:
                    #openpose incl. confidence score
                    #ignore confidence score
                    for o in range(0, len(_data), 3):
                        xy.append(_data[o])
                        xy.append(_data[o + 1])
                else:
                    #tf-pose-estimation
                    xy = _data

                frame_indx = re.findall(r"(\d+)", file_name)
                frame = int(frame_indx[0])

                joints_array = np.zeros((1, 36))
                joints_array[0] = [0 for i in range(36)]
                for o in range(len(joints_array[0])):
                    #feed array with xy array
                    joints_array[0][o] = xy[o]
                _data = joints_array[0]
                # mapping all body parts or 3d pose offline format
                for i in range(len(order)):
                    for j in range(2):
                        # create encoder input
                        enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
                for j in range(2):
                    # Hip
                    enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                            enc_in[0][6 * 2 + j]) / 2
                    # Neck/Nose
                    enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                             enc_in[0][12 * 2 + j]) / 2
                    # Thorax
                    enc_in[0][13 * 2 + j] = (2 * enc_in[0][12 * 2 + j] -
                                             enc_in[0][14 * 2 + j])

                # set spine
                spine_x = enc_in[0][24]
                spine_y = enc_in[0][25]

                enc_in = enc_in[:, dim_to_use_2d]
                mu = data_mean_2d[dim_to_use_2d]
                stddev = data_std_2d[dim_to_use_2d]
                enc_in = np.divide((enc_in - mu), stddev)

                dp = 1.0
                dec_out = np.zeros((1, 48))
                dec_out[0] = [0 for i in range(48)]
                _, _, poses3d = model.step(sess,
                                           enc_in,
                                           dec_out,
                                           dp,
                                           isTraining=False)
                all_poses_3d = []
                enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                    data_std_2d,
                                                    dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                     data_std_3d,
                                                     dim_to_ignore_3d)
                gs1 = gridspec.GridSpec(1, 1)
                gs1.update(wspace=-0.00,
                           hspace=0.05)  # set the spacing between axes.
                plt.axis('off')
                all_poses_3d.append(poses3d)
                enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
                subplot_idx, exidx = 1, 1
                _max = 0
                _min = 10000

                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        tmp = poses3d[i][j * 3 + 2]
                        poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                        poses3d[i][j * 3 + 1] = tmp
                        if poses3d[i][j * 3 + 2] > _max:
                            _max = poses3d[i][j * 3 + 2]
                        if poses3d[i][j * 3 + 2] < _min:
                            _min = poses3d[i][j * 3 + 2]

                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                        poses3d[i][j * 3] += (spine_x - 630)
                        poses3d[i][j * 3 + 2] += (500 - spine_y)

                # NOTE: this leftover block references min_vex, p_vex and
                # gait_list1, none of which are defined in this snippet; it is
                # kept disabled so the loop can run.
                # for val in min_vex:
                #     gait_list1.append(val[0])
                #     gait_list1.append(val[1])
                #     gait_list1.append(p_vex[i][0])
                #     gait_list1.append(p_vex[i][1])
                #     gait_list1.append(p_vex[i][2])
                #     points.append(" %f %f %f %d %d %d 0\n" %
                #                   (p_vex[i][0], p_vex[i][1], p_vex[i][2], 0, 255, 0))
                #     x.append(p_vex[i][0])
                #     y.append(p_vex[i][1])
                #     z.append(p_vex[i][2])
                #     i = i + 1

                # Plot 3d predictions
                ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                ax.view_init(18, -70)
                logger.debug(np.min(poses3d))
                if np.min(poses3d) < -1000 and before_pose is not None:
                    poses3d = before_pose

                p3d = poses3d
                '''gait_list1 = []
                #enter file path below
                with open('key_joint_info.csv', 'w', newline='') as myfile:
                    gait_list2.append(gait_list1)
                    data1 = pd.DataFrame(gait_list2)
                    wr = csv.writer(myfile, dialect = 'key_joint_info.csv' )
                    wr.writerow(p3d)
                    wb.save(key_joint_info.csv)'''

                viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")
                col = 0
                for i in p3d[0]:
                    worksheet.write(rows, col, i)
                    col += 1
                    #.append(i)
                rows += 1
                before_pose = poses3d
                pngName = '{}_keypoints.png'.format(str(frame))
                plt.savefig(pngName)

                #plt.show()
                img = cv2.imread(pngName, 0)
                rect_cpy = img.copy()
                cv2.imshow('3d-pose-realtime', rect_cpy)
                done.append(file_name)
                if key == ord('q'):
                    break
            except Exception as e:
                print(e)

        sess.close()
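
The realtime loop above keys everything off the most recently created file in the OpenPose output directory; the core of that polling, as a small self-contained sketch:

import glob
import os

def newest_file(directory):
    """Return the most recently created file in directory, or None if it is empty."""
    files = glob.iglob('{0}/*'.format(directory))
    try:
        return max(files, key=os.path.getctime)
    except ValueError:  # max() over an empty iterator
        return None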
Code example #20
def main(_):
    
    smoothed = read_openpose_json()
    plt.figure(2)
    smooth_curves_plot = show_anim_curves(smoothed, plt)
    #return
    pngName = 'gif_output/smooth_plot.png'
    smooth_curves_plot.savefig(pngName)
    logger.info('writing gif_output/smooth_plot.png')
    
    if FLAGS.interpolation:
        logger.info("start interpolation")

        framerange = len( smoothed.keys() )
        joint_rows = 36
        array = np.concatenate(list(smoothed.values()))
        array_reshaped = np.reshape(array, (framerange, joint_rows) )
    
        multiplier = FLAGS.multiplier
        multiplier_inv = 1/multiplier

        out_array = np.array([])
        for row in range(joint_rows):
            x = []
            for frame in range(framerange):
                x.append( array_reshaped[frame, row] )
            
            frame = range( framerange )
            frame_resampled = np.arange(0, framerange, multiplier)
            spl = UnivariateSpline(frame, x, k=3)
            #relative smooth factor based on jnt anim curve
            min_x, max_x = min(x), max(x)
            smooth_fac = max_x - min_x
            smooth_resamp = 125
            smooth_fac = smooth_fac * smooth_resamp
            spl.set_smoothing_factor( float(smooth_fac) )
            xnew = spl(frame_resampled)
            
            out_array = np.append(out_array, xnew)
    
        logger.info("done interpolating. reshaping {0} frames,  please wait!!".format(framerange))
    
        a = np.array([])
        for frame in range( int( framerange * multiplier_inv ) ):
            jnt_array = []
            for jnt in range(joint_rows):
                jnt_array.append( out_array[ jnt * int(framerange * multiplier_inv) + frame] )
            a = np.append(a, jnt_array)
        
        a = np.reshape(a, (int(framerange * multiplier_inv), joint_rows))
        out_array = a
    
        interpolate_smoothed = {}
        for frame in range( int(framerange * multiplier_inv) ):
            interpolate_smoothed[frame] = list( out_array[frame] )
        
        plt.figure(3)
        smoothed = interpolate_smoothed
        interpolate_curves_plot = show_anim_curves(smoothed, plt)
        pngName = 'gif_output/interpolate_{0}.png'.format(smooth_resamp)
        interpolate_curves_plot.savefig(pngName)
        logger.info('writing gif_output/interpolate_plot.png')

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 1}
    png_lib = []
    before_pose = None
    with tf.Session(config=tf.ConfigProto(
            device_count=device_count,
            allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        iter_range = len(smoothed.keys())
        export_units = {}
        twod_export_units = {}
        for n, (frame, xy) in enumerate(smoothed.items()):
            logger.info("calc frame {0}/{1}".format(frame, iter_range))
            # map list into np array  
            joints_array = np.zeros((1, 36))
            joints_array[0] = [0 for i in range(36)]
            for o in range(len(joints_array[0])):
                #feed array with xy array
                joints_array[0][o] = xy[o]

            twod_export_units[frame]={}
            for abs_b, __n in enumerate(range(0, len(xy),2)):
                twod_export_units[frame][abs_b] = {"translate": [xy[__n],xy[__n+1]]}

            _data = joints_array[0]
            # mapping all body parts or 3d-pose-baseline format
            for i in range(len(order)):
                for j in range(2):
                    # create encoder input
                    enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
            for j in range(2):
                # Hip
                enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] + enc_in[0][6 * 2 + j]) / 2
                # Neck/Nose
                enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] + enc_in[0][12 * 2 + j]) / 2
                # Thorax
                enc_in[0][13 * 2 + j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 + j]

            # set spine
            spine_x = enc_in[0][24]
            spine_y = enc_in[0][25]

            enc_in = enc_in[:, dim_to_use_2d]
            mu = data_mean_2d[dim_to_use_2d]
            stddev = data_std_2d[dim_to_use_2d]
            enc_in = np.divide((enc_in - mu), stddev)

            dp = 1.0
            dec_out = np.zeros((1, 48))
            dec_out[0] = [0 for i in range(48)]
            _, _, poses3d = model.step(sess, enc_in, dec_out, dp, isTraining=False)
            all_poses_3d = []
            enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d, data_std_2d, dim_to_ignore_2d)
            poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d)
            gs1 = gridspec.GridSpec(1, 1)
            gs1.update(wspace=-0.00, hspace=0.05)  # set the spacing between axes.
            plt.axis('off')
            all_poses_3d.append( poses3d )
            enc_in, poses3d = map( np.vstack, [enc_in, all_poses_3d] )
            subplot_idx, exidx = 1, 1
            _max = 0
            _min = 10000

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    tmp = poses3d[i][j * 3 + 2]
                    poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                    poses3d[i][j * 3 + 1] = tmp
                    if poses3d[i][j * 3 + 2] > _max:
                        _max = poses3d[i][j * 3 + 2]
                    if poses3d[i][j * 3 + 2] < _min:
                        _min = poses3d[i][j * 3 + 2]

            for i in range(poses3d.shape[0]):
                for j in range(32):
                    poses3d[i][j * 3 + 2] = _max - poses3d[i][j * 3 + 2] + _min
                    poses3d[i][j * 3] += (spine_x - 630)
                    poses3d[i][j * 3 + 2] += (500 - spine_y)

            # Plot 3d predictions
            ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
            ax.view_init(18, -70)    

            if FLAGS.cache_on_fail:
                if np.min(poses3d) < -1000 and before_pose is not None:
                    poses3d = before_pose

            p3d = poses3d
            to_export = poses3d.tolist()[0]
            x,y,z = [[] for _ in range(3)]
            for o in range(0, len(to_export), 3):
                x.append(to_export[o])
                y.append(to_export[o+1])
                z.append(to_export[o+2])
            export_units[frame]={}
            for jnt_index, (_x, _y, _z) in enumerate(zip(x,y,z)):
                export_units[frame][jnt_index] = {"translate": [_x, _y, _z]}


            viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")

            pngName = 'png/pose_frame_{0}.png'.format(str(frame).zfill(12))
            plt.savefig(pngName)
            if FLAGS.write_gif:
                png_lib.append(imageio.imread(pngName))

            if FLAGS.cache_on_fail:
                before_pose = poses3d

    if FLAGS.write_gif:
        if FLAGS.interpolation:
            # keep every multiplier_inv-th frame so the gif plays at the original speed
            png_lib = np.array([png_lib[png_image] for png_image in range(0, len(png_lib), int(multiplier_inv))])
        logger.info("creating gif gif_output/animation.gif, please wait!")
        imageio.mimsave('gif_output/animation.gif', png_lib, fps=FLAGS.gif_fps)

    _out_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'maya/3d_data.json')
    twod_out_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'maya/2d_data.json')
    with open(_out_file, 'w') as outfile:
        logger.info("exported maya json to {0}".format(_out_file))
        json.dump(export_units, outfile)
    with open(twod_out_file, 'w') as outfile:
        logger.info("exported maya json to {0}".format(twod_out_file))
        json.dump(twod_export_units, outfile)

    logger.info("Done!".format(pngName))
Code example #21
def sample():

  actions = data_utils.define_actions( FLAGS.action )

  SUBJECT_IDS = [1,5,6,7,8,9,11]
  rcams = load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

  train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
    actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14 )

  if FLAGS.use_sh:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
  else:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data( actions, FLAGS.data_dir, rcams )
  print( "done reading and normalizing data." )

  device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
  with tf.Session(config=tf.ConfigProto( device_count = device_count )) as sess:
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.linear_size))
    batch_size = 128
    model = create_model(sess, actions, batch_size)
    print("Model loaded")

    for key2d in test_set_2d.keys():

      (subj, b, fname) = key2d
      print( "Subject: {}, action: {}, fname: {}".format(subj, b, fname) )

      key3d = key2d if FLAGS.camera_frame else (subj, b, '{0}.h5'.format(fname.split('.')[0]))
      key3d = (subj, b, fname[:-3]) if (fname.endswith('-sh')) and FLAGS.camera_frame else key3d

      enc_in  = test_set_2d[ key2d ]
      n2d, _ = enc_in.shape
      dec_out = test_set_3d[ key3d ]
      n3d, _ = dec_out.shape
      assert n2d == n3d

      enc_in   = np.array_split( enc_in,  n2d // batch_size )
      dec_out  = np.array_split( dec_out, n3d // batch_size )
      all_poses_3d = []

      for bidx in range( len(enc_in) ):

        dp = 1.0
        _, _, poses3d = model.step(sess, enc_in[bidx], dec_out[bidx], dp, isTraining=False)

        enc_in[bidx]  = data_utils.unNormalizeData(  enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d )
        dec_out[bidx] = data_utils.unNormalizeData( dec_out[bidx], data_mean_3d, data_std_3d, dim_to_ignore_3d )
        poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )
        all_poses_3d.append( poses3d )

      enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, all_poses_3d] )

      if FLAGS.camera_frame:
        N_CAMERAS = 4
        N_JOINTS_H36M = 32

        dec_out = dec_out + np.tile( test_root_positions[ key3d ], [1,N_JOINTS_H36M] )
        subj, _, sname = key3d

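        # look up the camera whose name matches the camera suffix of the sequence name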
        cname = sname.split('.')[1] 
        scams = {(subj,c+1): rcams[(subj,c+1)] for c in range(N_CAMERAS)} 
        scam_idx = [scams[(subj,c+1)][-1] for c in range(N_CAMERAS)].index( cname )
        the_cam  = scams[(subj, scam_idx+1)]
        R, T, f, c, k, p, name = the_cam
        assert name == cname

        def cam2world_centered(data_3d_camframe):
          data_3d_worldframe = camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
          data_3d_worldframe = data_3d_worldframe.reshape((-1, N_JOINTS_H36M*3))
          return data_3d_worldframe - np.tile( data_3d_worldframe[:,:3], (1,N_JOINTS_H36M) )

        dec_out = cam2world_centered(dec_out)
        poses3d = cam2world_centered(poses3d)

  enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, poses3d] )
  idx = np.random.permutation( enc_in.shape[0] )
  enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

  import matplotlib.gridspec as gridspec

  fig = plt.figure( figsize=(19.2, 10.8) )

  gs1 = gridspec.GridSpec(5, 9)
  gs1.update(wspace=-0.00, hspace=0.05)
  plt.axis('off')

  subplot_idx, exidx = 1, 1
  nsamples = 15
  for i in np.arange( nsamples ):

    ax1 = plt.subplot(gs1[subplot_idx-1])
    p2d = enc_in[exidx,:]
    viz.show2Dpose( p2d, ax1 )
    ax1.invert_yaxis()

    ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
    p3d = dec_out[exidx,:]
    viz.show3Dpose( p3d, ax2 )

    ax3 = plt.subplot(gs1[subplot_idx+1], projection='3d')
    p3d = poses3d[exidx,:]
    viz.show3Dpose( p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71" )

    exidx = exidx + 1
    subplot_idx = subplot_idx + 3

  plt.show()
コード例 #22
0
def main(_):
    actions_all = data_utils.define_actions("All")
    actions = data_utils.define_actions("Discussion")

    # Load camera parameters
    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

    # Load 3d data and load (or create) 2d projections
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)
    train_set_3d = data_utils.remove_first_frame(train_set_3d)
    test_set_3d = data_utils.remove_first_frame(test_set_3d)
    train_root_positions = data_utils.remove_first_frame(train_root_positions)
    test_root_positions = data_utils.remove_first_frame(test_root_positions)
    print("Finished Read 3D Data")

    # train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions_all, FLAGS.data_dir)
    # train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.transform_to_2d_biframe_prediction(train_set_2d,
    #                                                                                                                                        test_set_2d,
    #                                                                                                                                        data_mean_2d,
    #                                                                                                                                        data_std_2d,
    #                                                                                                                                        dim_to_ignore_2d,
    #                                                                                                                                        dim_to_use_2d)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data(
        actions_all, FLAGS.data_dir, rcams)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.transform_to_2d_biframe_prediction(
        train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d,
        dim_to_use_2d)

    SH_TO_GT_PERM = np.array(
        [SH_NAMES.index(h) for h in H36M_NAMES if h != '' and h in SH_NAMES])
    assert np.all(SH_TO_GT_PERM == np.array(
        [6, 2, 1, 0, 3, 4, 5, 7, 8, 9, 13, 14, 15, 12, 11, 10]))

    test_set = {}

    manipulation_dir = os.path.dirname(FLAGS.data_dir)
    manipulation_dir = os.path.dirname(manipulation_dir)
    manipulation_dir += '/manipulation_video/'
    manipulation_folders = glob.glob(manipulation_dir + '*')

    subj = 1
    action = 'manipulation-video'
    for folder in manipulation_folders:
        seqname = os.path.basename(folder)
        with h5py.File(folder + '/' + seqname + '.h5', 'r') as h5f:
            poses = h5f['poses'][:]

            # Permute the loaded data to make it compatible with H36M
            poses = poses[:, SH_TO_GT_PERM, :]

            # Reshape into n x (32*2) matrix
            poses = np.reshape(poses, [poses.shape[0], -1])
            poses_final = np.zeros([poses.shape[0], len(H36M_NAMES) * 2])

            dim_to_use_x = np.where(
                np.array([x != '' and x != 'Neck/Nose'
                          for x in H36M_NAMES]))[0] * 2
            dim_to_use_y = dim_to_use_x + 1

            dim_to_use = np.zeros(len(SH_NAMES) * 2, dtype=np.int32)
            dim_to_use[0::2] = dim_to_use_x
            dim_to_use[1::2] = dim_to_use_y
            poses_final[:, dim_to_use] = poses

            print(seqname, poses_final.shape)
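            # presumably keeps missing joints from being exactly zero before normalization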
            poses_final[poses_final == 0.] = 0.1
            test_set[(subj, action, seqname)] = poses_final

    test_set = data_utils.uni_frame_to_bi_frame(test_set)
    test_set_2d = data_utils.normalize_data(test_set, data_mean_2d,
                                            data_std_2d, dim_to_use_2d)
    for key in test_set.keys():
        test_set[key] = test_set[key][0::2, :]

    dim_to_use_12_manipulation_joints = np.array([
        3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 19, 20, 21, 22, 23, 24, 25, 26, 51,
        52, 53, 54, 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 80, 81, 82, 83
    ])

    print("Finished Normalize Manipualtion Videos")
    device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
    with tf.Session(config=tf.ConfigProto(device_count=device_count)) as sess:
        # === Create the model ===
        print("Creating %d layers of %d units." %
              (FLAGS.num_layers, FLAGS.linear_size))
        batch_size = FLAGS.batch_size  # initial code used 64 * 2
        model = predict_3dpose_biframe.create_model(sess, actions_all,
                                                    batch_size)
        print("Model loaded")

        j = 0
        for key2d in test_set_2d.keys():

            (subj, b, fname) = key2d
            # if fname !=  specific_seqname + '.h5':
            #     continue
            print("Subject: {}, action: {}, fname: {}".format(subj, b, fname))

            enc_in = test_set_2d[key2d]
            n2d, _ = enc_in.shape

            # Split into single-frame batches (n2d // 1 == n2d, one frame per batch)
            enc_in = np.array_split(enc_in, n2d // 1)
            all_poses_3d = []

            for bidx in range(len(enc_in)):

                # Dropout probability 0 (keep probability 1) for sampling
                dp = 1.0
                anything = np.zeros((enc_in[bidx].shape[0], 48))
                _, _, poses3d = model.step(sess,
                                           enc_in[bidx],
                                           anything,
                                           dp,
                                           isTraining=False)

                # Denormalize
                enc_in[bidx] = data_utils.unNormalizeData(
                    enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                     data_std_3d,
                                                     dim_to_ignore_3d)
                all_poses_3d.append(poses3d)

            # Put all the poses together
            enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])

            enc_in, poses3d = map(np.vstack, [enc_in, poses3d])

            poses3d_12_manipulation = poses3d[:,
                                              dim_to_use_12_manipulation_joints]

            annotated_images = glob.glob(manipulation_dir + fname +
                                         '/info/*.xml')
            annotated_images = sorted(annotated_images)

            # 10 x 10 inch figure
            fig = plt.figure(j, figsize=(10, 10))
            gs1 = gridspec.GridSpec(3, 3)
            gs1.update(wspace=-0, hspace=0.1)  # set the spacing between axes.
            plt.axis('off')

            subplot_idx = 1
            nsamples = 3
            for i in np.arange(nsamples):
                # Plot 2d Detection
                ax1 = plt.subplot(gs1[subplot_idx - 1])
                img = mpimg.imread(
                    manipulation_dir + fname + '/skeleton_cropped/' +
                    os.path.basename(annotated_images[i]).split('_')[0] +
                    '.jpg')
                ax1.imshow(img)

                # Plot 2d pose
                ax2 = plt.subplot(gs1[subplot_idx])
                # p2d = enc_in[i,:]
                # viz.show2Dpose( p2d, ax2 )
                # ax2.invert_yaxis()
                ax2.imshow(img)

                # Plot 3d predictions
                # First compute the Procrustes alignment and print the error
                gt = getJ3dPosFromXML(annotated_images[i])
                A = poses3d_12_manipulation[i, :].reshape(gt.shape)
                _, Z, T, b, c = procrustes.compute_similarity_transform(
                    gt, A, compute_optimal_scale=True)
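                # per-joint Euclidean distance after aligning the prediction to the annotation with the similarity transform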
                sqerr = np.sqrt(np.sum((gt - (b * A.dot(T)) - c)**2, axis=1))
                print("{0} - {1} - Mean Error (mm) : {2}".format(
                    fname, os.path.basename(annotated_images[i]),
                    np.mean(sqerr)))

                ax3 = plt.subplot(gs1[subplot_idx + 1], projection='3d')
                temp = poses3d[i, :].reshape((32, 3))
                temp = c + temp.dot(T)  # apply rotation and translation only, do not scale
                # p3d = temp.reshape((1, 96))
                p3d = poses3d[i, :]
                viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")
                ax3.invert_zaxis()
                ax3.invert_yaxis()

                subplot_idx = subplot_idx + 3

            plt.show()
            j += 1
コード例 #23
0
def main(_):
    done = []

    enc_in = np.zeros((1, 64))
    enc_in[0] = [0 for i in range(64)]

    actions = data_utils.define_actions(FLAGS.action)

    SUBJECT_IDS = [1, 5, 6, 7, 8, 9, 11]
    rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(
        actions, FLAGS.data_dir)
    train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
        actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14)

    device_count = {"GPU": 0}
    png_lib = []
    with tf.Session(config=tf.ConfigProto(device_count=device_count,
                                          allow_soft_placement=True)) as sess:
        #plt.figure(3)
        batch_size = 128
        model = create_model(sess, actions, batch_size)
        while True:
            key = cv2.waitKey(1) & 0xFF
            #logger.info("start reading data")
            # check for other file types
            list_of_files = glob.iglob("{0}/*".format(
                openpose_output_dir))  # iglob avoids building the full file list
            latest_file = ""
            try:
                latest_file = max(list_of_files, key=os.path.getctime)
            except ValueError:
                # empty dir
                pass
            if not latest_file:
                continue
            try:
                _file = file_name = latest_file
                print(latest_file)
                if not os.path.isfile(_file):
                    raise Exception("No file found!!, {0}".format(_file))
                data = json.load(open(_file))
                #take first person
                _data = data["people"][0]["pose_keypoints"]
                xy = []
                #ignore confidence score
                for o in range(0, len(_data), 3):
                    xy.append(_data[o])
                    xy.append(_data[o + 1])

                frame_indx = re.findall(r"(\d+)", file_name)
                frame = int(frame_indx[0])

                joints_array = np.zeros((1, 36))
                joints_array[0] = [0 for i in range(36)]
                for o in range(len(joints_array[0])):
                    #feed array with xy array
                    joints_array[0][o] = xy[o]
                _data = joints_array[0]
                # map all body parts to the 3d-pose-baseline format
                for i in range(len(order)):
                    for j in range(2):
                        # create encoder input
                        enc_in[0][order[i] * 2 + j] = _data[i * 2 + j]
                for j in range(2):
                    # Hip
                    enc_in[0][0 * 2 + j] = (enc_in[0][1 * 2 + j] +
                                            enc_in[0][6 * 2 + j]) / 2
                    # Neck/Nose
                    enc_in[0][14 * 2 + j] = (enc_in[0][15 * 2 + j] +
                                             enc_in[0][12 * 2 + j]) / 2
                    # Thorax
                    enc_in[0][
                        13 * 2 +
                        j] = 2 * enc_in[0][12 * 2 + j] - enc_in[0][14 * 2 + j]

                # set spine
                spine_x = enc_in[0][24]
                spine_y = enc_in[0][25]

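                # normalize the 2D input using the training-set mean and std over the used dimensions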
                enc_in = enc_in[:, dim_to_use_2d]
                mu = data_mean_2d[dim_to_use_2d]
                stddev = data_std_2d[dim_to_use_2d]
                enc_in = np.divide((enc_in - mu), stddev)

                dp = 1.0
                dec_out = np.zeros((1, 48))
                dec_out[0] = [0 for i in range(48)]
                _, _, poses3d = model.step(sess,
                                           enc_in,
                                           dec_out,
                                           dp,
                                           isTraining=False)
                all_poses_3d = []
                enc_in = data_utils.unNormalizeData(enc_in, data_mean_2d,
                                                    data_std_2d,
                                                    dim_to_ignore_2d)
                poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d,
                                                     data_std_3d,
                                                     dim_to_ignore_3d)
                gs1 = gridspec.GridSpec(1, 1)
                gs1.update(wspace=-0.00,
                           hspace=0.05)  # set the spacing between axes.
                plt.axis('off')
                all_poses_3d.append(poses3d)
                enc_in, poses3d = map(np.vstack, [enc_in, all_poses_3d])
                subplot_idx, exidx = 1, 1
                _max = 0
                _min = 10000

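                # swap the y and z coordinates of each joint and track the min/max of the new vertical axis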
                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        tmp = poses3d[i][j * 3 + 2]
                        poses3d[i][j * 3 + 2] = poses3d[i][j * 3 + 1]
                        poses3d[i][j * 3 + 1] = tmp
                        if poses3d[i][j * 3 + 2] > _max:
                            _max = poses3d[i][j * 3 + 2]
                        if poses3d[i][j * 3 + 2] < _min:
                            _min = poses3d[i][j * 3 + 2]

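                # flip the vertical axis within [_min, _max] and shift the pose toward the 2D spine position (630 and 500 appear to be fixed image-space offsets)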
                for i in range(poses3d.shape[0]):
                    for j in range(32):
                        poses3d[i][j * 3 +
                                   2] = _max - poses3d[i][j * 3 + 2] + _min
                        poses3d[i][j * 3] += (spine_x - 630)
                        poses3d[i][j * 3 + 2] += (500 - spine_y)

                # Plot 3d predictions
                ax = plt.subplot(gs1[subplot_idx - 1], projection='3d')
                ax.view_init(18, -70)
                logger.debug(np.min(poses3d))
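                # fall back to the previous frame's pose when the prediction degenerates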
                if np.min(poses3d) < -1000 and frame != 0:
                    poses3d = before_pose

                p3d = poses3d

                viz.show3Dpose(p3d, ax, lcolor="#9b59b6", rcolor="#2ecc71")
                before_pose = poses3d
                pngName = 'png/test_{0}.png'.format(str(frame))
                plt.savefig(pngName)

                #plt.show()
                img = cv2.imread(pngName, 0)
                rect_cpy = img.copy()
                cv2.imshow('3d-pose-baseline', rect_cpy)
                done.append(file_name)
                if key == ord('q'):
                    break
            except Exception as e:
                print(e)

        sess.close()
コード例 #24
0
def sample():
  """Get samples from a model and visualize them"""

  actions = data_utils.define_actions( FLAGS.action )

  # Load camera parameters
  SUBJECT_IDS = [1,5,6,7,8,9,11]
  rcams = cameras.load_cameras(FLAGS.cameras_path, SUBJECT_IDS)

  # Load 3d data and load (or create) 2d projections
  train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d, train_root_positions, test_root_positions = data_utils.read_3d_data(
    actions, FLAGS.data_dir, FLAGS.camera_frame, rcams, FLAGS.predict_14 )

  if FLAGS.use_sh:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_2d_predictions(actions, FLAGS.data_dir)
  else:
    train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.create_2d_data( actions, FLAGS.data_dir, rcams )
  print( "done reading and normalizing data." )

  device_count = {"GPU": 0} if FLAGS.use_cpu else {"GPU": 1}
  with tf.Session(config=tf.ConfigProto( device_count = device_count )) as sess:
    # === Create the model ===
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.linear_size))
    batch_size = 128
    model = create_model(sess, actions, batch_size)
    print("Model loaded")

    for key2d in test_set_2d.keys():

      (subj, b, fname) = key2d
      print( "Subject: {}, action: {}, fname: {}".format(subj, b, fname) )

      # keys should be the same if 3d is in camera coordinates
      key3d = key2d if FLAGS.camera_frame else (subj, b, '{0}.h5'.format(fname.split('.')[0]))
      key3d = (subj, b, fname[:-3]) if (fname.endswith('-sh')) and FLAGS.camera_frame else key3d

      enc_in  = test_set_2d[ key2d ]
      n2d, _ = enc_in.shape
      dec_out = test_set_3d[ key3d ]
      n3d, _ = dec_out.shape
      assert n2d == n3d

      # Split into about-same-size batches
      enc_in   = np.array_split( enc_in,  n2d // batch_size )
      dec_out  = np.array_split( dec_out, n3d // batch_size )
      all_poses_3d = []

      for bidx in range( len(enc_in) ):

        # Dropout probability 0 (keep probability 1) for sampling
        dp = 1.0
        _, _, poses3d = model.step(sess, enc_in[bidx], dec_out[bidx], dp, isTraining=False)

        # denormalize
        enc_in[bidx]  = data_utils.unNormalizeData(  enc_in[bidx], data_mean_2d, data_std_2d, dim_to_ignore_2d )
        dec_out[bidx] = data_utils.unNormalizeData( dec_out[bidx], data_mean_3d, data_std_3d, dim_to_ignore_3d )
        poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )
        all_poses_3d.append( poses3d )

      # Put all the poses together
      enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, all_poses_3d] )

      # Convert back to world coordinates
      if FLAGS.camera_frame:
        N_CAMERAS = 4
        N_JOINTS_H36M = 32

        # Add global position back
        dec_out = dec_out + np.tile( test_root_positions[ key3d ], [1,N_JOINTS_H36M] )

        # Load the appropriate camera
        subj, _, sname = key3d

        cname = sname.split('.')[1] # <-- camera name
        scams = {(subj,c+1): rcams[(subj,c+1)] for c in range(N_CAMERAS)} # cams of this subject
        scam_idx = [scams[(subj,c+1)][-1] for c in range(N_CAMERAS)].index( cname ) # index of camera used
        the_cam  = scams[(subj, scam_idx+1)] # <-- the camera used
        R, T, f, c, k, p, name = the_cam
        assert name == cname

        def cam2world_centered(data_3d_camframe):
          data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
          data_3d_worldframe = data_3d_worldframe.reshape((-1, N_JOINTS_H36M*3))
          # subtract root translation
          return data_3d_worldframe - np.tile( data_3d_worldframe[:,:3], (1,N_JOINTS_H36M) )

        # Apply inverse rotation and translation
        dec_out = cam2world_centered(dec_out)
        poses3d = cam2world_centered(poses3d)

  # Grab a random batch to visualize
  enc_in, dec_out, poses3d = map( np.vstack, [enc_in, dec_out, poses3d] )
  idx = np.random.permutation( enc_in.shape[0] )
  enc_in, dec_out, poses3d = enc_in[idx, :], dec_out[idx, :], poses3d[idx, :]

  # Visualize random samples
  import matplotlib.gridspec as gridspec

  # 1080p	= 1,920 x 1,080
  fig = plt.figure( figsize=(19.2, 10.8) )

  gs1 = gridspec.GridSpec(5, 9) # 5 rows, 9 columns
  gs1.update(wspace=-0.00, hspace=0.05) # set the spacing between axes.
  plt.axis('off')

  subplot_idx, exidx = 1, 1
  nsamples = 15
  for i in np.arange( nsamples ):

    # Plot 2d pose
    ax1 = plt.subplot(gs1[subplot_idx-1])
    p2d = enc_in[exidx,:]
    viz.show2Dpose( p2d, ax1 )
    ax1.invert_yaxis()

    # Plot 3d gt
    ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
    p3d = dec_out[exidx,:]
    viz.show3Dpose( p3d, ax2 )

    # Plot 3d predictions
    ax3 = plt.subplot(gs1[subplot_idx+1], projection='3d')
    p3d = poses3d[exidx,:]
    viz.show3Dpose( p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71" )

    exidx = exidx + 1
    subplot_idx = subplot_idx + 3

  plt.show()
コード例 #25
0
print(dec_out, 'dec_out')
# Visualize random samples
import matplotlib.gridspec as gridspec

# 1080p	= 1,920 x 1,080
fig = plt.figure(figsize=(19.2, 10.8))  # set up the figure size first

gs1 = gridspec.GridSpec(1, 2)  # 1 row, 2 columns
gs1.update(wspace=0.05, hspace=0.05)  # set the spacing between axes.
plt.axis('off')

subplot_idx, exidx = 1, 1
nsamples = 1
for i in np.arange(nsamples):

    # Plot 2d pose
    ax1 = plt.subplot(gs1[subplot_idx - 1])
    p2d = enc_final[exidx - 1, :]
    viz.show2Dpose(p2d, ax1)
    ax1.invert_yaxis()

    # Plot 3d gt
    ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
    p3d = dec_final[exidx - 1, :]
    viz.show3Dpose(p3d, ax2)

    exidx = exidx + 1
    subplot_idx = subplot_idx + 3

plt.show()