Example No. 1
def demo_for_kinect():
    # data_dir = "E:/git/python-learning-notes/tutorial/kinect2/dataset/kitnect3d"
    data_dir = "/media/dm/dm/X2/Pose/dataset/kitnet_data/panjinquan"
    image, joint_world = load_data(data_dir, flag=5)
    joint_world = -joint_world * 1000  # meters -> millimeters; the negation flips all three axis directions
    kps_lines = [
        [0, 1],
        [1, 20],
        [20, 2],
        [2, 3],  # Spine
        [20, 4],
        [4, 5],
        [5, 6],
        [6, 7],
        [7, 21],
        [7, 22],  # Left arm and hand
        [20, 8],
        [8, 9],
        [9, 10],
        [10, 11],
        [11, 23],
        [11, 24],  # Right arm and hand
        [0, 12],
        [12, 13],
        [13, 14],
        [14, 15],  # Left leg
        [0, 16],
        [16, 17],
        [17, 18],
        [18, 19]
    ]  # Right leg
    # show in the world coordinate system (WC)
    vis.vis_3d(joint_world, kps_lines, coordinate="WC", title="WC")

    kp_vis = CameraTools()

    # show in the camera coordinate system (CC)
    joint_cam = kp_vis.convert_wc_to_cc(joint_world)
    vis.vis_3d(joint_cam, kps_lines, coordinate="CC", title="CC")
    joint_img = kp_vis.convert_cc_to_ic(joint_cam)
    # show in the pixel (image) coordinate system
    kpt_2d = joint_img[:, 0:2]
    image = image_processing.draw_key_point_in_image(image,
                                                     key_points=[kpt_2d],
                                                     pointline=kps_lines)
    image_processing.cv_show_image("image_dict", image)
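
The CameraTools class used above is project code that is not shown in these examples. As a minimal sketch of what its two conversions typically do, assuming a simple pinhole model with placeholder rotation R, translation T, focal lengths f and principal point c (all values below are hypothetical, not the repository's calibration):

import numpy as np

class CameraToolsSketch(object):
    def __init__(self):
        self.R = np.eye(3)                   # hypothetical world-to-camera rotation
        self.T = np.zeros(3)                 # hypothetical camera center in world coordinates (mm)
        self.f = np.array([1145.0, 1145.0])  # hypothetical focal lengths in pixels
        self.c = np.array([512.0, 512.0])    # hypothetical principal point in pixels

    def convert_wc_to_cc(self, joint_world):
        # extrinsic transform: X_cam = R * (X_world - T)
        return np.dot(self.R, (joint_world - self.T).T).T

    def convert_cc_to_ic(self, joint_cam):
        # perspective projection: u = f_x * x / z + c_x, v = f_y * y / z + c_y
        x = joint_cam[:, 0] / joint_cam[:, 2] * self.f[0] + self.c[0]
        y = joint_cam[:, 1] / joint_cam[:, 2] * self.f[1] + self.c[1]
        return np.stack([x, y, joint_cam[:, 2]], axis=1)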
Example No. 2
def demo_for_human36m():
    joint_world = [[-91.679, 154.404, 907.261],
                   [-223.23566, 163.80551, 890.5342],
                   [-188.4703, 14.077106, 475.1688],
                   [-261.84055, 186.55286, 61.438915],
                   [39.877888, 145.00247, 923.98785],
                   [-11.675994, 160.89919, 484.39148],
                   [-51.550297, 220.14624, 35.834396],
                   [-132.34781, 215.73018, 1128.8396],
                   [-97.1674, 202.34435, 1383.1466],
                   [-112.97073, 127.96946, 1477.4457],
                   [-120.03289, 190.96477, 1573.4],
                   [25.895456, 192.35947, 1296.1571],
                   [107.10581, 116.050285, 1040.5062],
                   [129.8381, -48.024918, 850.94806],
                   [-230.36955, 203.17923, 1311.9639],
                   [-315.40536, 164.55284, 1049.1747],
                   [-350.77136, 43.442127, 831.3473],
                   [-102.237045, 197.76935, 1304.0605]]
    joint_world = np.asarray(joint_world)
    # key point connection lines (skeleton edges)
    kps_lines = ((0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13),
                 (8, 14), (14, 15), (15, 16), (0, 1), (1, 2), (2, 3), (0, 4),
                 (4, 5), (5, 6))
    # show in the world coordinate system (WC)
    vis.vis_3d(joint_world,
               kps_lines,
               coordinate="WC",
               title="WC",
               set_lim=True,
               isshow=True)

    kp_vis = CameraTools()

    # show in the camera coordinate system (CC)
    joint_cam = kp_vis.convert_wc_to_cc(joint_world)
    vis.vis_3d(joint_cam,
               kps_lines,
               coordinate="CC",
               title="CC",
               set_lim=True,
               isshow=True)
    joint_img = kp_vis.convert_cc_to_ic(joint_cam)

    joint_world1 = kp_vis.convert_cc_to_wc(joint_cam)
    vis.vis_3d(joint_world1,
               kps_lines,
               coordinate="WC",
               title="WC",
               set_lim=True,
               isshow=True)

    # show in the pixel (image) coordinate system
    kpt_2d = joint_img[:, 0:2]
    image_path = "./data/s_01_act_02_subact_01_ca_02_000001.jpg"
    image = image_processing.read_image(image_path)
    image = image_processing.draw_key_point_in_image(image,
                                                     key_points=[kpt_2d],
                                                     pointline=kps_lines)
    image_processing.cv_show_image("image_dict", image)
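
Example No. 2 additionally round-trips the points with convert_cc_to_wc. Under the same pinhole assumptions as the sketch above, that step is simply the inverse of the extrinsic transform (a method that would live on the same sketch class):

    def convert_cc_to_wc(self, joint_cam):
        # inverse of X_cam = R * (X_world - T):  X_world = R^T * X_cam + T
        return np.dot(self.R.T, joint_cam.T).T + self.T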
Example No. 3
def demo_for_kinect():
    flip_transform = np.asarray([[1, 0, 0],
                                 [0, -1, 0],
                                 [0, 0, -1]])

    # flip_transform = np.linalg.inv(flip_transform)
    # data_dir = "E:/git/python-learning-notes/tutorial/kinect2/dataset/kitnect3d"
    data_dir = "/media/dm/dm/X2/Pose/dataset/kitnet_data/panjinquan"  # flag= 5
    # data_dir = "/media/dm/dm/X2/Pose/dataset/kitnet_data/dengjianxiang"  # 241,245,348
    image, joint_world = load_data(data_dir, flag=503)
    h, w, d = image.shape
    joint_world = convert_kinect2h36m(joint_world)
    joint_world = joint_world * 1000  # meters -> millimeters
    joint_world = np.dot(-flip_transform, joint_world.T).T  # R * (pt - T)

    # kps_lines = [[0, 1], [1, 20], [20, 2], [2, 3],  # Spine
    #              [20, 4], [4, 5], [5, 6], [6, 7], [7, 21], [7, 22],  # Left arm and hand
    #              [20, 8], [8, 9], [9, 10], [10, 11], [11, 23], [11, 24],  # Right arm and hand
    #              [0, 12], [12, 13], [13, 14], [14, 15],  # Left leg
    #              [0, 16], [16, 17], [17, 18], [18, 19]]  # Right leg
    kps_lines = ((0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15),
                 (15, 16), (0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6))

    # show in the world coordinate system (WC)
    vis.vis_3d(joint_world, kps_lines, coordinate="WC", title="WC", set_lim=True)

    kp_vis = KeyPointsVisual()

    # show in the camera coordinate system (CC)
    joint_cam = kp_vis.convert_wc_to_cc(joint_world)
    vis.vis_3d(joint_cam, kps_lines, coordinate="CC", title="CC", set_lim=True)
    joint_cam = np.dot(flip_transform, joint_cam.T).T  # R * (pt - T)
    joint_img = kp_vis.convert_cc_to_ic(joint_cam)
    # show in the pixel (image) coordinate system
    kpt_2d = joint_img[:, 0:2]
    image = image_processing.draw_key_point_in_image(image, key_points=[kpt_2d], pointline=kps_lines)
    image_processing.cv_show_image("image_dict", image)
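
convert_kinect2h36m is not shown here either. Conceptually it selects 17 of Kinect v2's 25 joints and reorders them into the H36M-style skeleton implied by kps_lines; the index mapping in this sketch is an assumption and may differ from the repository's actual one:

import numpy as np

# Hypothetical Kinect-v2 -> H36M-17 joint reordering (assumed, not the repo's mapping).
KINECT2H36M_IDX = [0,            # Pelvis  <- SpineBase
                   16, 17, 18,   # R hip, knee, ankle
                   12, 13, 14,   # L hip, knee, ankle
                   1,            # Spine   <- SpineMid
                   20,           # Thorax  <- SpineShoulder
                   2, 3,         # Neck, Head
                   4, 5, 6,      # L shoulder, elbow, wrist
                   8, 9, 10]     # R shoulder, elbow, wrist

def convert_kinect2h36m_sketch(joint_kinect):
    joint_kinect = np.asarray(joint_kinect)
    return joint_kinect[KINECT2H36M_IDX, :]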
Example No. 4
def demo_for_human36m():
    from modules.utils_3d.data import human36m_data
    # x,y,z
    # joint_world = human36m_data.data0
    joint_world = human36m_data.data2 * 1000
    # joint_world = human36m_data.data1*1000
    joint_world = np.asarray(joint_world)
    kps_lines = human36m_data.kps_lines
    # show in the world coordinate system (WC)
    vis.vis_3d(joint_world, kps_lines, coordinate="WC", title="WC", set_lim=True)

    kp_vis = KeyPointsVisual()

    # show in the camera coordinate system (CC)
    joint_cam = kp_vis.convert_wc_to_cc(joint_world)
    vis.vis_3d(joint_cam, kps_lines, coordinate="CC", title="CC", set_lim=True)
    joint_img = kp_vis.convert_cc_to_ic(joint_cam)

    # show in the pixel (image) coordinate system
    kpt_2d = joint_img[:, 0:2]
    image_path = "/media/dm/dm1/git/python-learning-notes/modules/utils_3d/data/s_01_act_02_subact_01_ca_02_000001.jpg"
    image = image_processing.read_image(image_path)
    image = image_processing.draw_key_point_in_image(image, key_points=[kpt_2d], pointline=kps_lines)
    image_processing.cv_show_image("image_dict", image)
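
image_processing.draw_key_point_in_image and cv_show_image are project helpers as well; a rough OpenCV sketch with the same call pattern (the function names and argument handling here are assumptions):

import cv2
import numpy as np

def draw_key_point_in_image_sketch(image, key_points, pointline=()):
    # key_points: list of (N, 2) arrays of pixel coordinates
    image = image.copy()
    for kpts in key_points:
        kpts = np.asarray(kpts)
        for i, j in pointline:
            p1 = (int(kpts[i][0]), int(kpts[i][1]))
            p2 = (int(kpts[j][0]), int(kpts[j][1]))
            cv2.line(image, p1, p2, (0, 255, 0), 2)
        for x, y in kpts:
            cv2.circle(image, (int(x), int(y)), 4, (0, 0, 255), -1)
    return image

def cv_show_image_sketch(title, image, delay=0):
    cv2.imshow(title, image)
    cv2.waitKey(delay)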
Example No. 5
def demo_for_human36m():
    joint_world = [[-91.679, 154.404, 907.261],
                   [-223.23566, 163.80551, 890.5342],
                   [-188.4703, 14.077106, 475.1688],
                   [-261.84055, 186.55286, 61.438915],
                   [39.877888, 145.00247, 923.98785],
                   [-11.675994, 160.89919, 484.39148],
                   [-51.550297, 220.14624, 35.834396],
                   [-132.34781, 215.73018, 1128.8396],
                   [-97.1674, 202.34435, 1383.1466],
                   [-112.97073, 127.96946, 1477.4457],
                   [-120.03289, 190.96477, 1573.4],
                   [25.895456, 192.35947, 1296.1571],
                   [107.10581, 116.050285, 1040.5062],
                   [129.8381, -48.024918, 850.94806],
                   [-230.36955, 203.17923, 1311.9639],
                   [-315.40536, 164.55284, 1049.1747],
                   [-350.77136, 43.442127, 831.3473],
                   [-102.237045, 197.76935, 1304.0605]]
    joint_world = np.asarray(joint_world)
    kps_lines = ((0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15),
                 (15, 16), (0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6))
    # show in the world coordinate system (WC)
    vis.vis_3d(joint_world, kps_lines, coordinate="WC", title="WC")

    human36m = Human36M()

    # show in the camera coordinate system (CC)
    joint_cam = human36m.convert_wc_to_cc(joint_world)
    vis.vis_3d(joint_cam, kps_lines, coordinate="CC", title="CC")
    joint_img = human36m.convert_cc_to_ic(joint_cam)

    # show in the pixel (image) coordinate system
    kpt_2d = joint_img[:, 0:2]
    image_path = "data/s_01_act_02_subact_01_ca_02_000001.jpg"
    image = image_processing.read_image(image_path)
    image = image_processing.draw_key_point_in_image(image, key_points=[kpt_2d], pointline=kps_lines)
    image_processing.cv_show_image("image_dict", image)
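
Finally, vis.vis_3d is the project's 3D skeleton viewer. A minimal matplotlib sketch that accepts the same arguments used in these examples (the exact argument handling is an assumption):

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the "3d" projection

def vis_3d_sketch(kpt_3d, kps_lines, coordinate="WC", title="", set_lim=False, isshow=True):
    kpt_3d = np.asarray(kpt_3d)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    ax.set_title("{} ({})".format(title, coordinate))
    ax.scatter(kpt_3d[:, 0], kpt_3d[:, 1], kpt_3d[:, 2], c="r", marker="o")
    for i, j in kps_lines:
        ax.plot(kpt_3d[[i, j], 0], kpt_3d[[i, j], 1], kpt_3d[[i, j], 2], c="b")
    if set_lim:
        r = np.max(np.abs(kpt_3d))  # symmetric axis limits around the origin
        ax.set_xlim(-r, r)
        ax.set_ylim(-r, r)
        ax.set_zlim(-r, r)
    if isshow:
        plt.show()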