Example #1
0
    def __getitem__(self, idx):
        """Return the depth image and a (2, H, W) float tensor of corner
        heatmaps marking the two opposite corners of the hand crop box
        for dataset index ``idx``.
        """
        subpath, file_num = self.get_subpath_and_file_num(idx)

        depth_filepath = self.root_folder + self.video_folder + subpath + \
                         self.depth_folder + 'depth_' + \
                         file_num + '.' + self.depth_fileext
        depth_image = fpa_io.read_depth_img(depth_filepath)

        joints_filepath = self.root_folder + self.hand_pose_folder + \
                          subpath + 'skeleton.txt'
        hand_joints = fpa_io.read_action_joints_sequence(joints_filepath)[int(
            file_num)]

        # Only the crop coordinates are needed here; the cropped image
        # itself is discarded.
        _, crop_coords = self.get_cropped_depth_img(depth_image, hand_joints)
        # Row 0: first corner (u, v); row 1: opposite corner (u, v).
        crop_coords_numpy = np.array(crop_coords, dtype=float).reshape((2, 2))

        corner_heatmap1 = conv.color_space_label_to_heatmap(
            crop_coords_numpy[0, :],
            heatmap_res=self.orig_img_res,
            orig_img_res=self.orig_img_res)
        corner_heatmap2 = conv.color_space_label_to_heatmap(
            crop_coords_numpy[1, :],
            heatmap_res=self.orig_img_res,
            orig_img_res=self.orig_img_res)

        corner_heatmaps = np.stack((corner_heatmap1, corner_heatmap2))
        corner_heatmaps = torch.from_numpy(corner_heatmaps).float()
        # BUG FIX: the original returned the undefined name ``data_image``
        # (guaranteed NameError at runtime); the only image read in this
        # method is ``depth_image``, so return that.
        return depth_image, corner_heatmaps
Example #2
0
def read_fpa_skeleton(root_folder, subpath, frame_idx):
    """Load the hand skeleton for one frame of an FPA sequence.

    Reads ``skeleton.txt`` under the hand-pose annotation folder for the
    given ``subpath`` and returns a fresh (21, 3) array of joint
    coordinates for frame ``frame_idx``.
    """
    skeleton_path = (root_folder + 'Hand_pose_annotation_v1/' + subpath +
                     'skeleton.txt')
    sequence_joints = fpa_io.read_action_joints_sequence(skeleton_path)
    frame_joints = sequence_joints[int(frame_idx)].reshape((21, 3))
    # Return a copy so callers can mutate the result freely.
    return np.copy(frame_joints)
Example #3
0
    def __getitem__(self, idx):
        """Return ``(depth_img, depth_obj_img)`` as float tensors of shape
        (1, crop_h, crop_w): the recorded depth frame and the generated
        object depth frame, both cropped around the hand and normalised by
        ``self.normalise_const_max_depth``.
        """
        subpath, file_num = self.get_subpath_and_file_num(idx)
        depth_img = self.read_depth_img(subpath, file_num).astype(float)

        # The generated object depth is stored as a CSV dump; the array is
        # saved transposed, hence the ``.T``.
        depth_obj_csv_path = self.root_folder + self.gen_obj_folder + subpath + \
                             str(int(file_num)) + '_depth.csv'
        # FIX: pass the path directly so numpy manages the file handle —
        # the original opened the file itself and never closed it.
        depth_obj_img = np.loadtxt(depth_obj_csv_path, delimiter=",").T

        joints_filepath = self.root_folder + self.hand_pose_folder + \
                          subpath + 'skeleton.txt'
        hand_joints = fpa_io.read_action_joints_sequence(joints_filepath)[int(
            file_num)]
        depth_img, _ = self.get_cropped_depth_img(depth_img, hand_joints)
        depth_obj_img, _ = self.get_cropped_depth_img(depth_obj_img,
                                                      hand_joints)

        depth_img /= self.normalise_const_max_depth
        depth_obj_img /= self.normalise_const_max_depth

        def _to_resized_tensor(img):
            # Add a channel dimension, convert to a float tensor and
            # resize to the network's crop resolution.
            img = img.reshape((1, img.shape[0], img.shape[1]))
            return self.resize2d(torch.from_numpy(img).float(),
                                 self.crop_res)

        return _to_resized_tensor(depth_img), _to_resized_tensor(depth_obj_img)
Example #4
0
 def get_hand_joints(self, idx):
     """Return the hand-joint annotation row for dataset index ``idx``."""
     subpath, file_num = self.get_subpath_and_file_num(idx)
     skeleton_path = (self.root_folder + self.hand_pose_folder + subpath +
                      'skeleton.txt')
     sequence_joints = fpa_io.read_action_joints_sequence(skeleton_path)
     return sequence_joints[int(file_num)]
Example #5
0
import fpa_io
import visualize
import camera as cam


# Visualise the ground-truth hand joints of one FPA sequence, frame by
# frame, projected onto the depth images.
dataset_root_folder = 'C:/Users/Administrator/Documents/Datasets/fpa_benchmark/'
gt_folder = 'Hand_pose_annotation_v1'
data_folder = 'video_files'
subject = 'Subject_1'
action = 'put_tea_bag'
sequence = '4'
gt_filepath = '/'.join([dataset_root_folder, gt_folder, subject, action, sequence, 'skeleton.txt'])
curr_data_folder = '/'.join([dataset_root_folder, data_folder, subject, action, sequence])


# One row of joint coordinates per frame of the action sequence.
gt_skeletons = fpa_io.read_action_joints_sequence(gt_filepath)

fig = visualize.create_fig()
for i in range(99):
    # Dataset filenames zero-pad the frame number to 4 digits; zfill
    # replaces the manual '000'/'00' branching (which only covered i < 100).
    frame_num = str(i).zfill(4)
    joints = gt_skeletons[i, :].reshape((21, 3))
    depth_filepath = '/'.join([curr_data_folder, 'depth', 'depth_' + frame_num + '.png'])
    depth_img = fpa_io.read_depth_img(depth_filepath)
    # Project the 3D joints into the (depth) camera image plane.
    joints_uv = cam.joints_depth2color(joints, cam.fpa_depth_intrinsics)
    visualize.plot_joints_from_colorspace(joints_colorspace=joints_uv, data=depth_img,
                                          fig=fig, title='/'.join([subject, action, sequence]))
    visualize.pause(0.001)
Example #6
0
    def __getitem__(self, idx):
        """Return ``(depth_img, obj_hand_img)`` for dataset index ``idx``.

        ``depth_img`` is the recorded depth frame as a (1, crop_h, crop_w)
        float tensor; ``obj_hand_img`` is a (2, crop_h, crop_w) float
        tensor stacking the generated hand image and the generated object
        depth image. All three images are cropped around the hand and
        normalised by ``self.normalise_const_max_depth``.
        """
        subpath, file_num = self.get_subpath_and_file_num(idx)
        depth_img = self.read_depth_img(subpath, file_num).astype(float)

        # Read the generated object depth image. It is stored as a CSV
        # dump; the array is saved transposed, hence the ``.T``.
        depth_obj_csv_path = self.root_folder + self.gen_obj_folder + subpath + \
                                 str(int(file_num)) + '_depth.csv'
        # FIX: pass the path directly so numpy manages the file handle —
        # the original opened the file itself and never closed it.
        depth_obj_img = np.loadtxt(depth_obj_csv_path, delimiter=",").T

        # Read the generated (Unreal-rendered) hand image.
        hand_img_path = self.root_folder + self.gen_hands_folder + subpath + \
                             'depth_' + str(int(file_num)) + '.bmp'
        hand_img = self.read_unreal_depth_img(
            hand_img_path, unreal_max_depth=self.unreal_max_depth)

        joints_filepath = self.root_folder + self.hand_pose_folder + \
                              subpath + 'skeleton.txt'
        hand_joints = fpa_io.read_action_joints_sequence(joints_filepath)[int(
            file_num)]
        # Crop all three images around the same hand joints.
        depth_img, _ = self.get_cropped_depth_img(depth_img, hand_joints)
        depth_obj_img, _ = self.get_cropped_depth_img(depth_obj_img,
                                                      hand_joints)
        hand_img, _ = self.get_cropped_depth_img(hand_img, hand_joints)

        depth_img /= self.normalise_const_max_depth
        depth_obj_img /= self.normalise_const_max_depth
        hand_img /= self.normalise_const_max_depth

        def _to_resized_tensor(img):
            # Add a channel dimension, convert to a float tensor and
            # resize to the network's crop resolution.
            img = img.reshape((1, img.shape[0], img.shape[1]))
            return self.resize2d(torch.from_numpy(img).float(),
                                 self.crop_res)

        depth_img_torch = _to_resized_tensor(depth_img)
        depth_obj_img_torch = _to_resized_tensor(depth_obj_img)
        hand_img_torch = _to_resized_tensor(hand_img)

        # Stack hand and object channels (hand first, matching the
        # original concatenation order).
        obj_hand_img = torch.cat((hand_img_torch, depth_obj_img_torch))

        return depth_img_torch, obj_hand_img
Example #7
0
# Frame index of interest.
# NOTE(review): ``frame`` is unused below — the skeleton is loaded with a
# hard-coded frame 31; confirm which of the two is intended.
frame = 21

# Per-bone lengths of the Unreal hand model; column 1 holds the length value.
unreal_bone_lengths_filepath = dataset_root + 'bonelengths/' + 'UnrealMaleRightHands.txt'
unreal_bone_lengths = genfromtxt(unreal_bone_lengths_filepath, delimiter=',')
unreal_bone_lengths = unreal_bone_lengths[:, 1]

# Per-bone lengths measured for this subject, same file layout as above.
subject_bone_lengths_filepath = dataset_root + 'bonelengths/' + subject + '.txt'
subject_bone_lengths = genfromtxt(subject_bone_lengths_filepath, delimiter=',')
subject_bone_lengths = subject_bone_lengths[:, 1]

# Per-bone scale factors mapping the subject's bone lengths onto the Unreal
# model (20 bones, column vector); negative (invalid) ratios are replaced
# by 1.0, i.e. no scaling for those bones.
bone_prop = (unreal_bone_lengths / subject_bone_lengths).reshape((20, 1))
bone_prop[bone_prop < 0] = 1.0

hand_gt_filepath = dataset_root + hand_gt_folder + subject + '/' + action + '/' + seq + 'skeleton.txt'

# Load the joints of frame 31, reshape to 21 joints x 3 coordinates, and
# translate so the root joint (index 0) sits at the origin.
hand_joints = fpa_io.read_action_joints_sequence(hand_gt_filepath)[int(31)]
hand_joints = hand_joints.reshape((21, 3))
hand_joints -= hand_joints[0, :]
hand_joints_unreal = np.copy(hand_joints)

i = 0
for finger_idx in range(5):
    finger_start_joint_idx = (finger_idx * 4) + 1
    for j in range(3):
        parent_joint_idx = finger_start_joint_idx + j
        parent_joint_before = np.copy(hand_joints_unreal[parent_joint_idx, :])
        curr_bone_prop = bone_prop[parent_joint_idx - 1]
        hand_joints_unreal[parent_joint_idx, :] *= curr_bone_prop
        parent_joint_transl = hand_joints_unreal[
            parent_joint_idx, :] - parent_joint_before
        print(