Code example #1
    def __preprocess_and_save__(self, index):
        obj_category = self.list_obj[index]
        ins = self.list_instance[index]
        obj = self.objnamelist.index(obj_category)
        art_status = self.list_status[index]
        frame_order = self.list_rank[index]
        label = self.list_label[index]
        h5_save_path = self.root_dir + '/hdf5/' + obj_category + '/' + ins + '/' + art_status
        if (not os.path.exists(h5_save_path)):
            os.makedirs(h5_save_path)
        h5_save_name = h5_save_path + '/{}.h5'.format(frame_order)
        num_parts = self.urdf_dict[obj_category][ins]['num_links']

        model_offsets = self.urdf_dict[obj_category][ins]['link']
        joint_offsets = self.urdf_dict[obj_category][ins]['joint']

        parts_model_point = [None] * num_parts
        parts_world_point = [None] * num_parts
        parts_target_point = [None] * num_parts

        parts_cloud_cam = [None] * num_parts
        parts_cloud_world = [None] * num_parts
        parts_cloud_canon = [None] * num_parts
        parts_cloud_urdf = [None] * num_parts
        parts_cloud_norm = [None] * num_parts

        parts_world_pos = [None] * num_parts
        parts_world_orn = [None] * num_parts
        parts_urdf_pos = [None] * num_parts
        parts_urdf_orn = [None] * num_parts
        parts_urdf_box = [None] * num_parts

        parts_model2world = [None] * num_parts
        parts_canon2urdf = [None] * num_parts
        parts_target_r = [None] * num_parts
        parts_target_t = [None] * num_parts

        parts_mask = [None] * num_parts
        choose_x = [None] * num_parts
        choose_y = [None] * num_parts
        choose_to_whole = [None] * num_parts

        # rgb/depth/label
        print('current image: ', self.list_rgb[index])
        img = Image.open(self.list_rgb[index])
        img = np.array(img)  #.astype(np.uint8)
        depth = np.array(h5py.File(self.list_depth[index], 'r')['data'])
        label = np.array(Image.open(self.list_label[index]))

        # pose infos
        pose_dict = self.meta_dict[obj_category][ins][art_status][
            'frame_{}'.format(frame_order)]
        urdf_dict = self.urdf_dict[obj_category][ins]
        viewMat = np.array(pose_dict['viewMat']).reshape(4, 4).T
        projMat = np.array(pose_dict['projMat']).reshape(4, 4).T

        parts_world_pos[0] = np.array([0, 0, 0])
        parts_world_orn[0] = np.array([0, 0, 0, 1])
        for link in range(0, num_parts):
            if link > 0:
                parts_world_pos[link] = np.array(
                    pose_dict['obj'][link - 1][4]).astype(np.float32)
                parts_world_orn[link] = np.array(
                    pose_dict['obj'][link - 1][5]).astype(np.float32)

        for link in range(num_parts):
            if link == 1 and num_parts == 2:
                parts_urdf_pos[link] = np.array(urdf_dict['joint']['xyz'][
                    link -
                    1])  # todo, accumulate joints offsets != link offsets
            else:
                parts_urdf_pos[link] = -np.array(
                    urdf_dict['link']['xyz'][link])
            parts_urdf_orn[link] = np.array(urdf_dict['link']['rpy'][link])

        for k in range(num_parts):
            center_world_orn = parts_world_orn[k]
            center_world_orn = np.array([
                center_world_orn[3], center_world_orn[0], center_world_orn[1],
                center_world_orn[2]
            ])
            my_model2world_r = quaternion_matrix(
                center_world_orn)[:4, :4]  # [w, x, y, z]
            my_model2world_t = parts_world_pos[k]
            my_model2world_mat = np.copy(my_model2world_r)
            for m in range(3):
                my_model2world_mat[m, 3] = my_model2world_t[m]
            my_world2camera_mat = viewMat
            my_camera2clip_mat = projMat
            my_model2camera_mat = np.dot(my_world2camera_mat,
                                         my_model2world_mat)
            parts_model2world[k] = my_model2world_mat

        # depth to cloud data
        mask = np.array((label[:, :] < num_parts) & (label[:, :] > -1)).astype(
            np.uint8)
        mask_whole = np.copy(mask)
        for n in range(num_parts):
            parts_mask[n] = np.array((label[:, :] == (n))).astype(np.uint8)
            choose_to_whole[n] = np.where(parts_mask[n] > 0)

        #>>>>>>>>>>------- rendering target pcloud from depth image --------<<<<<<<<<#
        # first get projected map
        ymap = self.ymap
        xmap = self.xmap
        h = self.height
        w = self.width
        u_map = ymap * 2 / w - 1
        v_map = (512 - xmap) * 2 / h - 1
        v1_map = xmap * 2 / h - 1
        w_channel = -depth
        projected_map = np.stack(
            [u_map * w_channel, v_map * w_channel, depth,
             w_channel]).transpose([1, 2, 0])
        projected_map1 = np.stack(
            [u_map * w_channel, v1_map * w_channel, depth,
             w_channel]).transpose([1, 2, 0])
        for s in range(num_parts):
            x_set, y_set = choose_to_whole[s]
            if len(x_set) < 10:
                print('data is empty, skipping!!!')
                return None
            else:
                choose_x[s] = x_set
                choose_y[s] = y_set

            # ---------------> from projected map into target part_cloud
            # order: cam -> world -> canon
            projected_points = projected_map[
                choose_x[s][:].astype(np.uint16),
                choose_y[s][:].astype(np.uint16), :]
            projected_points = np.reshape(projected_points, [-1, 4])
            depth_channel = -projected_points[:, 3:4]
            cloud_cam = np.dot(
                projected_points[:, 0:2] -
                np.dot(depth_channel, projMat[0:2, 2:3].T),
                np.linalg.pinv(projMat[:2, :2].T))

            projected_points1 = projected_map1[
                choose_x[s][:].astype(np.uint16),
                choose_y[s][:].astype(np.uint16), :]
            projected_points1 = np.reshape(projected_points1, [-1, 4])
            cloud_cam_real = np.dot(
                projected_points1[:, 0:2] -
                np.dot(depth_channel, projMat[0:2, 2:3].T),
                np.linalg.pinv(projMat[:2, :2].T))
            cloud_cam_real = np.concatenate((cloud_cam_real, depth_channel),
                                            axis=1)

            cloud_cam = np.concatenate((cloud_cam, depth_channel), axis=1)
            cloud_cam_full = np.concatenate(
                (cloud_cam, np.ones((cloud_cam.shape[0], 1))), axis=1)

            # modify, todo
            camera_pose_mat = np.linalg.pinv(viewMat.T)
            camera_pose_mat[:3, :] = -camera_pose_mat[:3, :]
            cloud_world = np.dot(cloud_cam_full, camera_pose_mat)
            cloud_canon = np.dot(cloud_world,
                                 np.linalg.pinv(parts_model2world[s].T))

            # canon points should be point coordinates centered in the inertial frame
            parts_cloud_cam[s] = cloud_cam_real[:, :3]
            parts_cloud_world[s] = cloud_world[:, :3]
            parts_cloud_canon[s] = cloud_canon[:, :3]

        for k in range(num_parts):
            center_joint_orn = parts_urdf_orn[k]
            my_canon2urdf_r = euler_matrix(
                center_joint_orn[0], center_joint_orn[1],
                center_joint_orn[2])[:4, :4]  # [w, x, y, z]
            my_canon2urdf_t = parts_urdf_pos[k]
            my_canon2urdf_mat = my_canon2urdf_r
            for m in range(3):
                my_canon2urdf_mat[m, 3] = my_canon2urdf_t[m]
            part_points_space = np.concatenate(
                (parts_cloud_canon[k],
                 np.ones((parts_cloud_canon[k].shape[0], 1))),
                axis=1)
            parts_cloud_urdf[k] = np.dot(part_points_space,
                                         my_canon2urdf_mat.T)

        #>>>>>>>>>>>>>>> go to NPCS space
        for link in range(num_parts):
            tight_w = max(parts_cloud_urdf[link][:, 0]) - min(
                parts_cloud_urdf[link][:, 0])
            tight_l = max(parts_cloud_urdf[link][:, 1]) - min(
                parts_cloud_urdf[link][:, 1])
            tight_h = max(parts_cloud_urdf[link][:, 2]) - min(
                parts_cloud_urdf[link][:, 2])
            norm_factor = np.sqrt(1) / np.sqrt(tight_w**2 + tight_l**2 +
                                               tight_h**2)
            base_p = np.array([
                min(parts_cloud_urdf[link][:, 0]),
                min(parts_cloud_urdf[link][:, 1]),
                min(parts_cloud_urdf[link][:, 2])
            ]).reshape(1, 3)
            extre_p = np.array([
                max(parts_cloud_urdf[link][:, 0]),
                max(parts_cloud_urdf[link][:, 1]),
                max(parts_cloud_urdf[link][:, 2])
            ]).reshape(1, 3)
            center_p = (extre_p - base_p) / 2 * norm_factor

            parts_cloud_norm[link] = (parts_cloud_urdf[link][:, :3] -
                                      base_p) * norm_factor + np.array(
                                          [0.5, 0.5, 0.5]).reshape(
                                              1, 3) - center_p.reshape(1, 3)

        x_set_pcloud = np.concatenate(choose_x, axis=0)
        y_set_pcloud = np.concatenate(choose_y, axis=0)

        # save into h5 for rgb_img, input_pts, mask, corresponding urdf_points
        print('Writing to ', h5_save_name)
        hf = h5py.File(h5_save_name, 'w')
        hf.create_dataset('rgb', data=img)
        hf.create_dataset('mask', data=mask)
        cloud_cam = hf.create_group('gt_points')
        for part_i, points in enumerate(parts_cloud_cam):
            cloud_cam.create_dataset(str(part_i), data=points)
        coord_gt = hf.create_group('gt_coords')
        for part_i, points in enumerate(parts_cloud_urdf):
            coord_gt.create_dataset(str(part_i), data=points)
        hf.close()

        ################# for debug only, let me know if you have questions #################
        if self.is_debug:
            figure = plt.figure(dpi=200)
            ax = plt.subplot(121)
            plt.imshow(img)
            plt.title('RGB image')
            ax1 = plt.subplot(122)
            plt.imshow(depth)
            plt.title('depth image')
            plt.show()
            plot3d_pts(
                [parts_cloud_cam],
                [['part {}'.format(i) for i in range(len(parts_cloud_cam))]],
                s=5,
                title_name=['camera coords'])
            plot3d_pts(
                [parts_cloud_world],
                [['part {}'.format(i) for i in range(len(parts_cloud_world))]],
                s=5,
                title_name=['world coords'])
            plot3d_pts(
                [parts_cloud_canon],
                [['part {}'.format(i) for i in range(len(parts_cloud_canon))]],
                s=5,
                title_name=['canon coords'])
            plot3d_pts(
                [parts_cloud_urdf],
                [['part {}'.format(i) for i in range(len(parts_cloud_urdf))]],
                s=5,
                title_name=['urdf coords'])

        return None
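
The loop labeled "go to NPCS space" maps each part's URDF-frame points into a unit cube centered at (0.5, 0.5, 0.5), scaled by the diagonal of the part's tight bounding box. A minimal standalone sketch of that normalization, assuming a plain (N, 3) numpy array (the helper name is hypothetical):

import numpy as np

def normalize_to_unit_cube(points):
    # points: (N, 3) array in the part's canonical/URDF frame.
    # Scale by the tight bounding-box diagonal, then shift so that the box
    # center lands at (0.5, 0.5, 0.5), mirroring parts_cloud_norm above.
    base_p = points.min(axis=0, keepdims=True)    # (1, 3) min corner
    extre_p = points.max(axis=0, keepdims=True)   # (1, 3) max corner
    norm_factor = 1.0 / np.linalg.norm(extre_p - base_p)
    center_p = (extre_p - base_p) / 2 * norm_factor
    return (points - base_p) * norm_factor + 0.5 - center_p

# usage (hypothetical data): all output coordinates fall inside [0, 1]
# npcs = normalize_to_unit_cube(np.random.rand(1000, 3) * 2.0)
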
Code example #2
    def __preprocess_and_save__(self, index):
        obj_category = self.list_obj[index]
        ins = self.list_instance[index]
        obj = self.objnamelist.index(obj_category)
        art_status = self.list_status[index]
        frame_order = self.list_rank[index]
        label = self.list_label[index]
        h5_save_path = (self.root_dir + "/hdf5/" + obj_category + "/" + ins +
                        "/" + art_status)
        if not os.path.exists(h5_save_path):
            os.makedirs(h5_save_path)
        h5_save_name = h5_save_path + "/{}.h5".format(frame_order)
        num_parts = self.urdf_dict[obj_category][ins]["num_links"]
        new_num_parts = num_parts - 1

        model_offsets = self.urdf_dict[obj_category][ins]["link"]
        joint_offsets = self.urdf_dict[obj_category][ins]["joint"]

        parts_model_point = [None] * new_num_parts
        parts_world_point = [None] * new_num_parts
        parts_target_point = [None] * new_num_parts

        parts_cloud_cam = [None] * new_num_parts
        parts_cloud_world = [None] * new_num_parts
        parts_cloud_canon = [None] * new_num_parts
        parts_cloud_urdf = [None] * new_num_parts
        parts_cloud_norm = [None] * new_num_parts

        parts_world_pos = [None] * new_num_parts
        parts_world_orn = [None] * new_num_parts
        parts_urdf_pos = [None] * new_num_parts
        parts_urdf_orn = [None] * new_num_parts
        parts_urdf_box = [None] * new_num_parts

        parts_model2world = [None] * new_num_parts
        parts_canon2urdf = [None] * new_num_parts
        parts_target_r = [None] * new_num_parts
        parts_target_t = [None] * new_num_parts

        parts_mask = [None] * new_num_parts
        choose_x = [None] * new_num_parts
        choose_y = [None] * new_num_parts
        choose_to_whole = [None] * new_num_parts

        # rgb/depth/label
        print("current image: ", self.list_rgb[index])
        img = Image.open(self.list_rgb[index])
        img = np.array(img)  # .astype(np.uint8)
        depth = np.array(h5py.File(self.list_depth[index], "r")["data"])
        label = np.array(Image.open(self.list_label[index]))

        # pose infos
        pose_dict = self.meta_dict[obj_category][ins][art_status][
            "frame_{}".format(frame_order)]
        urdf_dict = self.urdf_dict[obj_category][ins]
        viewMat = np.array(pose_dict["viewMat"]).reshape(4, 4).T
        projMat = np.array(pose_dict["projMat"]).reshape(4, 4).T

        parts_world_pos[0] = np.array([0, 0, 0])
        parts_world_orn[0] = np.array([0, 0, 0, 1])
        # SAPIEN: skip the virtual base part
        for link in range(1, num_parts):
            if link > 0:
                parts_world_pos[link - 1] = np.array(
                    pose_dict["obj"][link - 1][4]).astype(np.float32)
                parts_world_orn[link - 1] = np.array(
                    pose_dict["obj"][link - 1][5]).astype(np.float32)

        for link in range(1, num_parts):
            if link == 1 and num_parts == 2:
                # use link - 1: all part arrays are new_num_parts long (base link dropped)
                parts_urdf_pos[link - 1] = np.array(urdf_dict["joint"]["xyz"][
                    link -
                    1])  # todo, accumulate joints offsets != link offsets
            else:
                # Only change this branch for SAPIEN data
                parts_urdf_pos[link - 1] = -np.array(
                    urdf_dict["link"]["xyz"][link][0])
            parts_urdf_orn[link - 1] = np.array(
                urdf_dict["link"]["rpy"][link][0])

        for k in range(1, num_parts):
            center_world_orn = parts_world_orn[k - 1]
            center_world_orn = np.array([
                center_world_orn[3],
                center_world_orn[0],
                center_world_orn[1],
                center_world_orn[2],
            ])
            my_model2world_r = quaternion_matrix(
                center_world_orn)[:4, :4]  # [w, x, y, z]
            my_model2world_t = parts_world_pos[k - 1]
            my_model2world_mat = np.copy(my_model2world_r)
            for m in range(3):
                my_model2world_mat[m, 3] = my_model2world_t[m]
            my_world2camera_mat = viewMat
            my_camera2clip_mat = projMat
            my_model2camera_mat = np.dot(my_world2camera_mat,
                                         my_model2world_mat)
            parts_model2world[k - 1] = my_model2world_mat

        # depth to cloud data
        # SAPIEN, throw away the label 0 for virtual base link
        mask = np.array((label[:, :] < num_parts) & (label[:, :] > 0)).astype(
            np.uint8)
        mask_whole = np.copy(mask)
        for n in range(1, num_parts):
            parts_mask[n - 1] = np.array((label[:, :] == (n))).astype(np.uint8)
            choose_to_whole[n - 1] = np.where(parts_mask[n - 1] > 0)

        # >>>>>>>>>>------- rendering target pcloud from depth image --------<<<<<<<<<#
        # first get projected map
        # ymap = self.ymap
        # xmap = self.xmap
        # h = self.height
        # w = self.width
        # u_map = ymap * 2 / w - 1
        # v_map = (512 - xmap) * 2 / h - 1
        # v1_map = xmap * 2 / h - 1
        # w_channel = -depth
        # projected_map = np.stack(
        #     [u_map * w_channel, v_map * w_channel, depth, w_channel]
        # ).transpose([1, 2, 0])
        # projected_map1 = np.stack(
        #     [u_map * w_channel, v1_map * w_channel, depth, w_channel]
        # ).transpose([1, 2, 0])
        for s in range(1, num_parts):
            x_set, y_set = choose_to_whole[s - 1]
            if len(x_set) < 10:
                print(ins, art_status, frame_order)
                print(
                    "data is empty, skipping!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
                )
                return None
            else:
                choose_x[s - 1] = x_set
                choose_y[s - 1] = y_set

            # ---------------> from projected map into target part_cloud
            # order: cam -> world -> canon

            # MotionNet Modification
            point_num = np.shape(choose_x[s - 1])[0]
            cloud_cam = []
            cloud_cam_real = []
            for i in range(point_num):
                x = choose_x[s - 1][i]
                y = choose_y[s - 1][i]
                y_real = 512 - y
                cloud_cam.append([
                    (y + projMat[0, 2]) * depth[x, y] / projMat[0, 0],
                    -(x + projMat[1, 2]) * depth[x, y] / (-projMat[1, 1]),
                    -depth[x, y],
                ])
                cloud_cam_real.append([
                    (y + projMat[0, 2]) * depth[x, y] / projMat[0, 0],
                    -(x + projMat[1, 2]) * depth[x, y] / (-projMat[1, 1]),
                    -depth[x, y],
                ])
                # cloud_cam.append(
                #     [
                #         (x + projMat[0, 2]) * depth[x, y_real] / projMat[0, 0],
                #         -(y_real + projMat[1, 2]) * depth[x, y_real] / (-projMat[1, 1]),
                #         -depth[x, y_real],
                #     ]
                # )
                # pdb.set_trace()
                # print(cloud_cam)
                # break
            cloud_cam = np.array(cloud_cam)
            cloud_cam_real = np.array(cloud_cam_real)

            # pdb.set_trace()
            # MotionNet Modification
            # projected_points = projected_map[choose_x[s-1][:].astype(np.uint16), choose_y[s-1][:].astype(np.uint16), :]
            # projected_points = np.reshape(projected_points, [-1, 4])
            # depth_channel    = - projected_points[:, 3:4]
            # cloud_cam        = np.dot(projected_points[:, 0:2] - np.dot(depth_channel, projMat[0:2, 2:3].T), np.linalg.pinv(projMat[:2, :2].T))

            # projected_points1 = projected_map1[choose_x[s-1][:].astype(np.uint16), choose_y[s-1][:].astype(np.uint16), :]
            # projected_points1 = np.reshape(projected_points1, [-1, 4])
            # cloud_cam_real    = np.dot(projected_points1[:, 0:2] - np.dot(depth_channel, projMat[0:2, 2:3].T), np.linalg.pinv(projMat[:2, :2].T))
            # cloud_cam_real    = np.concatenate((cloud_cam_real, depth_channel), axis=1)

            # cloud_cam      = np.concatenate((cloud_cam, depth_channel), axis=1)

            cloud_cam_full = np.concatenate(
                (cloud_cam, np.ones((cloud_cam.shape[0], 1))), axis=1)

            # modify, todo
            camera_pose_mat = np.linalg.pinv(viewMat.T)
            # MotionNet modification
            # camera_pose_mat[:3, :] = -camera_pose_mat[:3, :]
            cloud_world = np.dot(cloud_cam_full, camera_pose_mat)
            cloud_canon = np.dot(cloud_world,
                                 np.linalg.pinv(parts_model2world[s - 1].T))

            # canon points should be point coordinates centered in the inertial frame
            parts_cloud_cam[s - 1] = cloud_cam_real[:, :3]
            parts_cloud_world[s - 1] = cloud_world[:, :3]
            parts_cloud_canon[s - 1] = cloud_canon[:, :3]

        for k in range(1, num_parts):
            center_joint_orn = parts_urdf_orn[k - 1]
            my_canon2urdf_r = euler_matrix(
                center_joint_orn[0], center_joint_orn[1],
                center_joint_orn[2])[:4, :4]  # [w, x, y, z]
            my_canon2urdf_t = parts_urdf_pos[k - 1]
            my_canon2urdf_mat = my_canon2urdf_r
            for m in range(3):
                my_canon2urdf_mat[m, 3] = my_canon2urdf_t[m]
            part_points_space = np.concatenate(
                (
                    parts_cloud_canon[k - 1],
                    np.ones((parts_cloud_canon[k - 1].shape[0], 1)),
                ),
                axis=1,
            )
            parts_cloud_urdf[k - 1] = np.dot(part_points_space,
                                             my_canon2urdf_mat.T)

        # >>>>>>>>>>>>>>> go to NPCS space
        # NPCS here is not stored into hdf5
        for link in range(1, num_parts):
            tight_w = max(parts_cloud_urdf[link - 1][:, 0]) - min(
                parts_cloud_urdf[link - 1][:, 0])
            tight_l = max(parts_cloud_urdf[link - 1][:, 1]) - min(
                parts_cloud_urdf[link - 1][:, 1])
            tight_h = max(parts_cloud_urdf[link - 1][:, 2]) - min(
                parts_cloud_urdf[link - 1][:, 2])
            norm_factor = np.sqrt(1) / np.sqrt(tight_w**2 + tight_l**2 +
                                               tight_h**2)
            base_p = np.array([
                min(parts_cloud_urdf[link - 1][:, 0]),
                min(parts_cloud_urdf[link - 1][:, 1]),
                min(parts_cloud_urdf[link - 1][:, 2]),
            ]).reshape(1, 3)
            extre_p = np.array([
                max(parts_cloud_urdf[link - 1][:, 0]),
                max(parts_cloud_urdf[link - 1][:, 1]),
                max(parts_cloud_urdf[link - 1][:, 2]),
            ]).reshape(1, 3)
            center_p = (extre_p - base_p) / 2 * norm_factor

            parts_cloud_norm[link - 1] = (
                (parts_cloud_urdf[link - 1][:, :3] - base_p) * norm_factor +
                np.array([0.5, 0.5, 0.5]).reshape(1, 3) -
                center_p.reshape(1, 3))

        x_set_pcloud = np.concatenate(choose_x, axis=0)
        y_set_pcloud = np.concatenate(choose_y, axis=0)

        # save into h5 for rgb_img, input_pts, mask, corresponding urdf_points
        print("Writing to ", h5_save_name)
        hf = h5py.File(h5_save_name, "w")
        hf.create_dataset("rgb", data=img)
        hf.create_dataset("mask", data=mask)
        cloud_cam = hf.create_group("gt_points")
        for part_i, points in enumerate(parts_cloud_cam):
            cloud_cam.create_dataset(str(part_i), data=points)
        coord_gt = hf.create_group("gt_coords")
        for part_i, points in enumerate(parts_cloud_urdf):
            coord_gt.create_dataset(str(part_i), data=points)
        hf.close()

        ################# for debug only, let me know if you have questions #################
        if self.is_debug:
            figure = plt.figure(dpi=200)
            ax = plt.subplot(121)
            plt.imshow(img)
            plt.title('RGB image')
            ax1 = plt.subplot(122)
            plt.imshow(depth)
            plt.title('depth image')
            plt.show()
            plot3d_pts(
                [parts_cloud_cam],
                [['part {}'.format(i) for i in range(len(parts_cloud_cam))]],
                s=5,
                title_name=['camera coords'])
            plot3d_pts(
                [parts_cloud_world],
                [['part {}'.format(i) for i in range(len(parts_cloud_world))]],
                s=5,
                title_name=['world coords'])
            plot3d_pts(
                [parts_cloud_canon],
                [['part {}'.format(i) for i in range(len(parts_cloud_canon))]],
                s=5,
                title_name=['canon coords'])
            plot3d_pts(
                [parts_cloud_urdf],
                [['part {}'.format(i) for i in range(len(parts_cloud_urdf))]],
                s=5,
                title_name=['urdf coords'])

        return None
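
The "MotionNet Modification" above unprojects each masked pixel one at a time in a Python loop; the same formula can be applied to all pixels of a part at once with numpy indexing. A sketch under the same conventions (x is the row index, y is the column index, projMat as used above; the helper name is hypothetical):

import numpy as np

def unproject_part(depth, xs, ys, projMat):
    # depth: (H, W) depth map; xs, ys: integer pixel indices of one part's
    # mask (same role as choose_x / choose_y). Vectorized version of the
    # per-pixel formula used in the loop above.
    d = depth[xs, ys]
    px = (ys + projMat[0, 2]) * d / projMat[0, 0]
    py = -(xs + projMat[1, 2]) * d / (-projMat[1, 1])
    pz = -d
    return np.stack([px, py, pz], axis=1)  # (N, 3) camera-frame points

# usage (hypothetical): xs, ys = np.where(label == part_id)
# cloud_cam = unproject_part(depth, xs, ys, projMat)
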
Code example #3
def get_part_bounding_box(my_dir, test_ins, args, viz=False):
    pts_m = {}
    bbox3d_all = {}
    start = time.time()
    m_file = my_dir + '/shape2motion/pickle/{}_pts.pkl'.format(args.item)
    c_file = my_dir + '/shape2motion/pickle/{}_corners.pkl'.format(args.item)
    n_file = my_dir + '/shape2motion/pickle/{}.pkl'.format(args.item)

    if args.process:
        for item in os.listdir(test_ins):
            print('now fetching for item {}'.format(item))
            pts, nf, cpts = get_model_pts(root_dset, args.item, item)
            pt_ii = []
            for p, pt in enumerate(pts):
                pt_s = np.concatenate(pt, axis=0)
                np.random.shuffle(pt_s)
                # pt_s = pt_s[::20, :]
                pt_ii.append(pt_s)
                print('We have {} pts'.format(pt_ii[p].shape[0]))
            if pt_ii:  # non-empty list check
                pts_m[item] = pt_ii
            else:
                print('!!!!! {} model loading is wrong'.format(item))
        end_t = time.time()

        with open(m_file, 'wb') as f:
            pickle.dump(pts_m, f)
    else:
        with open(m_file, 'rb') as f:
            pts_m = pickle.load(f)

        with open(c_file, 'rb') as f:
            pts_c = pickle.load(f)

        with open(n_file, 'rb') as f:
            pts_n = pickle.load(f)

        # for item in list(pts_m.keys()):
        for item in test_ins:
            pts = pts_m[item]
            norm_factors = pts_n[item]
            norm_corners = pts_c[item]
            pt_ii = []
            bbox3d_per_part = []
            for p, pt in enumerate(
                    pts
            ):  # todo: assume we are dealing with part-NOCS, so model pts are processed
                norm_factor = norm_factors[p + 1]
                norm_corner = norm_corners[p + 1]
                nocs_corner = np.copy(
                    norm_corner)  # copy is very important, as the corners are modified in place below
                print('norm_corner:\n', norm_corner)
                pt_nocs = (pt - norm_corner[0]) * norm_factor + np.array(
                    [0.5, 0.5, 0.5]).reshape(1, 3) - 0.5 * (
                        norm_corner[1] - norm_corner[0]) * norm_factor
                nocs_corner[0] = np.array([0.5, 0.5, 0.5]).reshape(
                    1,
                    3) - 0.5 * (norm_corner[1] - norm_corner[0]) * norm_factor
                nocs_corner[1] = np.array([0.5, 0.5, 0.5]).reshape(
                    1,
                    3) + 0.5 * (norm_corner[1] - norm_corner[0]) * norm_factor
                bbox3d_per_part.append(nocs_corner)
                np.random.shuffle(pt_nocs)
                pt_ii.append(pt_nocs[0:2000, :])  # sub-sampling
                print('We have {} pts'.format(pt_ii[p].shape[0]))
            if pt_ii:  # non-empty list check
                pts_m[item] = pt_ii
            else:
                print('!!!!! {} model loading is wrong'.format(item))
            assert bbox3d_per_part != []
            bbox3d_all[item] = bbox3d_per_part
        end_t = time.time()

    print('It takes {} seconds to get: \n'.format(end_t - start),
          list(pts_m.keys()))
    if viz:
        plot3d_pts([pts_m['0001']], [['part 0', 'part 1']],
                   s=5,
                   title_name=['sampled model pts'],
                   dpi=200)

    return bbox3d_all, pts_m
Code example #4
def get_all_objs(root_dset,
                 obj_category,
                 item,
                 obj_file_list=None,
                 offsets=None,
                 is_debug=False,
                 verbose=False):
    """
    offsets is usually 0, but can also be a 1*3 array [x, y, z]; it could be made a K*3 array if necessary
    """
    norm_factors = []
    pts_list = []
    name_list = []
    target_dir = root_dset + '/objects/' + obj_category + '/' + item

    offset = 0
    if obj_file_list is None:
        for k, obj_file in enumerate(glob.glob(target_dir +
                                               '/part_objs/*.obj')):
            if offsets is not None:
                offset = offsets[k:k + 1, :]
            if is_debug:
                print('obj_file is: ', obj_file)
            try:
                tm = trimesh.load(obj_file)
                vertices_obj = np.array(tm.vertices)
            except:
                dict_mesh, _, _, _ = load_model_split(obj_file)
                vertices_obj = np.concatenate(dict_mesh['v'], axis=0)
            pts_list.append(vertices_obj + offset)
            name_obj = obj_file.split('.')[0].split('/')[-1]
            name_list.append(name_obj)
    else:
        for k, obj_files in enumerate(obj_file_list):
            if offsets is not None:
                offset = offsets[k:k + 1, :]
            if obj_files is not None and not isinstance(obj_files, list):
                try:
                    tm = trimesh.load(obj_files)
                    vertices_obj = np.array(tm.vertices)
                except:
                    dict_mesh, _, _, _ = load_model_split(obj_files)
                    vertices_obj = np.concatenate(dict_mesh['v'], axis=0)
                pts_list.append(vertices_obj + offset)
                name_obj = obj_files.split('.')[0].split('/')[-1]
                name_list.append(
                    name_obj)  # which should follow the right order
            elif isinstance(obj_files, list):
                if verbose:
                    print('{} part has {} obj files'.format(k, len(obj_files)))
                part_pts = []
                name_objs = []
                for obj_file in obj_files:
                    if obj_file is not None and not isinstance(obj_file, list):
                        try:
                            tm = trimesh.load(obj_file)
                            vertices_obj = np.array(tm.vertices)
                        except:
                            dict_mesh, _, _, _ = load_model_split(obj_file)
                            vertices_obj = np.concatenate(dict_mesh['v'],
                                                          axis=0)
                        name_obj = obj_file.split('.')[0].split('/')[-1]
                        name_objs.append(name_obj)
                        part_pts.append(vertices_obj)
                part_pts_whole = np.concatenate(part_pts, axis=0)
                pts_list.append(part_pts_whole + offset)
                name_list.append(name_objs)  # which should follow the right order

    if is_debug:
        print('name_list is: ', name_list)

    parts_a = []
    parts_a = pts_list
    parts_b = [None] * (len(obj_file_list) if obj_file_list is not None else len(name_list))  # obj_file_list may be None (glob branch)
    # dof_rootd_Aa001_r.obj  dof_rootd_Aa002_r.obj  none_motion.obj
    # bike: part2: 'dof_Aa001_Ca001_r', 'dof_rootd_Aa001_r'
    if obj_category == 'bike':
        part0 = []
        part1 = []
        part2 = []
        part0 = pts_list
        for i, name_obj in enumerate(name_list):
            if name_obj in ['dof_Aa001_Ca001_r', 'dof_rootd_Aa001_r']:
                print('part 2 adding ', name_obj)
                part2.append(pts_list[i])
            else:
                print('part 1 adding ', name_obj)
                part1.append(pts_list[i])
        parts = [part0, part1, part2]

    elif obj_category == 'eyeglasses':
        for i, name_obj in enumerate(name_list):
            if name_obj in ['none_motion']:
                parts_b[0] = []
                parts_b[0].append(pts_list[i])
            if name_obj in ['dof_rootd_Aa001_r']:
                parts_b[1] = []
                parts_b[1].append(pts_list[i])
            elif name_obj in ['dof_rootd_Aa002_r']:
                parts_b[2] = []
                parts_b[2].append(pts_list[i])

        parts = [parts_a] + parts_b

    else:
        parts_a = []
        parts_a = pts_list
        parts_b = [None] * len(name_list)
        for i, name_obj in enumerate(name_list):
            parts_b[i] = []
            parts_b[i].append(pts_list[i])

        parts = [parts_a] + parts_b

    corner_pts = [None] * len(parts)

    for j in range(len(parts)):
        if is_debug:
            print('Now checking ', j)
        part_gts = np.concatenate(parts[j], axis=0)
        print('part_gts: ', part_gts.shape)
        tight_w = max(part_gts[:, 0]) - min(part_gts[:, 0])
        tight_l = max(part_gts[:, 1]) - min(part_gts[:, 1])
        tight_h = max(part_gts[:, 2]) - min(part_gts[:, 2])
        corner_pts[j] = np.amin(part_gts, axis=1)
        norm_factor = np.sqrt(1) / np.sqrt(tight_w**2 + tight_l**2 +
                                           tight_h**2)
        norm_factors.append(norm_factor)
        corner_pt_left = np.amin(part_gts, axis=0, keepdims=True)
        corner_pt_right = np.amax(part_gts, axis=0, keepdims=True)
        corner_pts[j] = [corner_pt_left, corner_pt_right
                         ]  # [index][left/right][x, y, z], numpy array
        if is_debug:
            print('Group {} has {} points with shape {}'.format(
                j, len(corner_pts[j]), corner_pts[j][0].shape))
        if verbose:
            plot3d_pts([[part_gts[::2]]], ['model pts'],
                       s=15,
                       title_name=['GT model pts {}'.format(j)],
                       sub_name=str(j))
        # for k in range(len(parts[j])):
        #     plot3d_pts([[parts[j][k][::2]]], ['model pts of part {}'.format(k)], s=15, title_name=['GT model pts'], sub_name=str(k))

    return parts[1:], norm_factors, corner_pts
Code example #5
File: aligning.py  Project: hiyyg/articulated-pose
def evaluateModelRotation(Rotation,
                          SourceHom,
                          TargetHom,
                          PassThreshold,
                          rt_ref=None,
                          viz_normal=False):
    SourceCentroid = np.mean(SourceHom[:3, :], axis=1)
    TargetCentroid = np.mean(TargetHom[:3, :], axis=1)
    nPoints = SourceHom.shape[1]

    CenteredSource = SourceHom[:3, :] - np.tile(SourceCentroid,
                                                (nPoints, 1)).transpose()
    CenteredTarget = TargetHom[:3, :] - np.tile(TargetCentroid,
                                                (nPoints, 1)).transpose()

    stdSource = np.sqrt(np.var(CenteredSource[:3, :], axis=1).sum())
    stdTarget = np.sqrt(np.var(CenteredTarget[:3, :], axis=1).sum())

    CenteredSource = CenteredSource / stdSource
    CenteredTarget = CenteredTarget / stdTarget
    origin = np.zeros((1, 3))
    if viz_normal:
        if rt_ref is not None:
            RotatedSource = np.matmul(rt_ref[:3, :3], CenteredSource)
            plot3d_pts([[
                RotatedSource[:3].transpose(), CenteredTarget[:3].transpose(),
                origin
            ]], [['GT rotated source', 'target', 'origin']],
                       s=100,
                       title_name=['normalized source and target pts'],
                       color_channel=None,
                       save_fig=False,
                       sub_name='default')
        plot3d_pts([[
            CenteredSource[:3].transpose(), CenteredTarget[:3].transpose(),
            origin
        ]], [['source', 'target', 'origin']],
                   s=100,
                   title_name=['normalized source and target pts'],
                   color_channel=None,
                   save_fig=False,
                   sub_name='default')
    RotatedSource = np.matmul(Rotation, CenteredSource)
    if viz_normal:
        plot3d_pts([[
            RotatedSource[:3].transpose(), CenteredTarget[:3].transpose(),
            origin
        ]], [['Pred rotated source', 'target', 'origin']],
                   s=100,
                   title_name=['normalized source and target pts'],
                   color_channel=None,
                   save_fig=False,
                   sub_name='default')
    Diff = CenteredTarget - RotatedSource
    ResidualVec = np.linalg.norm(Diff[:3, :], axis=0)
    Residual = np.linalg.norm(ResidualVec)
    InlierIdx = np.where(ResidualVec < PassThreshold)
    nInliers = InlierIdx[0].shape[0]  # number of inlier correspondences
    # Residual = np.linalg.norm(ResidualVec[InlierIdx[0]]) # todo
    InlierRatio = nInliers / SourceHom.shape[1]
    return Residual, InlierRatio, InlierIdx[0]
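
evaluateModelRotation scores a candidate rotation on the centered, scale-normalized point sets by a total residual and an inlier ratio, the way a RANSAC loop would. A compact sketch of just the scoring step, assuming 3xN arrays that are already centered and scaled (the helper name is hypothetical):

import numpy as np

def score_rotation(R, src_centered, tgt_centered, pass_threshold):
    # Per-point residuals after applying the candidate rotation, plus the
    # fraction of points whose residual falls below the threshold.
    diff = tgt_centered - np.matmul(R, src_centered)   # (3, N)
    residual_vec = np.linalg.norm(diff, axis=0)        # per-point error
    inlier_idx = np.where(residual_vec < pass_threshold)[0]
    inlier_ratio = inlier_idx.shape[0] / src_centered.shape[1]
    return np.linalg.norm(residual_vec), inlier_ratio, inlier_idx
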
Code example #6
File: aligning.py  Project: hiyyg/articulated-pose
def svd_pts(SourceHom,
            TargetHom,
            raw_axis=None,
            rotated_axis=None,
            viz_sample=False,
            index=0):
    # Copy of original paper is at: http://web.stanford.edu/class/cs273/refs/umeyama.pdf
    SourceCentroid = np.mean(SourceHom[:3, :], axis=1)
    TargetCentroid = np.mean(TargetHom[:3, :], axis=1)
    nPoints = SourceHom.shape[1]

    # pre-centering
    CenteredSource = SourceHom[:3, :] - np.tile(SourceCentroid,
                                                (nPoints, 1)).transpose()
    CenteredTarget = TargetHom[:3, :] - np.tile(TargetCentroid,
                                                (nPoints, 1)).transpose()

    # pre-scaling
    stdSource = np.sqrt(np.var(CenteredSource[:3, :], axis=1).sum())
    stdTarget = np.sqrt(np.var(CenteredTarget[:3, :], axis=1).sum())
    try:
        CenteredSource = CenteredSource / stdSource
        CenteredTarget = CenteredTarget / stdTarget
    except:
        CenteredSource = CenteredSource
        CenteredTarget = CenteredTarget
    origin = np.zeros((1, 3))
    if viz_sample:
        if raw_axis is not None:
            # raw_axis[:3, 0:1].transpose(), rotated_axis[:3, 0:1].transpose(), 'axis', 'rotated axis',
            plot3d_pts(
                [[
                    CenteredSource[:3].transpose(),
                    CenteredTarget[:3].transpose(),
                    raw_axis.transpose(),
                    rotated_axis.transpose(), origin
                ]], [[
                    'source', 'target', 'raw_axis_point', 'rotated_axis_point',
                    'origin'
                ]],
                s=100,
                title_name=[
                    '{}th iteration points for coords descent'.format(index)
                ],
                color_channel=None,
                save_fig=False,
                sub_name='default')

    if raw_axis is not None:
        CenteredSource = np.concatenate([CenteredSource, raw_axis[:3]], axis=1)
        CenteredTarget = np.concatenate([CenteredTarget, rotated_axis[:3]],
                                        axis=1)
        nPoints = nPoints + raw_axis.shape[1]

    CovMatrix = np.matmul(CenteredTarget,
                          np.transpose(CenteredSource)) / nPoints

    if np.isnan(CovMatrix).any():
        print('nPoints:', nPoints)
        print(SourceHom.shape)
        print(TargetHom.shape)
        return None, None, None

    U, D, Vh = np.linalg.svd(CovMatrix, full_matrices=True)
    d = (np.linalg.det(U) * np.linalg.det(Vh)) < 0.0
    if d:
        D[-1] = -D[-1]
        U[:, -1] = -U[:, -1]

    return U, D, Vh
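
svd_pts only returns the SVD factors of the normalized cross-covariance (with the reflection already corrected in U); the callers in the next example turn them into a rotation with R = (U @ Vh).T. A minimal usage sketch on synthetic data, assuming svd_pts from the example above is in scope and 4xN homogeneous point arrays:

import numpy as np

# hypothetical noise-free data: rotate 50 random points about the z axis
theta = np.deg2rad(30.0)
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0,            0.0,           1.0]])
src = np.vstack([np.random.rand(3, 50), np.ones((1, 50))])
tgt = np.vstack([R_true @ src[:3], np.ones((1, 50))])

U, D, Vh = svd_pts(src, tgt)
if U is not None:                  # svd_pts returns (None, None, None) on NaNs
    R = np.matmul(U, Vh).T         # same convention as the callers below
    # for noise-free data, R.T recovers R_true up to numerical precision
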
Code example #7
File: aligning.py  Project: hiyyg/articulated-pose
def estimateSimilarityUmeyamaCoords(SourceHom0, TargetHom0, SourceHom1, TargetHom1, joint_axis, joint_pts=None, rt_ref=[None, None], rt_pre=[None, None], \
                 viz=False, viz_ransac=False, viz_sample=False, use_jt_pts=False, use_ext_rot=False, verbose=False, index=0):
    """
    SourceHom0: [4, 5]
    joint_pts : [4, 5]
    joint_axis: [4, 1]
    """
    U, D0, Vh = svd_pts(SourceHom0, TargetHom0)  #
    R0 = np.matmul(U, Vh).T  # Transpose is the one that works
    U, D1, Vh = svd_pts(SourceHom1, TargetHom1)  #
    R1 = np.matmul(U, Vh).T  #
    # begin EM
    max_iter = 100
    # max_iter = 1 # todo
    StopThreshold = 2 * np.cos(0.5 / 180 * np.pi)
    if viz_sample:
        plot3d_pts([[
            SourceHom0[:3].transpose(), SourceHom1[:3].transpose(),
            TargetHom0[:3].transpose(), TargetHom1[:3].transpose(),
            joint_pts[:3].transpose()
        ]], [['source0', 'source1', 'target0', 'target1', 'joint_points']],
                   s=100,
                   title_name=['sampled points'],
                   color_channel=None,
                   save_fig=False,
                   sub_name='default')
    joint_axis_tiled0 = np.tile(joint_axis, (1, int(SourceHom0.shape[1] / 5)))
    joint_axis_tiled1 = np.tile(joint_axis, (1, int(SourceHom1.shape[1] / 5)))
    # joint_axis_tiled0 = np.tile(joint_axis, (1, int(SourceHom0.shape[1])))
    # joint_axis_tiled1 = np.tile(joint_axis, (1, int(SourceHom1.shape[1])))
    if use_ext_rot and rt_pre[0] is not None:
        # print('using external rotation')
        R0 = rt_pre[0][:3, :3].T
        R1 = rt_pre[1][:3, :3].T
    else:
        r_list = [[R0], [R1]]
        for i in range(max_iter):
            rotated_axis = np.matmul(R0.T, joint_axis_tiled1[:3])  # [3, 1]
            U, D1, Vh = svd_pts(SourceHom1,
                                TargetHom1,
                                joint_axis_tiled1,
                                rotated_axis,
                                viz_sample=viz_sample,
                                index=2 * i)
            R1_new = np.matmul(U, Vh).T
            rotated_axis = np.matmul(R1_new.T, joint_axis_tiled0[:3])
            U, D0, Vh = svd_pts(SourceHom0,
                                TargetHom0,
                                joint_axis_tiled0,
                                rotated_axis,
                                viz_sample=viz_sample,
                                index=2 * i + 1)
            R0_new = np.matmul(U, Vh).T
            eigen_sum0 = np.trace(np.matmul(R0_new.T, R0)) - 1
            eigen_sum1 = np.trace(np.matmul(R1_new.T, R1)) - 1
            R0 = R0_new
            R1 = R1_new
            r_list[0].append(R0)
            r_list[1].append(R1)
            if eigen_sum0 > StopThreshold and eigen_sum1 > StopThreshold:
                # if verbose:
                #     print('Algorithm converges at {}th iteration for Coordinate Descent'.format(i))
                break
    if viz_ransac and index < 10:  # and SourceHom0.shape[1]>5:
        ang_dis_list = [[], []]
        for j in range(2):
            q_gt = quaternion_from_matrix(rt_ref[j][:3, :3])
            for rot_iter in r_list[j]:
                q_iter = quaternion_from_matrix(rot_iter.T)
                ang_dis = 2 * np.arccos(sum(q_iter * q_gt)) * 180 / np.pi
                if ang_dis > 180:
                    ang_dis = 360 - ang_dis
                ang_dis_list[j].append(ang_dis)
        fig = plt.figure(dpi=200)
        for j in range(2):
            ax = plt.subplot(1, 2, j + 1)
            plt.plot(range(len(ang_dis_list[j])), ang_dis_list[j])
            plt.xlabel('iteration')
            plt.ylabel('rotation error')
            plt.title('{}th sampling part {}'.format(index, j))
        plt.show()
    Rs = [R0, R1]

    if use_jt_pts:
        if viz_sample:
            plot3d_pts([[
                SourceHom0[:3].transpose(), SourceHom1[:3].transpose(),
                TargetHom0[:3].transpose(), TargetHom1[:3].transpose(),
                joint_pts[:3].transpose()
            ]], [['source0', 'source1', 'target0', 'target1', 'joint_points']],
                       s=100,
                       title_name=['sampled points'],
                       color_channel=None,
                       save_fig=False,
                       sub_name='default')
        final_scale, Ts, OutTrans = compute_scale_translation(
            [SourceHom0, SourceHom1], [TargetHom0, TargetHom1], Rs, joint_pts)
        if verbose:
            print("scale by adding joints are \n: {}".format(final_scale))
    else:
        if viz_sample:
            plot3d_pts([[
                SourceHom0[:3].transpose(), SourceHom1[:3].transpose(),
                TargetHom0[:3].transpose(), TargetHom1[:3].transpose()
            ]], [['source0', 'source1', 'target0', 'target1']],
                       s=100,
                       title_name=['points after sampling'],
                       color_channel=None,
                       save_fig=False,
                       sub_name='default')
        final_scale0, T0, OutTrans0 = est_ST(SourceHom0, TargetHom0, D0, Rs[0])
        final_scale1, T1, OutTrans1 = est_ST(SourceHom1, TargetHom1, D1, Rs[1])
        final_scale = [final_scale0, final_scale1]
        Ts = [T0, T1]
        OutTrans = [OutTrans0, OutTrans1]
        if verbose:
            print("scale by direct solving per part are \n: {}".format(
                final_scale))

    return final_scale, Rs, Ts, OutTrans
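
The coordinate-descent loop above stops once both per-part rotations change by less than about 0.5 degrees between iterations: for rotation matrices, trace(R_new.T @ R) = 1 + 2*cos(theta), so testing eigen_sum = trace(...) - 1 against StopThreshold = 2*cos(0.5 deg) is an angle threshold in disguise. A small sketch of that identity (the helper name is hypothetical):

import numpy as np

def rotation_change_deg(R_old, R_new):
    # Angle of the relative rotation between two 3x3 rotation matrices,
    # recovered from the trace identity trace(R) = 1 + 2*cos(theta).
    cos_theta = (np.trace(np.matmul(R_new.T, R_old)) - 1.0) / 2.0
    return np.degrees(np.arccos(np.clip(cos_theta, -1.0, 1.0)))

# the loop above converges once rotation_change_deg(R, R_new) < 0.5 for both parts
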