Example #1
    def visualize(self, states, dt):

        import meshcat
        import meshcat.geometry as g
        import meshcat.transformations as tf
        import numpy as np
        import time

        # Create a new visualizer
        vis = meshcat.Visualizer()
        vis.open()

        vis["/Cameras/default"].set_transform(
            tf.translation_matrix([0, 0, 0]).dot(
                tf.euler_matrix(0, np.radians(-30), -np.pi / 2)))

        vis["/Cameras/default/rotated/<object>"].set_transform(
            tf.translation_matrix([1, 0, 0]))

        vis["Quadrotor"].set_object(
            g.StlMeshGeometry.from_file('systems/crazyflie2.stl'))

        # Replay the trajectory in an endless loop: position in state[0:3],
        # attitude quaternion in state[6:10] (scalar-first [w, x, y, z], as
        # expected by tf.quaternion_matrix).
        while True:
            for state in states:
                vis["Quadrotor"].set_transform(
                    tf.translation_matrix([state[0], state[1], state[2]]).dot(
                        tf.quaternion_matrix(state[6:10])))
                time.sleep(dt)
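
Note on the quaternion slice above: tf.quaternion_matrix in meshcat.transformations expects the scalar-first ordering [w, x, y, z], so state[6:10] has to be stored that way. A minimal, self-contained check of that convention (independent of the class above; only numpy and meshcat are assumed installed):

import numpy as np
import meshcat.transformations as tf

# Identity rotation: scalar-first quaternion [w, x, y, z] = [1, 0, 0, 0].
assert np.allclose(tf.quaternion_matrix([1.0, 0.0, 0.0, 0.0]), np.eye(4))

# A 90-degree yaw about z, still scalar-first.
q_yaw = tf.quaternion_from_euler(0.0, 0.0, np.pi / 2)
print(tf.quaternion_matrix(q_yaw)[0:3, 0:3])
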
Example #2
    def setCameraTransform(self,
                           translation=np.zeros(3),
                           rotation=np.zeros(3),
                           relative=False):
        # translation : [Px, Py, Pz]
        # rotation : [Roll, Pitch, Yaw]
        # relative : if True, compose with the current camera pose
        #            (only supported by the gepetto-gui backend)

        R_pnc = rpyToMatrix(np.array(rotation))
        if Viewer.backend == 'gepetto-gui':
            H_abs = SE3(R_pnc, np.array(translation))
            if relative:
                # Compose the requested motion with the current camera pose.
                H_orig = XYZQUATToSe3(
                    self._client.getCameraTransform(self._window_id))
                H_abs = H_abs * H_orig
            self._client.setCameraTransform(self._window_id,
                                            se3ToXYZQUAT(H_abs).tolist())
        else:
            if relative:
                raise RuntimeError(
                    "'relative'=True not available with meshcat.")
            import meshcat.transformations as tf
            # Transformation of the camera object
            T_meshcat = tf.translation_matrix(translation)
            self._client.viewer[
                "/Cameras/default/rotated/<object>"].set_transform(T_meshcat)
            # Orientation of the camera object. Pinocchio's Quaternion.coeffs()
            # is scalar-last [x, y, z, w]; meshcat's transformations module
            # expects scalar-first [w, x, y, z], hence the roll by one.
            Q_pnc = Quaternion(R_pnc).coeffs()
            Q_meshcat = np.roll(Q_pnc, shift=1)
            R_meshcat = tf.quaternion_matrix(Q_meshcat)
            self._client.viewer["/Cameras/default"].set_transform(R_meshcat)
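
The helpers used here come from Pinocchio: rpyToMatrix lives in pinocchio.rpy, and Quaternion(...).coeffs() returns the scalar-last [x, y, z, w] vector, while meshcat's transformations module wants scalar-first [w, x, y, z], which is what np.roll(..., shift=1) fixes. A small stand-alone check of that round trip (a sketch assuming Pinocchio is installed; the 90-degree yaw is just an illustrative value):

import numpy as np
from pinocchio import Quaternion
from pinocchio.rpy import rpyToMatrix

rotation = [0.0, 0.0, np.pi / 2]      # [Roll, Pitch, Yaw]
R_pnc = rpyToMatrix(np.array(rotation))
Q_pnc = Quaternion(R_pnc).coeffs()    # scalar-last  [x, y, z, w]
Q_meshcat = np.roll(Q_pnc, shift=1)   # scalar-first [w, x, y, z]
print(Q_meshcat)                      # approx. [0.7071, 0.0, 0.0, 0.7071]
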
Example #3
    def visualize_train_input(self, in_img, p, q, theta, z, roll, pitch):
        """Visualize the training input."""
        points = []
        colors = []
        height = in_img[:, :, 3]

        # Back-project every pixel of the input image into a 3-D point in
        # workspace coordinates, keeping its RGB color.
        for i in range(in_img.shape[0]):
            for j in range(in_img.shape[1]):
                pixel = (i, j)
                position = utils.pixel_to_position(pixel, height, self.bounds,
                                                   self.pixel_size)
                points.append(position)
                colors.append(in_img[i, j, :3])

        points = np.array(points).T  # shape (3, N)
        colors = np.array(colors).T / 255.0  # shape (3, N)

        self.vis["pointclouds/scene"].set_object(
            g.PointCloud(position=points, color=colors))

        # Draw a small coordinate frame at the pick position.
        pick_position = utils.pixel_to_position(p, height, self.bounds,
                                                self.pixel_size)
        label = "pick"
        utils.make_frame(self.vis, label, h=0.05, radius=0.0012, o=0.1)

        pick_transform = np.eye(4)
        pick_transform[0:3, 3] = pick_position
        self.vis[label].set_transform(pick_transform)

        # Draw a second frame at the place position, lifted to height z and
        # rotated by the commanded place orientation below.
        place_position = utils.pixel_to_position(q, height, self.bounds,
                                                 self.pixel_size)
        label = "place"
        utils.make_frame(self.vis, label, h=0.05, radius=0.0012, o=0.1)

        place_transform = np.eye(4)
        place_transform[0:3, 3] = place_position
        place_transform[2, 3] = z

        # PyBullet quaternions are scalar-last [x, y, z, w]; reorder to
        # scalar-first [w, x, y, z] for meshcat's transformations module.
        rotation = utils.get_pybullet_quaternion_from_rot(
            (roll, pitch, -theta))
        quaternion_wxyz = np.asarray(
            [rotation[3], rotation[0], rotation[1], rotation[2]])

        place_transform[0:3, 0:3] = mtf.quaternion_matrix(quaternion_wxyz)[0:3, 0:3]
        self.vis[label].set_transform(place_transform)

        # Matplotlib sanity check: color image (top) and height map (bottom),
        # with the pick pixel p and place pixel q scattered on both.
        _, ax = plt.subplots(2, 1)
        ax[0].imshow(in_img.transpose(1, 0, 2)[:, :, :3] / 255.0)
        ax[0].scatter(p[0], p[1])
        ax[0].scatter(q[0], q[1])
        ax[1].imshow(in_img.transpose(1, 0, 2)[:, :, 3])
        ax[1].scatter(p[0], p[1])
        ax[1].scatter(q[0], q[1])
        plt.show()
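
As a reference for the point-cloud call above: g.PointCloud takes positions and colors as (3, N) arrays with colors in [0, 1], which is why the code transposes the stacked lists and divides the colors by 255. A stand-alone sketch with random data (the path name and sizes are arbitrary):

import numpy as np
import meshcat
import meshcat.geometry as g

vis = meshcat.Visualizer()
pts = np.random.rand(3, 1000)   # (3, N) point positions
rgb = np.random.rand(3, 1000)   # (3, N) colors in [0, 1]
vis["pointclouds/demo"].set_object(g.PointCloud(position=pts, color=rgb))
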
Example #4
def meshcat_visualize(vis, obs, act, info):
    """Visualize data using meshcat."""

    # info maps object ids to (position, quaternion_xyzw) poses; draw one
    # coordinate frame per object.
    for key in sorted(info.keys()):

        pose = info[key]
        pick_transform = np.eye(4)
        pick_transform[0:3, 3] = pose[0]
        # Reorder the scalar-last [x, y, z, w] quaternion to scalar-first
        # [w, x, y, z] for meshcat.
        quaternion_wxyz = np.asarray(
            [pose[1][3], pose[1][0], pose[1][1], pose[1][2]])
        pick_transform[0:3, 0:3] = mtf.quaternion_matrix(quaternion_wxyz)[0:3, 0:3]
        label = 'obj_' + str(key)
        make_frame(vis, label, h=0.05, radius=0.0012, o=1.0)
        vis[label].set_transform(pick_transform)

    # For every camera: unproject its depth image into a point cloud,
    # transform it into the world frame, and color it with the RGB image.
    for cam_index in range(len(act['camera_config'])):

        verts = unproject_depth_vectorized(
            obs['depth'][cam_index], [0, 1],
            np.array(act['camera_config'][cam_index]['intrinsics']).reshape(
                3, 3), np.zeros(5))

        # Switch from (N, 3) to (3, N), as expected by meshcat.
        verts = verts.T

        # Camera pose in the world frame (position + scalar-last quaternion).
        cam_transform = np.eye(4)
        cam_transform[0:3, 3] = act['camera_config'][cam_index]['position']
        quaternion_xyzw = act['camera_config'][cam_index]['rotation']
        quaternion_wxyz = np.asarray([
            quaternion_xyzw[3], quaternion_xyzw[0], quaternion_xyzw[1],
            quaternion_xyzw[2]
        ])
        cam_transform[0:3, 0:3] = mtf.quaternion_matrix(quaternion_wxyz)[0:3, 0:3]
        verts = apply_transform(cam_transform, verts)

        # Colors as a (3, N) array scaled to [0, 1].
        colors = obs['color'][cam_index].reshape(-1, 3).T / 255.0

        vis['pointclouds/' + str(cam_index)].set_object(
            g.PointCloud(position=verts, color=colors))
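
unproject_depth_vectorized, apply_transform, and make_frame are helpers from the surrounding codebase and are not shown here. As a rough guide to what apply_transform has to do given the (3, N) layout used above, a plausible minimal version (an assumption, not the original implementation) is:

import numpy as np

def apply_transform(transform, points):
    """Apply a 4x4 homogeneous transform to points given as a (3, N) array."""
    points_h = np.vstack([points, np.ones((1, points.shape[1]))])  # (4, N)
    return (transform @ points_h)[0:3, :]
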
Example #5
    def setCameraTransform(self, translation, rotation):
        # translation : [Px, Py, Pz]
        # rotation : [Roll, Pitch, Yaw]

        R_pnc = rpyToMatrix(np.array(rotation))
        if Viewer.backend == 'gepetto-gui':
            T_pnc = np.array(translation)
            T_R = SE3(R_pnc, T_pnc)
            self._client.setCameraTransform(self._window_id,
                                            se3ToXYZQUAT(T_R).tolist())
        else:
            import meshcat.transformations as tf
            # Transformation of the camera object
            T_meshcat = tf.translation_matrix(translation)
            self._client.viewer[
                "/Cameras/default/rotated/<object>"].set_transform(T_meshcat)
            # Orientation of the camera object. Pinocchio's Quaternion.coeffs()
            # is scalar-last [x, y, z, w]; meshcat's transformations module
            # expects scalar-first [w, x, y, z], hence the roll by one.
            Q_pnc = Quaternion(R_pnc).coeffs()
            Q_meshcat = np.roll(Q_pnc, shift=1)
            R_meshcat = tf.quaternion_matrix(Q_meshcat)
            self._client.viewer["/Cameras/default"].set_transform(R_meshcat)
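
Outside of this Viewer wrapper, the same split applies when driving the meshcat camera directly: the offset from the look-at target goes on the path "/Cameras/default/rotated/<object>" and the orientation goes on "/Cameras/default". A bare-bones, self-contained sketch (the translation and angles are illustrative values only):

import numpy as np
import meshcat
import meshcat.transformations as tf

vis = meshcat.Visualizer()
# Offset the camera object from its look-at target.
vis["/Cameras/default/rotated/<object>"].set_transform(
    tf.translation_matrix([2.0, 0.0, 1.0]))
# Orient the camera from roll/pitch/yaw using meshcat's euler helper.
vis["/Cameras/default"].set_transform(
    tf.euler_matrix(0.0, np.radians(-20.0), 0.0))
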