Example #1
    def _reset_direct_simulation(self):
        """Reset direct simulation.

        With this, the env can be used without a backend.
        """
        # set this to False to disable pyBullet's GUI visualization
        visualization = True

        # reset simulation
        del self.platform

        # initialize simulation
        initial_object_pose = move_cube.sample_goal(difficulty=-1)
        self.platform = trifinger_simulation.TriFingerPlatform(
            visualization=visualization,
            initial_object_pose=initial_object_pose,
        )

        # visualize the goal
        if visualization:
            self.goal_marker = trifinger_simulation.visual_objects.CubeMarker(
                width=0.065,
                position=self.goal["position"],
                orientation=self.goal["orientation"],
                physicsClientId=self.platform.simfinger._pybullet_client_id,
            )
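
Example #1 reads self.goal["position"] and self.goal["orientation"] but does not show where that dict comes from. Below is a minimal sketch (not part of the original environment) of one way to produce it with the same task module; the .to_dict() conversion mirrors what Example #3 does with the initializer's goal, and difficulty=-1, as used for the initial pose above, samples a start pose rather than a goal.

from trifinger_simulation.tasks import move_cube

# sample a goal pose (a positive difficulty selects a goal, -1 an initial
# pose) and convert it to the {"position": ..., "orientation": ...} dict
# that the reset method above indexes
goal = move_cube.sample_goal(difficulty=1).to_dict()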
Example #2
    def _reset_direct_simulation(self):
        """Reset direct simulation.

        With this, the env can be used without a backend.
        """

        # reset simulation
        del self.platform

        # initialize simulation
        if self.initializer is None:
            initial_object_pose = move_cube.sample_goal(difficulty=-1)
        else:
            initial_object_pose = self.initializer.get_initial_state()
            self.goal = self.initializer.get_goal()
        self.platform = trifinger_simulation.TriFingerPlatform(
            visualization=self.visualization,
            initial_object_pose=initial_object_pose,
        )
        # visualize the goal
        if self.visualization:
            self.goal_marker = trifinger_simulation.visual_objects.CubeMarker(
                width=0.065,
                position=self.goal["position"],
                orientation=self.goal["orientation"],
                physicsClientId=self.platform.simfinger._pybullet_client_id,
            )
Example #3
    def _reset_direct_simulation(self):
        """Reset direct simulation.

        With this, the env can be used without a backend.
        """

        # reset simulation
        del self.platform

        # initialize simulation
        if self.initializer is None:
            initial_object_pose = move_cube.sample_goal(difficulty=-1)
        else:
            initial_object_pose = self.initializer.get_initial_state()
            self.goal = self.initializer.get_goal().to_dict()
        self.platform = trifinger_simulation.TriFingerPlatform(
            visualization=self.visualization,
            initial_object_pose=initial_object_pose,
        )
        # use mass of the real cube (CUBOID_MASS and CUBOID_SIZE are
        # module-level constants not shown in this snippet)
        p.changeDynamics(
            bodyUniqueId=self.platform.cube.block,
            linkIndex=-1,
            physicsClientId=self.platform.simfinger._pybullet_client_id,
            mass=CUBOID_MASS)
        # visualize the goal
        if self.visualization:
            self.goal_marker = trifinger_simulation.visual_objects.CuboidMarker(
                size=CUBOID_SIZE,
                position=self.goal["position"],
                orientation=self.goal["orientation"],
                pybullet_client_id=self.platform.simfinger._pybullet_client_id,
            )
Example #4
def main():
    """Example on how to move the robot in simulation."""
    # create the robot platform simulation
    robot = trifinger_simulation.TriFingerPlatform(
        visualization=True,
        object_type=ObjectType.COLORED_CUBE,
    )

    # move the robot
    position_down = [-0.08, 0.84, -1.2] * 3
    position_up = [0.5, 1.2, -2.4] * 3
    target_positions = [position_down, position_up]

    i = 0
    while True:
        action = robot.Action(position=target_positions[i % 2])
        i += 1

        for _ in range(500):
            t = robot.append_desired_action(action)

            # make sure to not exceed the number of allowed actions
            if t >= move_cube_on_trajectory.EPISODE_LENGTH - 1:
                return

        robot_observation = robot.get_robot_observation(t)
        print("Finger positions:", robot_observation.position)

        camera_observation = robot.get_camera_observation(t)
        print("Object position:", camera_observation.object_pose.position)
Example #5
    def reset(self):
        # hard-reset simulation
        del self.platform

        # initialize simulation
        initial_robot_position = trifingerpro_limits.robot_position.default

        self.platform = trifinger_simulation.TriFingerPlatform(
            visualization=self.visualization,
            initial_robot_position=initial_robot_position,
            enable_cameras=True,
            object_type=ObjectType.DICE,
        )

        # if no goal is given, sample one randomly
        if self.goal is None:
            self.current_goal = task.sample_goal()
        else:
            self.current_goal = self.goal

        self.goal_masks = task.generate_goal_mask(self.camera_params,
                                                  self.current_goal)

        self.info = {"time_index": -1}

        self.step_count = 0

        return self._create_observation(0, self._initial_action)
Example #6
def main(logdir, video_path):
    goal, difficulty = get_goal(logdir)
    data = get_synced_log_data(logdir, goal, difficulty)
    fps = len(data['t']) / (data['stamp'][-1] - data['stamp'][0])
    video_recorder = VideoRecorder(fps)
    cube_drawer = CubeDrawer(logdir)

    initial_object_pose = move_cube.Pose(data['cube'][0].position,
                                         data['cube'][0].orientation)
    platform = trifinger_simulation.TriFingerPlatform(
        visualization=True,
        initial_object_pose=initial_object_pose,
    )
    markers = []
    marker_cube_ori = VisualCubeOrientation(data['cube'][0].position,
                                            data['cube'][0].orientation)
    marker_goal_ori = VisualCubeOrientation(goal['position'], goal['orientation'])

    visual_objects.CubeMarker(
        width=0.065,
        position=goal['position'],
        orientation=goal['orientation']
    )

    p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
    p.resetDebugVisualizerCamera(cameraDistance=0.6,
                                 cameraYaw=0,
                                 cameraPitch=-40,
                                 cameraTargetPosition=[0, 0, 0])

    for i, t in enumerate(data['t']):
        # render the simulation with the robot at the *desired* joint positions
        platform.simfinger.reset_finger_positions_and_velocities(
            data['desired_action'][i].position)
        platform.cube.set_state(data['cube'][i].position,
                                data['cube'][i].orientation)
        marker_cube_ori.set_state(data['cube'][i].position,
                                  data['cube'][i].orientation)
        frame_desired = video_recorder.get_views()
        frame_desired = cv2.cvtColor(frame_desired, cv2.COLOR_RGB2BGR)
        # render again with the robot at the *observed* joint positions
        platform.simfinger.reset_finger_positions_and_velocities(
            data['robot'][i].position)
        frame_observed = video_recorder.get_views()
        frame_observed = cv2.cvtColor(frame_observed, cv2.COLOR_RGB2BGR)
        # real camera images, with and without the estimated cube pose drawn in
        frame_real = np.concatenate(data['images'][i], axis=1)
        frame_real_cube = np.concatenate(
            cube_drawer.add_cube(data['images'][i], data['cube'][i]), axis=1)

        frame = vstack_frames((frame_desired, frame_observed, frame_real, frame_real_cube))
        # frame = np.concatenate((frame_desired, frame_observed,
        #                         frame_real, frame_real_cube), axis=0)
        # add text
        frame = add_text(frame, text="step: {:06d}".format(t), position=(10, 40))
        frame = add_text(frame, text="acc reward: {:.3f}".format(data["acc_reward"][i]), position=(10, 70))
        frame = add_text(
            frame,
            text="tip force {}".format(
                np.array2string(data["robot"][i].tip_force, precision=3),
            ),
            position=(10, 100),
        )
        video_recorder.add_frame(frame)
    video_recorder.save_video(video_path)
Example #7
    def reset(self):
        """Reset the environment."""
        # hard-reset simulation
        del self.platform

        # initialize simulation
        initial_robot_position = trifingerpro_limits.robot_position.default
        # initialize cube at the centre
        initial_object_pose = task.move_cube.Pose(
            position=task.INITIAL_CUBE_POSITION)

        self.platform = trifinger_simulation.TriFingerPlatform(
            visualization=self.visualization,
            initial_robot_position=initial_robot_position,
            initial_object_pose=initial_object_pose,
        )

        # if no goal is given, sample one randomly
        if self.goal is None:
            trajectory = task.sample_goal()
        else:
            trajectory = self.goal

        # visualize the goal
        if self.visualization:
            self.goal_marker = trifinger_simulation.visual_objects.CubeMarker(
                width=task.move_cube._CUBE_WIDTH,
                position=trajectory[0][1],
                orientation=(0, 0, 0, 1),
                pybullet_client_id=self.platform.simfinger._pybullet_client_id,
            )

        self.info = {"time_index": -1, "trajectory": trajectory}

        self.step_count = 0

        return self._create_observation(0, self._initial_action)
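
Example #7 shows only reset(); the matching step() has to forward actions to the platform and build observations the same way. A minimal sketch of such a step(), assuming a gym-style interface and ignoring details like frame-skipping that the real environment may handle; _compute_reward and episode_length are hypothetical stand-ins, not names from the original code:

    def step(self, action):
        # sketch only: append the action and let the platform run the
        # corresponding simulation steps
        t = self.platform.append_desired_action(
            self.platform.Action(position=action))
        self.step_count += 1
        self.info["time_index"] = t

        observation = self._create_observation(t, action)
        # hypothetical reward helper; the original environment defines its own
        reward = self._compute_reward(observation, self.info)
        done = self.step_count >= self.episode_length  # assumed attribute
        return observation, reward, done, self.info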
Example #8
def main(logdir, video_path):
    custom_drawer = CustomDrawer(os.path.join(logdir, 'user/custom_data'))
    goal, difficulty = get_goal(logdir)
    data = get_synced_log_data(logdir, goal, difficulty)
    fps = len(data['t']) / (data['stamp'][-1] - data['stamp'][0])
    video_recorder = VideoRecorder(fps)
    cube_drawer = CubeDrawer(logdir)

    initial_object_pose = move_cube.Pose(data['cube'][0].position,
                                         data['cube'][0].orientation)
    platform = trifinger_simulation.TriFingerPlatform(
        visualization=False,
        initial_object_pose=initial_object_pose,
    )
    markers = []
    marker_cube_ori = VisualCubeOrientation(data['cube'][0].position,
                                            data['cube'][0].orientation)
    marker_goal_ori = VisualCubeOrientation(goal['position'],
                                            goal['orientation'])

    visual_objects.CuboidMarker(
        size=move_cube._CUBOID_SIZE,
        position=goal['position'],
        orientation=goal['orientation'],
        pybullet_client_id=platform.simfinger._pybullet_client_id,
    )
    # if 'grasp_target_cube_pose' in custom_log:
    #     markers.append(
    #         visual_objects.CubeMarker(
    #             width=0.065,
    #             position=custom_log['grasp_target_cube_pose']['position'],
    #             orientation=custom_log['grasp_target_cube_pose']['orientation'],
    #             color=(0, 0, 1, 0.5),
    #             pybullet_client_id=platform.simfinger._pybullet_client_id,
    #         )
    #     )
    # if 'pregrasp_tip_positions' in custom_log:
    #     for tip_pos in custom_log['pregrasp_tip_positions']:
    #         print(tip_pos)

    #         markers.append(
    #             SphereMarker(
    #                 radius=0.015,
    #                 position=tip_pos,
    #                 color=(0, 1, 1, 0.5)
    #             )
    #         )
    # if 'failure_target_tip_positions' in custom_log:
    #     for tip_pos in custom_log['failure_target_tip_positions']:
    #         print(tip_pos)
    #         markers.append(
    #             SphereMarker(
    #                 radius=0.015,
    #                 position=tip_pos,
    #                 color=(1, 0, 0, 0.5)
    #             )
    #         )
    # if 'pitch_grasp_positions' in custom_log:
    #     for tip_pos in custom_log['pitch_grasp_positions']:
    #         print(tip_pos)
    #         markers.append(
    #             SphereMarker(
    #                 radius=0.015,
    #                 position=tip_pos,
    #                 color=(1, 1, 1, 0.5)
    #             )
    #         )
    # if 'yaw_grasp_positions' in custom_log:
    #     for tip_pos in custom_log['yaw_grasp_positions']:
    #         print(tip_pos)
    #         markers.append(
    #             SphereMarker(
    #                 radius=0.015,
    #                 position=tip_pos,
    #                 color=(0, 0, 0, 0.5)
    #             )
    #         )

    p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
    p.resetDebugVisualizerCamera(cameraDistance=0.6,
                                 cameraYaw=0,
                                 cameraPitch=-40,
                                 cameraTargetPosition=[0, 0, 0])

    for i, t in enumerate(data['t']):
        platform.simfinger.reset_finger_positions_and_velocities(
            data['desired_action'][i].position)
        platform.cube.set_state(data['cube'][i].position,
                                data['cube'][i].orientation)
        marker_cube_ori.set_state(data['cube'][i].position,
                                  data['cube'][i].orientation)
        custom_drawer.draw(t)
        frame_desired = video_recorder.get_views()
        frame_desired = cv2.cvtColor(frame_desired, cv2.COLOR_RGB2BGR)
        platform.simfinger.reset_finger_positions_and_velocities(
            data['robot'][i].position)
        frame_observed = video_recorder.get_views()
        frame_observed = cv2.cvtColor(frame_observed, cv2.COLOR_RGB2BGR)
        frame_real = np.concatenate(data['images'][i], axis=1)
        frame_real_cube = np.concatenate(cube_drawer.add_cube(
            data['images'][i], data['cube'][i]),
                                         axis=1)

        frame = vstack_frames(
            (frame_desired, frame_observed, frame_real, frame_real_cube))
        # frame = np.concatenate((frame_desired, frame_observed,
        #                         frame_real, frame_real_cube), axis=0)
        # add text
        frame = add_text(frame,
                         text="step: {:06d}".format(t),
                         position=(10, 40))
        frame = add_text(frame,
                         text="acc reward: {:.3f}".format(
                             data["acc_reward"][i]),
                         position=(10, 70))
        frame = add_text(
            frame,
            text="tip force {}".format(
                np.array2string(data["robot"][i].tip_force, precision=3),
            ),
            position=(10, 100),
        )
        video_recorder.add_frame(frame)
    video_recorder.save_video(video_path)