Example #1
"""
License: BSD 3-Clause License
Copyright (C) 2018-2019, New York University, Max Planck Gesellschaft
Copyright note valid unless otherwise stated in individual files.
All rights reserved.
"""

import time
import numpy as np
from bullet_utils.env import BulletEnvWithGround
from robot_properties_solo.solo12wrapper import Solo12Robot, Solo12Config

if __name__ == "__main__":

    # Create a Pybullet simulation environment
    env = BulletEnvWithGround()

    # Create a robot instance. This initializes the simulator as well.
    robot = env.add_robot(Solo12Robot)
    tau = np.zeros(robot.nb_dof)

    # Reset the robot to some initial state.
    q0 = np.matrix(Solo12Config.initial_configuration).T
    dq0 = np.matrix(Solo12Config.initial_velocity).T
    robot.reset_state(q0, dq0)

    # Run the simulator for 2000 steps
    for i in range(2000):
        # TODO: Implement a controller here.
        robot.send_joint_command(tau)

        # Step the simulator; sleep=True keeps it close to real time.
        env.step(sleep=True)
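
A minimal sketch of what could replace the TODO above: a joint-space PD controller that holds the initial joint angles. The gains Kp and Kd are illustrative assumptions (not values from the original demo), and the 7/6 offsets skip the floating-base coordinates, matching the slicing used by the wrapper class below.

    # PD sketch (assumed gains): hold the initial joint configuration.
    Kp, Kd = 5.0, 0.1
    q_des = np.asarray(q0).flatten()[7:]
    for i in range(2000):
        q, dq = robot.get_state()
        q, dq = np.asarray(q).flatten(), np.asarray(dq).flatten()
        tau = Kp * (q_des - q[7:]) - Kd * dq[6:]
        robot.send_joint_command(tau)
        env.step(sleep=True)
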
class DgBulletSoloBaseRobot(dynamic_graph_manager.robot.Robot):
    """
    Base implementation for the solo8 and solo12 robots.
    """

    def __init__(
        self,
        SoloRobotClass,
        SoloConfigClass,
        use_fixed_base=False,
        record_video=False,
        init_sliders_pose=4 * [0.5],
    ):
        self.SoloRobotClass = SoloRobotClass
        self.SoloConfigClass = SoloConfigClass
        self.record_video = record_video

        # Create the robot configuration.
        self.config = SoloConfigClass()

        # Create the bullet environment.
        self.bullet_env = BulletEnvWithGround()

        robotStartPos = [0.0, 0, 0.7]
        robotStartOrientation = pybullet.getQuaternionFromEuler([0, 0, 0])

        # Create the robot and add it to the env.
        self.robot_bullet = SoloRobotClass(
            pos=robotStartPos,
            orn=robotStartOrientation,
            useFixedBase=use_fixed_base,
            init_sliders_pose=init_sliders_pose,
        )

        self.robot_bullet = self.bullet_env.add_robot(self.robot_bullet)

        self.q0 = zero(self.robot_bullet.pin_robot.nq)

        # Initialize the device.
        self.device = Device(self.config.robot_name)
        assert path.exists(self.config.dgm_yaml_path)
        self.device.initialize(self.config.dgm_yaml_path)

        # Create signals for the base.
        self.signal_base_pos_ = VectorConstant("bullet_quadruped_base_pos")
        self.signal_base_vel_ = VectorConstant("bullet_quadruped_base_vel")
        self.signal_base_vel_world_ = VectorConstant(
            "bullet_quadruped_base_vel_world"
        )
        self.signal_base_pos_.sout.value = np.hstack(
            [robotStartPos, robotStartOrientation]
        )
        self.signal_base_vel_.sout.value = np.array(
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        )
        self.signal_base_vel_world_.sout.value = np.array(
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        )

        # Initialize signals that are not filled in sim2signals.
        if self.device.hasSignal("contact_sensors"):
            self.device.contact_sensors.value = np.array(4 * [0.0])

        # Sync the current robot state to the graph input signals.
        self._sim2signal()

        super(DgBulletSoloBaseRobot, self).__init__(
            self.config.robot_name, self.device
        )

    def base_signals(self):
        """Return the base position and base velocity output signals."""
        return self.signal_base_pos_.sout, self.signal_base_vel_.sout

    def _sim2signal(self):
        """Reads the state from the simulator and fills
        the corresponding signals."""

        # TODO: Handle the following signals:
        # - joint_target_torques
        # - joint_torques

        q, dq = self.robot_bullet.get_state()

        device = self.device
        device.joint_positions.value = q[7:]
        device.joint_velocities.value = dq[6:]

        self.signal_base_pos_.sout.value = q[0:7]
        self.signal_base_vel_.sout.value = dq[0:6]
        self.signal_base_vel_world_.sout.value = (
            self.robot_bullet.get_base_velocity_world()
        )

        device.slider_positions.value = np.array(
            [
                self.robot_bullet.get_slider_position("a"),
                self.robot_bullet.get_slider_position("b"),
                self.robot_bullet.get_slider_position("c"),
                self.robot_bullet.get_slider_position("d"),
            ]
        )

    def run(self, steps=1, sleep=False):
        """ Get input from Device, Step the simulation, feed the Device. """

        for _ in range(steps):
            self.device.execute_graph()
            self.robot_bullet.send_joint_command(
                np.matrix(self.device.ctrl_joint_torques.value).T
            )
            self.bullet_env.step(sleep=sleep)
            self._sim2signal()

    def reset_state(self, q, dq):
        """Sets the bullet simulator and the signals to
        the provided state values."""
        self.robot_bullet.reset_state(q, dq)
        self._sim2signal()

    def add_ros_and_trace(
        self, client_name, signal_name, topic_name=None, topic_type=None
    ):
        # For the vicon entity; this stub only stores the signal name.
        self.signal_name = signal_name

    def start_video_recording(self, file_name):
        if self.record_video:
            self.bullet_env.start_video_recording(file_name)

    def stop_video_recording(self):
        if self.record_video:
            self.bullet_env.stop_video_recording()
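
A rough usage sketch for the wrapper above, assuming the Solo12Robot and Solo12Config classes from the first example and a working dynamic_graph_manager setup (the constructor asserts that the config's dgm_yaml_path exists); the control graph itself is omitted.

    # Hedged usage sketch; no control graph is built here.
    robot = DgBulletSoloBaseRobot(Solo12Robot, Solo12Config)

    # Reset to the default configuration and run graph + simulation for 1000 steps.
    q0 = np.array(Solo12Config.initial_configuration)
    dq0 = np.array(Solo12Config.initial_velocity)
    robot.reset_state(q0, dq0)
    robot.run(steps=1000, sleep=True)

    # Base pose and velocity signals that can be plugged into graph entities.
    base_pos_sig, base_vel_sig = robot.base_signals()
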
"""

import os
from os import path
import numpy as np
import time
import pybullet
import pinocchio

from bullet_utils.env import BulletEnvWithGround
from robot_properties_solo.solo8wrapper import Solo8Robot

if __name__ == "__main__":
    np.set_printoptions(precision=2, suppress=True)
    env = BulletEnvWithGround(pybullet.GUI)
    robot = Solo8Robot()
    env.add_robot(robot)
    q, dq = robot.get_state()

    # Update the simulation state to the new initial configuration.
    robot.reset_state(q, dq)

    # Run the simulator for 2000 steps = 2 seconds.
    for i in range(2000):
        # Get the current state (position and velocity)
        q, dq = robot.get_state()
        active_contact_frames, contact_forces = robot.get_force()

        # Alternatively, if you want to use properties from the pinocchio robot,
        # like the Jacobians or the mass matrix, you can also update the
        # pinocchio model from the state fetched above.
        robot.pin_robot.forwardKinematics(q, dq)

        # Send a zero torque command and step the simulation.
        robot.send_joint_command(np.zeros(robot.nb_dof))
        env.step(sleep=True)
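
Once the pinocchio model has been updated from the simulated state, standard rigid-body quantities can be read from robot.pin_robot (a pinocchio RobotWrapper). A small illustrative sketch, not part of the original demo:

    # Illustrative: dynamics quantities from the pinocchio model.
    pin_robot = robot.pin_robot
    M = pin_robot.mass(q)     # joint-space inertia matrix
    h = pin_robot.nle(q, dq)  # Coriolis, centrifugal and gravity terms
    print("Inertia matrix shape:", M.shape)
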
- 2. "source ./devel/setup.bash" is called from the root of the catkin
     workspace.
- 3. Run the demo by either:
    - 3.1. `python3 display_bolt.py`
    - 3.2. `cd /path/to/robot_properties_bolt/` ; `./demos/display_bolt.py`
"""

import time
import numpy as np
from bullet_utils.env import BulletEnvWithGround
from robot_properties_bolt.bolt_wrapper import BoltRobot, BoltConfig
import pybullet as p

if __name__ == "__main__":
    # Create a robot instance. This initializes the simulator as well.
    env = BulletEnvWithGround(p.GUI)
    robot = env.add_robot(BoltRobot)
    tau = np.zeros(robot.nb_dof)

    # Reset the robot to some initial state.
    q0 = BoltConfig.initial_configuration
    dq0 = BoltConfig.initial_velocity
    robot.reset_state(q0, dq0)

    # Run the simulator for 500 steps.
    for i in range(500):
        # TODO: Implement a controller here.
        robot.send_joint_command(tau)

        # Step the simulator.
        env.step(sleep=True)

"""
License: BSD 3-Clause License
Copyright (C) 2018-2019, New York University, Max Planck Gesellschaft
Copyright note valid unless otherwise stated in individual files.
All rights reserved.
"""

import time
import numpy as np
import pybullet as p
from bullet_utils.env import BulletEnvWithGround
from robot_properties_teststand.teststand_wrapper import TeststandRobot, TeststandConfig

if __name__ == "__main__":

    # Create a robot instance. This initializes the simulator as well.
    env = BulletEnvWithGround(p.GUI)
    robot = env.add_robot(TeststandRobot)
    tau = np.zeros(robot.nb_dof)

    # Reset the robot to some initial state.
    q0 = np.matrix(TeststandConfig.initial_configuration).T
    dq0 = np.matrix(TeststandConfig.initial_velocity).T
    robot.reset_state(q0, dq0)

    # Run the simulator for 500 steps.
    for i in range(500):
        # TODO: Implement a controller here.
        robot.send_joint_command(tau)

        # Step the simulator.
        robot.step_simulation()
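
As an illustrative alternative to the TODO above, an open-loop sinusoidal torque on the leg joints could look as follows; the amplitude, frequency, and 1 ms time step are assumptions rather than values from the original demo.

    # Open-loop sinusoid sketch (assumed amplitude, frequency, and time step).
    dt = 1.0 / 1000.0
    for i in range(500):
        t = i * dt
        tau = 0.5 * np.sin(2.0 * np.pi * 1.0 * t) * np.ones(robot.nb_dof)
        robot.send_joint_command(tau)
        robot.step_simulation()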