Example #1
def test_import_building_viewing():
    download_assets()
    download_demo_data()
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))

    s = Simulator(mode='headless')
    scene = StaticIndoorScene('Rs')
    s.import_scene(scene)
    assert s.objects == list(range(2))

    turtlebot1 = Turtlebot(config)
    turtlebot2 = Turtlebot(config)
    turtlebot3 = Turtlebot(config)

    s.import_robot(turtlebot1)
    s.import_robot(turtlebot2)
    s.import_robot(turtlebot3)

    turtlebot1.set_position([0.5, 0, 0.5])
    turtlebot2.set_position([0, 0, 0.5])
    turtlebot3.set_position([-0.5, 0, 0.5])

    for i in range(10):
        s.step()
        #turtlebot1.apply_action(np.random.randint(4))
        #turtlebot2.apply_action(np.random.randint(4))
        #turtlebot3.apply_action(np.random.randint(4))

    s.disconnect()
Example #2
    def __init__(self):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(gibson2.ig_dataset_path,
                                                     'scenes', 'Rs_int',
                                                     'layout',
                                                     'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')

        settings = MeshRendererSettings(enable_shadow=False, enable_pbr=False)

        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        scene = StaticIndoorScene('Rs')
        self.s.import_scene(scene)
        #self.s.import_ig_scene(scene)
        self.robot = Turtlebot(config)
        self.s.import_robot(self.robot)

        for _ in range(5):
            obj = YCBObject('003_cracker_box')
            self.s.import_object(obj)
            obj.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)
Example #3
def test_import_box():
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)
    print(s.objects)
    # wall = [pos, dim]
    wall = [[[0, 7, 1.01], [10, 0.2, 1]], [[0, -7, 1.01], [6.89, 0.1, 1]],
            [[7, -1.5, 1.01], [0.1, 5.5, 1]], [[-7, -1, 1.01], [0.1, 6, 1]],
            [[-8.55, 5, 1.01], [1.44, 0.1, 1]], [[8.55, 4, 1.01], [1.44, 0.1, 1]]]

    obstacles = [[[-0.5, 2, 1.01], [3.5, 0.1, 1]], [[4.5, -1, 1.01], [1.5, 0.1, 1]],
                 [[-4, -2, 1.01], [0.1, 2, 1]], [[2.5, -4, 1.01], [1.5, 0.1, 1]]]

    for i in range(len(wall)):
        curr = wall[i]
        obj = BoxShape(curr[0], curr[1])
        s.import_object(obj)

    for i in range(len(obstacles)):
        curr = obstacles[i]
        obj = BoxShape(curr[0], curr[1])
        s.import_object(obj)

    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    turtlebot1 = Turtlebot(config)
    turtlebot2 = Turtlebot(config)
    s.import_robot(turtlebot1)
    s.import_robot(turtlebot2)
    turtlebot1.set_position([6., -6., 0.])
    turtlebot2.set_position([-3., 4., 0.])

    for i in range(100):
        s.step()
    s.disconnect()
Example #4
def main():
    config = parse_config('../configs/jr2_reaching.yaml')
    s = Simulator(mode='gui', image_width=512, image_height=512)
    #scene = BuildingScene('Rs',
    #                      build_graph=True,
    #                      pybullet_load_texture=True)
    scene = StadiumScene()
    s.import_scene(scene)
    jr = JR2_Kinova_Head(config)
    #turtlebot = Turtlebot(config)
    s.import_robot(jr)

    for _ in range(10):
        obj = YCBObject('003_cracker_box')
        s.import_object(obj)
        obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3),
                                     [0, 0, 0, 1])

    for i in range(10000):
        with Profiler('Simulator step'):
            jr.apply_action([0, 0, 0.1, 0.1, 0, 0, 0, 0, 0])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb'))

    s.disconnect()
Example #5
def main():

    config = parse_config('../configs/jr2_reaching.yaml')
    s = Simulator(mode='gui', image_width=512, image_height=512)

    scene = StadiumScene()
    s.import_scene(scene)

    jr = JR2_Kinova(config)
    s.import_robot(jr)

    marker = VisualMarker(visual_shape=p.GEOM_SPHERE,
                          rgba_color=[0, 0, 1, 0.3],
                          radius=0.5)
    s.import_object(marker)
    marker.set_position([0, 4, 1])

    for i in range(10000):
        with Profiler('Simulator step'):

            jr.apply_action([0, 0, 0, 0, 0, 0, 0])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb'))

    s.disconnect()
Example #6
def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings(enable_shadow=False, msaa=False)
    s = Simulator(mode='gui',
                  image_width=256,
                  image_height=256,
                  rendering_settings=settings)

    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for _ in range(10):
        obj = YCBObject('003_cracker_box')
        s.import_object(obj)
        obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3),
                                     [0, 0, 0, 1])

    print(s.renderer.instances)

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, 0.1])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb'))
    s.disconnect()
Example #7
    def __init__(self,
                 config_file,
                 model_id=None,
                 mode='headless',
                 action_timestep=1 / 10.0,
                 physics_timestep=1 / 240.0,
                 device_idx=0):
        """
        :param config_file: config_file path
        :param model_id: override model_id in config file
        :param mode: headless or gui mode
        :param action_timestep: environment executes action per action_timestep second
        :param physics_timestep: physics timestep for pybullet
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if model_id is not None:
            self.config['model_id'] = model_id

        self.mode = mode
        self.action_timestep = action_timestep
        self.physics_timestep = physics_timestep
        self.simulator = Simulator(
            mode=mode,
            timestep=physics_timestep,
            use_fisheye=self.config.get('fisheye', False),
            image_width=self.config.get('image_width', 128),
            image_height=self.config.get('image_height', 128),
            vertical_fov=self.config.get('vertical_fov', 90),
            device_idx=device_idx,
            auto_sync=False)
        self.simulator_loop = int(self.action_timestep /
                                  self.simulator.timestep)
        self.load()
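
The constructor above belongs to a Gym-style environment class whose name the snippet does not show. The sketch below is a hypothetical usage example, assuming a class named NavEnv that exposes this __init__; it mainly illustrates how the two timesteps relate (with the defaults, each action covers 1/10 s of simulated time, executed as 24 physics steps of 1/240 s).

# Hedged usage sketch -- "NavEnv" is an assumed name for the class that owns the
# __init__ above, and the config path is illustrative.
env = NavEnv(config_file='../configs/turtlebot_demo.yaml',
             mode='headless',
             action_timestep=1 / 10.0,    # one action per 0.1 s of simulated time
             physics_timestep=1 / 240.0,  # pybullet stepped at 240 Hz
             device_idx=0)
print(env.simulator_loop)  # int((1 / 10.0) / (1 / 240.0)) -> 24 physics steps per action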
Example #8
    def __init__(self, config_file, mode='headless', device_idx=0):
        self.config = parse_config(config_file)
        self.simulator = Simulator(mode=mode,
                                   use_fisheye=self.config.get(
                                       'fisheye', False),
                                   resolution=self.config['resolution'],
                                   device_idx=device_idx)
        self.load()
Example #9
def main():
    p.connect(p.GUI)
    p.setGravity(0, 0, -9.8)
    p.setTimeStep(1. / 240.)

    floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
    p.loadMJCF(floor)

    robots = []
    config = parse_config('../configs/fetch_p2p_nav.yaml')
    fetch = Fetch(config)
    robots.append(fetch)

    config = parse_config('../configs/jr_p2p_nav.yaml')
    jr = JR2_Kinova(config)
    robots.append(jr)

    config = parse_config('../configs/locobot_p2p_nav.yaml')
    locobot = Locobot(config)
    robots.append(locobot)

    config = parse_config('../configs/turtlebot_p2p_nav.yaml')
    turtlebot = Turtlebot(config)
    robots.append(turtlebot)

    positions = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]

    for robot, position in zip(robots, positions):
        robot.load()
        robot.set_position(position)
        robot.robot_specific_reset()
        robot.keep_still()

    for _ in range(2400):  # keep still for 10 seconds
        p.stepSimulation()
        time.sleep(1. / 240.)

    for _ in range(2400):  # move with small random actions for 10 seconds
        for robot, position in zip(robots, positions):
            action = np.random.uniform(-1, 1, robot.action_dim)
            robot.apply_action(action)
        p.stepSimulation()
        time.sleep(1. / 240.0)

    p.disconnect()
Example #10
    def reload(self, config_file):
        """
        Reload another config file; this allows one to change the environment on the fly

        :param config_file: new config file path
        """
        self.config = parse_config(config_file)
        self.simulator.reload()
        self.load()
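
A brief, hedged sketch of how reload is typically called, assuming env is an already-constructed instance of one of the environment classes shown on this page; the config path is borrowed from another example here, and the reset call is hypothetical but matches the Gym-style usage of reset() further down this page.

# Assuming "env" is an existing environment instance exposing reload():
env.reload('../configs/fetch_p2p_nav.yaml')  # swap in a new config and rebuild the scene
state = env.reset()                          # hypothetical follow-up, as in Gym-style usage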
Example #11
def main():
    p.connect(p.GUI)
    p.setGravity(0,0,-9.8)
    p.setTimeStep(1./240.)

    floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
    p.loadMJCF(floor)

    #scene = BuildingScene('Placida',
    #                      is_interactive=True,
    #                      build_graph=True,
    #                      pybullet_load_texture=True)
    #scene.load()

    robots = []
    config = parse_config('../configs/jr2_reaching.yaml')
    #jr = JR2_Kinova(config)
    #robots.append(jr)

    jr_head = JR2_Kinova_Head(config)
    robots.append(jr_head)


    positions = [[0, 0, 0]]#, [0,2,0]]

    for robot, position in zip(robots, positions):
        robot.load()
        robot.set_position(position)
        robot.robot_specific_reset()
        robot.keep_still()

    marker_position = [0,4,1]
    marker = VisualMarker(visual_shape=p.GEOM_SPHERE, radius=0.1)
    marker.load()
    marker.set_position(marker_position)

    print("Wait")

    for _ in range(120):  # keep still for 0.5 seconds
        p.stepSimulation()
        time.sleep(1./240.)

    print("Move")

    action_base = np.random.uniform(0, 1, robot.wheel_dim)

    for _ in range(2400):  # move with small random actions for 10 seconds
        for robot, position in zip(robots, positions):
            action_arm = np.random.uniform(-1, 1, robot.arm_dim)
            action = np.concatenate([action_base, action_arm])
            robot.apply_action([0,0,0.2,0.2,0,0,0,0,0])
        p.stepSimulation()
        time.sleep(1./240.0)

    p.disconnect()
Example #12
def test_fetch():
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)
    config = parse_config(
        os.path.join(gibson2.root_path, '../test/test_continuous.yaml'))
    fetch = Fetch(config)
    s.import_robot(fetch)
    for i in range(100):
        fetch.apply_action(np.array([0] * 2))
        fetch.calc_state()
        s.step()
    s.disconnect()
Example #13
    def run_demo(self):
        config = parse_config(os.path.join(gibson2.assets_path, 'example_configs/turtlebot_demo.yaml'))
        s = Simulator(mode='gui', image_width=700, image_height=700)
        scene = BuildingScene('Rs_interactive', is_interactive=True)
        s.import_scene(scene)
        turtlebot = Turtlebot(config)
        s.import_robot(turtlebot)

        for i in range(10000):
            turtlebot.apply_action([0.1,0.5])
            s.step()

        s.disconnect()
Example #14
    def __init__(self,
                 config_file,
                 scene_id=None,
                 mode='headless',
                 action_timestep=1 / 10.0,
                 physics_timestep=1 / 240.0,
                 render_to_tensor=False,
                 device_idx=0):
        """
        :param config_file: config_file path
        :param scene_id: override scene_id in config file
        :param mode: headless or gui mode
        :param action_timestep: environment executes action per action_timestep second
        :param physics_timestep: physics timestep for pybullet
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if scene_id is not None:
            self.config['scene_id'] = scene_id

        self.mode = mode
        self.action_timestep = action_timestep
        self.physics_timestep = physics_timestep
        self.texture_randomization_freq = self.config.get(
            'texture_randomization_freq', None)
        self.object_randomization_freq = self.config.get(
            'object_randomization_freq', None)
        self.object_randomization_idx = 0
        self.num_object_randomization_idx = 10

        enable_shadow = self.config.get('enable_shadow', False)
        enable_pbr = self.config.get('enable_pbr', True)
        texture_scale = self.config.get('texture_scale', 1.0)

        settings = MeshRendererSettings(enable_shadow=enable_shadow,
                                        enable_pbr=enable_pbr,
                                        msaa=False,
                                        texture_scale=texture_scale)

        self.simulator = Simulator(
            mode=mode,
            physics_timestep=physics_timestep,
            render_timestep=action_timestep,
            image_width=self.config.get('image_width', 128),
            image_height=self.config.get('image_height', 128),
            vertical_fov=self.config.get('vertical_fov', 90),
            device_idx=device_idx,
            render_to_tensor=render_to_tensor,
            rendering_settings=settings)
        self.load()
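
Another hedged usage sketch: assuming this constructor belongs to the iGibsonEnv class that is instantiated near the end of this page, scene_id lets you override the scene named in the YAML config at construction time.

# Hedged sketch; the config path is illustrative and 'Rs_int' is the interactive
# scene used elsewhere on this page.
env = iGibsonEnv(config_file='../configs/turtlebot_demo.yaml',
                 scene_id='Rs_int',      # overrides the scene_id from the config file
                 mode='headless',
                 render_to_tensor=False,
                 device_idx=0)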
Example #15
    def __init__(self, robot='turtlebot', scene='Rs_int'):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(gibson2.ig_dataset_path,
                                                     'scenes', 'Rs_int',
                                                     'layout',
                                                     'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')

        scene = InteractiveIndoorScene(scene,
                                       texture_randomization=False,
                                       object_randomization=False)
        #scene._set_first_n_objects(5)
        scene.open_all_doors()

        settings = MeshRendererSettings(
            env_texture_filename=hdr_texture,
            env_texture_filename2=hdr_texture2,
            env_texture_filename3=background_texture,
            light_modulation_map_filename=light_modulation_map_filename,
            enable_shadow=True,
            msaa=True,
            light_dimming_factor=1.0,
            optimized=True)

        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        self.s.import_ig_scene(scene)

        if robot == 'turtlebot':
            self.robot = Turtlebot(config)
        else:
            self.robot = Fetch(config)

        self.s.import_robot(self.robot)

        for _ in range(5):
            obj = YCBObject('003_cracker_box')
            self.s.import_object(obj)
            obj.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)
Example #16
    def run_demo(self):
        config = parse_config(
            os.path.join(os.path.dirname(gibson2.__file__),
                         '../examples/configs/turtlebot_demo.yaml'))

        s = Simulator(mode='gui', image_width=700, image_height=700)
        scene = StaticIndoorScene('Rs', pybullet_load_texture=True)
        s.import_scene(scene)
        turtlebot = Turtlebot(config)
        s.import_robot(turtlebot)

        for i in range(1000):
            turtlebot.apply_action([0.1, 0.5])
            s.step()

        s.disconnect()
Example #17
def benchmark(render_to_tensor=False, resolution=512):
    config = parse_config('../configs/turtlebot_demo.yaml')
    s = Simulator(mode='headless', image_width=resolution, image_height=resolution, render_to_tensor=render_to_tensor)
    scene = BuildingScene('Rs',
                          build_graph=True,
                          pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    n_frame = 500
    start = time.time()
    for i in range(n_frame):
        turtlebot.apply_action([0.1,0.1])
        s.step()
        rgb = s.renderer.render_robot_cameras(modes=('rgb'))

    physics_render_elapsed = time.time() - start
    print("physics simulation + rendering rgb, resolution {}, render_to_tensor {}: {} fps".format(resolution,
                                                                                                 render_to_tensor,
     n_frame/physics_render_elapsed))


    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('rgb'))

    render_elapsed = time.time() - start
    print("Rendering rgb, resolution {}, render_to_tensor {}: {} fps".format(resolution, render_to_tensor,
        n_frame/render_elapsed))

    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('3d'))

    render_elapsed = time.time() - start
    print("Rendering 3d, resolution {}, render_to_tensor {}: {} fps".format(resolution, render_to_tensor,
                                                              n_frame / render_elapsed))

    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('normal'))

    render_elapsed = time.time() - start
    print("Rendering normal, resolution {}, render_to_tensor {}: {} fps".format(resolution, render_to_tensor,
                                                              n_frame / render_elapsed))
    s.disconnect()
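
A short usage note for the benchmark above: it assumes the 'Rs' scene data and '../configs/turtlebot_demo.yaml' used by the other examples are available; comparing the two rendering paths is then just a matter of calling it twice (render_to_tensor=True typically also requires a CUDA-enabled PyTorch build).

if __name__ == '__main__':
    # Compare numpy readback vs. rendering directly to a GPU tensor.
    benchmark(render_to_tensor=False, resolution=512)
    benchmark(render_to_tensor=True, resolution=512)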
Example #18
    def __init__(self,
                 config_file,
                 model_id=None,
                 mode='headless',
                 device_idx=0):
        """
        :param config_file: config_file path
        :param model_id: override model_id in config file
        :param mode: headless or gui mode
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if model_id is not None:
            self.config['model_id'] = model_id
        self.simulator = Simulator(
            mode=mode,
            use_fisheye=self.config.get('fisheye', False),
            resolution=self.config.get('resolution', 64),
            fov=self.config.get('fov', 90),
            device_idx=device_idx)
        self.load()
Example #19
def benchmark_rendering(scene_list, rendering_presets_list, modality_list):
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    assets_version = get_ig_assets_version()
    print('assets_version', assets_version)
    result = {}
    for scene_name in scene_list:
        for rendering_preset in rendering_presets_list:
            scene = InteractiveIndoorScene(scene_name,
                                           texture_randomization=False,
                                           object_randomization=False)
            settings = NamedRenderingPresets[rendering_preset]
            if rendering_preset == 'VISUAL_RL':
                image_width = 128
                image_height = 128
            else:
                image_width = 512
                image_height = 512
            s = Simulator(mode='headless',
                          image_width=image_width,
                          image_height=image_height,
                          device_idx=0,
                          rendering_settings=settings,
                          physics_timestep=1 / 240.0)
            s.import_ig_scene(scene)
            turtlebot = Turtlebot(config)
            s.import_robot(turtlebot)

            for mode in modality_list:
                for _ in range(10):
                    s.step()
                    _ = s.renderer.render_robot_cameras(modes=(mode))
                start = time.time()
                for _ in range(200):
                    _ = s.renderer.render_robot_cameras(modes=(mode))
                end = time.time()
                fps = 200 / (end - start)
                result[(scene_name, rendering_preset, mode)] = fps
            s.disconnect()
    return result
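
A hedged usage sketch for benchmark_rendering: the scene name, preset, and modalities below all appear in other snippets on this page, but the exact keys available in NamedRenderingPresets depend on the installed version.

if __name__ == '__main__':
    # Values taken from other snippets on this page; adjust to the presets and
    # scenes actually available in your installation.
    result = benchmark_rendering(scene_list=['Rs_int'],
                                 rendering_presets_list=['VISUAL_RL'],
                                 modality_list=['rgb', '3d', 'normal'])
    for (scene_name, preset, mode), fps in result.items():
        print('{} / {} / {}: {:.1f} fps'.format(scene_name, preset, mode, fps))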
Example #20
def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    s = Simulator(mode='gui', image_width=512, image_height=512)
    scene = BuildingScene('Rs',
                          build_graph=True,
                          pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for _ in range(10):
        obj = YCBObject('003_cracker_box')
        obj.load()
        obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3), [0,0,0,1])

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1,0.1])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb'))

    s.disconnect()
Example #21
def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings()
    s = Simulator(mode='gui',
                  image_width=256,
                  image_height=256,
                  rendering_settings=settings)

    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, -0.1])
            s.step()
            lidar = s.renderer.get_lidar_all()
            print(lidar.shape)
            # TODO: visualize lidar scan

    s.disconnect()
Example #22
import yaml
from gibson2.core.physics.robot_locomotors import JR2_Kinova
from gibson2.core.simulator import Simulator
from gibson2.core.physics.scene import BuildingScene, StadiumScene
from gibson2.utils.utils import parse_config
from gibson2.core.physics.interactive_objects import InteractiveObj
import gibson2
import os
import numpy as np

config = parse_config('test_interactive_nav.yaml')


def test_jr2():
    s = Simulator(mode='gui')
    try:
        scene = BuildingScene('Ohoopee')
        s.import_scene(scene)
        jr2 = JR2_Kinova(config)
        s.import_robot(jr2)
        jr2.set_position([-6, 0, 0.1])
        obj3 = InteractiveObj(os.path.join(gibson2.assets_path, 'models',
                                           'scene_components', 'door.urdf'),
                              scale=2)
        s.import_interactive_object(obj3)
        obj3.set_position_rotation(
            [-5, -1, 0], [0, 0, np.sqrt(0.5), np.sqrt(0.5)])
        jr2.apply_action([0.005, 0.005, 0, 0, 0, 0, 0, 0, 0, 0])
        for _ in range(400):
            s.step()
Example #23
from gibson2.core.simulator import Simulator
from gibson2.core.physics.scene import BuildingScene, StadiumScene
from gibson2.core.physics.robot_locomotors import Turtlebot, Husky, Ant, Humanoid, JR2, JR2_Kinova
import yaml
from gibson2.utils.utils import parse_config
import os
import gibson2

config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))


def test_import_building():
    s = Simulator(mode='headless')
    scene = BuildingScene('Ohoopee')
    s.import_scene(scene, texture_scale=0.4)
    for i in range(15):
        s.step()
    assert s.objects == list(range(2))
    s.disconnect()


def test_import_building_big():
    s = Simulator(mode='headless')
    scene = BuildingScene('Ohoopee')
    s.import_scene(scene, texture_scale=1)
    assert s.objects == list(range(2))
    s.disconnect()


def test_import_stadium():
    s = Simulator(mode='headless')
Example #24
import yaml
from gibson2.core.physics.robot_locomotors import Turtlebot, Husky, Ant, Humanoid, JR2, JR2_Kinova
from gibson2.core.simulator import Simulator
from gibson2.core.physics.scene import BuildingScene, StadiumScene
from gibson2.utils.utils import parse_config
from gibson2.core.physics.interactive_objects import InteractiveObj
from gibson2.envs.locomotor_env import NavigateEnv, NavigateRandomEnv, InteractiveNavigateEnv
import os
import gibson2

config = parse_config('test.yaml')


def test_jr2():
    config_filename = os.path.join(
        os.path.dirname(gibson2.__file__),
        '../examples/configs/jr_interactive_nav.yaml')
    nav_env = InteractiveNavigateEnv(config_file=config_filename,
                                     mode='gui',
                                     action_timestep=1.0 / 10.0,
                                     physics_timestep=1 / 40.0)
    try:
        nav_env.reset()
        for i in range(10):  # 300 steps, 30s world time
            action = nav_env.action_space.sample()
            state, reward, done, _ = nav_env.step(action)
            if done:
                print('Episode finished after {} timesteps'.format(i + 1))
    finally:
        nav_env.clean()
Example #25
    def reload(self, config_file):
        self.config = parse_config(config_file)
        self.simulator.reload()
        self.load()
Example #26
import yaml
from gibson2.core.physics.robot_locomotors import Turtlebot
from gibson2.core.simulator import Simulator
from gibson2.core.physics.scene import BuildingScene, StadiumScene
from gibson2.utils.utils import parse_config
import pytest
import pybullet as p
import numpy as np
from gibson2.core.render.profiler import Profiler

config = parse_config('../configs/turtlebot_p2p_nav.yaml')

s = Simulator(mode='gui', resolution=512, render_to_tensor=False)
scene = BuildingScene('Bolton')
s.import_scene(scene)
turtlebot = Turtlebot(config)
s.import_robot(turtlebot)

p.resetBasePositionAndOrientation(scene.ground_plane_mjcf[0],
                                  posObj=[0, 0, -10],
                                  ornObj=[0, 0, 0, 1])

for i in range(2000):
    with Profiler('simulator step'):
        turtlebot.apply_action([0.1, 0.1])
        s.step()
        rgb = s.renderer.render_robot_cameras(modes=('rgb'))

s.disconnect()
Example #27
torch.no_grad()  # note: as written this line has no effect; gradient tracking is only disabled inside a "with torch.no_grad():" block
model = SelfMonitoring(**policy_model_kwargs).cuda()
encoder = EncoderRNN(**encoder_kwargs).cuda()
params = list(encoder.parameters()) + list(model.parameters())
optimizer = torch.optim.Adam(params, lr=opts.learning_rate)
resume_training(opts, model, encoder, optimizer)
model.eval()
# model.device = torch.device("cpu")
encoder.eval()
# encoder.device = torch.device("cpu")
resnet = models.resnet152(pretrained=True)
resnet.eval()
resnet.cuda()

# Gibson setup
config = parse_config('ped.yaml')

def transform_img(im):
    ''' Prep gibson rgb input for pytorch model '''
    # RGB pixel mean - from feature precomputing script
    im = im[60:540, :, :3].copy()
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    input_tensor = preprocess(im)
    blob = np.zeros((1, 3, im.shape[0], im.shape[1]), dtype=np.float32)
    blob[0, :, :, :] = input_tensor
    return blob
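
A small sanity-check sketch for transform_img: it assumes an input frame at least 540 pixels tall (the function keeps rows 60:540) and simply verifies the shape of the blob that would be fed to the ResNet; numpy is already imported as np in this snippet's context.

# Feed a dummy 600x800 RGB frame through transform_img and inspect the result.
dummy_frame = np.zeros((600, 800, 3), dtype=np.uint8)
blob = transform_img(dummy_frame)
print(blob.shape)  # expected: (1, 3, 480, 800) after the 60:540 row crop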

Example #28
def main():
    config = parse_config('../configs/fetch_p2p_nav.yaml')
    s = Simulator(mode='gui', physics_timestep=1 / 240.0)
    scene = EmptyScene()
    s.import_scene(scene)
    fetch = Fetch(config)
    s.import_robot(fetch)

    robot_id = fetch.robot_ids[0]

    arm_joints = joints_from_names(robot_id, [
        'torso_lift_joint', 'shoulder_pan_joint', 'shoulder_lift_joint',
        'upperarm_roll_joint', 'elbow_flex_joint', 'forearm_roll_joint',
        'wrist_flex_joint', 'wrist_roll_joint'
    ])
    #finger_joints = joints_from_names(robot_id, ['l_gripper_finger_joint', 'r_gripper_finger_joint'])
    fetch.robot_body.reset_position([0, 0, 0])
    fetch.robot_body.reset_orientation([0, 0, 1, 0])
    x, y, z = fetch.get_end_effector_position()
    #set_joint_positions(robot_id, finger_joints, [0.04,0.04])
    print(x, y, z)

    visual_marker = p.createVisualShape(p.GEOM_SPHERE, radius=0.02)
    marker = p.createMultiBody(baseVisualShapeIndex=visual_marker)

    #max_limits = [0,0] + get_max_limits(robot_id, arm_joints) + [0.05,0.05]
    #min_limits = [0,0] + get_min_limits(robot_id, arm_joints) + [0,0]
    #rest_position = [0,0] + list(get_joint_positions(robot_id, arm_joints)) + [0.04,0.04]
    max_limits = [0, 0] + get_max_limits(robot_id, arm_joints)
    min_limits = [0, 0] + get_min_limits(robot_id, arm_joints)
    rest_position = [0, 0] + list(get_joint_positions(robot_id, arm_joints))
    joint_range = list(np.array(max_limits) - np.array(min_limits))
    joint_range = [item + 1 for item in joint_range]
    jd = [0.1 for item in joint_range]
    print(max_limits)
    print(min_limits)

    def accurateCalculateInverseKinematics(robotid, endEffectorId, targetPos,
                                           threshold, maxIter):
        sample_fn = get_sample_fn(robotid, arm_joints)
        set_joint_positions(robotid, arm_joints, sample_fn())
        it = 0
        while it < maxIter:
            jointPoses = p.calculateInverseKinematics(robotid,
                                                      endEffectorId,
                                                      targetPos,
                                                      lowerLimits=min_limits,
                                                      upperLimits=max_limits,
                                                      jointRanges=joint_range,
                                                      restPoses=rest_position,
                                                      jointDamping=jd)
            set_joint_positions(robotid, arm_joints, jointPoses[2:10])
            ls = p.getLinkState(robotid, endEffectorId)
            newPos = ls[4]

            dist = np.linalg.norm(np.array(targetPos) - np.array(newPos))
            if dist < threshold:
                break

            it += 1

        print("Num iter: " + str(it) + ", threshold: " + str(dist))
        return jointPoses

    while True:
        with Profiler("Simulation step"):
            fetch.robot_body.reset_position([0, 0, 0])
            fetch.robot_body.reset_orientation([0, 0, 1, 0])
            threshold = 0.01
            maxIter = 100
            joint_pos = accurateCalculateInverseKinematics(
                robot_id, fetch.parts['gripper_link'].body_part_index,
                [x, y, z], threshold, maxIter)[2:10]

            #set_joint_positions(robot_id, finger_joints, [0.04, 0.04])
            s.step()
            keys = p.getKeyboardEvents()
            for k, v in keys.items():
                if (k == p.B3G_RIGHT_ARROW and (v & p.KEY_IS_DOWN)):
                    x += 0.01
                if (k == p.B3G_LEFT_ARROW and (v & p.KEY_IS_DOWN)):
                    x -= 0.01
                if (k == p.B3G_UP_ARROW and (v & p.KEY_IS_DOWN)):
                    y += 0.01
                if (k == p.B3G_DOWN_ARROW and (v & p.KEY_IS_DOWN)):
                    y -= 0.01
                if (k == ord('z') and (v & p.KEY_IS_DOWN)):
                    z += 0.01
                if (k == ord('x') and (v & p.KEY_IS_DOWN)):
                    z -= 0.01
            p.resetBasePositionAndOrientation(marker, [x, y, z], [0, 0, 0, 1])

    s.disconnect()
Example #29
    def __init__(self,
                 config_file,
                 model_id=None,
                 mode='headless',
                 action_timestep=1 / 10.0,
                 physics_timestep=1 / 240.0,
                 automatic_reset=False,
                 device_idx=0,
                 render_to_tensor=False):
        """
        :param config_file: config_file path
        :param model_id: override model_id in config file
        :param mode: headless or gui mode
        :param action_timestep: environment executes action per action_timestep second
        :param physics_timestep: physics timestep for pybullet
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if model_id is not None:
            self.config['model_id'] = model_id

        # gibson external camera
        self.initial_pos = np.array([0, 1.3, 4])
        self.initial_view_direction = np.array([0, 0, -1])
        self.up = np.array([0, 1, 0])

        # output res for RL
        self.out_image_height = 84
        self.out_image_width = 84

        # class: stadium=0, robot=1, drawer=2, handle=3
        self.num_object_classes = 4
        self.automatic_reset = automatic_reset
        self.mode = mode
        self.action_timestep = action_timestep
        self.physics_timestep = physics_timestep
        self.simulator = Simulator(
            mode=mode,
            gravity=0,
            timestep=physics_timestep,
            use_fisheye=self.config.get('fisheye', False),
            image_width=self.config.get('image_width', 128),
            image_height=self.config.get('image_height', 128),
            vertical_fov=self.config.get('vertical_fov', 90),
            device_idx=device_idx,
            render_to_tensor=render_to_tensor,
            auto_sync=False)
        self.simulator_loop = int(self.action_timestep /
                                  self.simulator.timestep)
        self.load()
        self.hand_start_pos = [0.05, 1, 1.05]
        self.hand_start_orn = p.getQuaternionFromEuler([-np.pi / 2, 0, np.pi])
        self.set_robot_pos_orn(self.robots[0], self.hand_start_pos,
                               self.hand_start_orn)

        # load drawer
        self.drawer_start_pos = [0, 2.2, 1]
        self.drawer_start_orn = p.getQuaternionFromEuler([0, 0, -np.pi / 2])
        self.handle_idx = 3
        self.drawer = InteractiveObj(
            os.path.join(gibson2.assets_path, 'models', 'drawer',
                         'drawer_one_sided_handle.urdf'))
        self.import_drawer_handle()
        self.drawer.set_position_orientation(self.drawer_start_pos,
                                             self.drawer_start_orn)
        self.handle_init_pos = p.getLinkState(self.drawer_id,
                                              self.handle_idx)[0][1]
        self.simulator.sync()
        self._state_id = p.saveState()
Example #30


###################################################################################################################
###################################################PRINT GRID######################################################
###################################################################################################################
def printGrid(grid):
    for i in range(0, len(grid)):
        print(grid[i])


#################################################################################################
#################################Environment Definitions#########################################
#################################################################################################
config_filename = parse_config(
    os.path.join(gibson2.example_config_path, 'fetch_motion_planning.yaml'))
nav_env = iGibsonEnv(config_file=config_filename,
                     mode='iggui',
                     action_timestep=1.0 / 120.0,
                     physics_timestep=1.0 / 120.0)
motion_planner = MotionPlanningWrapper(nav_env)
goal = [1, -0.2, 0]
n = 21
grid = [[0] * n for _ in range(0, n)]
printGrid(grid)
m = 10
XY = []
for i in range(0, 21):
    o = (i - m) / 5
    XY.append(o)
print(XY)