Example #1
def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings(enable_shadow=False, msaa=False)
    s = Simulator(mode='gui',
                  image_width=256,
                  image_height=256,
                  rendering_settings=settings)

    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for _ in range(10):
        obj = YCBObject('003_cracker_box')
        s.import_object(obj)
        obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3),
                                     [0, 0, 0, 1])

    print(s.renderer.instances)

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, 0.1])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb',))  # one-element tuple; ('rgb') is just the string 'rgb'
    s.disconnect()
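The snippets on this page are excerpted without their import headers. A plausible preamble for Example #1, assuming iGibson 1.x (the gibson2 package); the exact module paths are assumptions that may vary across releases:

import numpy as np

from gibson2.objects.ycb_object import YCBObject
from gibson2.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from gibson2.render.profiler import Profiler
from gibson2.robots.turtlebot_robot import Turtlebot
from gibson2.scenes.gibson_indoor_scene import StaticIndoorScene
from gibson2.simulator import Simulator
from gibson2.utils.utils import parse_config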
Example #2
    def __init__(self):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(gibson2.ig_dataset_path,
                                                     'scenes', 'Rs_int',
                                                     'layout',
                                                     'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')

        # NOTE: the HDR and background texture paths above are unused here,
        # since PBR is disabled in the renderer settings
        settings = MeshRendererSettings(enable_shadow=False, enable_pbr=False)

        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        scene = StaticIndoorScene('Rs')
        self.s.import_scene(scene)
        #self.s.import_ig_scene(scene)
        self.robot = Turtlebot(config)
        self.s.import_robot(self.robot)

        for _ in range(5):
            obj = YCBObject('003_cracker_box')
            self.s.import_object(obj)
            obj.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)
Example #3
def test_turtlebot_position():
    # `config` is assumed to be defined at module scope via parse_config(...),
    # as in Example #11
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    turtlebot.set_position([0, 0, 5])

    nbody = p.getNumBodies()
    pos = turtlebot.get_position()
    s.disconnect()
    assert nbody == 5  # 4 bodies from the stadium scene + 1 Turtlebot
    assert np.allclose(pos, np.array([0, 0, 5]))
Example #4
    def run_demo(self):
        config = parse_config(
            os.path.join(os.path.dirname(gibson2.__file__),
                         '../examples/configs/turtlebot_demo.yaml'))

        s = Simulator(mode='gui', image_width=700, image_height=700)
        scene = StaticIndoorScene('Rs', pybullet_load_texture=True)
        s.import_scene(scene)
        turtlebot = Turtlebot(config)
        s.import_robot(turtlebot)

        for i in range(1000):
            turtlebot.apply_action([0.1, 0.5])
            s.step()

        s.disconnect()
Example #5
def test_turtlebot():
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)
    nbody = p.getNumBodies()
    s.disconnect()
    assert nbody == 5
Example #6
def show_action_sensor_space():
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)

    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)
    turtlebot.set_position([0, 1, 0.5])

    ant = Ant(config)
    s.import_robot(ant)
    ant.set_position([0, 2, 0.5])

    h = Humanoid(config)
    s.import_robot(h)
    h.set_position([0, 3, 2])

    jr = JR2(config)
    s.import_robot(jr)
    jr.set_position([0, 4, 0.5])

    jr2 = JR2_Kinova(config)
    s.import_robot(jr2)
    jr2.set_position([0, 5, 0.5])

    husky = Husky(config)
    s.import_robot(husky)
    husky.set_position([0, 6, 0.5])

    quad = Quadrotor(config)
    s.import_robot(quad)
    quad.set_position([0, 7, 0.5])

    for robot in s.robots:
        print(type(robot), len(robot.ordered_joints), robot.calc_state().shape)

    for i in range(100):
        s.step()

    s.disconnect()
Example #7
    def load_dynamic_objects(self, env):
        """
        Load dynamic objects (Turtlebots)

        :param env: environment instance
        :return: a list of interactive objects
        """
        dynamic_objects = []
        for _ in range(self.num_dynamic_objects):
            robot = Turtlebot(self.config)
            env.simulator.import_robot(robot)
            dynamic_objects.append(robot)
        return dynamic_objects
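A hedged driver for this loader; the names task and env are assumptions (task being an instance of the class that defines load_dynamic_objects, with config and num_dynamic_objects set), and the random-action call mirrors Example #10:

dynamic_objects = task.load_dynamic_objects(env)
for robot in dynamic_objects:
    # drive each Turtlebot with a small random action, as in Example #10
    robot.apply_action(np.random.uniform(-1, 1, robot.action_dim))
env.simulator.step()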
Example #8
def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings()
    s = Simulator(mode='gui',
                  image_width=256,
                  image_height=256,
                  rendering_settings=settings)

    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, -0.1])
            s.step()
            lidar = s.renderer.get_lidar_all()
            print(lidar.shape)
            # TODO: visualize lidar scan

    s.disconnect()
Example #9
    def __init__(self, robot='turtlebot', scene='Rs_int'):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(gibson2.ig_dataset_path,
                                                     'scenes', 'Rs_int',
                                                     'layout',
                                                     'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')

        scene = InteractiveIndoorScene(scene,
                                       texture_randomization=False,
                                       object_randomization=False)
        #scene._set_first_n_objects(5)
        scene.open_all_doors()

        settings = MeshRendererSettings(
            env_texture_filename=hdr_texture,
            env_texture_filename2=hdr_texture2,
            env_texture_filename3=background_texture,
            light_modulation_map_filename=light_modulation_map_filename,
            enable_shadow=True,
            msaa=True,
            light_dimming_factor=1.0,
            optimized=True)

        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        self.s.import_ig_scene(scene)

        if robot == 'turtlebot':
            self.robot = Turtlebot(config)
        else:
            self.robot = Fetch(config)

        self.s.import_robot(self.robot)

        for _ in range(5):
            obj = YCBObject('003_cracker_box')
            self.s.import_object(obj)
            obj.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)
Example #10
def main():
    p.connect(p.GUI)
    p.setGravity(0, 0, -9.8)
    p.setTimeStep(1. / 240.)

    floor = os.path.join(pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
    p.loadMJCF(floor)

    robots = []
    config = parse_config('../configs/fetch_reaching.yaml')
    fetch = Fetch(config)
    robots.append(fetch)

    config = parse_config('../configs/jr_reaching.yaml')
    jr = JR2_Kinova(config)
    robots.append(jr)

    config = parse_config('../configs/locobot_point_nav.yaml')
    locobot = Locobot(config)
    robots.append(locobot)

    config = parse_config('../configs/turtlebot_point_nav.yaml')
    turtlebot = Turtlebot(config)
    robots.append(turtlebot)

    positions = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]

    for robot, position in zip(robots, positions):
        robot.load()
        robot.set_position(position)
        robot.robot_specific_reset()
        robot.keep_still()

    for _ in range(2400):  # keep still for 10 seconds
        p.stepSimulation()
        time.sleep(1. / 240.)

    for _ in range(2400):  # move with small random actions for 10 seconds
        for robot, position in zip(robots, positions):
            action = np.random.uniform(-1, 1, robot.action_dim)
            robot.apply_action(action)
        p.stepSimulation()
        time.sleep(1. / 240.0)

    p.disconnect()
Example #11
def test_import_box():
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)
    print(s.objects)
    # wall = [pos, dim]
    wall = [[[0, 7, 1.01], [10, 0.2, 1]],
            [[0, -7, 1.01], [6.89, 0.1, 1]],
            [[7, -1.5, 1.01], [0.1, 5.5, 1]],
            [[-7, -1, 1.01], [0.1, 6, 1]],
            [[-8.55, 5, 1.01], [1.44, 0.1, 1]],
            [[8.55, 4, 1.01], [1.44, 0.1, 1]]]

    obstacles = [[[-0.5, 2, 1.01], [3.5, 0.1, 1]],
                 [[4.5, -1, 1.01], [1.5, 0.1, 1]],
                 [[-4, -2, 1.01], [0.1, 2, 1]],
                 [[2.5, -4, 1.01], [1.5, 0.1, 1]]]

    for pos, dim in wall:
        s.import_object(Cube(pos, dim))

    for pos, dim in obstacles:
        s.import_object(Cube(pos, dim))

    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    turtlebot1 = Turtlebot(config)
    turtlebot2 = Turtlebot(config)
    s.import_robot(turtlebot1)
    s.import_robot(turtlebot2)
    turtlebot1.set_position([6., -6., 0.])
    turtlebot2.set_position([-3., 4., 0.])

    for i in range(100):
        s.step()
    s.disconnect()
Example #12
def benchmark_rendering(scene_list, rendering_presets_list, modality_list):
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    assets_version = get_ig_assets_version()
    print('assets_version', assets_version)
    result = {}
    for scene_name in scene_list:
        for rendering_preset in rendering_presets_list:
            scene = InteractiveIndoorScene(scene_name,
                                           texture_randomization=False,
                                           object_randomization=False)
            settings = NamedRenderingPresets[rendering_preset]
            if rendering_preset == 'VISUAL_RL':
                image_width = 128
                image_height = 128
            else:
                image_width = 512
                image_height = 512
            s = Simulator(mode='headless',
                          image_width=image_width,
                          image_height=image_height,
                          device_idx=0,
                          rendering_settings=settings,
                          physics_timestep=1 / 240.0)
            s.import_ig_scene(scene)
            turtlebot = Turtlebot(config)
            s.import_robot(turtlebot)

            for mode in modality_list:
                # warm-up frames before timing
                for _ in range(10):
                    s.step()
                    _ = s.renderer.render_robot_cameras(modes=(mode,))
                start = time.time()
                for _ in range(200):
                    _ = s.renderer.render_robot_cameras(modes=(mode,))
                end = time.time()
                fps = 200 / (end - start)
                result[(scene_name, rendering_preset, mode)] = fps
            s.disconnect()
    return result
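A usage sketch for this benchmark; 'Rs_int' and the 'VISUAL_RL' preset both appear elsewhere on this page, while any other NamedRenderingPresets keys are install-specific assumptions:

result = benchmark_rendering(scene_list=['Rs_int'],
                             rendering_presets_list=['VISUAL_RL'],
                             modality_list=['rgb', '3d'])
for (scene_name, preset, mode), fps in result.items():
    print('{} / {} / {}: {:.1f} fps'.format(scene_name, preset, mode, fps))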
Example #13
def test_multiagent():
    # `config` is assumed to be defined at module scope via parse_config(...)
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)
    turtlebot1 = Turtlebot(config)
    turtlebot2 = Turtlebot(config)
    turtlebot3 = Turtlebot(config)

    s.import_robot(turtlebot1)
    s.import_robot(turtlebot2)
    s.import_robot(turtlebot3)

    turtlebot1.set_position([1, 0, 0.5])
    turtlebot2.set_position([0, 0, 0.5])
    turtlebot3.set_position([-1, 0, 0.5])

    nbody = p.getNumBodies()
    for i in range(100):
        s.step()

    s.disconnect()
    assert nbody == 7  # 4 bodies from the stadium scene + 3 Turtlebots
Example #14
def benchmark(render_to_tensor=False, resolution=512):
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    s = Simulator(mode='headless',
                  image_width=resolution,
                  image_height=resolution,
                  render_to_tensor=render_to_tensor)
    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    n_frame = 500
    start = time.time()
    for i in range(n_frame):
        turtlebot.apply_action([0.1, 0.1])
        s.step()
        rgb = s.renderer.render_robot_cameras(modes=('rgb',))

    physics_render_elapsed = time.time() - start
    physics_render_fps = n_frame / physics_render_elapsed
    print(
        "physics simulation + rendering rgb, resolution {}, render_to_tensor {}: {} fps"
        .format(resolution, render_to_tensor, physics_render_fps))

    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('rgb',))

    render_elapsed = time.time() - start
    rgb_fps = n_frame / render_elapsed
    print("Rendering rgb, resolution {}, render_to_tensor {}: {} fps".format(
        resolution, render_to_tensor, n_frame / render_elapsed))

    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('3d',))

    render_elapsed = time.time() - start
    pc_fps = n_frame / render_elapsed
    print("Rendering 3d, resolution {}, render_to_tensor {}: {} fps".format(
        resolution, render_to_tensor, n_frame / render_elapsed))

    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('normal',))

    render_elapsed = time.time() - start
    normal_fps = n_frame / render_elapsed
    print(
        "Rendering normal, resolution {}, render_to_tensor {}: {} fps".format(
            resolution, render_to_tensor, n_frame / render_elapsed))
    plt.figure()
    plt.bar([0, 1, 2, 3], [physics_render_fps, rgb_fps, pc_fps, normal_fps],
            color='g')
    plt.xticks([0, 1, 2, 3], ['sim+render', 'rgb', '3d', 'normal'])
    plt.ylabel('fps')
    plt.xlabel('mode')
    plt.title('Static Scene Benchmark, resolution {}, to_tensor {}'.format(
        resolution, render_to_tensor))
    plt.savefig('static_scene_benchmark_res{}_tensor{}.pdf'.format(
        resolution, render_to_tensor))

    s.disconnect()
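A sketch of how this benchmark might be swept across its two knobs; render_to_tensor=True assumes a build with GPU-tensor rendering support:

for to_tensor in (False, True):        # numpy readback vs. GPU-tensor rendering
    for resolution in (256, 512):
        benchmark(render_to_tensor=to_tensor, resolution=resolution)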
Example #15
    def load(self):
        """
        Load the scene and robot
        """
        if self.config['scene'] == 'empty':
            scene = EmptyScene()
            self.simulator.import_scene(scene,
                                        load_texture=self.config.get(
                                            'load_texture', True))
        elif self.config['scene'] == 'stadium':
            scene = StadiumScene()
            self.simulator.import_scene(scene,
                                        load_texture=self.config.get(
                                            'load_texture', True))
        elif self.config['scene'] == 'gibson':
            scene = StaticIndoorScene(
                self.config['scene_id'],
                waypoint_resolution=self.config.get('waypoint_resolution',
                                                    0.2),
                num_waypoints=self.config.get('num_waypoints', 10),
                build_graph=self.config.get('build_graph', False),
                trav_map_resolution=self.config.get('trav_map_resolution',
                                                    0.1),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                pybullet_load_texture=self.config.get('pybullet_load_texture',
                                                      False),
            )
            self.simulator.import_scene(scene,
                                        load_texture=self.config.get(
                                            'load_texture', True))
        elif self.config['scene'] == 'igibson':
            scene = InteractiveIndoorScene(
                self.config['scene_id'],
                waypoint_resolution=self.config.get('waypoint_resolution',
                                                    0.2),
                num_waypoints=self.config.get('num_waypoints', 10),
                build_graph=self.config.get('build_graph', False),
                trav_map_resolution=self.config.get('trav_map_resolution',
                                                    0.1),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                trav_map_type=self.config.get('trav_map_type', 'with_obj'),
                pybullet_load_texture=self.config.get('pybullet_load_texture',
                                                      False),
                texture_randomization=self.texture_randomization_freq
                is not None,
                object_randomization=self.object_randomization_freq
                is not None,
                object_randomization_idx=self.object_randomization_idx,
                should_open_all_doors=self.config.get('should_open_all_doors',
                                                      False),
                load_object_categories=self.config.get(
                    'load_object_categories', None),
                load_room_types=self.config.get('load_room_types', None),
                load_room_instances=self.config.get('load_room_instances',
                                                    None),
            )
            # TODO: Unify the function import_scene and take out of the if-else clauses
            first_n = self.config.get('_set_first_n_objects', -1)
            if first_n != -1:
                scene._set_first_n_objects(first_n)
            self.simulator.import_ig_scene(scene)

        if self.config['robot'] == 'Turtlebot':
            robot = Turtlebot(self.config)
        elif self.config['robot'] == 'Husky':
            robot = Husky(self.config)
        elif self.config['robot'] == 'Ant':
            robot = Ant(self.config)
        elif self.config['robot'] == 'Humanoid':
            robot = Humanoid(self.config)
        elif self.config['robot'] == 'JR2':
            robot = JR2(self.config)
        elif self.config['robot'] == 'JR2_Kinova':
            robot = JR2_Kinova(self.config)
        elif self.config['robot'] == 'Freight':
            robot = Freight(self.config)
        elif self.config['robot'] == 'Fetch':
            robot = Fetch(self.config)
        elif self.config['robot'] == 'Locobot':
            robot = Locobot(self.config)
        else:
            raise Exception('unknown robot type: {}'.format(
                self.config['robot']))

        self.scene = scene
        self.robots = [robot]
        for robot in self.robots:
            self.simulator.import_robot(robot)
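All of the branching above is driven by keys in self.config. A minimal dict covering the 'igibson' scene and 'Turtlebot' robot path (values are illustrative, not canonical defaults):

config = {
    'scene': 'igibson',          # selects the InteractiveIndoorScene branch
    'scene_id': 'Rs_int',
    'build_graph': True,
    'trav_map_resolution': 0.1,
    'trav_map_erosion': 2,
    'trav_map_type': 'with_obj',
    'should_open_all_doors': False,
    'robot': 'Turtlebot',        # selects the robot class
}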
Example #16
def main():
    # robot configuration file path
    config = parse_config(
        os.path.join(gibson2.example_config_path, 'turtlebot_demo.yaml'))
    #settings = MeshRendererSettings()  # renderer settings object

    # generating the simulator object (headless: simulating without a GUI)
    s = Simulator(
        mode='headless',
        # image_width=256,   # robot camera pixel width
        # image_height=256,  # robot camera pixel height
        # vertical_fov=40,   # robot camera vertical field of view in degrees
        # rendering_settings=settings,
    )

    # generating the scene object:
    # Benevolence, floor 1, with interactive objects ('int'). Benevolence is
    # picked on purpose as it is the most memory-friendly scene in the
    # iGibson dataset.
    scene = InteractiveIndoorScene(
        'Benevolence_1_int',
        build_graph=True,            # build the connectivity graph over the face-down traversability map (floor plan)
        waypoint_resolution=0.1,     # radial distance between two consecutive waypoints (10 cm)
        pybullet_load_texture=True)  # include texture and material properties (needed for object interaction)

    s.import_ig_scene(scene)       # load the scene into the simulator
    turtlebot = Turtlebot(config)  # generate the robot object
    s.import_robot(turtlebot)      # load the robot into the simulator

    # initial base position [X, Y, Z] in meters (the base is the robot's main
    # body; its links and joints have their own positions and orientations)
    init_pos = turtlebot.get_position()
    # initial Euler angles of the robot base [roll, pitch, yaw] in radians
    init_or = turtlebot.get_rpy()

    # sampling a random goal state in a desired room of the apartment
    np.random.seed(0)
    goal = scene.get_random_point_by_room_type('living_room')[1]  # random point in the living room

    # generate the "entire" A* path between the initial and goal nodes
    rnd_path = scene.get_shortest_path(
        0, init_pos[0:2], goal[0:2], entire_path=True)[0]

    # fit a Bezier curve through the A* waypoints and sample 2000 points from
    # it; this smoothing step defines smt_path (see Example #19)
    path_nodes = np.asfortranarray(
        [rnd_path[:, 0].tolist(), rnd_path[:, 1].tolist()])
    smt_path = bezier.Curve.from_nodes(path_nodes).evaluate_multi(
        np.linspace(0, 1, 2000))

    for i in range(len(smt_path[0]) - 1):
        # the iGibson simulation loop requires this context manager
        with Profiler('Simulator step'):
            # probe RGB data; rgbd or even optical flow can also be probed if
            # the robot has that modality in its config (.yaml) file
            rgb_camera = np.array(
                s.renderer.render_robot_cameras(modes=('rgb',)))
            plt.imshow(rgb_camera[0, :, :, :3])  # display the sampled RGB channels
            lidar = s.renderer.get_lidar_all()   # probe 360-degree lidar data

            # direction vector between two consecutive waypoints
            delta_pos = smt_path[:, i + 1] - smt_path[:, i]
            # yaw angle of the robot base while following the sampled Bezier path
            delta_yaw = np.arctan2(delta_pos[1], delta_pos[0])
            # transform the robot base Euler angles to a quaternion
            delta_qua = e2q(init_or[0], init_or[1], delta_yaw)
            # set the robot base position and orientation
            turtlebot.set_position_orientation(
                [smt_path[0, i], smt_path[1, i], init_pos[2]], delta_qua)
            s.step()  # advance one step in simulation time

    s.disconnect()
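The helper e2q used here (and in Example #19) is not defined in the snippet. A plausible definition, assuming the transforms3d package and gibson2's quatToXYZW utility, both commonly seen in iGibson demos:

from transforms3d.euler import euler2quat
from gibson2.utils.utils import quatToXYZW

def e2q(roll, pitch, yaw):
    # euler2quat returns a WXYZ quaternion; pybullet and iGibson expect XYZW
    return quatToXYZW(euler2quat(roll, pitch, yaw), 'wxyz')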
Example #17
def benchmark_scene(scene_name, optimized=False, import_robot=True):
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    assets_version = get_ig_assets_version()
    print('assets_version', assets_version)
    scene = InteractiveIndoorScene(scene_name,
                                   texture_randomization=False,
                                   object_randomization=False)
    settings = MeshRendererSettings(msaa=False,
                                    enable_shadow=False,
                                    optimized=optimized)
    s = Simulator(
        mode='headless',
        image_width=512,
        image_height=512,
        device_idx=0,
        rendering_settings=settings,
    )
    s.import_ig_scene(scene)
    if import_robot:
        turtlebot = Turtlebot(config)
        s.import_robot(turtlebot)

    s.renderer.use_pbr(use_pbr=True, use_pbr_mapping=True)
    fps = []
    physics_fps = []
    render_fps = []
    obj_awake = []
    for i in range(2000):
        # if i % 100 == 0:
        #     scene.randomize_texture()
        start = time.time()
        s.step()
        if import_robot:
            # apply random actions
            turtlebot.apply_action(turtlebot.action_space.sample())
        physics_end = time.time()
        if import_robot:
            _ = s.renderer.render_robot_cameras(modes=('rgb',))
        else:
            _ = s.renderer.render(modes=('rgb',))
        end = time.time()

        #print("Elapsed time: ", end - start)
        print("Render Frequency: ", 1 / (end - physics_end))
        print("Physics Frequency: ", 1 / (physics_end - start))
        print("Step Frequency: ", 1 / (end - start))
        fps.append(1 / (end - start))
        physics_fps.append(1 / (physics_end - start))
        render_fps.append(1 / (end - physics_end))
        obj_awake.append(s.body_links_awake)
    s.disconnect()
    plt.figure(figsize=(7, 25))

    ax = plt.subplot(6, 1, 1)
    plt.hist(render_fps)
    ax.set_xlabel('Render fps')
    ax.set_title(
        'Scene {} version {}\noptimized {} num_obj {}\n import_robot {}'.
        format(scene_name, assets_version, optimized, scene.get_num_objects(),
               import_robot))
    ax = plt.subplot(6, 1, 2)
    plt.hist(physics_fps)
    ax.set_xlabel('Physics fps')
    ax = plt.subplot(6, 1, 3)
    plt.hist(fps)
    ax.set_xlabel('Step fps')
    ax = plt.subplot(6, 1, 4)
    plt.plot(render_fps)
    ax.set_xlabel('Render fps with time')
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 5)
    plt.plot(physics_fps)
    ax.set_xlabel('Physics fps with time, converge to {}'.format(
        np.mean(physics_fps[-100:])))
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 6)
    plt.plot(obj_awake)
    ax.set_xlabel('Num object links awake, converge to {}'.format(
        np.mean(obj_awake[-100:])))

    plt.savefig('scene_benchmark_{}_o_{}_r_{}.pdf'.format(
        scene_name, optimized, import_robot))
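A sketch invocation that compares the optimized renderer against the default on the interactive 'Rs_int' scene used elsewhere on this page:

for optimized in (False, True):
    benchmark_scene('Rs_int', optimized=optimized, import_robot=True)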
Example #18
def test_import_building_viewing():
    download_assets()
    download_demo_data()
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))

    s = Simulator(mode='headless')
    scene = StaticIndoorScene('Rs')
    s.import_scene(scene)
    assert s.objects == list(range(2))

    turtlebot1 = Turtlebot(config)
    turtlebot2 = Turtlebot(config)
    turtlebot3 = Turtlebot(config)

    s.import_robot(turtlebot1)
    s.import_robot(turtlebot2)
    s.import_robot(turtlebot3)

    turtlebot1.set_position([0.5, 0, 0.5])
    turtlebot2.set_position([0, 0, 0.5])
    turtlebot3.set_position([-0.5, 0, 0.5])

    for i in range(10):
        s.step()
        #turtlebot1.apply_action(np.random.randint(4))
        #turtlebot2.apply_action(np.random.randint(4))
        #turtlebot3.apply_action(np.random.randint(4))

    s.disconnect()
Example #19
def main():
    # robot configuration file path
    config = parse_config(
        os.path.join(gibson2.example_config_path, 'turtlebot_demo.yaml'))
    settings = MeshRendererSettings()  # renderer settings object

    # generating the simulator object; switch mode to 'gui' to simulate with a GUI
    s = Simulator(mode='headless',
                  image_width=64,    # robot camera pixel width
                  image_height=48,   # robot camera pixel height
                  vertical_fov=75,   # robot camera vertical field of view in degrees
                  rendering_settings=settings)

    # generating the scene object:
    # Benevolence, floor 1, with interactive objects ('int'). Benevolence is
    # picked on purpose as it is the most memory-friendly scene in the
    # iGibson dataset.
    scene = InteractiveIndoorScene(
        'Benevolence_1_int',
        build_graph=True,            # build the connectivity graph over the face-down traversability map (floor plan)
        waypoint_resolution=0.1,     # radial distance between two consecutive waypoints (10 cm)
        trav_map_resolution=0.1,
        trav_map_erosion=2,
        should_open_all_doors=True,
        pybullet_load_texture=True)  # include texture and material properties (needed for object interaction)

    s.import_ig_scene(scene)       # load the scene into the simulator
    turtlebot = Turtlebot(config)  # generate the robot object
    s.import_robot(turtlebot)      # load the robot into the simulator

    # initial base position [X, Y, Z] in meters (the base is the robot's main
    # body; its links and joints have their own positions and orientations)
    init_pos = turtlebot.get_position()
    # initial Euler angles of the robot base [roll, pitch, yaw] in radians
    init_or = turtlebot.get_rpy()

    # sampling random goal states in a desired room of the apartment
    np.random.seed(0)

    encoder = keras.models.load_model('./CVAE_encoder')
    decoder = keras.models.load_model('./CVAE_decoder')

    for j in range(30):
        # sampling random points in the living room
        goal1 = scene.get_random_point_by_room_type('living_room')[1]
        goal2 = scene.get_random_point_by_room_type('living_room')[1]
        goal3 = scene.get_random_point_by_room_type('living_room')[1]
        goal4 = init_pos

        # generate the "entire" A* path through the initial, sub-goal, and
        # terminal goal nodes
        path1 = scene.get_shortest_path(0, init_pos[0:2], goal1[0:2], entire_path=True)[0]
        path2 = scene.get_shortest_path(0, goal1[0:2], goal2[0:2], entire_path=True)[0]
        path3 = scene.get_shortest_path(0, goal2[0:2], goal3[0:2], entire_path=True)[0]
        path4 = scene.get_shortest_path(0, goal3[0:2], goal4[0:2], entire_path=True)[0]

        rnd_path = np.append(path1, path2[1:], axis=0)
        rnd_path = np.append(rnd_path, path3[1:], axis=0)
        rnd_path = np.append(rnd_path, path4[1:], axis=0)

        # fit a Bezier curve through the A* waypoints and sample 2000 points from that curve
        path_nodes = np.asfortranarray(
            [rnd_path[:, 0].tolist(), rnd_path[:, 1].tolist()])
        smt_crv = bezier.Curve.from_nodes(path_nodes)
        s_vals = np.linspace(0, 1, 2000)
        smt_path = smt_crv.evaluate_multi(s_vals)

        # correcting the initial orientation of the robot
        delta_pos = smt_path[:, 1] - smt_path[:, 0]         # direction vector between two consecutive waypoints
        delta_yaw = np.arctan2(delta_pos[1], delta_pos[0])  # yaw angle of the robot base along the sampled Bezier path
        delta_qua = e2q(init_or[0], init_or[1], delta_yaw)  # robot base Euler angles to quaternion
        turtlebot.set_position_orientation(
            [smt_path[0, 0], smt_path[1, 0], init_pos[2]], delta_qua)

        # path of the new episode folder
        episode_path = os.getcwd() + '/episodes/episode_' + str(j).zfill(2)
        #os.makedirs(episode_path)  # create a new folder for the episode

        # file path for writing the ground-truth path out
        gtrth_pth_str = episode_path + '/episode_' + str(j).zfill(2) + 'ground_truth_path.csv'
        #np.savetxt(gtrth_pth_str, np.atleast_2d(smt_path).T, delimiter=",")
        for i in range(len(smt_path[0]) - 1):
            # the iGibson simulation loop requires this context manager
            with Profiler('Simulator step'):
                # probe RGB data; rgbd or even optical flow can also be probed
                # if the robot has that modality in its config (.yaml) file
                rgb_camera = np.array(
                    s.renderer.render_robot_cameras(modes=('rgb',)))
                # take a single channel of the RGB probe as the CVAE input
                robot_img = rgb_camera[0, :, :, 0]
                encoded = encoder.predict(np.expand_dims(robot_img, axis=0))
                decoded = decoder.predict(encoded[0])
                robot_img = decoded[0, :, :, 0]
                # PIL.Image.fromarray() expects uint8; if the decoder outputs
                # floats in [0, 1], scale by 255 before this cast
                robot_img = robot_img.astype('uint8')
                robot_img = Image.fromarray(robot_img)
                img_str = './iGib_recons' + '/episode_' + str(j).zfill(2) + \
                    '_frame_' + str(i).zfill(5) + '.jpeg'
                robot_img.save(img_str)
                #lidar = s.renderer.get_lidar_all()  # probe 360-degree lidar data

                # direction vector between two consecutive waypoints
                delta_pos = smt_path[:, i + 1] - smt_path[:, i]
                # yaw angle of the robot base along the sampled Bezier path
                delta_yaw = np.arctan2(delta_pos[1], delta_pos[0])
                # transform the robot base Euler angles to a quaternion
                delta_qua = e2q(init_or[0], init_or[1], delta_yaw)
                turtlebot.set_position_orientation(
                    [smt_path[0, i], smt_path[1, i], init_pos[2]], delta_qua)
                # velocity commands from an external PID path-tracking helper
                # (PID_path_track is not defined in this snippet)
                velcmd1, velcmd2 = PID_path_track(delta_pos, delta_qua)
                turtlebot.set_motor_velocity([velcmd1, velcmd2])

                s.step()  # advance one step in simulation time

    s.disconnect()