Example #1
def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings(enable_shadow=False, msaa=False)
    s = Simulator(mode='gui',
                  image_width=256,
                  image_height=256,
                  rendering_settings=settings)

    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for _ in range(10):
        obj = YCBObject('003_cracker_box')
        s.import_object(obj)
        obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3),
                                     [0, 0, 0, 1])

    print(s.renderer.instances)

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, 0.1])
            s.step()
            # note the trailing comma: modes expects a tuple of mode names
            rgb = s.renderer.render_robot_cameras(modes=('rgb',))
    s.disconnect()
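The snippets on this page omit their import statements. A typical header, assuming the gibson2 (iGibson 1.0) package layout, is sketched below; exact module paths may differ between releases.

# Imports assumed by the examples on this page (gibson2 / iGibson 1.0 layout);
# adjust the module paths to match your installed version.
import os
import time

import matplotlib.pyplot as plt
import numpy as np

import gibson2
from gibson2.objects.ycb_object import YCBObject
from gibson2.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from gibson2.render.profiler import Profiler
from gibson2.robots.turtlebot_robot import Turtlebot
from gibson2.scenes.gibson_indoor_scene import StaticIndoorScene
from gibson2.scenes.igibson_indoor_scene import InteractiveIndoorScene
from gibson2.simulator import Simulator
from gibson2.utils.assets_utils import get_ig_assets_version
from gibson2.utils.utils import parse_config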
Example #2
    def run_demo(self):
        config = parse_config(
            os.path.join(os.path.dirname(gibson2.__file__),
                         '../examples/configs/turtlebot_demo.yaml'))

        s = Simulator(mode='gui', image_width=700, image_height=700)
        scene = StaticIndoorScene('Rs', pybullet_load_texture=True)
        s.import_scene(scene)
        turtlebot = Turtlebot(config)
        s.import_robot(turtlebot)

        for i in range(1000):
            turtlebot.apply_action([0.1, 0.5])
            s.step()

        s.disconnect()
Example #3
class ToyEnv(object):
    def __init__(self):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(gibson2.ig_dataset_path,
                                                     'scenes', 'Rs_int',
                                                     'layout',
                                                     'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')

        settings = MeshRendererSettings(enable_shadow=False, enable_pbr=False)

        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        scene = StaticIndoorScene('Rs')
        self.s.import_scene(scene)
        #self.s.import_ig_scene(scene)
        self.robot = Turtlebot(config)
        self.s.import_robot(self.robot)

        for _ in range(5):
            obj = YCBObject('003_cracker_box')
            self.s.import_object(obj)
            obj.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)

    def step(self, a):
        self.robot.apply_action(a)
        self.s.step()
        frame = self.s.renderer.render_robot_cameras(modes=('rgb',))[0]
        return frame

    def close(self):
        self.s.disconnect()
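A minimal driver for the ToyEnv sketch above might look like the following; the wheel action and step count are placeholders rather than values taken from the original example.

# Hypothetical driver loop for ToyEnv; action values and step count are placeholders.
if __name__ == '__main__':
    env = ToyEnv()
    for _ in range(100):
        frame = env.step([0.1, 0.1])  # RGB frame from the robot camera
        print(frame.shape)
    env.close()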
Example #4
def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings()
    s = Simulator(mode='gui',
                  image_width=256,
                  image_height=256,
                  rendering_settings=settings)

    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, -0.1])
            s.step()
            lidar = s.renderer.get_lidar_all()
            print(lidar.shape)
            # TODO: visualize lidar scan

    s.disconnect()
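One way to fill in the "visualize lidar scan" TODO above, assuming get_lidar_all() returns an (N, 3) array of points, is a simple 3D scatter plot:

# Hypothetical lidar visualization; assumes lidar is an (N, 3) numpy array of points.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the 3d projection

def visualize_lidar(lidar):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(lidar[:, 0], lidar[:, 1], lidar[:, 2], s=1)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    plt.show()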
Example #5
def benchmark_scene(scene_name, optimized=False, import_robot=True):
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    assets_version = get_ig_assets_version()
    print('assets_version', assets_version)
    scene = InteractiveIndoorScene(scene_name,
                                   texture_randomization=False,
                                   object_randomization=False)
    settings = MeshRendererSettings(msaa=False,
                                    enable_shadow=False,
                                    optimized=optimized)
    s = Simulator(
        mode='headless',
        image_width=512,
        image_height=512,
        device_idx=0,
        rendering_settings=settings,
    )
    s.import_ig_scene(scene)
    if import_robot:
        turtlebot = Turtlebot(config)
        s.import_robot(turtlebot)

    s.renderer.use_pbr(use_pbr=True, use_pbr_mapping=True)
    fps = []
    physics_fps = []
    render_fps = []
    obj_awake = []
    for i in range(2000):
        # if i % 100 == 0:
        #     scene.randomize_texture()
        start = time.time()
        s.step()
        if import_robot:
            # apply random actions
            turtlebot.apply_action(turtlebot.action_space.sample())
        physics_end = time.time()
        if import_robot:
            _ = s.renderer.render_robot_cameras(modes=('rgb',))
        else:
            _ = s.renderer.render(modes=('rgb',))
        end = time.time()

        #print("Elapsed time: ", end - start)
        print("Render Frequency: ", 1 / (end - physics_end))
        print("Physics Frequency: ", 1 / (physics_end - start))
        print("Step Frequency: ", 1 / (end - start))
        fps.append(1 / (end - start))
        physics_fps.append(1 / (physics_end - start))
        render_fps.append(1 / (end - physics_end))
        obj_awake.append(s.body_links_awake)
    s.disconnect()
    plt.figure(figsize=(7, 25))

    ax = plt.subplot(6, 1, 1)
    plt.hist(render_fps)
    ax.set_xlabel('Render fps')
    ax.set_title(
        'Scene {} version {}\noptimized {} num_obj {}\n import_robot {}'.
        format(scene_name, assets_version, optimized, scene.get_num_objects(),
               import_robot))
    ax = plt.subplot(6, 1, 2)
    plt.hist(physics_fps)
    ax.set_xlabel('Physics fps')
    ax = plt.subplot(6, 1, 3)
    plt.hist(fps)
    ax.set_xlabel('Step fps')
    ax = plt.subplot(6, 1, 4)
    plt.plot(render_fps)
    ax.set_xlabel('Render fps with time')
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 5)
    plt.plot(physics_fps)
    ax.set_xlabel('Physics fps with time, converge to {}'.format(
        np.mean(physics_fps[-100:])))
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 6)
    plt.plot(obj_awake)
    ax.set_xlabel('Num object links awake, converge to {}'.format(
        np.mean(obj_awake[-100:])))

    plt.savefig('scene_benchmark_{}_o_{}_r_{}.pdf'.format(
        scene_name, optimized, import_robot))
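A possible invocation of the benchmark above; 'Rs_int' is assumed to be one of the available interactive iGibson scenes.

# Illustrative invocation; the scene name is an assumption.
if __name__ == '__main__':
    benchmark_scene('Rs_int', optimized=True, import_robot=True)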
Example #6
def benchmark(render_to_tensor=False, resolution=512):
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    s = Simulator(mode='headless',
                  image_width=resolution,
                  image_height=resolution,
                  render_to_tensor=render_to_tensor)
    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    n_frame = 500
    start = time.time()
    for i in range(n_frame):
        turtlebot.apply_action([0.1, 0.1])
        s.step()
        rgb = s.renderer.render_robot_cameras(modes=('rgb',))

    physics_render_elapsed = time.time() - start
    physics_render_fps = n_frame / physics_render_elapsed
    print(
        "physics simulation + rendering rgb, resolution {}, render_to_tensor {}: {} fps"
        .format(resolution, render_to_tensor, physics_render_fps))

    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('rgb',))

    render_elapsed = time.time() - start
    rgb_fps = n_frame / render_elapsed
    print("Rendering rgb, resolution {}, render_to_tensor {}: {} fps".format(
        resolution, render_to_tensor, n_frame / render_elapsed))

    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('3d',))

    render_elapsed = time.time() - start
    pc_fps = n_frame / render_elapsed
    print("Rendering 3d, resolution {}, render_to_tensor {}: {} fps".format(
        resolution, render_to_tensor, n_frame / render_elapsed))

    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('normal',))

    render_elapsed = time.time() - start
    normal_fps = n_frame / render_elapsed
    print(
        "Rendering normal, resolution {}, render_to_tensor {}: {} fps".format(
            resolution, render_to_tensor, n_frame / render_elapsed))
    plt.figure()
    plt.bar([0, 1, 2, 3], [physics_render_fps, rgb_fps, pc_fps, normal_fps],
            color='g')
    plt.xticks([0, 1, 2, 3], ['sim+render', 'rgb', '3d', 'normal'])
    plt.ylabel('fps')
    plt.xlabel('mode')
    plt.title('Static Scene Benchmark, resolution {}, to_tensor {}'.format(
        resolution, render_to_tensor))
    plt.savefig('static_scene_benchmark_res{}_tensor{}.pdf'.format(
        resolution, render_to_tensor))

    s.disconnect()
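An illustrative invocation of the static-scene benchmark; the parameter values are examples, and render_to_tensor=True would additionally require the GPU tensor rendering path to be available.

# Illustrative invocation; parameter values are examples, not taken from the source.
if __name__ == '__main__':
    benchmark(render_to_tensor=False, resolution=512)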