# Example 1
def test_import_igsdf(scene_name, scene_source):
    """Import an interactive scene with full PBR lighting and benchmark it.

    Runs 3000 simulator steps in the interactive GUI, printing the per-step
    frequency, records a pybullet profiling trace over the last 500 steps,
    and finally plots the collected step frequencies.
    """
    # Lighting probes and backdrop shared by all scenes.
    bg_dir = os.path.join(gibson2.ig_dataset_path, 'scenes', 'background')
    hdr_texture = os.path.join(bg_dir, 'probe_02.hdr')
    hdr_texture2 = os.path.join(bg_dir, 'probe_03.hdr')
    background_texture = os.path.join(bg_dir, 'urban_street_01.jpg')

    # Resolve the scene directory according to the dataset it comes from.
    if scene_source == "IG":
        scene_dir = get_ig_scene_path(scene_name)
    elif scene_source == "CUBICASA":
        scene_dir = get_cubicasa_scene_path(scene_name)
    else:
        scene_dir = get_3dfront_scene_path(scene_name)

    light_modulation_map_filename = os.path.join(
        scene_dir, 'layout', 'floor_lighttype_0.png')

    scene = InteractiveIndoorScene(scene_name,
                                   texture_randomization=False,
                                   object_randomization=False,
                                   scene_source=scene_source)

    settings = MeshRendererSettings(
        env_texture_filename=hdr_texture,
        env_texture_filename2=hdr_texture2,
        env_texture_filename3=background_texture,
        light_modulation_map_filename=light_modulation_map_filename,
        enable_shadow=True,
        msaa=True,
        light_dimming_factor=1.0)
    s = Simulator(mode='iggui',
                  image_width=960,
                  image_height=720,
                  device_idx=0,
                  rendering_settings=settings)

    s.import_ig_scene(scene)

    # Park the viewer at a random navigable point, at fixed eye height.
    np.random.seed(0)
    _, (px, py, pz) = scene.get_random_point()
    s.viewer.px = px
    s.viewer.py = py
    s.viewer.pz = 1.7
    s.viewer.update()

    step_freqs = []
    for step_idx in range(3000):
        if step_idx == 2500:
            # Profile only the tail of the run, once everything is warmed up.
            logId = p.startStateLogging(
                loggingType=p.STATE_LOGGING_PROFILE_TIMINGS,
                fileName='trace_beechwood')
        t0 = time.time()
        s.step()
        t1 = time.time()
        print("Elapsed time: ", t1 - t0)
        print("Frequency: ", 1 / (t1 - t0))
        step_freqs.append(1 / (t1 - t0))
    p.stopStateLogging(logId)
    s.disconnect()
    print("end")

    plt.plot(step_freqs)
    plt.show()
def main():
    """Texture randomization demo: re-randomize Rs_int materials every 1000 steps."""
    sim = Simulator(mode='gui', image_width=512,
                    image_height=512, device_idx=0)
    rs_scene = InteractiveIndoorScene(
        'Rs_int', texture_randomization=True, object_randomization=False)
    sim.import_ig_scene(rs_scene)

    for step in range(10000):
        if step % 1000 == 0:
            rs_scene.randomize_texture()
        sim.step()
    sim.disconnect()
# Example 3
def main():
    """Partial-scene demo: load only the chairs of the living room of Rs_int."""
    sim = Simulator(mode='gui', image_width=512, image_height=512, device_idx=0)
    partial_scene = InteractiveIndoorScene(
        'Rs_int',
        texture_randomization=False,
        object_randomization=False,
        load_object_categories=['chair'],
        load_room_types=['living_room'])
    sim.import_ig_scene(partial_scene)

    for _ in range(1000):
        sim.step()
    sim.disconnect()
def main():
    """Object randomization demo: reload Rs_int with ten different object sets."""
    sim = Simulator(mode='gui', image_width=512, image_height=512, device_idx=0)

    for seed in range(10):
        randomized_scene = InteractiveIndoorScene(
            'Rs_int',
            texture_randomization=False,
            object_randomization=True,
            object_randomization_idx=seed)
        sim.import_ig_scene(randomized_scene)
        for _ in range(1000):
            sim.step()
        # Reset the simulator so the next randomized scene starts clean.
        sim.reload()

    sim.disconnect()
def main():
    """Sample and print ten random navigable points inside the living room."""
    sim = Simulator(mode='gui', image_width=512,
                    image_height=512, device_idx=0)
    rs_scene = InteractiveIndoorScene(
        'Rs_int', texture_randomization=False, object_randomization=False)
    sim.import_ig_scene(rs_scene)

    np.random.seed(0)
    for _ in range(10):
        point = rs_scene.get_random_point_by_room_type('living_room')[1]
        print('random point in living_room', point)

    for _ in range(1000):
        sim.step()
    sim.disconnect()
# Example 6
def generate_trav_map(scene_name, scene_source, load_full_scene=True):
    """Generate traversability and obstacle maps for one scene.

    :param scene_name: name of the scene
    :param scene_source: which dataset the scene comes from (member of SCENE_SOURCE)
    :param load_full_scene: if False, only the first 3 objects are loaded and
        the output map filenames get a 'no_obj' infix
    :raises ValueError: if scene_source is not a supported dataset
    """
    if scene_source not in SCENE_SOURCE:
        raise ValueError('Unsupported scene source: {}'.format(scene_source))

    # Resolve the on-disk scene directory for the chosen dataset.
    if scene_source == "IG":
        scene_dir = get_ig_scene_path(scene_name)
    elif scene_source == "CUBICASA":
        scene_dir = get_cubicasa_scene_path(scene_name)
    else:
        scene_dir = get_3dfront_scene_path(scene_name)

    random.seed(0)
    scene = InteractiveIndoorScene(scene_name,
                                   build_graph=False,
                                   texture_randomization=False,
                                   scene_source=scene_source)
    if not load_full_scene:
        scene._set_first_n_objects(3)

    sim = Simulator(mode='headless',
                    image_width=512,
                    image_height=512,
                    device_idx=0)
    sim.import_ig_scene(scene)

    if load_full_scene:
        scene.open_all_doors()

    # Let physics settle before dumping the scene geometry.
    for _ in range(20):
        sim.step()

    vertices_info, faces_info = sim.renderer.dump()
    sim.disconnect()

    # Full-scene maps and object-free maps use different filename patterns.
    infix = '' if load_full_scene else 'no_obj_'
    trav_map_filename_format = 'floor_trav_' + infix + '{}.png'
    obstacle_map_filename_format = 'floor_' + infix + '{}.png'

    gen_trav_map(vertices_info,
                 faces_info,
                 output_folder=os.path.join(scene_dir, 'layout'),
                 trav_map_filename_format=trav_map_filename_format,
                 obstacle_map_filename_format=obstacle_map_filename_format)
# Example 7
def main():
    """Load an iGSDF scene named on the command line and step it with profiling."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--scene',
                        type=str,
                        help='Name of the scene in the iG Dataset')
    cli_args = parser.parse_args()

    render_settings = MeshRendererSettings(enable_shadow=True, msaa=False)
    sim = Simulator(mode='gui',
                    image_width=256,
                    image_height=256,
                    rendering_settings=render_settings)

    sim.import_ig_scene(iGSDFScene(cli_args.scene))

    for _ in range(10000):
        with Profiler('Simulator step'):
            sim.step()
    sim.disconnect()
def test_import_igsdf():
    """Headless smoke test: import Rs_int with PBR enabled and time ten steps."""
    rs_scene = InteractiveIndoorScene('Rs_int',
                                      texture_randomization=False,
                                      object_randomization=False)
    sim = Simulator(mode='headless',
                    image_width=512,
                    image_height=512,
                    device_idx=0)
    sim.import_ig_scene(rs_scene)

    sim.renderer.use_pbr(use_pbr=True, use_pbr_mapping=True)
    for _ in range(10):
        t0 = time.time()
        sim.step()
        t1 = time.time()
        print("Elapsed time: ", t1 - t0)
        print("Frequency: ", 1 / (t1 - t0))

    sim.disconnect()
def benchmark_rendering(scene_list, rendering_presets_list, modality_list):
    """Benchmark rendering fps for every (scene, preset, modality) combination.

    :return: dict mapping (scene_name, preset_name, modality) -> frames/sec
    """
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    assets_version = get_ig_assets_version()
    print('assets_version', assets_version)

    result = {}
    for scene_name in scene_list:
        for preset_name in rendering_presets_list:
            scene = InteractiveIndoorScene(scene_name,
                                           texture_randomization=False,
                                           object_randomization=False)
            preset_settings = NamedRenderingPresets[preset_name]
            # The visual-RL preset benchmarks at its (smaller) training resolution.
            resolution = 128 if preset_name == 'VISUAL_RL' else 512
            sim = Simulator(mode='headless',
                            image_width=resolution,
                            image_height=resolution,
                            device_idx=0,
                            rendering_settings=preset_settings,
                            physics_timestep=1 / 240.0)
            sim.import_ig_scene(scene)
            turtlebot = Turtlebot(config)
            sim.import_robot(turtlebot)

            for modality in modality_list:
                # Warm up physics and the render pipeline before timing.
                for _ in range(10):
                    sim.step()
                    _ = sim.renderer.render_robot_cameras(modes=modality)
                t0 = time.time()
                for _ in range(200):
                    _ = sim.renderer.render_robot_cameras(modes=modality)
                elapsed = time.time() - t0
                result[(scene_name, preset_name, modality)] = 200 / elapsed
            sim.disconnect()
    return result
# Example 10
class ToyEnvInt(object):
    """Minimal interactive-scene environment wrapping a Simulator and a robot.

    Loads an interactive scene with full PBR lighting, imports a Turtlebot
    or Fetch robot, and drops five cracker boxes at random positions.
    """

    def __init__(self, robot='turtlebot', scene='Rs_int'):
        config = parse_config('../../configs/turtlebot_demo.yaml')

        # Lighting probes and backdrop for the PBR renderer.
        bg_dir = os.path.join(gibson2.ig_dataset_path, 'scenes', 'background')
        hdr_texture = os.path.join(bg_dir, 'probe_02.hdr')
        hdr_texture2 = os.path.join(bg_dir, 'probe_03.hdr')
        background_texture = os.path.join(bg_dir, 'urban_street_01.jpg')
        # NOTE: the light modulation map is always taken from Rs_int,
        # regardless of which scene is actually loaded.
        light_modulation_map_filename = os.path.join(gibson2.ig_dataset_path,
                                                     'scenes', 'Rs_int',
                                                     'layout',
                                                     'floor_lighttype_0.png')

        scene = InteractiveIndoorScene(scene,
                                       texture_randomization=False,
                                       object_randomization=False)
        scene.open_all_doors()

        settings = MeshRendererSettings(
            env_texture_filename=hdr_texture,
            env_texture_filename2=hdr_texture2,
            env_texture_filename3=background_texture,
            light_modulation_map_filename=light_modulation_map_filename,
            enable_shadow=True,
            msaa=True,
            light_dimming_factor=1.0,
            optimized=True)

        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        self.s.import_ig_scene(scene)

        self.robot = Turtlebot(config) if robot == 'turtlebot' else Fetch(config)
        self.s.import_robot(self.robot)

        # Drop five cracker boxes at random spots in a 2 m cube.
        for _ in range(5):
            box = YCBObject('003_cracker_box')
            self.s.import_object(box)
            box.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)

    def step(self, a):
        """Apply the two-component action `a`, step physics, return an RGB frame."""
        action = np.zeros(self.robot.action_space.shape)
        # The two controlled action slots are filled in opposite order
        # for non-Turtlebot (Fetch) robots.
        if isinstance(self.robot, Turtlebot):
            action[0], action[1] = a[0], a[1]
        else:
            action[1], action[0] = a[0], a[1]

        self.robot.apply_action(action)
        self.s.step()
        return self.s.renderer.render_robot_cameras(modes=('rgb'))[0]

    def close(self):
        """Disconnect the underlying simulator."""
        self.s.disconnect()
# Example 11
#################################################################################

###start the gui###
#p.connect(p.GUI)

######################initialize gravity and time###############################
#p.setGravity(0,0,-9.8)
#p.setTimeStep(1./360.)

############################load a scene#######################################
############To load a Interactive Indoor scene using a simulator###############
# Create the simulator with an on-screen GUI at 512x512 render resolution.
s = Simulator(mode='gui', image_width=512, image_height=512)
# Load the fully interactive 'Rs_int' scene with fixed (non-randomized)
# textures and objects, then hand it to the simulator.
scene = InteractiveIndoorScene('Rs_int',
                               texture_randomization=False,
                               object_randomization=False)
s.import_ig_scene(scene)

################################################################################
##we keep importing things to the simulator s##################
################################################################################
#scene=InteractiveIndoorScene()
#scene=StadiumScene()
#scene.load()

#######################loading a floor#########################################
#print(pybullet_data.getDataPath())
#floor=os.path.join(pybullet_data.getDataPath(),"mjcf/ground_plane.xml")
#p.loadMJCF(floor)

######################loading objects##########################################
# Example 12
class BaseEnv(gym.Env):
    '''
    Base Env class, follows OpenAI Gym interface
    Handles loading scene and robot
    Functions like reset and step are not implemented
    '''
    def __init__(self,
                 config_file,
                 scene_id=None,
                 mode='headless',
                 action_timestep=1 / 10.0,
                 physics_timestep=1 / 240.0,
                 render_to_tensor=False,
                 device_idx=0):
        """
        :param config_file: config_file path
        :param scene_id: override scene_id in config file
        :param mode: headless or gui mode
        :param action_timestep: environment executes action per action_timestep second
        :param physics_timestep: physics timestep for pybullet
        :param render_to_tensor: whether to render directly to a GPU tensor
        :param device_idx: device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if scene_id is not None:
            self.config['scene_id'] = scene_id

        self.mode = mode
        self.action_timestep = action_timestep
        self.physics_timestep = physics_timestep
        self.texture_randomization_freq = self.config.get(
            'texture_randomization_freq', None)
        self.object_randomization_freq = self.config.get(
            'object_randomization_freq', None)
        self.object_randomization_idx = 0
        self.num_object_randomization_idx = 10

        enable_shadow = self.config.get('enable_shadow', False)
        enable_pbr = self.config.get('enable_pbr', True)
        texture_scale = self.config.get('texture_scale', 1.0)

        settings = MeshRendererSettings(enable_shadow=enable_shadow,
                                        enable_pbr=enable_pbr,
                                        msaa=False,
                                        texture_scale=texture_scale)

        self.simulator = Simulator(
            mode=mode,
            physics_timestep=physics_timestep,
            render_timestep=action_timestep,
            image_width=self.config.get('image_width', 128),
            image_height=self.config.get('image_height', 128),
            vertical_fov=self.config.get('vertical_fov', 90),
            device_idx=device_idx,
            render_to_tensor=render_to_tensor,
            rendering_settings=settings)
        self.load()

    def reload(self, config_file):
        """
        Reload another config file
        This allows one to change the configuration on the fly

        :param config_file: new config file path
        """
        self.config = parse_config(config_file)
        self.simulator.reload()
        self.load()

    def reload_model(self, scene_id):
        """
        Reload another scene model
        This allows one to change the scene on the fly

        :param scene_id: new scene_id
        """
        self.config['scene_id'] = scene_id
        self.simulator.reload()
        self.load()

    def reload_model_object_randomization(self):
        """
        Reload the same model, with the next object randomization random seed
        """
        if self.object_randomization_freq is None:
            return
        self.object_randomization_idx = (self.object_randomization_idx + 1) % \
            (self.num_object_randomization_idx)
        self.simulator.reload()
        self.load()

    def get_next_scene_random_seed(self):
        """
        Get the next scene random seed
        """
        # NOTE(review): self.scene_random_seeds / self.scene_random_seed_idx
        # are never initialized in this class — confirm that subclasses set
        # them before calling this method.
        if self.object_randomization_freq is None:
            return None
        return self.scene_random_seeds[self.scene_random_seed_idx]

    def load(self):
        """
        Load the scene and robot
        """
        if self.config['scene'] == 'empty':
            scene = EmptyScene()
            self.simulator.import_scene(scene,
                                        load_texture=self.config.get(
                                            'load_texture', True))
        elif self.config['scene'] == 'stadium':
            scene = StadiumScene()
            self.simulator.import_scene(scene,
                                        load_texture=self.config.get(
                                            'load_texture', True))
        elif self.config['scene'] == 'gibson':
            scene = StaticIndoorScene(
                self.config['scene_id'],
                waypoint_resolution=self.config.get('waypoint_resolution',
                                                    0.2),
                num_waypoints=self.config.get('num_waypoints', 10),
                build_graph=self.config.get('build_graph', False),
                trav_map_resolution=self.config.get('trav_map_resolution',
                                                    0.1),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                pybullet_load_texture=self.config.get('pybullet_load_texture',
                                                      False),
            )
            self.simulator.import_scene(scene,
                                        load_texture=self.config.get(
                                            'load_texture', True))
        elif self.config['scene'] == 'igibson':
            scene = InteractiveIndoorScene(
                self.config['scene_id'],
                waypoint_resolution=self.config.get('waypoint_resolution',
                                                    0.2),
                num_waypoints=self.config.get('num_waypoints', 10),
                build_graph=self.config.get('build_graph', False),
                trav_map_resolution=self.config.get('trav_map_resolution',
                                                    0.1),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                trav_map_type=self.config.get('trav_map_type', 'with_obj'),
                pybullet_load_texture=self.config.get('pybullet_load_texture',
                                                      False),
                texture_randomization=self.texture_randomization_freq
                is not None,
                object_randomization=self.object_randomization_freq
                is not None,
                object_randomization_idx=self.object_randomization_idx,
                should_open_all_doors=self.config.get('should_open_all_doors',
                                                      False),
                load_object_categories=self.config.get(
                    'load_object_categories', None),
                load_room_types=self.config.get('load_room_types', None),
                load_room_instances=self.config.get('load_room_instances',
                                                    None),
            )
            # TODO: Unify the function import_scene and take out of the if-else clauses
            first_n = self.config.get('_set_first_n_objects', -1)
            if first_n != -1:
                scene._set_first_n_objects(first_n)
            self.simulator.import_ig_scene(scene)
        else:
            # Fail fast on an unrecognized scene type instead of letting
            # `self.scene = scene` below raise a confusing NameError.
            raise Exception('unknown scene type: {}'.format(
                self.config['scene']))

        if self.config['robot'] == 'Turtlebot':
            robot = Turtlebot(self.config)
        elif self.config['robot'] == 'Husky':
            robot = Husky(self.config)
        elif self.config['robot'] == 'Ant':
            robot = Ant(self.config)
        elif self.config['robot'] == 'Humanoid':
            robot = Humanoid(self.config)
        elif self.config['robot'] == 'JR2':
            robot = JR2(self.config)
        elif self.config['robot'] == 'JR2_Kinova':
            robot = JR2_Kinova(self.config)
        elif self.config['robot'] == 'Freight':
            robot = Freight(self.config)
        elif self.config['robot'] == 'Fetch':
            robot = Fetch(self.config)
        elif self.config['robot'] == 'Locobot':
            robot = Locobot(self.config)
        else:
            raise Exception('unknown robot type: {}'.format(
                self.config['robot']))

        self.scene = scene
        self.robots = [robot]
        for robot in self.robots:
            self.simulator.import_robot(robot)

    def clean(self):
        """
        Clean up
        """
        if self.simulator is not None:
            self.simulator.disconnect()

    def close(self):
        """
        Synonymous function with clean
        """
        self.clean()

    def simulator_step(self):
        """
        Step the simulation.
        This is different from environment step that returns the next
        observation, reward, done, info.
        """
        self.simulator.step()

    def step(self, action):
        """
        Overwritten by subclasses
        """
        # Fix: raise (not return) so misuse fails loudly instead of
        # handing the caller an exception *instance*.
        raise NotImplementedError()

    def reset(self):
        """
        Overwritten by subclasses
        """
        # Fix: raise (not return) — see step().
        raise NotImplementedError()

    def set_mode(self, mode):
        """
        Set simulator mode
        """
        self.simulator.mode = mode
# Example 13
def main():
    """Drive a Turtlebot along a shortest path in Benevolence_1_int.

    NOTE(review): this example appears broken as extracted — the loop body
    reads `smt_path` (presumably a smoothed version of the A* path), which
    is never defined here; only `rnd_path` is computed.  `e2q` (Euler to
    quaternion) is also assumed to be imported elsewhere.  Confirm against
    the original tutorial before running.
    """
    config = parse_config(
        os.path.join(gibson2.example_config_path,
                     'turtlebot_demo.yaml'))  #robot configuration file path
    #    settings = MeshRendererSettings() #generating renderer settings object

    #generating simulator object
    #s = Simulator(mode='headless', #simulating without gui
    s = Simulator(
        mode='headless',  #simulating with gui
        #                  image_width=256, #robot camera pixel width
        #                  image_height=256, #robot camera pixel height
        #                  vertical_fov=40, #robot camera view angle (from floor to ceiling 40 degrees)
    )
    #rendering_settings=settings)

    #generating scene object
    scene = InteractiveIndoorScene(
        'Benevolence_1_int',  #scene name: Benevolence, floor number: 1, does it include interactive objects: yes (int). I pick Benevolence on purpose as it is the most memory friendly scene in the iGibson dataset. 
        build_graph=
        True,  #builds the connectivity graph over the given facedown traversibility map (floor plan)
        waypoint_resolution=
        0.1,  #radial distance between 2 consecutive waypoints (10 cm)
        pybullet_load_texture=True
    )  #do you want to include texture and material properties? (you need this for object interaction)

    s.import_ig_scene(scene)  #loading the scene object in the simulator object
    turtlebot = Turtlebot(config)  #generating the robot object
    s.import_robot(
        turtlebot)  #loading the robot object in the simulator object
    init_pos = turtlebot.get_position(
    )  #getting the initial position of the robot base [X:meters, Y:meters, Z:meters] (base: robot's main body. it may have links and the associated joints too. links and joints have positions and orientations as well.)
    init_or = turtlebot.get_rpy(
    )  #getting the initial Euler angles of the robot base [Roll: radians, Pitch: radians, Yaw: radians]

    #sampling random goal states in a desired room of the apartment
    np.random.seed(0)
    goal = scene.get_random_point_by_room_type('living_room')[
        1]  #sampling a random point in the living room

    rnd_path = scene.get_shortest_path(
        0, init_pos[0:2], goal[0:2], entire_path=True
    )[0]  #generate the "entire" a* path between the initial and goal nodes

    # NOTE(review): `rnd_path[0]` is the first element of the path, so this
    # loop bound looks wrong — presumably it was meant to iterate over the
    # waypoints of the (missing) smoothed path `smt_path`.
    for i in range(len(rnd_path[0]) - 1):
        with Profiler(
                'Simulator step'
        ):  #iGibson simulation loop requires this context manager

            rgb_camera = np.array(
                s.renderer.render_robot_cameras(modes='rgb')
            )  #probing RGB data, you can also probe rgbd or even optical flow if robot has that property in its config file (.yaml file)
            # NOTE(review): channel index 3 selects alpha, not an RGB channel — confirm intent.
            plt.imshow(rgb_camera[0, :, :, 3])  #calling sampled RGB data
            lidar = s.renderer.get_lidar_all()  #probing 360 degrees lidar data

            delta_pos = smt_path[:, i +
                                 1] - smt_path[:,
                                               i]  #direction vector between 2 consecutive waypoints
            delta_yaw = np.arctan2(
                delta_pos[1], delta_pos[0]
            )  #the yaw angle of the robot base while following the sampled bezier path
            delta_qua = e2q(
                init_or[0], init_or[1],
                delta_yaw)  #transforming robot base Euler angles to quaternion
            turtlebot.set_position_orientation([
                smt_path[0, i], smt_path[1, i], init_pos[2]
            ], delta_qua)  #setting the robot base position and the orientation
            s.step()  #proceed one step ahead in simulation time

    s.disconnect()
# Example 14
def main():
    """Render a fly-through tour of a scene and encode it to an mp4.

    Reads the camera trajectory from misc/tour_cam_trajectory.txt, renders
    one frame per waypoint into a temporary directory, encodes the frames
    with ffmpeg, then removes the temporary directory.
    """
    import shutil  # used for portable temp-dir cleanup below

    parser = argparse.ArgumentParser()
    parser.add_argument('--scene',
                        type=str,
                        help='Name of the scene in the iG Dataset')
    parser.add_argument('--save_dir',
                        type=str,
                        help='Directory to save the frames.',
                        default='misc')
    parser.add_argument('--seed', type=int, default=15, help='Random seed.')
    parser.add_argument('--domain_rand',
                        dest='domain_rand',
                        action='store_true')
    parser.add_argument('--domain_rand_interval',
                        dest='domain_rand_interval',
                        type=int,
                        default=50)
    parser.add_argument('--object_rand',
                        dest='object_rand',
                        action='store_true')
    args = parser.parse_args()

    # Lighting probes and backdrop for the PBR renderer.
    hdr_texture1 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                'background', 'probe_03.hdr')
    hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                'background', 'probe_02.hdr')
    light_map = os.path.join(get_ig_scene_path(args.scene), 'layout',
                             'floor_lighttype_0.png')

    background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                      'background', 'urban_street_01.jpg')

    settings = MeshRendererSettings(env_texture_filename=hdr_texture1,
                                    env_texture_filename2=hdr_texture2,
                                    env_texture_filename3=background_texture,
                                    light_modulation_map_filename=light_map,
                                    enable_shadow=True,
                                    msaa=True,
                                    skybox_size=36.,
                                    light_dimming_factor=0.8)

    s = Simulator(mode='headless',
                  image_width=1080,
                  image_height=720,
                  vertical_fov=60,
                  rendering_settings=settings)

    random.seed(args.seed)
    scene = InteractiveIndoorScene(args.scene,
                                   texture_randomization=args.domain_rand,
                                   object_randomization=args.object_rand)

    s.import_ig_scene(scene)

    traj_path = os.path.join(get_ig_scene_path(args.scene), 'misc',
                             'tour_cam_trajectory.txt')
    save_dir = os.path.join(get_ig_scene_path(args.scene), args.save_dir)
    os.makedirs(save_dir, exist_ok=True)
    tmp_dir = os.path.join(save_dir, 'tmp')
    os.makedirs(tmp_dir, exist_ok=True)

    # Each trajectory line is "x,y,dir_x,dir_y": position and view direction.
    with open(traj_path, 'r') as fp:
        points = [l.rstrip().split(',') for l in fp.readlines()]

    # Let physics settle before rendering any frames.
    for _ in range(60):
        s.step()
    s.sync()

    for i in range(len(points)):
        if args.domain_rand and i % args.domain_rand_interval == 0:
            scene.randomize_texture()
        x, y, dir_x, dir_y = [float(p) for p in points[i]]
        z = 1.7  # camera eye height
        tar_x = x + dir_x
        tar_y = y + dir_y
        tar_z = 1.4  # look slightly downward
        s.renderer.set_camera([x, y, z], [tar_x, tar_y, tar_z], [0, 0, 1])

        with Profiler('Render'):
            frame = s.renderer.render(modes=('rgb'))
        img = Image.fromarray(
            (255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8))
        img.save(os.path.join(tmp_dir, '{:05d}.png'.format(i)))

    cmd = 'ffmpeg -i {t}/%5d.png -y -an -c:v libx264 -crf 18 -preset veryslow -r 30 {s}/tour.mp4'.format(
        t=tmp_dir, s=save_dir)
    subprocess.call(cmd, shell=True)
    # Fix: remove the temp frame directory with shutil.rmtree instead of
    # shelling out to `rm -r` — portable and immune to quoting issues.
    shutil.rmtree(tmp_dir)

    s.disconnect()
def benchmark_scene(scene_name, optimized=False, import_robot=True):
    """Benchmark physics and rendering throughput for a single scene.

    Steps the simulator 2000 times, timing the physics and rendering halves
    of each step separately, then saves a six-panel summary plot to
    scene_benchmark_<scene>_o_<optimized>_r_<import_robot>.pdf.
    """
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    assets_version = get_ig_assets_version()
    print('assets_version', assets_version)

    scene = InteractiveIndoorScene(scene_name,
                                   texture_randomization=False,
                                   object_randomization=False)
    render_settings = MeshRendererSettings(msaa=False,
                                           enable_shadow=False,
                                           optimized=optimized)
    sim = Simulator(
        mode='headless',
        image_width=512,
        image_height=512,
        device_idx=0,
        rendering_settings=render_settings,
    )
    sim.import_ig_scene(scene)
    if import_robot:
        turtlebot = Turtlebot(config)
        sim.import_robot(turtlebot)

    sim.renderer.use_pbr(use_pbr=True, use_pbr_mapping=True)
    step_fps = []
    physics_fps = []
    render_fps = []
    obj_awake = []
    for _ in range(2000):
        t_start = time.time()
        sim.step()
        if import_robot:
            # apply random actions
            turtlebot.apply_action(turtlebot.action_space.sample())
        t_physics = time.time()
        if import_robot:
            _ = sim.renderer.render_robot_cameras(modes=('rgb'))
        else:
            _ = sim.renderer.render(modes=('rgb'))
        t_end = time.time()

        print("Render Frequency: ", 1 / (t_end - t_physics))
        print("Physics Frequency: ", 1 / (t_physics - t_start))
        print("Step Frequency: ", 1 / (t_end - t_start))
        step_fps.append(1 / (t_end - t_start))
        physics_fps.append(1 / (t_physics - t_start))
        render_fps.append(1 / (t_end - t_physics))
        obj_awake.append(sim.body_links_awake)
    sim.disconnect()

    plt.figure(figsize=(7, 25))

    ax = plt.subplot(6, 1, 1)
    plt.hist(render_fps)
    ax.set_xlabel('Render fps')
    ax.set_title(
        'Scene {} version {}\noptimized {} num_obj {}\n import_robot {}'.
        format(scene_name, assets_version, optimized, scene.get_num_objects(),
               import_robot))
    ax = plt.subplot(6, 1, 2)
    plt.hist(physics_fps)
    ax.set_xlabel('Physics fps')
    ax = plt.subplot(6, 1, 3)
    plt.hist(step_fps)
    ax.set_xlabel('Step fps')
    ax = plt.subplot(6, 1, 4)
    plt.plot(render_fps)
    ax.set_xlabel('Render fps with time')
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 5)
    plt.plot(physics_fps)
    ax.set_xlabel('Physics fps with time, converge to {}'.format(
        np.mean(physics_fps[-100:])))
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 6)
    plt.plot(obj_awake)
    ax.set_xlabel('Num object links awake, converge to {}'.format(
        np.mean(obj_awake[-100:])))

    plt.savefig('scene_benchmark_{}_o_{}_r_{}.pdf'.format(
        scene_name, optimized, import_robot))
def _sample_smooth_path(scene, init_pos):
    """Sample a randomized closed tour and return it as a smooth 2-D path.

    Picks three random goal points in the living room, chains "entire" A*
    shortest paths init_pos -> goal1 -> goal2 -> goal3 -> init_pos on floor 0,
    then fits a Bezier curve through the chained waypoints and samples it at
    2000 evenly spaced parameter values.

    Returns a 2 x 2000 array: row 0 is x, row 1 is y (meters).
    """
    goal1 = scene.get_random_point_by_room_type('living_room')[1]
    goal2 = scene.get_random_point_by_room_type('living_room')[1]
    goal3 = scene.get_random_point_by_room_type('living_room')[1]
    goal4 = init_pos  # close the loop back at the start position

    # entire_path=True yields every waypoint of the A* path, not just the ends.
    path1 = scene.get_shortest_path(0, init_pos[0:2], goal1[0:2], entire_path=True)[0]
    path2 = scene.get_shortest_path(0, goal1[0:2], goal2[0:2], entire_path=True)[0]
    path3 = scene.get_shortest_path(0, goal2[0:2], goal3[0:2], entire_path=True)[0]
    path4 = scene.get_shortest_path(0, goal3[0:2], goal4[0:2], entire_path=True)[0]

    # Concatenate segments, dropping each segment's first waypoint because it
    # duplicates the previous segment's last waypoint.
    rnd_path = np.append(path1, path2[1:], axis=0)
    rnd_path = np.append(rnd_path, path3[1:], axis=0)
    rnd_path = np.append(rnd_path, path4[1:], axis=0)

    # Smooth the piecewise-linear A* route with a Bezier curve and sample it.
    path_nodes = np.asfortranarray([rnd_path[:, 0].tolist(), rnd_path[:, 1].tolist()])
    smt_crv = bezier.Curve.from_nodes(path_nodes)
    return smt_crv.evaluate_multi(np.linspace(0, 1, 2000))


def main():
    """Drive a Turtlebot along randomized smooth paths in an iGibson scene,
    reconstructing each RGB camera frame through a pre-trained CVAE and
    saving the reconstructions as JPEGs under ./iGib_recons.

    Per episode (30 episodes): sample a smoothed random tour of the living
    room (see _sample_smooth_path), then step the robot along it, encoding/
    decoding every frame with the CVAE.

    Requires pre-trained Keras models at ./CVAE_encoder and ./CVAE_decoder.
    """
    # Robot configuration file path.
    config = parse_config(os.path.join(gibson2.example_config_path, 'turtlebot_demo.yaml'))
    settings = MeshRendererSettings()  # default renderer settings

    # Headless simulator (no GUI) with a small 64x48 robot camera and a
    # 75-degree vertical field of view.
    s = Simulator(mode='headless',
                  image_width=64,
                  image_height=48,
                  vertical_fov=75,
                  rendering_settings=settings)

    # Benevolence floor 1, interactive variant -- chosen as the most
    # memory-friendly scene in the iGibson dataset.  build_graph enables A*
    # planning over the face-down traversability map (floor plan).
    scene = InteractiveIndoorScene('Benevolence_1_int',
                                   build_graph=True,
                                   waypoint_resolution=0.1,  # 10 cm between waypoints
                                   trav_map_resolution=0.1,
                                   trav_map_erosion=2,
                                   should_open_all_doors=True,
                                   pybullet_load_texture=True)  # needed for object interaction

    s.import_ig_scene(scene)          # load the scene into the simulator
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)         # load the robot into the simulator
    init_pos = turtlebot.get_position()  # base position [x, y, z] in meters
    init_or = turtlebot.get_rpy()        # base orientation [roll, pitch, yaw] in radians

    np.random.seed(0)  # reproducible goal sampling

    # Pre-trained CVAE used to reconstruct the robot's camera frames.
    encoder = keras.models.load_model('./CVAE_encoder')
    decoder = keras.models.load_model('./CVAE_decoder')

    for j in range(30):
        smt_path = _sample_smooth_path(scene, init_pos)

        # Orient the robot along the first path segment before starting.
        delta_pos = smt_path[:, 1] - smt_path[:, 0]
        delta_yaw = np.arctan2(delta_pos[1], delta_pos[0])
        delta_qua = e2q(init_or[0], init_or[1], delta_yaw)  # Euler -> quaternion
        turtlebot.set_position_orientation(
            [smt_path[0, 0], smt_path[1, 0], init_pos[2]], delta_qua)

        episode_path = os.getcwd() + '/episodes/episode_' + str(j).zfill(2)
        # os.makedirs(episode_path)  # uncomment to create per-episode folders

        # Ground-truth path CSV destination (export currently disabled).
        gtrth_pth_str = episode_path + '/episode_' + str(j).zfill(2) + 'ground_truth_path.csv'
        # np.savetxt(gtrth_pth_str, np.atleast_2d(smt_path).T, delimiter=",")

        for i in range(len(smt_path[0]) - 1):
            with Profiler('Simulator step'):  # iGibson's loop requires this context manager

                # Probe RGB from the robot camera; [0, :, :, 0] keeps the
                # first camera's first channel only (single-channel CVAE input).
                rgb_camera = np.array(s.renderer.render_robot_cameras(modes='rgb'))
                robot_img = rgb_camera[0, :, :, 0]

                # Reconstruct the frame through the CVAE and save it.
                # NOTE(review): the decoder output is cast straight to uint8;
                # if it is normalized to [0, 1] this truncates to near-black --
                # confirm the decoder emits values in [0, 255].
                encoded = encoder.predict(np.expand_dims(robot_img, axis=0))
                decoded = decoder.predict(encoded[0])
                robot_img = decoded[0, :, :, 0].astype('uint8')
                img_str = ('./iGib_recons' + '/episode_' + str(j).zfill(2)
                           + '_frame_' + str(i).zfill(5) + '.jpeg')
                Image.fromarray(robot_img).save(img_str)
                # lidar = s.renderer.get_lidar_all()  # 360-degree lidar probe

                # Teleport the base to waypoint i, facing along segment i -> i+1.
                delta_pos = smt_path[:, i + 1] - smt_path[:, i]
                delta_yaw = np.arctan2(delta_pos[1], delta_pos[0])
                delta_qua = e2q(init_or[0], init_or[1], delta_yaw)
                turtlebot.set_position_orientation(
                    [smt_path[0, i], smt_path[1, i], init_pos[2]], delta_qua)
                # BUG FIX: this statement was indented with a tab in the
                # original, raising TabError at parse time.
                velcmd1, velcmd2 = PID_path_track(delta_pos, delta_qua)
                turtlebot.set_motor_velocity([velcmd1, velcmd2])

                s.step()  # advance one simulation step

    s.disconnect()