def main():
    s = Simulator(mode='gui', image_width=512, image_height=512, device_idx=0)

    for random_seed in range(10):
        scene = InteractiveIndoorScene('Rs_int',
                                       texture_randomization=False,
                                       object_randomization=True,
                                       object_randomization_idx=random_seed)
        s.import_ig_scene(scene)
        for i in range(1000):
            s.step()
        s.reload()

    s.disconnect()
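The snippet above assumes the iGibson Simulator and InteractiveIndoorScene classes have already been imported and that main() is invoked as a script entry point. A minimal sketch of that surrounding boilerplate is shown below; the exact module paths are an assumption, since they differ between iGibson releases (older releases ship the same classes under the gibson2 package).

# Assumed imports for the example above (paths follow the igibson package layout;
# older releases use gibson2.simulator / gibson2.scenes.igibson_indoor_scene instead).
from igibson.simulator import Simulator
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene

if __name__ == '__main__':
    main()  # runs the object-randomization loop defined above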
Example #2
class BaseEnv(gym.Env):
    '''
    Base Env class that follows the OpenAI Gym interface.
    Handles loading the scene and robot.
    Functions like reset and step are not implemented here and must be
    overridden by subclasses (see the usage sketch after this class).
    '''
    def __init__(self,
                 config_file,
                 scene_id=None,
                 mode='headless',
                 action_timestep=1 / 10.0,
                 physics_timestep=1 / 240.0,
                 render_to_tensor=False,
                 device_idx=0):
        """
        :param config_file: config_file path
        :param scene_id: override scene_id in config file
        :param mode: headless or gui mode
        :param action_timestep: amount of simulated time (in seconds) that each environment action spans
        :param physics_timestep: physics timestep for pybullet
        :param render_to_tensor: whether to render directly to a GPU tensor instead of returning numpy arrays
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if scene_id is not None:
            self.config['scene_id'] = scene_id

        self.mode = mode
        self.action_timestep = action_timestep
        self.physics_timestep = physics_timestep
        self.texture_randomization_freq = self.config.get(
            'texture_randomization_freq', None)
        self.object_randomization_freq = self.config.get(
            'object_randomization_freq', None)
        self.object_randomization_idx = 0
        self.num_object_randomization_idx = 10

        enable_shadow = self.config.get('enable_shadow', False)
        enable_pbr = self.config.get('enable_pbr', True)
        texture_scale = self.config.get('texture_scale', 1.0)

        settings = MeshRendererSettings(enable_shadow=enable_shadow,
                                        enable_pbr=enable_pbr,
                                        msaa=False,
                                        texture_scale=texture_scale)

        self.simulator = Simulator(
            mode=mode,
            physics_timestep=physics_timestep,
            render_timestep=action_timestep,
            image_width=self.config.get('image_width', 128),
            image_height=self.config.get('image_height', 128),
            vertical_fov=self.config.get('vertical_fov', 90),
            device_idx=device_idx,
            render_to_tensor=render_to_tensor,
            rendering_settings=settings)
        self.load()

    def reload(self, config_file):
        """
        Reload another config file.
        This allows one to change the configuration on the fly.

        :param config_file: new config file path
        """
        self.config = parse_config(config_file)
        self.simulator.reload()
        self.load()

    def reload_model(self, scene_id):
        """
        Reload another scene model
        This allows one to change the scene on the fly

        :param scene_id: new scene_id
        """
        self.config['scene_id'] = scene_id
        self.simulator.reload()
        self.load()

    def reload_model_object_randomization(self):
        """
        Reload the same model, with the next object randomization random seed
        """
        if self.object_randomization_freq is None:
            return
        self.object_randomization_idx = (self.object_randomization_idx + 1) % \
            (self.num_object_randomization_idx)
        self.simulator.reload()
        self.load()

    def get_next_scene_random_seed(self):
        """
        Get the next scene random seed
        """
        if self.object_randomization_freq is None:
            return None
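        # Note: scene_random_seeds and scene_random_seed_idx are never initialized
        # in BaseEnv; they are assumed to be set up elsewhere before this is called.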
        return self.scene_random_seeds[self.scene_random_seed_idx]

    def load(self):
        """
        Load the scene and robot
        """
        if self.config['scene'] == 'empty':
            scene = EmptyScene()
            self.simulator.import_scene(scene,
                                        load_texture=self.config.get(
                                            'load_texture', True))
        elif self.config['scene'] == 'stadium':
            scene = StadiumScene()
            self.simulator.import_scene(scene,
                                        load_texture=self.config.get(
                                            'load_texture', True))
        elif self.config['scene'] == 'gibson':
            scene = StaticIndoorScene(
                self.config['scene_id'],
                waypoint_resolution=self.config.get('waypoint_resolution',
                                                    0.2),
                num_waypoints=self.config.get('num_waypoints', 10),
                build_graph=self.config.get('build_graph', False),
                trav_map_resolution=self.config.get('trav_map_resolution',
                                                    0.1),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                pybullet_load_texture=self.config.get('pybullet_load_texture',
                                                      False),
            )
            self.simulator.import_scene(scene,
                                        load_texture=self.config.get(
                                            'load_texture', True))
        elif self.config['scene'] == 'igibson':
            scene = InteractiveIndoorScene(
                self.config['scene_id'],
                waypoint_resolution=self.config.get('waypoint_resolution',
                                                    0.2),
                num_waypoints=self.config.get('num_waypoints', 10),
                build_graph=self.config.get('build_graph', False),
                trav_map_resolution=self.config.get('trav_map_resolution',
                                                    0.1),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                trav_map_type=self.config.get('trav_map_type', 'with_obj'),
                pybullet_load_texture=self.config.get('pybullet_load_texture',
                                                      False),
                texture_randomization=self.texture_randomization_freq
                is not None,
                object_randomization=self.object_randomization_freq
                is not None,
                object_randomization_idx=self.object_randomization_idx,
                should_open_all_doors=self.config.get('should_open_all_doors',
                                                      False),
                load_object_categories=self.config.get(
                    'load_object_categories', None),
                load_room_types=self.config.get('load_room_types', None),
                load_room_instances=self.config.get('load_room_instances',
                                                    None),
            )
            # TODO: Unify the function import_scene and take out of the if-else clauses
            first_n = self.config.get('_set_first_n_objects', -1)
            if first_n != -1:
                scene._set_first_n_objects(first_n)
            self.simulator.import_ig_scene(scene)

        if self.config['robot'] == 'Turtlebot':
            robot = Turtlebot(self.config)
        elif self.config['robot'] == 'Husky':
            robot = Husky(self.config)
        elif self.config['robot'] == 'Ant':
            robot = Ant(self.config)
        elif self.config['robot'] == 'Humanoid':
            robot = Humanoid(self.config)
        elif self.config['robot'] == 'JR2':
            robot = JR2(self.config)
        elif self.config['robot'] == 'JR2_Kinova':
            robot = JR2_Kinova(self.config)
        elif self.config['robot'] == 'Freight':
            robot = Freight(self.config)
        elif self.config['robot'] == 'Fetch':
            robot = Fetch(self.config)
        elif self.config['robot'] == 'Locobot':
            robot = Locobot(self.config)
        else:
            raise Exception('unknown robot type: {}'.format(
                self.config['robot']))

        self.scene = scene
        self.robots = [robot]
        for robot in self.robots:
            self.simulator.import_robot(robot)

    def clean(self):
        """
        Clean up
        """
        if self.simulator is not None:
            self.simulator.disconnect()

    def close(self):
        """
        Synonymous function with clean
        """
        self.clean()

    def simulator_step(self):
        """
        Step the simulation.
        This is different from environment step that returns the next
        observation, reward, done, info.
        """
        self.simulator.step()

    def step(self, action):
        """
        To be overridden by subclasses
        """
        raise NotImplementedError()

    def reset(self):
        """
        To be overridden by subclasses
        """
        raise NotImplementedError()

    def set_mode(self, mode):
        """
        Set simulator mode
        """
        self.simulator.mode = mode
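BaseEnv only handles loading the scene and robot; a concrete environment has to supply reset and step on top of it. A minimal usage sketch follows. It is illustrative only: the observation and reward logic is a placeholder, and the robot helpers it calls (robot_specific_reset, apply_action, get_position) follow the older iGibson robot API and are assumed to be available on the configured robot.

import numpy as np

class DummyNavEnv(BaseEnv):
    """Illustrative BaseEnv subclass; not part of the library."""

    def reset(self):
        # reset the robot's joints and base, then sync the renderer with pybullet
        self.robots[0].robot_specific_reset()
        self.simulator.sync()
        return self.get_state()

    def step(self, action):
        # apply one action and advance physics by action_timestep
        self.robots[0].apply_action(action)
        self.simulator_step()
        state = self.get_state()
        reward, done, info = 0.0, False, {}  # placeholder task logic
        return state, reward, done, info

    def get_state(self):
        # placeholder observation: the robot's base position
        return np.array(self.robots[0].get_position())

The config file passed to such a subclass needs at least the scene, scene_id (for gibson/igibson scenes) and robot keys, since load() above reads them to build the simulation.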
Example #3
def render_physics_gifs(main_urdf_file_and_offset):
    step_per_sec = 100
    s = Simulator(mode='headless',
                  image_width=512,
                  image_height=512,
                  physics_timestep=1 / float(step_per_sec))

    root_dir = '/cvgl2/u/chengshu/ig_dataset_v5/objects'
    obj_count = 0
    for i, obj_class_dir in enumerate(sorted(os.listdir(root_dir))):
        obj_class = obj_class_dir
        # if obj_class not in SELECTED_CLASSES:
        #     continue
        obj_class_dir = os.path.join(root_dir, obj_class_dir)
        for obj_inst_dir in os.listdir(obj_class_dir):
            # if obj_inst_dir != '14402':
            #     continue

            imgs = []
            scene = EmptyScene()
            s.import_scene(scene, render_floor_plane=True)
            obj_inst_name = obj_inst_dir
            # urdf_path = obj_inst_name + '.urdf'
            obj_inst_dir = os.path.join(obj_class_dir, obj_inst_dir)
            urdf_path, offset = main_urdf_file_and_offset[obj_inst_dir]
            # urdf_path = os.path.join(obj_inst_dir, urdf_path)
            # print('urdf_path', urdf_path)

            obj = ArticulatedObject(urdf_path)
            s.import_object(obj)

            push_visual_marker = VisualMarker(radius=0.1)
            s.import_object(push_visual_marker)
            push_visual_marker.set_position([100, 100, 0.0])
            z = stable_z_on_aabb(obj.body_id, [[0, 0, 0], [0, 0, 0]])

            # offset is translation from the bounding box center to the base link frame origin
            # need to add another offset that is the translation from the base link frame origin to the inertial frame origin
            # p.resetBasePositionAndOrientation() sets the inertial frame origin instead of the base link frame origin
            # Assuming the bounding box center is at (0, 0, z) where z is half of the extent in z-direction
            base_link_to_inertial = p.getDynamicsInfo(obj.body_id, -1)[3]
            obj.set_position([
                offset[0] + base_link_to_inertial[0],
                offset[1] + base_link_to_inertial[1], z
            ])

            center, extent = get_center_extent(obj.body_id)
            # the bounding box center should be at (0, 0) on the xy plane and the bottom of the bounding box should touch the ground
            if not (np.linalg.norm(center[:2]) < 1e-3
                    and np.abs(center[2] - extent[2] / 2.0) < 1e-3):
                print('-' * 50)
                print('WARNING:', urdf_path, 'xy error',
                      np.linalg.norm(center[:2]), 'z error',
                      np.abs(center[2] - extent[2] / 2.0))

            height = extent[2]

            max_half_extent = max(extent[0], extent[1]) / 2.0
            px = max_half_extent * 2
            py = max_half_extent * 2
            pz = extent[2] * 1.2

            camera_pose = np.array([px, py, pz])
            # camera_pose = np.array([0.01, 0.01, 3])

            s.renderer.set_camera(camera_pose, [0, 0, 0], [0, 0, 1])
            # drop 1 second
            for _ in range(step_per_sec):
                s.step()
                frame = s.renderer.render(modes=('rgb',))
                imgs.append(
                    Image.fromarray(
                        (frame[0][:, :, :3] * 255).astype(np.uint8)))

            ray_start = [max_half_extent * 1.5, max_half_extent * 1.5, 0]
            ray_end = [-100.0, -100.0, 0]
            unit_force = np.array([-1.0, -1.0, 0.0])
            unit_force /= np.linalg.norm(unit_force)
            force_mag = 100.0

            ray_zs = [height * 0.5]
            valid_ray_z = False
            for trial in range(5):
                for ray_z in ray_zs:
                    ray_start[2] = ray_z
                    ray_end[2] = ray_z
                    res = p.rayTest(ray_start, ray_end)
                    if res[0][0] == obj.body_id:
                        valid_ray_z = True
                        break
                if valid_ray_z:
                    break
                increment = 1 / (2**(trial + 1))
                ray_zs = np.arange(increment / 2.0, 1.0, increment) * height

            # push 4 seconds
            for i in range(step_per_sec * 4):
                res = p.rayTest(ray_start, ray_end)
                object_id, link_id, _, hit_pos, hit_normal = res[0]
                if object_id != obj.body_id:
                    break
                push_visual_marker.set_position(hit_pos)
                p.applyExternalForce(object_id, link_id,
                                     unit_force * force_mag, hit_pos,
                                     p.WORLD_FRAME)
                s.step()
                # print(hit_pos)
                frame = s.renderer.render(modes=('rgb',))
                rgb = frame[0][:, :, :3]
                # add red border
                border_width = 10
                border_color = np.array([1.0, 0.0, 0.0])
                rgb[:border_width, :, :] = border_color
                rgb[-border_width:, :, :] = border_color
                rgb[:, :border_width, :] = border_color
                rgb[:, -border_width:, :] = border_color

                imgs.append(Image.fromarray((rgb * 255).astype(np.uint8)))

            gif_path = '{}/visualizations/{}_cm_physics_v3.gif'.format(
                obj_inst_dir, obj_inst_name)
            imgs = imgs[::4]
            imgs[0].save(gif_path,
                         save_all=True,
                         append_images=imgs[1:],
                         optimize=True,
                         duration=40,
                         loop=0)
            obj_count += 1
            # print(obj_count, gif_path, len(imgs), valid_ray_z, ray_z)
            print(obj_count, gif_path)
            s.reload()
Example #4
def main():
    step_per_sec = 100
    num_directions = 12
    obj_count = 0
    root_dir = '/cvgl2/u/chengshu/ig_dataset_v5/objects'

    s = Simulator(mode='headless',
                  image_width=512,
                  image_height=512,
                  physics_timestep=1 / float(step_per_sec))
    p.setGravity(0.0, 0.0, 0.0)

    for obj_class_dir in sorted(os.listdir(root_dir)):
        obj_class_dir = os.path.join(root_dir, obj_class_dir)
        for obj_inst_dir in os.listdir(obj_class_dir):
            obj_inst_name = obj_inst_dir
            urdf_path = obj_inst_name + '.urdf'
            obj_inst_dir = os.path.join(obj_class_dir, obj_inst_dir)
            urdf_path = os.path.join(obj_inst_dir, urdf_path)

            obj = ArticulatedObject(urdf_path)
            s.import_object(obj)

            with open(os.path.join(obj_inst_dir, 'misc/bbox.json'),
                      'r') as bbox_file:
                bbox_data = json.load(bbox_file)
                bbox_max = np.array(bbox_data['max'])
                bbox_min = np.array(bbox_data['min'])
            offset = -(bbox_max + bbox_min) / 2.0

            z = stable_z_on_aabb(obj.body_id, [[0, 0, 0], [0, 0, 0]])

            obj.set_position([offset[0], offset[1], z])
            _, extent = get_center_extent(obj.body_id)

            max_half_extent = max(extent) / 2.0
            px = max_half_extent * 3.0
            py = 0.0
            pz = extent[2] / 2.0
            camera_pose = np.array([px, py, pz])

            s.renderer.set_camera(camera_pose, [0, 0, pz], [0, 0, 1])

            num_joints = p.getNumJoints(obj.body_id)
            if num_joints == 0:
                s.reload()
                continue

            # collect joint low/high limit
            joint_low = []
            joint_high = []
            for j in range(num_joints):
                j_low, j_high = p.getJointInfo(obj.body_id, j)[8:10]
                joint_low.append(j_low)
                joint_high.append(j_high)

            # set joints to their lowest limits
            for j, j_low in zip(range(num_joints), joint_low):
                p.resetJointState(obj.body_id,
                                  j,
                                  targetValue=j_low,
                                  targetVelocity=0.0)
            s.sync()

            # render the images
            joint_low_imgs = []
            for i in range(num_directions):
                yaw = np.pi * 2.0 / num_directions * i
                obj.set_orientation(
                    quatToXYZW(euler2quat(0.0, 0.0, yaw), 'wxyz'))
                s.sync()
                rgb, three_d = s.renderer.render(modes=('rgb', '3d'))
                depth = -three_d[:, :, 2]
                rgb[depth == 0] = 1.0
                joint_low_imgs.append(
                    Image.fromarray((rgb[:, :, :3] * 255).astype(np.uint8)))

            # set joints to their highest limits
            for j, j_high in zip(range(num_joints), joint_high):
                p.resetJointState(obj.body_id,
                                  j,
                                  targetValue=j_high,
                                  targetVelocity=0.0)
            s.sync()

            # render the images
            joint_high_imgs = []
            for i in range(num_directions):
                yaw = np.pi * 2.0 / num_directions * i
                obj.set_orientation(
                    quatToXYZW(euler2quat(0.0, 0.0, yaw), 'wxyz'))
                s.sync()
                rgb, three_d = s.renderer.render(modes=('rgb', '3d'))
                depth = -three_d[:, :, 2]
                rgb[depth == 0] = 1.0
                joint_high_imgs.append(
                    Image.fromarray((rgb[:, :, :3] * 255).astype(np.uint8)))

            # concatenate the images
            imgs = []
            for im1, im2 in zip(joint_low_imgs, joint_high_imgs):
                dst = Image.new('RGB', (im1.width + im2.width, im1.height))
                dst.paste(im1, (0, 0))
                dst.paste(im2, (im1.width, 0))
                imgs.append(dst)
            gif_path = '{}/visualizations/{}_joint_limit.gif'.format(
                obj_inst_dir, obj_inst_name)

            # save the gif
            imgs[0].save(gif_path,
                         save_all=True,
                         append_images=imgs[1:],
                         optimize=True,
                         duration=200,
                         loop=0)

            s.reload()
            obj_count += 1
            print(obj_count, gif_path)