def main():
    """Load the Rs scene with a Turtlebot and ten cracker boxes, then drive forward."""
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings(enable_shadow=False, msaa=False)
    sim = Simulator(mode='gui',
                    image_width=256,
                    image_height=256,
                    rendering_settings=settings)
    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    sim.import_scene(scene)
    robot = Turtlebot(config)
    sim.import_robot(robot)

    # Scatter ten YCB cracker boxes at random positions in [0, 2)^3.
    for _ in range(10):
        box = YCBObject('003_cracker_box')
        sim.import_object(box)
        box.set_position_orientation(np.random.uniform(low=0, high=2, size=3),
                                     [0, 0, 0, 1])
    print(sim.renderer.instances)

    # Drive straight ahead while profiling each simulation step.
    for _ in range(10000):
        with Profiler('Simulator step'):
            robot.apply_action([0.1, 0.1])
            sim.step()
            rgb = sim.renderer.render_robot_cameras(modes=('rgb'))
    sim.disconnect()
def test_import_building_viewing():
    """Import the Rs scene plus three Turtlebots and step the simulation."""
    download_assets()
    download_demo_data()
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    sim = Simulator(mode='headless')
    scene = StaticIndoorScene('Rs')
    sim.import_scene(scene)
    assert sim.objects == list(range(2))

    robots = [Turtlebot(config) for _ in range(3)]
    for robot in robots:
        sim.import_robot(robot)
    for robot, x in zip(robots, (0.5, 0, -0.5)):
        robot.set_position([x, 0, 0.5])

    for _ in range(10):
        sim.step()
    sim.disconnect()
def test_turtlebot():
    """A Turtlebot in the stadium scene should yield exactly 5 pybullet bodies."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    sim.import_robot(Turtlebot(config))
    body_count = p.getNumBodies()
    sim.disconnect()
    assert body_count == 5
def test_quadrotor():
    """A Quadrotor in the stadium scene should yield exactly 5 pybullet bodies."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    sim.import_robot(Quadrotor(config))
    body_count = p.getNumBodies()
    sim.disconnect()
    assert body_count == 5
def test_jr2():
    """A JR2 in the stadium scene should yield exactly 5 pybullet bodies."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    sim.import_robot(JR2(config))
    body_count = p.getNumBodies()
    sim.disconnect()
    assert body_count == 5
def test_humanoid():
    """A Humanoid in the stadium scene should yield exactly 5 pybullet bodies."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    sim.import_robot(Humanoid(config))
    body_count = p.getNumBodies()
    sim.disconnect()
    assert body_count == 5
def test_fetch():
    """Fetch should be able to compute its state across 100 simulation steps."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    fetch = Fetch(config)
    sim.import_robot(fetch)
    for _ in range(100):
        fetch.calc_state()
        sim.step()
    sim.disconnect()
def test_humanoid_position():
    """Setting the humanoid base position should be reflected by get_position()."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    humanoid = Humanoid(config)
    sim.import_robot(humanoid)
    humanoid.set_position([0, 0, 5])
    body_count = p.getNumBodies()
    observed = humanoid.get_position()
    sim.disconnect()
    assert body_count == 5
    assert np.allclose(observed, np.array([0, 0, 5]))
def test_turtlebot_position():
    """Setting the Turtlebot base position should be reflected by get_position()."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    turtlebot = Turtlebot(config)
    sim.import_robot(turtlebot)
    turtlebot.set_position([0, 0, 5])
    body_count = p.getNumBodies()
    observed = turtlebot.get_position()
    sim.disconnect()
    assert body_count == 5
    assert np.allclose(observed, np.array([0, 0, 5]))
def test_ant():
    """Two ants in the stadium scene should yield 6 bodies and step stably."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    first_ant = Ant(config)
    sim.import_robot(first_ant)
    second_ant = Ant(config)
    sim.import_robot(second_ant)
    second_ant.set_position([0, 2, 2])
    body_count = p.getNumBodies()
    for _ in range(100):
        sim.step()
    sim.disconnect()
    assert body_count == 6
def run_demo(self):
    """Drive a Turtlebot through the Rs scene for 1000 steps in GUI mode."""
    config = parse_config(
        os.path.join(os.path.dirname(gibson2.__file__),
                     '../examples/configs/turtlebot_demo.yaml'))
    sim = Simulator(mode='gui', image_width=700, image_height=700)
    sim.import_scene(StaticIndoorScene('Rs', pybullet_load_texture=True))
    robot = Turtlebot(config)
    sim.import_robot(robot)
    for _ in range(1000):
        robot.apply_action([0.1, 0.5])
        sim.step()
    sim.disconnect()
class ToyEnv(object):
    """Minimal simulator wrapper: static Rs scene, a Turtlebot, five YCB boxes."""

    def __init__(self):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        # These asset paths are built but not passed to the renderer in this
        # (non-PBR) configuration.
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(
            gibson2.ig_dataset_path, 'scenes', 'Rs_int', 'layout',
            'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')
        settings = MeshRendererSettings(enable_shadow=False, enable_pbr=False)
        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        scene = StaticIndoorScene('Rs')
        self.s.import_scene(scene)
        self.robot = Turtlebot(config)
        self.s.import_robot(self.robot)
        # Scatter five YCB cracker boxes at random positions in [0, 2)^3.
        for _ in range(5):
            box = YCBObject('003_cracker_box')
            self.s.import_object(box)
            box.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)

    def step(self, a):
        """Apply action `a`, advance one step, and return the robot's RGB frame."""
        self.robot.apply_action(a)
        self.s.step()
        return self.s.renderer.render_robot_cameras(modes=('rgb'))[0]

    def close(self):
        """Disconnect the underlying simulator."""
        self.s.disconnect()
def benchmark_rendering(scene_list, rendering_presets_list, modality_list):
    """Measure render FPS for every (scene, preset, modality) combination.

    :param scene_list: interactive scene names to benchmark
    :param rendering_presets_list: keys into NamedRenderingPresets
    :param modality_list: render modes to time (e.g. 'rgb', '3d')
    :return: dict mapping (scene_name, preset, mode) -> frames per second
    """
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    assets_version = get_ig_assets_version()
    print('assets_version', assets_version)
    result = {}
    for scene_name in scene_list:
        for rendering_preset in rendering_presets_list:
            scene = InteractiveIndoorScene(scene_name,
                                           texture_randomization=False,
                                           object_randomization=False)
            settings = NamedRenderingPresets[rendering_preset]
            # The RL preset is benchmarked at its small training resolution.
            if rendering_preset == 'VISUAL_RL':
                image_width, image_height = 128, 128
            else:
                image_width, image_height = 512, 512
            s = Simulator(mode='headless',
                          image_width=image_width,
                          image_height=image_height,
                          device_idx=0,
                          rendering_settings=settings,
                          physics_timestep=1 / 240.0)
            s.import_ig_scene(scene)
            turtlebot = Turtlebot(config)
            s.import_robot(turtlebot)
            for mode in modality_list:
                # Warm up physics and the renderer before timing.
                for _ in range(10):
                    s.step()
                    _ = s.renderer.render_robot_cameras(modes=(mode))
                start = time.time()
                for _ in range(200):
                    _ = s.renderer.render_robot_cameras(modes=(mode))
                elapsed = time.time() - start
                result[(scene_name, rendering_preset, mode)] = 200 / elapsed
            s.disconnect()
    return result
def test_multiagent():
    """Three Turtlebots in the stadium scene: 7 bodies, 100 stable steps."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    robots = [Turtlebot(config) for _ in range(3)]
    for robot in robots:
        sim.import_robot(robot)
    for robot, x in zip(robots, (1, 0, -1)):
        robot.set_position([x, 0, 0.5])
    body_count = p.getNumBodies()
    for _ in range(100):
        sim.step()
    sim.disconnect()
    assert body_count == 7
def test_import_box():
    """Build a walled arena out of cubes and drive two Turtlebots inside it."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())
    print(sim.objects)
    # Each entry is [position, half-extents].
    wall = [[[0, 7, 1.01], [10, 0.2, 1]],
            [[0, -7, 1.01], [6.89, 0.1, 1]],
            [[7, -1.5, 1.01], [0.1, 5.5, 1]],
            [[-7, -1, 1.01], [0.1, 6, 1]],
            [[-8.55, 5, 1.01], [1.44, 0.1, 1]],
            [[8.55, 4, 1.01], [1.44, 0.1, 1]]]
    obstacles = [[[-0.5, 2, 1.01], [3.5, 0.1, 1]],
                 [[4.5, -1, 1.01], [1.5, 0.1, 1]],
                 [[-4, -2, 1.01], [0.1, 2, 1]],
                 [[2.5, -4, 1.01], [1.5, 0.1, 1]]]
    # Import walls first, then obstacles (same order as listed above).
    for position, dimensions in wall + obstacles:
        sim.import_object(Cube(position, dimensions))
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    first_bot = Turtlebot(config)
    second_bot = Turtlebot(config)
    sim.import_robot(first_bot)
    sim.import_robot(second_bot)
    first_bot.set_position([6., -6., 0.])
    second_bot.set_position([-3., 4., 0.])
    for _ in range(100):
        sim.step()
    sim.disconnect()
def main():
    """Drive a Turtlebot in the Rs scene while reading back full lidar scans."""
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings()
    sim = Simulator(mode='gui',
                    image_width=256,
                    image_height=256,
                    rendering_settings=settings)
    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    sim.import_scene(scene)
    robot = Turtlebot(config)
    sim.import_robot(robot)
    for _ in range(10000):
        with Profiler('Simulator step'):
            robot.apply_action([0.1, -0.1])
            sim.step()
            lidar = sim.renderer.get_lidar_all()
            print(lidar.shape)
            # TODO: visualize lidar scan
    sim.disconnect()
def main():
    """Replay CVAE-reconstructed robot camera frames along smoothed random paths.

    Loads the Benevolence_1_int scene with a Turtlebot, then for 30 episodes:
    samples three random living-room goals, chains the A* shortest paths
    through them (and back to the start), smooths the chained path with a
    bezier curve, and teleports the robot along the curve while saving
    CVAE-encoded/decoded camera frames to disk.
    """
    config = parse_config(
        os.path.join(gibson2.example_config_path,
                     'turtlebot_demo.yaml'))  # robot configuration file path
    settings = MeshRendererSettings()  # renderer settings object
    # Headless simulator with a small robot camera (64x48, 75 deg vertical fov).
    s = Simulator(mode='headless',
                  image_width=64,
                  image_height=48,
                  vertical_fov=75,
                  rendering_settings=settings)
    # Benevolence floor 1, interactive version -- the most memory-friendly
    # scene in the iGibson dataset.
    scene = InteractiveIndoorScene(
        'Benevolence_1_int',
        build_graph=True,  # connectivity graph over the traversability map
        waypoint_resolution=0.1,  # 10 cm between consecutive waypoints
        trav_map_resolution=0.1,
        trav_map_erosion=2,
        should_open_all_doors=True,
        pybullet_load_texture=True)  # textures/materials needed for interaction
    s.import_ig_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)
    init_pos = turtlebot.get_position()  # robot base [x, y, z] in meters
    init_or = turtlebot.get_rpy()  # robot base Euler angles [roll, pitch, yaw]
    np.random.seed(0)  # make the random goal sampling reproducible
    # Pre-trained CVAE encoder/decoder halves (keras SavedModel directories).
    encoder = keras.models.load_model('./CVAE_encoder')
    decoder = keras.models.load_model('./CVAE_decoder')
    for j in range(30):
        # Three random intermediate goals in the living room; end back at start.
        goal1 = scene.get_random_point_by_room_type('living_room')[1]
        goal2 = scene.get_random_point_by_room_type('living_room')[1]
        goal3 = scene.get_random_point_by_room_type('living_room')[1]
        goal4 = init_pos
        # "Entire" A* shortest paths between consecutive goals (floor 0).
        path1 = scene.get_shortest_path(0, init_pos[0:2], goal1[0:2], entire_path=True)[0]
        path2 = scene.get_shortest_path(0, goal1[0:2], goal2[0:2], entire_path=True)[0]
        path3 = scene.get_shortest_path(0, goal2[0:2], goal3[0:2], entire_path=True)[0]
        path4 = scene.get_shortest_path(0, goal3[0:2], goal4[0:2], entire_path=True)[0]
        # Chain the segments, dropping each segment's duplicated first node.
        rnd_path = np.append(path1, path2[1:], axis=0)
        rnd_path = np.append(rnd_path, path3[1:], axis=0)
        rnd_path = np.append(rnd_path, path4[1:], axis=0)
        # Fit a bezier curve through the A* waypoints and sample 2000 points.
        path_nodes = np.asfortranarray(
            [rnd_path[:, 0].tolist(), rnd_path[:, 1].tolist()])
        smt_crv = bezier.Curve.from_nodes(path_nodes)
        s_vals = np.linspace(0, 1, 2000)
        smt_path = smt_crv.evaluate_multi(s_vals)  # row 0 = x samples, row 1 = y samples
        # Orient the robot toward the first path segment before starting.
        delta_pos = smt_path[:, 1] - smt_path[:, 0]
        delta_yaw = np.arctan2(delta_pos[1], delta_pos[0])
        delta_qua = e2q(init_or[0], init_or[1], delta_yaw)  # Euler -> quaternion
        turtlebot.set_position_orientation(
            [smt_path[0, 0], smt_path[1, 0], init_pos[2]], delta_qua)
        episode_path = os.getcwd() + '/episodes/episode_' + str(j).zfill(2)  # path of the new episode folder
        #os.makedirs(episode_path)  # creating a new folder for the episode
        gtrth_pth_str = episode_path + '/episode_' + str(j).zfill(2) + 'ground_truth_path.csv'  # ground-truth path output file
        #np.savetxt(gtrth_pth_str, np.atleast_2d(smt_path).T, delimiter=",")
        for i in range(len(smt_path[0]) - 1):
            with Profiler('Simulator step'):  # iGibson simulation loop context manager
                rgb_camera = np.array(s.renderer.render_robot_cameras(modes='rgb'))
                robot_img = rgb_camera[0, :, :, 0]  # channel 0 of the first camera
                # Round-trip the frame through the CVAE and save the reconstruction.
                encoded = encoder.predict(np.expand_dims(robot_img, axis=0))
                decoded = decoder.predict(encoded[0])
                robot_img = decoded[0, :, :, 0]
                # NOTE(review): if the decoder outputs floats in [0, 1], this cast
                # truncates to zeros -- confirm the decoder's output range/scale.
                robot_img = robot_img.astype('uint8')  # format accepted by PIL.Image.fromarray
                robot_img = Image.fromarray(robot_img)
                img_str = './iGib_recons' + '/episode_' + str(j).zfill(2) + '_frame_' + str(i).zfill(5) + '.jpeg'
                robot_img.save(img_str)
                #lidar = s.renderer.get_lidar_all()  # 360-degree lidar probe
                # Teleport toward the next bezier sample, facing along the segment.
                delta_pos = smt_path[:, i + 1] - smt_path[:, i]
                delta_yaw = np.arctan2(delta_pos[1], delta_pos[0])
                delta_qua = e2q(init_or[0], init_or[1], delta_yaw)
                turtlebot.set_position_orientation(
                    [smt_path[0, i], smt_path[1, i], init_pos[2]], delta_qua)
                velcmd1, velcmd2 = PID_path_track(delta_pos, delta_qua)
                turtlebot.set_motor_velocity([velcmd1, velcmd2])
                s.step()  # advance one simulation step
    s.disconnect()
def main():
    """Interactive inverse-kinematics demo for the Fetch arm.

    Loads Fetch in an empty scene, then runs an endless GUI loop in which the
    arrow keys and z/x move a spherical target marker and iterative IK
    repositions the 8-DOF arm (torso lift + 7 arm joints) to track it.
    """
    config = parse_config('../configs/fetch_p2p_nav.yaml')
    s = Simulator(mode='gui', physics_timestep=1 / 240.0)
    scene = EmptyScene()
    s.import_scene(scene)
    fetch = Fetch(config)
    s.import_robot(fetch)
    robot_id = fetch.robot_ids[0]
    # The joints IK solves for: torso lift plus the 7 arm joints.
    arm_joints = joints_from_names(robot_id, [
        'torso_lift_joint', 'shoulder_pan_joint', 'shoulder_lift_joint',
        'upperarm_roll_joint', 'elbow_flex_joint', 'forearm_roll_joint',
        'wrist_flex_joint', 'wrist_roll_joint'
    ])
    #finger_joints = joints_from_names(robot_id, ['l_gripper_finger_joint', 'r_gripper_finger_joint'])
    fetch.robot_body.reset_position([0, 0, 0])
    fetch.robot_body.reset_orientation([0, 0, 1, 0])
    # Initial IK target = the gripper's current position.
    x, y, z = fetch.get_end_effector_position()
    #set_joint_positions(robot_id, finger_joints, [0.04,0.04])
    print(x, y, z)
    # Small sphere that visualizes the IK target in the GUI.
    visual_marker = p.createVisualShape(p.GEOM_SPHERE, radius=0.02)
    marker = p.createMultiBody(baseVisualShapeIndex=visual_marker)
    #max_limits = [0,0] + get_max_limits(robot_id, arm_joints) + [0.05,0.05]
    #min_limits = [0,0] + get_min_limits(robot_id, arm_joints) + [0,0]
    #rest_position = [0,0] + list(get_joint_positions(robot_id, arm_joints)) + [0.04,0.04]
    # Limit arrays are padded with two leading zeros -- presumably entries for
    # the two movable joints preceding the arm (TODO confirm joint ordering);
    # this matches the jointPoses[2:10] slicing below.
    max_limits = [0, 0] + get_max_limits(robot_id, arm_joints)
    min_limits = [0, 0] + get_min_limits(robot_id, arm_joints)
    rest_position = [0, 0] + list(get_joint_positions(robot_id, arm_joints))
    joint_range = list(np.array(max_limits) - np.array(min_limits))
    joint_range = [item + 1 for item in joint_range]
    jd = [0.1 for item in joint_range]  # per-joint damping passed to the IK solver
    print(max_limits)
    print(min_limits)

    def accurateCalculateInverseKinematics(robotid, endEffectorId, targetPos,
                                           threshold, maxIter):
        # Iterative IK: restart from a random arm configuration, then repeatedly
        # solve and apply IK until the end effector is within `threshold` of
        # the target or `maxIter` iterations have run.
        sample_fn = get_sample_fn(robotid, arm_joints)
        set_joint_positions(robotid, arm_joints, sample_fn())
        it = 0
        while it < maxIter:
            jointPoses = p.calculateInverseKinematics(robotid,
                                                      endEffectorId,
                                                      targetPos,
                                                      lowerLimits=min_limits,
                                                      upperLimits=max_limits,
                                                      jointRanges=joint_range,
                                                      restPoses=rest_position,
                                                      jointDamping=jd)
            # Slots [2:10] of the solution are applied to arm_joints.
            set_joint_positions(robotid, arm_joints, jointPoses[2:10])
            ls = p.getLinkState(robotid, endEffectorId)
            newPos = ls[4]  # index 4: world position of the link's URDF frame
            dist = np.linalg.norm(np.array(targetPos) - np.array(newPos))
            if dist < threshold:
                break
            it += 1
        print("Num iter: " + str(it) + ", threshold: " + str(dist))
        return jointPoses

    while True:
        with Profiler("Simulation step"):
            # Keep the base pinned at the origin every frame.
            fetch.robot_body.reset_position([0, 0, 0])
            fetch.robot_body.reset_orientation([0, 0, 1, 0])
            threshold = 0.01
            maxIter = 100
            joint_pos = accurateCalculateInverseKinematics(
                robot_id, fetch.parts['gripper_link'].body_part_index,
                [x, y, z], threshold, maxIter)[2:10]
            #set_joint_positions(robot_id, finger_joints, [0.04, 0.04])
            s.step()
            # Keyboard control of the IK target position (1 cm per event).
            keys = p.getKeyboardEvents()
            for k, v in keys.items():
                if (k == p.B3G_RIGHT_ARROW and (v & p.KEY_IS_DOWN)):
                    x += 0.01
                if (k == p.B3G_LEFT_ARROW and (v & p.KEY_IS_DOWN)):
                    x -= 0.01
                if (k == p.B3G_UP_ARROW and (v & p.KEY_IS_DOWN)):
                    y += 0.01
                if (k == p.B3G_DOWN_ARROW and (v & p.KEY_IS_DOWN)):
                    y -= 0.01
                if (k == ord('z') and (v & p.KEY_IS_DOWN)):
                    z += 0.01
                if (k == ord('x') and (v & p.KEY_IS_DOWN)):
                    z -= 0.01
            # Move the target marker to the (possibly updated) target.
            p.resetBasePositionAndOrientation(marker, [x, y, z], [0, 0, 0, 1])
    s.disconnect()  # NOTE(review): unreachable -- the loop above never exits
def benchmark(render_to_tensor=False, resolution=512):
    """Benchmark static-scene stepping and per-modality rendering FPS.

    Runs four timed phases of n_frame frames each (physics+rgb, rgb-only,
    3d-only, normal-only), prints each FPS figure, and saves a bar chart
    of the results as a PDF.

    :param render_to_tensor: render directly into a GPU tensor if True
    :param resolution: square camera resolution in pixels
    """
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    s = Simulator(mode='headless',
                  image_width=resolution,
                  image_height=resolution,
                  render_to_tensor=render_to_tensor)
    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    n_frame = 500

    # Phase 1: physics stepping + rgb rendering combined.
    start = time.time()
    for i in range(n_frame):
        turtlebot.apply_action([0.1, 0.1])
        s.step()
        rgb = s.renderer.render_robot_cameras(modes=('rgb'))
    physics_render_elapsed = time.time() - start
    physics_render_fps = n_frame / physics_render_elapsed
    print(
        "physics simulation + rendering rgb, resolution {}, render_to_tensor {}: {} fps"
        .format(resolution, render_to_tensor, physics_render_fps))

    # Phase 2: rgb rendering only.
    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('rgb'))
    render_elapsed = time.time() - start
    rgb_fps = n_frame / render_elapsed
    print("Rendering rgb, resolution {}, render_to_tensor {}: {} fps".format(
        resolution, render_to_tensor, n_frame / render_elapsed))

    # Phase 3: point-cloud (3d) rendering only.
    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('3d'))
    render_elapsed = time.time() - start
    pc_fps = n_frame / render_elapsed
    print("Rendering 3d, resolution {}, render_to_tensor {}: {} fps".format(
        resolution, render_to_tensor, n_frame / render_elapsed))

    # Phase 4: surface-normal rendering only.
    # BUG FIX: the original computed normal_fps from the *previous* phase's
    # render_elapsed (the two statements were swapped), so the reported and
    # plotted "normal" FPS was actually the 3d-phase figure.
    start = time.time()
    for i in range(n_frame):
        rgb = s.renderer.render_robot_cameras(modes=('normal'))
    render_elapsed = time.time() - start
    normal_fps = n_frame / render_elapsed
    print(
        "Rendering normal, resolution {}, render_to_tensor {}: {} fps".format(
            resolution, render_to_tensor, n_frame / render_elapsed))

    plt.figure()
    plt.bar([0, 1, 2, 3],
            [physics_render_fps, rgb_fps, pc_fps, normal_fps],
            color='g')
    plt.xticks([0, 1, 2, 3], ['sim+render', 'rgb', '3d', 'normal'])
    plt.ylabel('fps')
    plt.xlabel('mode')
    plt.title('Static Scene Benchmark, resolution {}, to_tensor {}'.format(
        resolution, render_to_tensor))
    plt.savefig('static_scene_benchmark_res{}_tensor{}.pdf'.format(
        resolution, render_to_tensor))
    s.disconnect()
class ToyEnvInt(object):
    """Interactive-scene toy environment with a PBR renderer and five YCB boxes."""

    def __init__(self, robot='turtlebot', scene='Rs_int'):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(
            gibson2.ig_dataset_path, 'scenes', 'Rs_int', 'layout',
            'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')
        scene = InteractiveIndoorScene(scene,
                                       texture_randomization=False,
                                       object_randomization=False)
        scene.open_all_doors()
        settings = MeshRendererSettings(
            env_texture_filename=hdr_texture,
            env_texture_filename2=hdr_texture2,
            env_texture_filename3=background_texture,
            light_modulation_map_filename=light_modulation_map_filename,
            enable_shadow=True,
            msaa=True,
            light_dimming_factor=1.0,
            optimized=True)
        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        self.s.import_ig_scene(scene)
        self.robot = Turtlebot(config) if robot == 'turtlebot' else Fetch(config)
        self.s.import_robot(self.robot)
        # Scatter five YCB cracker boxes at random positions in [0, 2)^3.
        for _ in range(5):
            box = YCBObject('003_cracker_box')
            self.s.import_object(box)
            box.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)

    def step(self, a):
        """Map the 2-dim command onto the robot's action vector, step, return RGB."""
        action = np.zeros(self.robot.action_space.shape)
        if isinstance(self.robot, Turtlebot):
            action[0], action[1] = a[0], a[1]
        else:
            # Non-Turtlebot robots take the two command components swapped.
            action[1], action[0] = a[0], a[1]
        self.robot.apply_action(action)
        self.s.step()
        return self.s.renderer.render_robot_cameras(modes=('rgb'))[0]

    def close(self):
        """Disconnect the underlying simulator."""
        self.s.disconnect()
# NOTE(review): top-level script fragment -- `scene` and `s` (the Simulator)
# must already be defined earlier in this file.
np.random.seed(0)  # reproducible sampling of living-room points
for _ in range(10):
    pt = scene.get_random_point_by_room_type('living_room')[1]
    print('random point in living_room', pt)
##############################################adding a fetch robot###################################################
config = parse_config(
    os.path.join(gibson2.example_config_path, 'fetch_motion_planning.yaml'))
# NOTE(review): `settings` is built but never passed to a Simulator in this fragment.
settings = MeshRendererSettings(enable_shadow=False, msaa=False)
#turtlebot = Turtlebot(config)
#robot=turtlebot
#position=[1,1,0]
fetchbot = Fetch(config)
s.import_robot(fetchbot)
fetchbot.set_position([0, 1, 0])
##################################################################################################################
##tried changing robot control to position and torque but failed,from the documentation it seems turtle bot only##
############################################supports joint velocity###############################################
##################################################################################################################
#robot.load()
#robot.set_position(position)
#robot.robot_specific_reset()
#robot.keep_still()
#print(robot.action_dim)
#print(robot.control)
############################################adding keyboard functionality#####################################
#print(p.getKeyboardEvents())
##############################################start the simulation############################################
class BaseEnv(gym.Env):
    '''
    Base Env class, follows OpenAI Gym interface
    Handles loading scene and robot
    Functions like reset and step are not implemented
    '''

    def __init__(self,
                 config_file,
                 scene_id=None,
                 mode='headless',
                 action_timestep=1 / 10.0,
                 physics_timestep=1 / 240.0,
                 render_to_tensor=False,
                 device_idx=0):
        """
        :param config_file: config_file path
        :param scene_id: override scene_id in config file
        :param mode: headless or gui mode
        :param action_timestep: environment executes action per action_timestep second
        :param physics_timestep: physics timestep for pybullet
        :param render_to_tensor: whether to render directly into a GPU tensor
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if scene_id is not None:
            # Caller-supplied scene overrides the YAML value.
            self.config['scene_id'] = scene_id
        self.mode = mode
        self.action_timestep = action_timestep
        self.physics_timestep = physics_timestep
        # Randomization frequencies; None disables the corresponding feature.
        self.texture_randomization_freq = self.config.get(
            'texture_randomization_freq', None)
        self.object_randomization_freq = self.config.get(
            'object_randomization_freq', None)
        self.object_randomization_idx = 0
        self.num_object_randomization_idx = 10
        enable_shadow = self.config.get('enable_shadow', False)
        enable_pbr = self.config.get('enable_pbr', True)
        texture_scale = self.config.get('texture_scale', 1.0)
        settings = MeshRendererSettings(enable_shadow=enable_shadow,
                                        enable_pbr=enable_pbr,
                                        msaa=False,
                                        texture_scale=texture_scale)
        self.simulator = Simulator(
            mode=mode,
            physics_timestep=physics_timestep,
            render_timestep=action_timestep,
            image_width=self.config.get('image_width', 128),
            image_height=self.config.get('image_height', 128),
            vertical_fov=self.config.get('vertical_fov', 90),
            device_idx=device_idx,
            render_to_tensor=render_to_tensor,
            rendering_settings=settings)
        self.load()

    def reload(self, config_file):
        """
        Reload another config file
        This allows one to change the configuration on the fly

        :param config_file: new config file path
        """
        self.config = parse_config(config_file)
        self.simulator.reload()
        self.load()

    def reload_model(self, scene_id):
        """
        Reload another scene model
        This allows one to change the scene on the fly

        :param scene_id: new scene_id
        """
        self.config['scene_id'] = scene_id
        self.simulator.reload()
        self.load()

    def reload_model_object_randomization(self):
        """
        Reload the same model, with the next object randomization random seed
        """
        if self.object_randomization_freq is None:
            return
        # Cycle through the available object randomization indices.
        self.object_randomization_idx = (self.object_randomization_idx + 1) % \
            (self.num_object_randomization_idx)
        self.simulator.reload()
        self.load()

    def get_next_scene_random_seed(self):
        """
        Get the next scene random seed
        """
        if self.object_randomization_freq is None:
            return None
        # NOTE(review): scene_random_seeds / scene_random_seed_idx are not
        # assigned anywhere in this class -- confirm a subclass sets them.
        return self.scene_random_seeds[self.scene_random_seed_idx]

    def load(self):
        """
        Load the scene and robot
        """
        # Scene selection is driven by the 'scene' key in the config.
        if self.config['scene'] == 'empty':
            scene = EmptyScene()
            self.simulator.import_scene(
                scene, load_texture=self.config.get('load_texture', True))
        elif self.config['scene'] == 'stadium':
            scene = StadiumScene()
            self.simulator.import_scene(
                scene, load_texture=self.config.get('load_texture', True))
        elif self.config['scene'] == 'gibson':
            scene = StaticIndoorScene(
                self.config['scene_id'],
                waypoint_resolution=self.config.get('waypoint_resolution', 0.2),
                num_waypoints=self.config.get('num_waypoints', 10),
                build_graph=self.config.get('build_graph', False),
                trav_map_resolution=self.config.get('trav_map_resolution', 0.1),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                pybullet_load_texture=self.config.get('pybullet_load_texture', False),
            )
            self.simulator.import_scene(
                scene, load_texture=self.config.get('load_texture', True))
        elif self.config['scene'] == 'igibson':
            scene = InteractiveIndoorScene(
                self.config['scene_id'],
                waypoint_resolution=self.config.get('waypoint_resolution', 0.2),
                num_waypoints=self.config.get('num_waypoints', 10),
                build_graph=self.config.get('build_graph', False),
                trav_map_resolution=self.config.get('trav_map_resolution', 0.1),
                trav_map_erosion=self.config.get('trav_map_erosion', 2),
                trav_map_type=self.config.get('trav_map_type', 'with_obj'),
                pybullet_load_texture=self.config.get('pybullet_load_texture', False),
                texture_randomization=self.texture_randomization_freq is not None,
                object_randomization=self.object_randomization_freq is not None,
                object_randomization_idx=self.object_randomization_idx,
                should_open_all_doors=self.config.get('should_open_all_doors', False),
                load_object_categories=self.config.get(
                    'load_object_categories', None),
                load_room_types=self.config.get('load_room_types', None),
                load_room_instances=self.config.get('load_room_instances', None),
            )
            # TODO: Unify the function import_scene and take out of the if-else clauses
            first_n = self.config.get('_set_first_n_objects', -1)
            if first_n != -1:
                scene._set_first_n_objects(first_n)
            self.simulator.import_ig_scene(scene)

        # Robot selection is driven by the 'robot' key in the config.
        if self.config['robot'] == 'Turtlebot':
            robot = Turtlebot(self.config)
        elif self.config['robot'] == 'Husky':
            robot = Husky(self.config)
        elif self.config['robot'] == 'Ant':
            robot = Ant(self.config)
        elif self.config['robot'] == 'Humanoid':
            robot = Humanoid(self.config)
        elif self.config['robot'] == 'JR2':
            robot = JR2(self.config)
        elif self.config['robot'] == 'JR2_Kinova':
            robot = JR2_Kinova(self.config)
        elif self.config['robot'] == 'Freight':
            robot = Freight(self.config)
        elif self.config['robot'] == 'Fetch':
            robot = Fetch(self.config)
        elif self.config['robot'] == 'Locobot':
            robot = Locobot(self.config)
        else:
            raise Exception('unknown robot type: {}'.format(
                self.config['robot']))

        self.scene = scene
        self.robots = [robot]
        for robot in self.robots:
            self.simulator.import_robot(robot)

    def clean(self):
        """
        Clean up
        """
        if self.simulator is not None:
            self.simulator.disconnect()

    def close(self):
        """
        Synonymous function with clean
        """
        self.clean()

    def simulator_step(self):
        """
        Step the simulation.
        This is different from environment step that returns the next
        observation, reward, done, info.
        """
        self.simulator.step()

    def step(self, action):
        """
        Overwritten by subclasses
        """
        # NOTE(review): returns (not raises) NotImplementedError -- callers
        # expecting an exception will not get one.
        return NotImplementedError()

    def reset(self):
        """
        Overwritten by subclasses
        """
        # NOTE(review): returns (not raises) NotImplementedError -- see step().
        return NotImplementedError()

    def set_mode(self, mode):
        """
        Set simulator mode
        """
        self.simulator.mode = mode
def main():
    """Teleport a Turtlebot along an A* shortest path in Benevolence_1_int.

    Samples a random living-room goal, computes the traversability-graph
    shortest path from the robot's start position, then replays the path
    waypoint by waypoint while probing camera and lidar data.
    """
    config = parse_config(
        os.path.join(gibson2.example_config_path,
                     'turtlebot_demo.yaml'))  # robot configuration file path
    s = Simulator(mode='headless')
    # Benevolence floor 1, interactive version -- the most memory-friendly
    # scene in the iGibson dataset.
    scene = InteractiveIndoorScene(
        'Benevolence_1_int',
        build_graph=True,  # connectivity graph over the traversability map
        waypoint_resolution=0.1,  # 10 cm between consecutive waypoints
        pybullet_load_texture=True)  # textures/materials needed for interaction
    s.import_ig_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)
    init_pos = turtlebot.get_position()  # robot base [x, y, z] in meters
    init_or = turtlebot.get_rpy()  # robot base Euler angles [roll, pitch, yaw]

    np.random.seed(0)  # make the goal sampling reproducible
    goal = scene.get_random_point_by_room_type('living_room')[1]
    # "Entire" A* path between start and goal on floor 0; one (x, y) waypoint per row.
    rnd_path = scene.get_shortest_path(
        0, init_pos[0:2], goal[0:2], entire_path=True)[0]

    # BUG FIX: the original loop body indexed an undefined `smt_path`
    # (copied from the bezier-smoothing variant of this demo), raising
    # NameError on the first iteration. Index the A* waypoint array
    # `rnd_path` (shape (N, 2)) instead.
    for i in range(len(rnd_path) - 1):
        with Profiler('Simulator step'):  # iGibson simulation loop context manager
            rgb_camera = np.array(
                s.renderer.render_robot_cameras(modes='rgb'))
            # NOTE(review): channel index 3 of an RGBA probe is alpha, not
            # color -- confirm the intended channel.
            plt.imshow(rgb_camera[0, :, :, 3])
            lidar = s.renderer.get_lidar_all()  # 360-degree lidar probe
            # Face the robot along the segment to the next waypoint.
            delta_pos = rnd_path[i + 1] - rnd_path[i]
            delta_yaw = np.arctan2(delta_pos[1], delta_pos[0])
            delta_qua = e2q(init_or[0], init_or[1],
                            delta_yaw)  # Euler -> quaternion
            turtlebot.set_position_orientation(
                [rnd_path[i][0], rnd_path[i][1], init_pos[2]], delta_qua)
            s.step()  # advance one simulation step
    s.disconnect()
def show_action_sensor_space():
    """Import one robot of each type, print its joint/state dims, and step."""
    sim = Simulator(mode='headless')
    sim.import_scene(StadiumScene())

    # (robot class, spawn position) in the original import order.
    placements = [
        (Turtlebot, [0, 1, 0.5]),
        (Ant, [0, 2, 0.5]),
        (Humanoid, [0, 3, 2]),
        (JR2, [0, 4, 0.5]),
        (JR2_Kinova, [0, 5, 0.5]),
        (Husky, [0, 6, 0.5]),
        (Quadrotor, [0, 7, 0.5]),
    ]
    for robot_cls, position in placements:
        robot = robot_cls(config)
        sim.import_robot(robot)
        robot.set_position(position)

    for robot in sim.robots:
        print(type(robot), len(robot.ordered_joints), robot.calc_state().shape)

    for _ in range(100):
        sim.step()
    sim.disconnect()
def benchmark_scene(scene_name, optimized=False, import_robot=True):
    """Benchmark physics and rendering FPS for one interactive scene.

    Steps the scene 2000 times (optionally with a randomly-acting Turtlebot),
    records per-step physics/render/step frequencies and awake body links,
    and saves the distributions and time series as a six-panel PDF.

    :param scene_name: InteractiveIndoorScene name to load
    :param optimized: use the optimized renderer path
    :param import_robot: also import a Turtlebot and render its cameras
    """
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    assets_version = get_ig_assets_version()
    print('assets_version', assets_version)
    scene = InteractiveIndoorScene(scene_name,
                                   texture_randomization=False,
                                   object_randomization=False)
    settings = MeshRendererSettings(msaa=False,
                                    enable_shadow=False,
                                    optimized=optimized)
    s = Simulator(
        mode='headless',
        image_width=512,
        image_height=512,
        device_idx=0,
        rendering_settings=settings,
    )
    s.import_ig_scene(scene)
    if import_robot:
        turtlebot = Turtlebot(config)
        s.import_robot(turtlebot)
    s.renderer.use_pbr(use_pbr=True, use_pbr_mapping=True)
    # Per-step measurements collected over the benchmark run.
    fps = []
    physics_fps = []
    render_fps = []
    obj_awake = []
    for i in range(2000):
        # if i % 100 == 0:
        #     scene.randomize_texture()
        start = time.time()
        s.step()
        if import_robot:
            # apply random actions
            turtlebot.apply_action(turtlebot.action_space.sample())
        physics_end = time.time()
        # Robot-camera rendering when a robot is present, full render otherwise.
        if import_robot:
            _ = s.renderer.render_robot_cameras(modes=('rgb'))
        else:
            _ = s.renderer.render(modes=('rgb'))
        end = time.time()
        #print("Elapsed time: ", end - start)
        print("Render Frequency: ", 1 / (end - physics_end))
        print("Physics Frequency: ", 1 / (physics_end - start))
        print("Step Frequency: ", 1 / (end - start))
        fps.append(1 / (end - start))
        physics_fps.append(1 / (physics_end - start))
        render_fps.append(1 / (end - physics_end))
        obj_awake.append(s.body_links_awake)
    s.disconnect()
    # Six stacked panels: three histograms, then three time-series plots.
    plt.figure(figsize=(7, 25))
    ax = plt.subplot(6, 1, 1)
    plt.hist(render_fps)
    ax.set_xlabel('Render fps')
    ax.set_title(
        'Scene {} version {}\noptimized {} num_obj {}\n import_robot {}'.
        format(scene_name, assets_version, optimized,
               scene.get_num_objects(), import_robot))
    ax = plt.subplot(6, 1, 2)
    plt.hist(physics_fps)
    ax.set_xlabel('Physics fps')
    ax = plt.subplot(6, 1, 3)
    plt.hist(fps)
    ax.set_xlabel('Step fps')
    ax = plt.subplot(6, 1, 4)
    plt.plot(render_fps)
    ax.set_xlabel('Render fps with time')
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 5)
    plt.plot(physics_fps)
    ax.set_xlabel('Physics fps with time, converge to {}'.format(
        np.mean(physics_fps[-100:])))
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 6)
    plt.plot(obj_awake)
    ax.set_xlabel('Num object links awake, converge to {}'.format(
        np.mean(obj_awake[-100:])))
    plt.savefig('scene_benchmark_{}_o_{}_r_{}.pdf'.format(
        scene_name, optimized, import_robot))