# NOTE: the import block below is reconstructed for these snippets; module
# paths follow the gibson2 (iGibson) package layout and may need adjusting.
import json
import os
import subprocess
import sys
from shutil import which

import numpy as np
import pybullet as p
from IPython import embed
from PIL import Image
from transforms3d.euler import euler2quat

import gibson2
from gibson2.external.pybullet_tools.utils import get_center_extent, stable_z_on_aabb
from gibson2.objects.articulated_object import ArticulatedObject, RBOObject
from gibson2.objects.cube import Cube
from gibson2.objects.visual_marker import VisualMarker
from gibson2.objects.ycb_object import YCBObject
from gibson2.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from gibson2.render.profiler import Profiler
from gibson2.robots.fetch_robot import Fetch
from gibson2.robots.turtlebot_robot import Turtlebot
from gibson2.scenes.empty_scene import EmptyScene
from gibson2.scenes.gibson_indoor_scene import StaticIndoorScene
from gibson2.scenes.igibson_indoor_scene import InteractiveIndoorScene
from gibson2.scenes.stadium_scene import StadiumScene
from gibson2.simulator import Simulator
from gibson2.utils.assets_utils import download_assets
from gibson2.utils.utils import parse_config, quatToXYZW


def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings(enable_shadow=False, msaa=False)
    s = Simulator(mode='gui', image_width=256, image_height=256,
                  rendering_settings=settings)

    scene = StaticIndoorScene('Rs', build_graph=True, pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for _ in range(10):
        obj = YCBObject('003_cracker_box')
        s.import_object(obj)
        obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3),
                                     [0, 0, 0, 1])
    print(s.renderer.instances)

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, 0.1])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb',))
    s.disconnect()
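
# Assumed entry point for this snippet (not shown in the original):
if __name__ == '__main__':
    main()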
def test_import_object():
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)
    obj = YCBObject('003_cracker_box')
    s.import_object(obj)
    objs = s.objects
    s.disconnect()
    assert objs == list(range(5))
def test_import_many_object():
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)
    for i in range(30):
        obj = YCBObject('003_cracker_box')
        s.import_object(obj)
    for j in range(1000):
        s.step()
    last_obj = s.objects[-1]
    s.disconnect()
    assert last_obj == 33
def test_simulator():
    download_assets()
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)

    obj = YCBObject('006_mustard_bottle')
    for i in range(10):
        s.import_object(obj)

    obj = YCBObject('002_master_chef_can')
    for i in range(10):
        s.import_object(obj)

    for i in range(1000):
        s.step()
    s.disconnect()
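
# The test functions above follow pytest conventions; assuming they live in a
# file named e.g. test_simulator.py (the name is hypothetical), they can be
# run with:
#   pytest test_simulator.py -k "import_object or import_many_object or simulator"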
def debug_renderer_scaling():
    s = Simulator(mode='gui', image_width=512, image_height=512,
                  physics_timestep=1 / float(100))
    scene = EmptyScene()
    s.import_scene(scene, render_floor_plane=True)
    urdf_path = '/cvgl2/u/chengshu/ig_dataset_v5/objects/window/103070/103070_avg_size_0.urdf'
    obj = ArticulatedObject(urdf_path)
    s.import_object(obj)
    obj.set_position([0, 0, 0])
    embed()  # drop into an IPython shell for interactive inspection
    z = stable_z_on_aabb(obj.body_id, [[0, 0, 0], [0, 0, 0]])
    obj.set_position([0, 0, z])
    embed()
    for _ in range(100000000000):  # step effectively forever
        s.step()
class ToyEnv(object):
    def __init__(self):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        # texture paths kept for parity with the interactive version below
        # (unused here, since PBR is disabled)
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(
            gibson2.ig_dataset_path, 'scenes', 'Rs_int', 'layout',
            'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')
        settings = MeshRendererSettings(enable_shadow=False, enable_pbr=False)

        self.s = Simulator(mode='headless', image_width=400, image_height=400,
                           rendering_settings=settings)
        scene = StaticIndoorScene('Rs')
        self.s.import_scene(scene)
        # self.s.import_ig_scene(scene)
        self.robot = Turtlebot(config)
        self.s.import_robot(self.robot)

        for _ in range(5):
            obj = YCBObject('003_cracker_box')
            self.s.import_object(obj)
            obj.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)

    def step(self, a):
        self.robot.apply_action(a)
        self.s.step()
        frame = self.s.renderer.render_robot_cameras(modes=('rgb',))[0]
        return frame

    def close(self):
        self.s.disconnect()
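
# A minimal driver for ToyEnv (hypothetical usage sketch; the constant wheel
# command [0.1, 0.1] is illustrative, not from the original file):
if __name__ == '__main__':
    env = ToyEnv()
    try:
        for _ in range(100):
            frame = env.step([0.1, 0.1])  # returns one RGB frame per step
            print(frame.shape)
    finally:
        env.close()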
def test_import_box():
    s = Simulator(mode='headless')
    scene = StadiumScene()
    s.import_scene(scene)
    print(s.objects)

    # wall = [pos, dim]
    wall = [[[0, 7, 1.01], [10, 0.2, 1]],
            [[0, -7, 1.01], [6.89, 0.1, 1]],
            [[7, -1.5, 1.01], [0.1, 5.5, 1]],
            [[-7, -1, 1.01], [0.1, 6, 1]],
            [[-8.55, 5, 1.01], [1.44, 0.1, 1]],
            [[8.55, 4, 1.01], [1.44, 0.1, 1]]]
    obstacles = [[[-0.5, 2, 1.01], [3.5, 0.1, 1]],
                 [[4.5, -1, 1.01], [1.5, 0.1, 1]],
                 [[-4, -2, 1.01], [0.1, 2, 1]],
                 [[2.5, -4, 1.01], [1.5, 0.1, 1]]]

    for pos, dim in wall:
        obj = Cube(pos, dim)
        s.import_object(obj)
    for pos, dim in obstacles:
        obj = Cube(pos, dim)
        s.import_object(obj)

    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    turtlebot1 = Turtlebot(config)
    turtlebot2 = Turtlebot(config)
    s.import_robot(turtlebot1)
    s.import_robot(turtlebot2)
    turtlebot1.set_position([6., -6., 0.])
    turtlebot2.set_position([-3., 4., 0.])

    for i in range(100):
        s.step()
    s.disconnect()
def test_import_rbo_object():
    s = Simulator(mode='headless')
    try:
        scene = StadiumScene()
        s.import_scene(scene)
        obj = RBOObject('book')
        s.import_object(obj)
        obj2 = RBOObject('microwave')
        s.import_object(obj2)
        obj.set_position([0, 0, 2])
        obj2.set_position([0, 1, 2])
        obj3 = ArticulatedObject(
            os.path.join(gibson2.assets_path, 'models', 'scene_components',
                         'door.urdf'))
        s.import_object(obj3)
        for i in range(100):
            s.step()
    finally:
        s.disconnect()
class ToyEnvInt(object):
    def __init__(self, robot='turtlebot', scene='Rs_int'):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(
            gibson2.ig_dataset_path, 'scenes', 'Rs_int', 'layout',
            'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')

        scene = InteractiveIndoorScene(scene,
                                       texture_randomization=False,
                                       object_randomization=False)
        # scene._set_first_n_objects(5)
        scene.open_all_doors()

        settings = MeshRendererSettings(
            env_texture_filename=hdr_texture,
            env_texture_filename2=hdr_texture2,
            env_texture_filename3=background_texture,
            light_modulation_map_filename=light_modulation_map_filename,
            enable_shadow=True,
            msaa=True,
            light_dimming_factor=1.0,
            optimized=True)
        self.s = Simulator(mode='headless', image_width=400, image_height=400,
                           rendering_settings=settings)
        self.s.import_ig_scene(scene)

        if robot == 'turtlebot':
            self.robot = Turtlebot(config)
        else:
            self.robot = Fetch(config)
        self.s.import_robot(self.robot)

        for _ in range(5):
            obj = YCBObject('003_cracker_box')
            self.s.import_object(obj)
            obj.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)

    def step(self, a):
        action = np.zeros(self.robot.action_space.shape)
        if isinstance(self.robot, Turtlebot):
            action[0] = a[0]
            action[1] = a[1]
        else:
            # for Fetch, the two inputs are mapped in swapped order
            action[1] = a[0]
            action[0] = a[1]
        self.robot.apply_action(action)
        self.s.step()
        frame = self.s.renderer.render_robot_cameras(modes=('rgb',))[0]
        return frame

    def close(self):
        self.s.disconnect()
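
# Hypothetical usage sketch for ToyEnvInt: the two-element input is mapped
# onto the robot's full action space inside step().
if __name__ == '__main__':
    env = ToyEnvInt(robot='fetch', scene='Rs_int')
    try:
        for _ in range(100):
            frame = env.step([0.1, 0.1])
    finally:
        env.close()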
# This fragment assumes `s` (a Simulator), `scene` (an interactive scene), and
# the URDF paths (cabinet_0007, cabinet_0004, carpet) are defined earlier in
# the original script.

# obj1 = ArticulatedObject(filename=cabinet_0007)
# obj1.load()
# obj1.set_position([0, 0, 0.5])

# obj2 = ArticulatedObject(filename=cabinet_0004)
# obj2.load()
# obj2.set_position([0, 0, 2])

# obj3 = YCBObject('003_cracker_box')
# obj3.load()
# obj3.set_position_orientation([0, 0, 1.2], [0, 0, 0, 1])
# obj3.set_position([0, 0, 1.2])

obj4 = ArticulatedObject(filename=carpet)
s.import_object(obj4)
obj4.set_position([0, 1, 0])

np.random.seed(0)
for _ in range(10):
    pt = scene.get_random_point_by_room_type('living_room')[1]
    print('random point in living_room', pt)

############################## adding a Fetch robot ##############################
config = parse_config(
    os.path.join(gibson2.example_config_path, 'fetch_motion_planning.yaml'))
settings = MeshRendererSettings(enable_shadow=False, msaa=False)
# turtlebot = Turtlebot(config)
# robot = turtlebot
# position = [1, 1, 0]
def main():
    global _mouse_ix, _mouse_iy, down, view_direction

    model_path = sys.argv[1]
    print(model_path)
    model_id = os.path.basename(model_path)
    category = os.path.basename(os.path.dirname(model_path))

    hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes', 'background',
                               'photo_studio_01_2k.hdr')
    settings = MeshRendererSettings(env_texture_filename=hdr_texture,
                                    enable_shadow=True, msaa=True,
                                    light_dimming_factor=1.5)
    s = Simulator(mode='headless', image_width=1800, image_height=1200,
                  vertical_fov=70, rendering_settings=settings)

    s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])
    s.renderer.load_object('plane/plane_z_up_0.obj', scale=[3, 3, 3])
    s.renderer.add_instance(0)
    s.renderer.set_pose([0, 0, -1.5, 1, 0, 0.0, 0.0], -1)

    # load all visual meshes to compute the bounding box of the model;
    # load_obj_np is assumed to be defined/imported elsewhere in the original script
    v = []
    mesh_path = os.path.join(model_path, 'shape/visual')
    for fn in os.listdir(mesh_path):
        if fn.endswith('obj'):
            vertices, faces = load_obj_np(os.path.join(mesh_path, fn))
            v.append(vertices)
    v = np.vstack(v)
    print(v.shape)

    xlen = np.max(v[:, 0]) - np.min(v[:, 0])
    ylen = np.max(v[:, 1]) - np.min(v[:, 1])
    zlen = np.max(v[:, 2]) - np.min(v[:, 2])
    scale = 1.5 / max([xlen, ylen, zlen])
    center = np.mean(v, axis=0)
    centered_v = v - center
    center = (np.max(v, axis=0) + np.min(v, axis=0)) / 2.

    urdf_path = os.path.join(model_path, '{}.urdf'.format(model_id))
    print(urdf_path)
    obj = ArticulatedObject(filename=urdf_path, scale=scale)
    s.import_object(obj)
    obj.set_position(center)
    s.sync()
    print(s.renderer.visual_objects, s.renderer.instances)

    _mouse_ix, _mouse_iy = -1, -1
    down = False

    theta, r = 0, 1.5
    px = r * np.sin(theta)
    py = r * np.cos(theta)
    pz = 1
    camera_pose = np.array([px, py, pz])
    s.renderer.set_camera(camera_pose, [0, 0, 0], [0, 0, 1])

    num_views = 6
    save_dir = os.path.join(model_path, 'visualizations')
    os.makedirs(save_dir, exist_ok=True)  # make sure the output directory exists
    for i in range(num_views):
        theta += np.pi * 2 / (num_views + 1)
        # quaternion (x, y, z, w) for a rotation of theta about the z-axis
        obj.set_orientation([0., 0., np.sin(theta / 2), np.cos(theta / 2)])
        s.sync()
        with Profiler('Render'):
            frame = s.renderer.render(modes=('rgb',))
        img = Image.fromarray(
            (255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8))
        img.save(os.path.join(save_dir, '{:02d}.png'.format(i)))

    # %02d matches the zero-padded frame filenames written above
    cmd = ('ffmpeg -framerate 2 -i {s}/%02d.png -y -r 16 -c:v libx264 '
           '-pix_fmt yuvj420p {s}/{m}.mp4').format(s=save_dir, m=model_id)
    subprocess.call(cmd, shell=True)
    cmd = 'rm {}/??.png'.format(save_dir)
    subprocess.call(cmd, shell=True)
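
# Assumed invocation for the snippet above (it reads the model directory from
# sys.argv[1]; the script name is hypothetical):
#   python visualize_object.py /path/to/ig_dataset/objects/<category>/<model_id>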
def main():
    global _mouse_ix, _mouse_iy, down, view_direction

    args = parser.parse_args()
    model_path = args.input_dir
    print(model_path)
    model_id = os.path.basename(model_path)
    category = os.path.basename(os.path.dirname(model_path))

    hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes', 'background',
                               'probe_03.hdr')
    settings = MeshRendererSettings(env_texture_filename=hdr_texture,
                                    enable_shadow=True, msaa=True)
    s = Simulator(mode='headless', image_width=1800, image_height=1200,
                  vertical_fov=70, rendering_settings=settings)

    s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])
    s.renderer.load_object('plane/plane_z_up_0.obj', scale=[3, 3, 3])
    s.renderer.add_instance(0)
    s.renderer.set_pose([0, 0, -1.5, 1, 0, 0.0, 0.0], -1)

    ###########################
    # Get center and scale
    ###########################
    bbox_json = os.path.join(model_path, 'misc', 'metadata.json')
    with open(bbox_json, 'r') as fp:
        bbox_data = json.load(fp)
    scale = 1.5 / max(bbox_data['bbox_size'])
    center = -scale * np.array(bbox_data['base_link_offset'])

    urdf_path = os.path.join(model_path, '{}.urdf'.format(model_id))
    print(urdf_path)
    obj = ArticulatedObject(filename=urdf_path, scale=scale)
    s.import_object(obj)
    obj.set_position(center)
    s.sync()

    _mouse_ix, _mouse_iy = -1, -1
    down = False
    theta, r = 0, 1.5
    px = r * np.sin(theta)
    py = r * np.cos(theta)
    pz = 1
    camera_pose = np.array([px, py, pz])
    s.renderer.set_camera(camera_pose, [0, 0, 0], [0, 0, 1])

    num_views = 6
    save_dir = os.path.join(model_path, 'visualizations')
    os.makedirs(save_dir, exist_ok=True)
    for i in range(num_views):
        theta += np.pi * 2 / (num_views + 1)
        # quaternion (x, y, z, w) for a rotation of theta about the z-axis
        obj.set_orientation([0., 0., np.sin(theta / 2), np.cos(theta / 2)])
        s.sync()
        with Profiler('Render'):
            frame = s.renderer.render(modes=('rgb',))
        img = Image.fromarray(
            (255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8))
        img.save(os.path.join(save_dir, '{:02d}.png'.format(i)))

    if which('ffmpeg') is not None:
        # %02d matches the zero-padded frame filenames written above
        cmd = ('ffmpeg -framerate 2 -i {s}/%02d.png -y -r 16 -c:v libx264 '
               '-pix_fmt yuvj420p {s}/{m}.mp4').format(s=save_dir, m=model_id)
        subprocess.call(cmd, shell=True)
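
# The snippet above references a module-level `parser`; a minimal sketch of
# the assumed argparse setup (hypothetical, inferred from the args.input_dir
# usage), plus the assumed entry point:
import argparse
parser = argparse.ArgumentParser(
    description='Render turntable views of an object model')
parser.add_argument('--input_dir', type=str, required=True,
                    help='path to the object model directory')

if __name__ == '__main__':
    main()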
def render_physics_gifs(main_urdf_file_and_offset):
    step_per_sec = 100
    s = Simulator(mode='headless', image_width=512, image_height=512,
                  physics_timestep=1 / float(step_per_sec))
    root_dir = '/cvgl2/u/chengshu/ig_dataset_v5/objects'
    obj_count = 0
    for i, obj_class_dir in enumerate(sorted(os.listdir(root_dir))):
        obj_class = obj_class_dir
        # if obj_class not in SELECTED_CLASSES:
        #     continue
        obj_class_dir = os.path.join(root_dir, obj_class_dir)
        for obj_inst_dir in os.listdir(obj_class_dir):
            # if obj_inst_dir != '14402':
            #     continue
            imgs = []
            scene = EmptyScene()
            s.import_scene(scene, render_floor_plane=True)
            obj_inst_name = obj_inst_dir
            # urdf_path = obj_inst_name + '.urdf'
            obj_inst_dir = os.path.join(obj_class_dir, obj_inst_dir)
            urdf_path, offset = main_urdf_file_and_offset[obj_inst_dir]
            # urdf_path = os.path.join(obj_inst_dir, urdf_path)
            # print('urdf_path', urdf_path)
            obj = ArticulatedObject(urdf_path)
            s.import_object(obj)

            push_visual_marker = VisualMarker(radius=0.1)
            s.import_object(push_visual_marker)
            push_visual_marker.set_position([100, 100, 0.0])

            z = stable_z_on_aabb(obj.body_id, [[0, 0, 0], [0, 0, 0]])
            # offset is the translation from the bounding box center to the
            # base link frame origin. We need to add another offset that is
            # the translation from the base link frame origin to the inertial
            # frame origin, because p.resetBasePositionAndOrientation() sets
            # the inertial frame origin instead of the base link frame origin.
            # Assuming the bounding box center is at (0, 0, z), where z is
            # half of the extent in the z-direction.
            base_link_to_inertial = p.getDynamicsInfo(obj.body_id, -1)[3]
            obj.set_position([
                offset[0] + base_link_to_inertial[0],
                offset[1] + base_link_to_inertial[1], z
            ])

            center, extent = get_center_extent(obj.body_id)
            # the bounding box center should be at (0, 0) on the xy-plane and
            # the bottom of the bounding box should touch the ground
            if not (np.linalg.norm(center[:2]) < 1e-3
                    and np.abs(center[2] - extent[2] / 2.0) < 1e-3):
                print('-' * 50)
                print('WARNING:', urdf_path,
                      'xy error', np.linalg.norm(center[:2]),
                      'z error', np.abs(center[2] - extent[2] / 2.0))

            height = extent[2]
            max_half_extent = max(extent[0], extent[1]) / 2.0
            px = max_half_extent * 2
            py = max_half_extent * 2
            pz = extent[2] * 1.2
            camera_pose = np.array([px, py, pz])
            # camera_pose = np.array([0.01, 0.01, 3])
            s.renderer.set_camera(camera_pose, [0, 0, 0], [0, 0, 1])

            # drop for 1 second
            for _ in range(step_per_sec):
                s.step()
                frame = s.renderer.render(modes=('rgb',))
                imgs.append(
                    Image.fromarray(
                        (frame[0][:, :, :3] * 255).astype(np.uint8)))

            # cast a horizontal ray toward the object and find a ray height
            # that actually hits it
            ray_start = [max_half_extent * 1.5, max_half_extent * 1.5, 0]
            ray_end = [-100.0, -100.0, 0]
            unit_force = np.array([-1.0, -1.0, 0.0])
            unit_force /= np.linalg.norm(unit_force)
            force_mag = 100.0

            ray_zs = [height * 0.5]
            valid_ray_z = False
            for trial in range(5):
                for ray_z in ray_zs:
                    ray_start[2] = ray_z
                    ray_end[2] = ray_z
                    res = p.rayTest(ray_start, ray_end)
                    if res[0][0] == obj.body_id:
                        valid_ray_z = True
                        break
                if valid_ray_z:
                    break
                increment = 1 / (2**(trial + 1))
                ray_zs = np.arange(increment / 2.0, 1.0, increment) * height

            # push for 4 seconds
            for i in range(step_per_sec * 4):
                res = p.rayTest(ray_start, ray_end)
                object_id, link_id, _, hit_pos, hit_normal = res[0]
                if object_id != obj.body_id:
                    break
                push_visual_marker.set_position(hit_pos)
                p.applyExternalForce(object_id, link_id,
                                     unit_force * force_mag, hit_pos,
                                     p.WORLD_FRAME)
                s.step()
                # print(hit_pos)
                frame = s.renderer.render(modes=('rgb',))
                rgb = frame[0][:, :, :3]
                # add a red border
                border_width = 10
                border_color = np.array([1.0, 0.0, 0.0])
                rgb[:border_width, :, :] = border_color
                rgb[-border_width:, :, :] = border_color
                rgb[:, :border_width, :] = border_color
                rgb[:, -border_width:, :] = border_color
                imgs.append(Image.fromarray((rgb * 255).astype(np.uint8)))

            gif_path = '{}/visualizations/{}_cm_physics_v3.gif'.format(
                obj_inst_dir, obj_inst_name)
            imgs = imgs[::4]
            imgs[0].save(gif_path,
                         save_all=True,
                         append_images=imgs[1:],
                         optimize=True,
                         duration=40,
                         loop=0)
            obj_count += 1
            # print(obj_count, gif_path, len(imgs), valid_ray_z, ray_z)
            print(obj_count, gif_path)
            s.reload()
def main():
    step_per_sec = 100
    num_directions = 12
    obj_count = 0
    root_dir = '/cvgl2/u/chengshu/ig_dataset_v5/objects'

    s = Simulator(mode='headless', image_width=512, image_height=512,
                  physics_timestep=1 / float(step_per_sec))
    p.setGravity(0.0, 0.0, 0.0)

    for obj_class_dir in sorted(os.listdir(root_dir)):
        obj_class_dir = os.path.join(root_dir, obj_class_dir)
        for obj_inst_dir in os.listdir(obj_class_dir):
            obj_inst_name = obj_inst_dir
            urdf_path = obj_inst_name + '.urdf'
            obj_inst_dir = os.path.join(obj_class_dir, obj_inst_dir)
            urdf_path = os.path.join(obj_inst_dir, urdf_path)
            obj = ArticulatedObject(urdf_path)
            s.import_object(obj)

            with open(os.path.join(obj_inst_dir, 'misc/bbox.json'),
                      'r') as bbox_file:
                bbox_data = json.load(bbox_file)
            bbox_max = np.array(bbox_data['max'])
            bbox_min = np.array(bbox_data['min'])
            offset = -(bbox_max + bbox_min) / 2.0

            z = stable_z_on_aabb(obj.body_id, [[0, 0, 0], [0, 0, 0]])
            obj.set_position([offset[0], offset[1], z])
            _, extent = get_center_extent(obj.body_id)

            max_half_extent = max(extent) / 2.0
            px = max_half_extent * 3.0
            py = 0.0
            pz = extent[2] / 2.0
            camera_pose = np.array([px, py, pz])
            s.renderer.set_camera(camera_pose, [0, 0, pz], [0, 0, 1])

            num_joints = p.getNumJoints(obj.body_id)
            if num_joints == 0:
                s.reload()
                continue

            # collect joint low/high limits
            joint_low = []
            joint_high = []
            for j in range(num_joints):
                j_low, j_high = p.getJointInfo(obj.body_id, j)[8:10]
                joint_low.append(j_low)
                joint_high.append(j_high)

            # set joints to their lowest limits
            for j, j_low in zip(range(num_joints), joint_low):
                p.resetJointState(obj.body_id, j,
                                  targetValue=j_low, targetVelocity=0.0)
            s.sync()

            # render the images
            joint_low_imgs = []
            for i in range(num_directions):
                yaw = np.pi * 2.0 / num_directions * i
                obj.set_orientation(
                    quatToXYZW(euler2quat(0.0, 0.0, yaw), 'wxyz'))
                s.sync()
                rgb, three_d = s.renderer.render(modes=('rgb', '3d'))
                depth = -three_d[:, :, 2]
                rgb[depth == 0] = 1.0
                joint_low_imgs.append(
                    Image.fromarray((rgb[:, :, :3] * 255).astype(np.uint8)))

            # set joints to their highest limits
            for j, j_high in zip(range(num_joints), joint_high):
                p.resetJointState(obj.body_id, j,
                                  targetValue=j_high, targetVelocity=0.0)
            s.sync()

            # render the images
            joint_high_imgs = []
            for i in range(num_directions):
                yaw = np.pi * 2.0 / num_directions * i
                obj.set_orientation(
                    quatToXYZW(euler2quat(0.0, 0.0, yaw), 'wxyz'))
                s.sync()
                rgb, three_d = s.renderer.render(modes=('rgb', '3d'))
                depth = -three_d[:, :, 2]
                rgb[depth == 0] = 1.0
                joint_high_imgs.append(
                    Image.fromarray((rgb[:, :, :3] * 255).astype(np.uint8)))

            # concatenate the low/high images side by side
            imgs = []
            for im1, im2 in zip(joint_low_imgs, joint_high_imgs):
                dst = Image.new('RGB', (im1.width + im2.width, im1.height))
                dst.paste(im1, (0, 0))
                dst.paste(im2, (im1.width, 0))
                imgs.append(dst)

            gif_path = '{}/visualizations/{}_joint_limit.gif'.format(
                obj_inst_dir, obj_inst_name)
            # save the gif
            imgs[0].save(gif_path,
                         save_all=True,
                         append_images=imgs[1:],
                         optimize=True,
                         duration=200,
                         loop=0)
            s.reload()
            obj_count += 1
            print(obj_count, gif_path)
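
# Assumed entry point for this snippet (not shown in the original):
if __name__ == '__main__':
    main()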