import os
import subprocess
import sys

import numpy as np
from PIL import Image

# Import paths below follow iGibson's `gibson2` package layout (an assumption
# about the surrounding repo; adjust to your install).
import gibson2
from gibson2.objects.articulated_object import ArticulatedObject
from gibson2.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from gibson2.render.profiler import Profiler
from gibson2.simulator import Simulator


def main():
    model_path = sys.argv[1]
    print(model_path)
    model_id = os.path.basename(model_path)
    category = os.path.basename(os.path.dirname(model_path))

    hdr_texture = os.path.join(
        gibson2.ig_dataset_path, 'scenes', 'background', 'photo_studio_01_2k.hdr')
    settings = MeshRendererSettings(env_texture_filename=hdr_texture,
                                    enable_shadow=True, msaa=True,
                                    light_dimming_factor=1.5)
    s = Simulator(mode='headless',
                  image_width=1800, image_height=1200, vertical_fov=70,
                  rendering_settings=settings)

    # Light and a ground plane under the object.
    s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])
    s.renderer.load_object('plane/plane_z_up_0.obj', scale=[3, 3, 3])
    s.renderer.add_instance(0)
    s.renderer.set_pose([0, 0, -1.5, 1, 0, 0.0, 0.0], -1)

    # Gather all visual-mesh vertices to compute a normalizing scale and the
    # bounding-box center.
    v = []
    mesh_path = os.path.join(model_path, 'shape/visual')
    for fn in os.listdir(mesh_path):
        if fn.endswith('obj'):
            vertices, faces = load_obj_np(os.path.join(mesh_path, fn))
            v.append(vertices)
    v = np.vstack(v)
    print(v.shape)
    xlen = np.max(v[:, 0]) - np.min(v[:, 0])
    ylen = np.max(v[:, 1]) - np.min(v[:, 1])
    zlen = np.max(v[:, 2]) - np.min(v[:, 2])
    scale = 1.5 / max([xlen, ylen, zlen])
    center = (np.max(v, axis=0) + np.min(v, axis=0)) / 2.

    urdf_path = os.path.join(model_path, '{}.urdf'.format(model_id))
    print(urdf_path)
    obj = ArticulatedObject(filename=urdf_path, scale=scale)
    s.import_object(obj)
    obj.set_position(center)
    s.sync()
    print(s.renderer.visual_objects, s.renderer.instances)

    # Place the camera on a circle of radius r around the object.
    theta, r = 0, 1.5
    px = r * np.sin(theta)
    py = r * np.cos(theta)
    pz = 1
    camera_pose = np.array([px, py, pz])
    s.renderer.set_camera(camera_pose, [0, 0, 0], [0, 0, 1])

    num_views = 6
    save_dir = os.path.join(model_path, 'visualizations')
    os.makedirs(save_dir, exist_ok=True)
    for i in range(num_views):
        theta += np.pi * 2 / (num_views + 1)
        # xyzw quaternion for a rotation of theta about the z axis.
        obj.set_orientation([0., 0., np.sin(theta / 2), np.cos(theta / 2)])
        s.sync()
        with Profiler('Render'):
            frame = s.renderer.render(modes=('rgb',))
        img = Image.fromarray(
            (255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8))
        img.save(os.path.join(save_dir, '{:02d}.png'.format(i)))

    # Stitch the views into a short turntable video, then remove the frames.
    cmd = ('ffmpeg -framerate 2 -i {s}/%02d.png -y -r 16 -c:v libx264 '
           '-pix_fmt yuvj420p {s}/{m}.mp4').format(s=save_dir, m=model_id)
    subprocess.call(cmd, shell=True)
    cmd = 'rm {}/??.png'.format(save_dir)
    subprocess.call(cmd, shell=True)
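# `load_obj_np` is used above but not defined in this file; in the iGibson data
# utilities it is a small helper that parses a Wavefront OBJ into numpy arrays.
# A minimal sketch of such a loader (assumptions: vertex positions and
# triangulated, 1-indexed face records only):
def load_obj_np(filename):
    """Return (vertices, faces) numpy arrays parsed from an OBJ file."""
    vertices, faces = [], []
    with open(filename, 'r') as fp:
        for line in fp:
            tokens = line.split()
            if not tokens:
                continue
            if tokens[0] == 'v':    # vertex position: v x y z
                vertices.append([float(c) for c in tokens[1:4]])
            elif tokens[0] == 'f':  # face: f v1[/vt/vn] v2[...] v3[...]
                faces.append([int(t.split('/')[0]) - 1 for t in tokens[1:4]])
    return np.array(vertices), np.array(faces)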
import argparse
import os
import random
import subprocess

import numpy as np
from PIL import Image

# iGibson (`gibson2`) imports, assuming the standard package layout.
import gibson2
from gibson2.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from gibson2.render.profiler import Profiler
from gibson2.scenes.igibson_indoor_scene import InteractiveIndoorScene
from gibson2.simulator import Simulator
from gibson2.utils.assets_utils import get_ig_scene_path


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--scene', type=str,
                        help='Name of the scene in the iG Dataset')
    parser.add_argument('--save_dir', type=str, default='misc',
                        help='Directory to save the frames.')
    parser.add_argument('--seed', type=int, default=15, help='Random seed.')
    parser.add_argument('--domain_rand', dest='domain_rand',
                        action='store_true')
    parser.add_argument('--domain_rand_interval', dest='domain_rand_interval',
                        type=int, default=50)
    parser.add_argument('--object_rand', dest='object_rand',
                        action='store_true')
    args = parser.parse_args()

    # Lighting: two HDR environment probes, a per-floor light modulation map,
    # and a street photo for the skybox background.
    # Alternative probe: scenes/background/photo_studio_01_2k.hdr
    hdr_texture1 = os.path.join(
        gibson2.ig_dataset_path, 'scenes', 'background', 'probe_03.hdr')
    hdr_texture2 = os.path.join(
        gibson2.ig_dataset_path, 'scenes', 'background', 'probe_02.hdr')
    light_map = os.path.join(
        get_ig_scene_path(args.scene), 'layout', 'floor_lighttype_0.png')
    background_texture = os.path.join(
        gibson2.ig_dataset_path, 'scenes', 'background', 'urban_street_01.jpg')

    settings = MeshRendererSettings(env_texture_filename=hdr_texture1,
                                    env_texture_filename2=hdr_texture2,
                                    env_texture_filename3=background_texture,
                                    light_modulation_map_filename=light_map,
                                    enable_shadow=True, msaa=True,
                                    skybox_size=36.,
                                    light_dimming_factor=0.8)
    s = Simulator(mode='headless',
                  image_width=1080, image_height=720, vertical_fov=60,
                  rendering_settings=settings)

    random.seed(args.seed)
    scene = InteractiveIndoorScene(args.scene,
                                   texture_randomization=args.domain_rand,
                                   object_randomization=args.object_rand)
    s.import_ig_scene(scene)

    traj_path = os.path.join(
        get_ig_scene_path(args.scene), 'misc', 'tour_cam_trajectory.txt')
    save_dir = os.path.join(get_ig_scene_path(args.scene), args.save_dir)
    os.makedirs(save_dir, exist_ok=True)
    tmp_dir = os.path.join(save_dir, 'tmp')
    os.makedirs(tmp_dir, exist_ok=True)

    with open(traj_path, 'r') as fp:
        points = [line.rstrip().split(',') for line in fp.readlines()]

    # Let physics settle before rendering.
    for _ in range(60):
        s.step()
    s.sync()

    for i in range(len(points)):
        if args.domain_rand and i % args.domain_rand_interval == 0:
            scene.randomize_texture()
        x, y, dir_x, dir_y = [float(p) for p in points[i]]
        z = 1.7
        tar_x = x + dir_x
        tar_y = y + dir_y
        tar_z = 1.4
        s.renderer.set_camera([x, y, z], [tar_x, tar_y, tar_z], [0, 0, 1])
        with Profiler('Render'):
            frame = s.renderer.render(modes=('rgb',))
        img = Image.fromarray(
            (255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8))
        img.save(os.path.join(tmp_dir, '{:05d}.png'.format(i)))

    # Encode the frames into a video, then remove the temporary frames.
    cmd = ('ffmpeg -i {t}/%05d.png -y -an -c:v libx264 -crf 18 '
           '-preset veryslow -r 30 {s}/tour.mp4').format(t=tmp_dir, s=save_dir)
    subprocess.call(cmd, shell=True)
    cmd = 'rm -r {}'.format(tmp_dir)
    subprocess.call(cmd, shell=True)

    s.disconnect()
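# The trajectory file read by main() is assumed to be a plain-text CSV with one
# waypoint per line: x, y, dir_x, dir_y (camera position on the floor plane and
# a view direction in the same plane). A hypothetical helper for producing such
# a file:
def write_tour_trajectory(path, waypoints):
    """Write (x, y, dir_x, dir_y) waypoints in the format main() expects."""
    with open(path, 'w') as fp:
        for x, y, dir_x, dir_y in waypoints:
            fp.write('{},{},{},{}\n'.format(x, y, dir_x, dir_y))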
import argparse
import json
import os
import subprocess
from shutil import which

import numpy as np
from PIL import Image

# iGibson (`gibson2`) imports, assuming the standard package layout.
import gibson2
from gibson2.objects.articulated_object import ArticulatedObject
from gibson2.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from gibson2.render.profiler import Profiler
from gibson2.simulator import Simulator

# The original called parser.parse_args() without defining the parser; a
# minimal definition consistent with the argument it reads:
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str,
                    help='Path to the object model directory')


def main():
    args = parser.parse_args()
    model_path = args.input_dir
    print(model_path)
    model_id = os.path.basename(model_path)
    category = os.path.basename(os.path.dirname(model_path))

    hdr_texture = os.path.join(
        gibson2.ig_dataset_path, 'scenes', 'background', 'probe_03.hdr')
    settings = MeshRendererSettings(env_texture_filename=hdr_texture,
                                    enable_shadow=True, msaa=True)
    s = Simulator(mode='headless',
                  image_width=1800, image_height=1200, vertical_fov=70,
                  rendering_settings=settings)

    # Light and a ground plane under the object.
    s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])
    s.renderer.load_object('plane/plane_z_up_0.obj', scale=[3, 3, 3])
    s.renderer.add_instance(0)
    s.renderer.set_pose([0, 0, -1.5, 1, 0, 0.0, 0.0], -1)

    ###########################
    # Get center and scale
    ###########################
    bbox_json = os.path.join(model_path, 'misc', 'metadata.json')
    with open(bbox_json, 'r') as fp:
        bbox_data = json.load(fp)
    scale = 1.5 / max(bbox_data['bbox_size'])
    center = -scale * np.array(bbox_data['base_link_offset'])

    urdf_path = os.path.join(model_path, '{}.urdf'.format(model_id))
    print(urdf_path)
    obj = ArticulatedObject(filename=urdf_path, scale=scale)
    s.import_object(obj)
    obj.set_position(center)
    s.sync()

    # Place the camera on a circle of radius r around the object.
    theta, r = 0, 1.5
    px = r * np.sin(theta)
    py = r * np.cos(theta)
    pz = 1
    camera_pose = np.array([px, py, pz])
    s.renderer.set_camera(camera_pose, [0, 0, 0], [0, 0, 1])

    num_views = 6
    save_dir = os.path.join(model_path, 'visualizations')
    os.makedirs(save_dir, exist_ok=True)
    for i in range(num_views):
        theta += np.pi * 2 / (num_views + 1)
        # xyzw quaternion for a rotation of theta about the z axis.
        obj.set_orientation([0., 0., np.sin(theta / 2), np.cos(theta / 2)])
        s.sync()
        with Profiler('Render'):
            frame = s.renderer.render(modes=('rgb',))
        img = Image.fromarray(
            (255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8))
        img.save(os.path.join(save_dir, '{:02d}.png'.format(i)))

    # Only encode a turntable video if ffmpeg is available on PATH.
    if which('ffmpeg') is not None:
        cmd = ('ffmpeg -framerate 2 -i {s}/%02d.png -y -r 16 -c:v libx264 '
               '-pix_fmt yuvj420p {s}/{m}.mp4').format(s=save_dir, m=model_id)
        subprocess.call(cmd, shell=True)
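# main() assumes misc/metadata.json provides 'bbox_size' (object extents) and
# 'base_link_offset' (offset from the URDF base link to the bounding-box
# center). A hypothetical sanity check mirroring the normalization above:
def check_metadata(bbox_json):
    """Validate the metadata keys main() reads and print the derived values."""
    with open(bbox_json, 'r') as fp:
        data = json.load(fp)
    assert len(data['bbox_size']) == 3, "'bbox_size' must be an xyz extent"
    assert len(data['base_link_offset']) == 3, "'base_link_offset' must be xyz"
    scale = 1.5 / max(data['bbox_size'])  # same scaling rule as main()
    center = -scale * np.array(data['base_link_offset'])
    print('scale: {}, centering offset: {}'.format(scale, center))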
import json
import os

import numpy as np
import pybullet as p
from PIL import Image
from transforms3d.euler import euler2quat

# iGibson (`gibson2`) imports, assuming the standard package layout.
from gibson2.external.pybullet_tools.utils import (get_center_extent,
                                                   stable_z_on_aabb)
from gibson2.objects.articulated_object import ArticulatedObject
from gibson2.simulator import Simulator
from gibson2.utils.utils import quatToXYZW


def main():
    step_per_sec = 100
    num_directions = 12
    obj_count = 0
    # Update this to your local iG dataset object root.
    root_dir = '/cvgl2/u/chengshu/ig_dataset_v5/objects'

    s = Simulator(mode='headless',
                  image_width=512, image_height=512,
                  physics_timestep=1 / float(step_per_sec))
    p.setGravity(0.0, 0.0, 0.0)

    def render_turntable(obj):
        # Render num_directions views of the object, rotating it about the
        # z axis, and return them as PIL images with a white background.
        imgs = []
        for i in range(num_directions):
            yaw = np.pi * 2.0 / num_directions * i
            obj.set_orientation(
                quatToXYZW(euler2quat(0.0, 0.0, yaw), 'wxyz'))
            s.sync()
            rgb, three_d = s.renderer.render(modes=('rgb', '3d'))
            depth = -three_d[:, :, 2]
            rgb[depth == 0] = 1.0  # white out pixels where no geometry was hit
            imgs.append(
                Image.fromarray((rgb[:, :, :3] * 255).astype(np.uint8)))
        return imgs

    for obj_class_dir in sorted(os.listdir(root_dir)):
        obj_class_dir = os.path.join(root_dir, obj_class_dir)
        for obj_inst_dir in os.listdir(obj_class_dir):
            obj_inst_name = obj_inst_dir
            urdf_path = obj_inst_name + '.urdf'
            obj_inst_dir = os.path.join(obj_class_dir, obj_inst_dir)
            urdf_path = os.path.join(obj_inst_dir, urdf_path)

            obj = ArticulatedObject(urdf_path)
            s.import_object(obj)

            # Center the object in x/y and rest it on the z = 0 plane.
            with open(os.path.join(obj_inst_dir, 'misc/bbox.json'),
                      'r') as bbox_file:
                bbox_data = json.load(bbox_file)
                bbox_max = np.array(bbox_data['max'])
                bbox_min = np.array(bbox_data['min'])
                offset = -(bbox_max + bbox_min) / 2.0
            z = stable_z_on_aabb(obj.body_id, [[0, 0, 0], [0, 0, 0]])
            obj.set_position([offset[0], offset[1], z])

            # Camera level with the object center, three half-extents away.
            _, extent = get_center_extent(obj.body_id)
            max_half_extent = max(extent) / 2.0
            px = max_half_extent * 3.0
            py = 0.0
            pz = extent[2] / 2.0
            camera_pose = np.array([px, py, pz])
            s.renderer.set_camera(camera_pose, [0, 0, pz], [0, 0, 1])

            # Skip rigid objects with no joints.
            num_joints = p.getNumJoints(obj.body_id)
            if num_joints == 0:
                s.reload()
                continue

            # Collect joint lower/upper limits.
            joint_low = []
            joint_high = []
            for j in range(num_joints):
                j_low, j_high = p.getJointInfo(obj.body_id, j)[8:10]
                joint_low.append(j_low)
                joint_high.append(j_high)

            # Render with all joints at their lower limits ...
            for j, j_low in zip(range(num_joints), joint_low):
                p.resetJointState(obj.body_id, j,
                                  targetValue=j_low, targetVelocity=0.0)
            s.sync()
            joint_low_imgs = render_turntable(obj)

            # ... and again with all joints at their upper limits.
            for j, j_high in zip(range(num_joints), joint_high):
                p.resetJointState(obj.body_id, j,
                                  targetValue=j_high, targetVelocity=0.0)
            s.sync()
            joint_high_imgs = render_turntable(obj)

            # Concatenate the low/high pairs side by side.
            imgs = []
            for im1, im2 in zip(joint_low_imgs, joint_high_imgs):
                dst = Image.new('RGB', (im1.width + im2.width, im1.height))
                dst.paste(im1, (0, 0))
                dst.paste(im2, (im1.width, 0))
                imgs.append(dst)

            # Save the turntable as an animated gif.
            os.makedirs(os.path.join(obj_inst_dir, 'visualizations'),
                        exist_ok=True)
            gif_path = '{}/visualizations/{}_joint_limit.gif'.format(
                obj_inst_dir, obj_inst_name)
            imgs[0].save(gif_path, save_all=True, append_images=imgs[1:],
                         optimize=True, duration=200, loop=0)

            s.reload()
            obj_count += 1
            print(obj_count, gif_path)
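# The render loop above builds the object orientation with
# quatToXYZW(euler2quat(0, 0, yaw), 'wxyz'). For a pure rotation about the
# z axis this is equivalent to the closed form below, shown for reference:
def yaw_to_xyzw(yaw):
    """xyzw quaternion for a rotation of `yaw` radians about the z axis."""
    return [0.0, 0.0, np.sin(yaw / 2.0), np.cos(yaw / 2.0)]


# Assuming this main() lives in its own script, the usual entry-point guard:
if __name__ == '__main__':
    main()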