    translation=np.array([-0.3, 0.0, 0.0]),
    from_frame='camera',
    to_frame='world')

# Create a VirtualCamera
camera = VirtualCamera(ci, cp)

# Add the camera to the scene
scene.camera = camera

#====================================
# Render images
#====================================

# Render raw numpy arrays containing color and depth
color_image_raw, depth_image_raw = scene.render(render_color=True)

# Alternatively, just render a depth image
depth_image_raw = scene.render(render_color=False)

# Alternatively, collect wrapped images
wrapped_color, wrapped_depth, wrapped_segmask = scene.wrapped_render(
    [RenderMode.COLOR, RenderMode.DEPTH, RenderMode.SEGMASK])

wrapped_color.save('output/color.jpg')
wrapped_depth.save('output/depth.jpg')

# Test random variables
cfg = {
    'focal_length': {
        'min': 520,
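# The Generator class below relies on imports and two project-local helpers
# (clear_dir and ModelSampler) that are not shown in this excerpt. The block
# below is a minimal sketch, assuming the autolab stack (autolab_core,
# perception, meshrender) plus cv2/numpy/trimesh, and assuming clear_dir
# simply recreates an empty output directory; ModelSampler is the project's
# own sampler and is intentionally left out.
import os
import os.path as pt
import shutil

import cv2 as cv
import numpy as np
import trimesh

from autolab_core import RigidTransform
from perception import CameraIntrinsics
from meshrender import Scene, VirtualCamera, RenderMode


def clear_dir(path):
    '''Sketch of the assumed helper: wipe `path` and recreate it empty.'''
    if pt.isdir(path):
        shutil.rmtree(path)
    os.makedirs(path)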
class Generator:
    def __init__(self):
        DATASET_DIR = pt.abspath('.')
        OUTPUT_DIR = pt.abspath('./data')

        self.sampler = ModelSampler(DATASET_DIR)

        # Main scene plus two auxiliary scenes used to render each object
        # (and its grip geometry) in isolation for segmask generation.
        self.scene = Scene()
        self.local_scene = Scene()
        self.grip_scene = Scene()

        self.dataset_dir = DATASET_DIR
        self.output_dir = OUTPUT_DIR
        self.image_dir = 'color-input-synth'
        self.depth_dir = 'depth-input-synth'
        self.seg_dir = 'label-synth'
        clear_dir(pt.join(self.output_dir, self.image_dir))
        clear_dir(pt.join(self.output_dir, self.depth_dir))
        clear_dir(pt.join(self.output_dir, self.seg_dir))

        ci = CameraIntrinsics(frame='camera',
                              fx=617.0, fy=617.0,
                              cx=320.0, cy=240.0,
                              skew=0.0,
                              height=480, width=640)

        # Set up the camera poses (z axis faces away from the scene, x to the right, y up)
        cp1 = RigidTransform(
            rotation=trimesh.transformations.rotation_matrix(
                np.deg2rad(-30), [1, 0, 0])[:3, :3]
            @ trimesh.transformations.rotation_matrix(
                np.deg2rad(180), [0, 1, 0])[:3, :3],
            translation=np.array([0.0, 0.75, 1.0]),
            from_frame='camera',
            to_frame='world')
        cp2 = RigidTransform(
            rotation=trimesh.transformations.rotation_matrix(
                np.deg2rad(37), [1, 0, 0])[:3, :3],
            translation=np.array([0.0, 0.0, 1.0]),
            from_frame='camera',
            to_frame='world')
        camera1 = VirtualCamera(ci, cp1)
        camera2 = VirtualCamera(ci, cp2)  # alternative viewpoint, unused below

        # All three scenes share the same camera
        self.scene.camera = camera1
        self.local_scene.camera = camera1
        self.grip_scene.camera = camera1

    def clear_scene(self, scene):
        obj_names = scene.objects.keys()
        light_names = scene.lights.keys()
        for obj_name in list(obj_names):
            scene.remove_object(obj_name)
        for light_name in list(light_names):
            scene.remove_light(light_name)

    def save_sample(self, idx, color, depth, segmask):
        image_filename = pt.join(self.output_dir, self.image_dir,
                                 '{:05d}.png'.format(idx))
        depth_filename = pt.join(self.output_dir, self.depth_dir,
                                 '{:05d}.png'.format(idx))
        seg_filename = pt.join(self.output_dir, self.seg_dir,
                               '{:05d}.png'.format(idx))
        cv.imwrite(image_filename, color.data)
        # Store depth as a 16-bit PNG in units of 0.1 mm
        cv.imwrite(depth_filename, (10000 * depth.data).astype(np.uint16))
        cv.imwrite(seg_filename, segmask)

    def process_depths(self, depths, grip_depths):
        '''Combine per-object depth renders into the true segmask:
        255 = background, 0 = object, 1 = grip region.
        '''
        assert len(depths) > 0
        self.depths = depths
        self.grip_depths = grip_depths
        ds = np.sum(np.stack(depths), axis=0)
        gds = np.sum(np.stack(grip_depths), axis=0)
        ds[ds == 0.0] = 255
        ds[ds != 255] = 0
        ds[gds != 0] = 1
        ds = ds.astype(np.uint8)
        return ds

    def generate_scene(self):
        depths = []
        grip_depths = []
        self.scene.add_object('ground', self.sampler.sample_ground_obj())
        for model_obj, grip_obj, model_name in self.sampler.sample_scene_objs():
            self.scene.add_object(model_name, model_obj)
            self.local_scene.add_object(model_name, model_obj)
            self.grip_scene.add_object(model_name, grip_obj)
            # Render each object and its grip geometry in isolation
            depth = self.local_scene.render(render_color=False)
            depth_grip = self.grip_scene.render(render_color=False)
            depths.append(depth)
            grip_depths.append(depth_grip)
            self.clear_scene(self.local_scene)
            self.clear_scene(self.grip_scene)

        # Create an ambient light
        #self.depths = depths
        ambient = self.sampler.sample_ambient_light()
        self.scene.ambient_light = ambient  # only one ambient light per scene

        directional_lights = self.sampler.sample_direc_lights()
        for i, directional_light in enumerate(directional_lights):
            self.scene.add_light('direc_{}'.format(i), directional_light)

        return self.process_depths(depths, grip_depths)

    def prepare_batch(self, num=3):
        # If the output directories already exist, their contents are replaced!
        imdir = pt.join(self.output_dir, self.image_dir)
        dpdir = pt.join(self.output_dir, self.depth_dir)
        segdir = pt.join(self.output_dir, self.seg_dir)
        clear_dir(imdir)
        clear_dir(dpdir)
        clear_dir(segdir)

        for i in range(num):
            segmask = self.generate_scene()
            wrapped_color, wrapped_depth = self.scene.wrapped_render(
                [RenderMode.COLOR, RenderMode.DEPTH])
            self.save_sample(i, wrapped_color, wrapped_depth, segmask)
            self.clear_scene(self.scene)
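# Hypothetical driver, assuming this module is run as a script: build one
# Generator and render a batch of synthetic samples into the ./data output
# directories configured in __init__. The batch size of 10 is illustrative.
if __name__ == '__main__':
    generator = Generator()
    generator.prepare_batch(num=10)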