Example #1
def do_stuff(pc, indices, model, rotated_shadow, img_file):
    """Render the shadow's depth image as a binary mask, project the plane
    points from the point cloud into a second binary image, and mask one
    by the other."""
    scene = Scene()
    camera = VirtualCamera(ci, cp)  # `ci`/`cp` are module-level globals (see the sketch below)
    scene.camera = camera

    # Works
    shadow_obj = SceneObject(rotated_shadow)
    scene.add_object('shadow', shadow_obj)
    wd = scene.wrapped_render([RenderMode.DEPTH])[0]
    wd_bi = wd.to_binary()
    vis2d.figure()
    vis2d.imshow(wd_bi)
    vis2d.show()

    # Doesn't work yet
    plane = pc.data.T[indices]
    plane_pc = PointCloud(plane.T, pc.frame)
    di = ci.project_to_image(plane_pc)
    bi = di.to_binary()
    vis2d.figure()
    vis2d.imshow(bi)
    vis2d.show()

    # Works
    both = bi.mask_binary(wd_bi)
    vis2d.figure()
    vis2d.imshow(both)
    vis2d.show()
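
# `ci` and `cp` above are module-level globals defined outside this excerpt.
# A minimal sketch of plausible definitions (numeric values and frame names
# are illustrative assumptions, not taken from the source):
import numpy as np
from autolab_core import RigidTransform
from perception import CameraIntrinsics

ci = CameraIntrinsics(frame='camera', fx=525.0, fy=525.0, cx=319.5,
                      cy=239.5, skew=0.0, height=480, width=640)
cp = RigidTransform(rotation=np.eye(3),
                    translation=np.array([0.0, 0.0, 1.0]),
                    from_frame='camera', to_frame='world')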
Example #2
# Add the camera to the scene
scene.camera = camera

#====================================
# Render images
#====================================

# Render raw numpy arrays containing color and depth
color_image_raw, depth_image_raw = scene.render(render_color=True)

# Alternatively, just render a depth image
depth_image_raw = scene.render(render_color=False)

# Alternatively, collect wrapped images
wrapped_color, wrapped_depth, wrapped_segmask = scene.wrapped_render(
    [RenderMode.COLOR, RenderMode.DEPTH, RenderMode.SEGMASK])

wrapped_color.save('output/color.jpg')
wrapped_depth.save('output/depth.jpg')
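
# The raw arrays from scene.render() above can also be wrapped by hand into
# perception image types; a minimal sketch, assuming the ColorImage and
# DepthImage constructors from the perception package (the frame name is an
# illustrative assumption):
from perception import ColorImage, DepthImage

color_im = ColorImage(color_image_raw, frame='camera')
depth_im = DepthImage(depth_image_raw, frame='camera')
depth_im.save('output/depth_raw.npy')  # .npy keeps the float depth lossless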

# Test random variables
cfg = {
    'focal_length': {
        'min': 520,
        'max': 530,
    },
    'delta_optical_center': {
        'min': 0.0,
        'max': 0.0,
    },
    'radius': {
Example #3
        sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
        sensors[sensor_name] = sensor
        
        # start the sensor
        sensor.start()
        camera_intr = sensor.ir_intrinsics
        camera_intr = camera_intr.resize(im_rescale_factor)
        camera_intrs[sensor_name] = camera_intr        
        
        # render image of static workspace
        scene = Scene()
        camera = VirtualCamera(camera_intr, T_camera_world)
        scene.camera = camera
        for obj_key, scene_obj in workspace_objects.items():
            scene.add_object(obj_key, scene_obj)
        workspace_ims[sensor_name] = scene.wrapped_render([RenderMode.DEPTH])[0]

        # fix dataset config
        dataset_config['fields']['raw_color_ims']['height'] = camera_intr.height
        dataset_config['fields']['raw_color_ims']['width'] = camera_intr.width
        dataset_config['fields']['raw_depth_ims']['height'] = camera_intr.height
        dataset_config['fields']['raw_depth_ims']['width'] = camera_intr.width 
        dataset_config['fields']['color_ims']['height'] = camera_intr.height
        dataset_config['fields']['color_ims']['width'] = camera_intr.width 
        dataset_config['fields']['depth_ims']['height'] = camera_intr.height
        dataset_config['fields']['depth_ims']['width'] = camera_intr.width 
        dataset_config['fields']['segmasks']['height'] = camera_intr.height
        dataset_config['fields']['segmasks']['width'] = camera_intr.width 
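        # The repeated assignments above can be written as a loop over the
        # same field names; a behavior-equivalent sketch:
        for field in ('raw_color_ims', 'raw_depth_ims', 'color_ims',
                      'depth_ims', 'segmasks'):
            dataset_config['fields'][field]['height'] = camera_intr.height
            dataset_config['fields'][field]['width'] = camera_intr.width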
       
        # open dataset
        sensor_dataset_filename = os.path.join(output_dir, sensor_name)
Example #4
        sensor = RgbdSensorFactory.sensor(sensor_type, sensor_config)
        sensors[sensor_name] = sensor

        # start the sensor
        sensor.start()
        camera_intr = sensor.ir_intrinsics
        camera_intr = camera_intr.resize(im_rescale_factor)
        camera_intrs[sensor_name] = camera_intr

        # render image of static workspace
        scene = Scene()
        camera = VirtualCamera(camera_intr, T_camera_world)
        scene.camera = camera
        for obj_key, scene_obj in workspace_objects.items():
            scene.add_object(obj_key, scene_obj)
        workspace_ims[sensor_name] = scene.wrapped_render(["depth"])[0]

        # fix dataset config
        dataset_config["fields"]["raw_color_ims"][
            "height"
        ] = camera_intr.height
        dataset_config["fields"]["raw_color_ims"]["width"] = camera_intr.width
        dataset_config["fields"]["raw_depth_ims"][
            "height"
        ] = camera_intr.height
        dataset_config["fields"]["raw_depth_ims"]["width"] = camera_intr.width
        dataset_config["fields"]["color_ims"]["height"] = camera_intr.height
        dataset_config["fields"]["color_ims"]["width"] = camera_intr.width
        dataset_config["fields"]["depth_ims"]["height"] = camera_intr.height
        dataset_config["fields"]["depth_ims"]["width"] = camera_intr.width
        dataset_config["fields"]["segmasks"]["height"] = camera_intr.height
Example #5
class Generator:
    def __init__(self):
        DATASET_DIR = pt.abspath('.')
        OUTPUT_DIR = pt.abspath('./data')
        self.sampler = ModelSampler(DATASET_DIR)
        self.scene = Scene()
        self.local_scene = Scene()
        self.grip_scene = Scene()
        self.dataset_dir = DATASET_DIR
        self.output_dir = OUTPUT_DIR
        self.image_dir = 'color-input-synth'
        self.depth_dir = 'depth-input-synth'
        self.seg_dir = 'label-synth'
        clear_dir(pt.join(self.output_dir, self.image_dir))
        clear_dir(pt.join(self.output_dir, self.depth_dir))
        clear_dir(pt.join(self.output_dir, self.seg_dir))

        ci = CameraIntrinsics(frame='camera',
                              fx=617.0,
                              fy=617.0,
                              cx=320.0,
                              cy=240.0,
                              skew=0.0,
                              height=480,
                              width=640)

        # Set up the camera pose (z axis faces away from scene, x to right, y up)
        cp1 = RigidTransform(rotation=trimesh.transformations.rotation_matrix(
            np.deg2rad(-30), [1, 0, 0])[:3, :3]
                             @ trimesh.transformations.rotation_matrix(
                                 np.deg2rad(180), [0, 1, 0])[:3, :3],
                             translation=np.array([0.0, 0.75, 1.0]),
                             from_frame='camera',
                             to_frame='world')
        cp2 = RigidTransform(rotation=trimesh.transformations.rotation_matrix(
            np.deg2rad(37), [1, 0, 0])[:3, :3],
                             translation=np.array([0.0, 0.0, 1.0]),
                             from_frame='camera',
                             to_frame='world')
        camera1 = VirtualCamera(ci, cp1)
        camera2 = VirtualCamera(ci, cp2)
        # Add the camera to the scene
        self.scene.camera = camera1
        self.local_scene.camera = camera1
        self.grip_scene.camera = camera1

    def clear_scene(self, scene):
        obj_names = scene.objects.keys()
        light_names = scene.lights.keys()
        for obj_name in list(obj_names):
            scene.remove_object(obj_name)
        for light_name in list(light_names):
            scene.remove_light(light_name)

    def save_sample(self, idx, color, depth, segmask):
        image_filename = pt.join(self.output_dir, self.image_dir,
                                 '{:05d}.png'.format(idx))
        depth_filename = pt.join(self.output_dir, self.depth_dir,
                                 '{:05d}.png'.format(idx))
        seg_filename = pt.join(self.output_dir, self.seg_dir,
                               '{:05d}.png'.format(idx))

        cv.imwrite(image_filename, color.data)
        cv.imwrite(depth_filename,
                   (10000 * depth.data).astype(np.uint16))  # depth in units of 0.1 mm
        cv.imwrite(seg_filename, segmask)

    def process_depths(self, depths, grip_depths):
        '''Process the per-object depth renders into a ground-truth
        segmentation mask.
        '''
        assert len(depths) > 0
        self.depths = depths
        self.grip_depths = grip_depths
        ds = np.sum(np.stack(depths), axis=0)
        gds = np.sum(np.stack(grip_depths), axis=0)
        ds[ds == 0.0] = 255  # background: no object rendered at this pixel
        ds[ds != 255] = 0  # pixels covered by some object body
        ds[gds != 0] = 1  # pixels covered by a grippable region
        ds = ds.astype(np.uint8)
        return ds

    def generate_scene(self):
        depths = []
        grip_depths = []
        self.scene.add_object('ground', self.sampler.sample_ground_obj())
        for model_obj, grip_obj, model_name in self.sampler.sample_scene_objs():
            self.scene.add_object(model_name, model_obj)
            self.local_scene.add_object(model_name, model_obj)
            self.grip_scene.add_object(model_name, grip_obj)
            depth = self.local_scene.render(render_color=False)
            depth_grip = self.grip_scene.render(render_color=False)
            depths.append(depth)
            grip_depths.append(depth_grip)
            self.clear_scene(self.local_scene)
            self.clear_scene(self.grip_scene)

        # Create an ambient light
        #self.depths = depths
        ambient = self.sampler.sample_ambient_light()
        self.scene.ambient_light = ambient  # only one ambient light per scene
        directional_lights = self.sampler.sample_direc_lights()
        for i, directional_light in enumerate(directional_lights):
            self.scene.add_light('direc_{}'.format(i), directional_light)

        return self.process_depths(depths, grip_depths)

    def prepare_batch(self, num=3):
        # if the directories already exist, their contents will be replaced!
        imdir = pt.join(self.output_dir, self.image_dir)
        dpdir = pt.join(self.output_dir, self.depth_dir)
        segdir = pt.join(self.output_dir, self.seg_dir)
        clear_dir(imdir)
        clear_dir(dpdir)
        clear_dir(segdir)
        for i in range(num):
            segmask = self.generate_scene()
            wrapped_color, wrapped_depth = self.scene.wrapped_render(
                [RenderMode.COLOR, RenderMode.DEPTH])
            self.save_sample(i, wrapped_color, wrapped_depth, segmask)
            self.clear_scene(self.scene)
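
# A minimal usage sketch for the Generator class above: build a generator
# and render a small synthetic batch into the directories prepared in
# __init__ (assumes the sampler's dataset lives in the working directory):
if __name__ == '__main__':
    gen = Generator()
    gen.prepare_batch(num=3)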
Example #6
def fast_grid_search(pc, indices, model, shadow):
    length, width, height = shadow.extents
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]
    plane_normal = model[0:3]

    #di_temp = ci.project_to_image(pc)
    #vis2d.figure()
    #vis2d.imshow(di_temp)
    #vis2d.show()
    #plane_data = pc.data.T[indices]
    #plane_pc = PointCloud(plane_data.T, pc.frame)
    #di = ci.project_to_image(plane_pc)
    #bi = di.to_binary()

    plane_data = get_plane_data(pc, indices)
    plane_pc = PointCloud(plane_data.T, pc.frame)
    #vis3d.figure()
    #vis3d.points(plane_pc)
    #vis3d.show()
    plane_pc = cp.inverse().apply(plane_pc)
    di = ci.project_to_image(plane_pc)
    bi = di.to_binary()
    bi = bi.inverse()
    #vis2d.figure()
    #vis2d.imshow(bi)
    #vis2d.show()

    scene = Scene()
    camera = VirtualCamera(ci, cp)
    scene.camera = camera
    shadow_obj = SceneObject(shadow)
    scene.add_object('shadow', shadow_obj)
    orig_tow = shadow_obj.T_obj_world
    #tr = transforms(pc, pc_data, shadow, mins[0], mins[1], mins[0]+split_size, mins[1]+split_size, 8, orig_tow)
    #shadow_obj.T_obj_world = tr[0]
    wd = scene.wrapped_render([RenderMode.DEPTH])[0]
    wd_bi = wd.to_binary()
    #vis2d.figure()
    #vis2d.imshow(wd_bi)
    #vis2d.show()

    n_x = int(np.round((maxes[0] - mins[0]) / split_size))
    n_y = int(np.round((maxes[1] - mins[1]) / split_size))
    scores = np.zeros((n_x, n_y))
    for i in range(n_x):
        x = mins[0] + i * split_size
        for j in range(n_y):
            y = mins[1] + j * split_size

            for tow in transforms(pc, pc_data, shadow, x, y, x + split_size,
                                  y + split_size, 8, orig_tow):
                shadow_obj.T_obj_world = tow
                # each transform overwrites the cell's score, so only the
                # last transform's score is kept for this cell
                scores[i][j] = under_shadow(scene, bi)
                shadow_obj.T_obj_world = orig_tow

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " + str(scores[best[0]][best[1]]))
    #-------
    # Visualize best placement
    vis3d.figure()
    x = mins[0] + best[0]*split_size
    y = mins[1] + best[1]*split_size
    cell_indices = np.where((x < pc_data[:, 0]) & (pc_data[:, 0] < x + split_size) &
                            (y < pc_data[:, 1]) & (pc_data[:, 1] < y + split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0,1,1))
    vis3d.points(rest, color=(1,0,1))
    vis3d.show()
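
# `best_cell`, like `transforms` and `under_shadow`, is defined elsewhere in
# the original module. A plausible minimal implementation (an assumption,
# not necessarily the original helper) picks the highest-scoring grid cell:
def best_cell(scores):
    # argmax over the flattened grid, mapped back to (i, j) indices
    return np.unravel_index(np.argmax(scores), scores.shape)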