Example #1
    def generate_images(self, salient_edge_set, n_samples=1):
        """Generate depth image, normal image, and binary edge mask tuples.

        Parameters
        ----------
        salient_edge_set : SalientEdgeSet
            A salient edge set to generate images of.
        n_samples : int
            The number of samples to generate.

        Returns
        -------
        depth_ims : (n,) list of perception.DepthImage
            Randomly rendered depth images of the object.
        normal_ims : (n,) list of perception.NormalCloudImage
            Normal images computed from the rendered depth images.
        edge_masks : (n,) list of perception.BinaryImage
            Masks for pixels on the salient edges of the object.
        """
        # Compute stable poses of mesh
        mesh = salient_edge_set.mesh

        stp_pose_tfs, probs = mesh.compute_stable_poses()
        probs = probs / sum(probs)

        # Generate n renders
        depth_ims, normal_ims, edge_masks = [], [], []
        scene = Scene()
        so = SceneObject(mesh, RigidTransform(from_frame='obj', to_frame='world'))
        scene.add_object('object', so)

        for i in range(n_samples):
            # Sample random stable pose.
            tf_id = np.random.choice(np.arange(len(probs)), p=probs)
            tf = stp_pose_tfs[tf_id]
            T_obj_world = RigidTransform(tf[:3, :3], tf[:3, 3], from_frame='obj', to_frame='world')
            so.T_obj_world = T_obj_world

            # Create the random uniform workspace sampler
            ws_cfg = self._config['worksurface_rv_config']
            uvs = UniformPlanarWorksurfaceImageRandomVariable('object', scene, [RenderMode.DEPTH], frame='camera', config=ws_cfg)

            # Sample and extract the depth image, camera intrinsics, and T_obj_camera
            sample = uvs.sample()
            depth_im = sample.renders[RenderMode.DEPTH]
            cs = sample.camera
            ci = CameraIntrinsics(frame='camera', fx=cs.focal, fy=cs.focal, cx=cs.cx, cy=cs.cy,
                                  skew=0.0, height=ws_cfg['im_height'], width=ws_cfg['im_width'])
            T_obj_camera = cs.T_camera_world.inverse().dot(T_obj_world)
            edge_mask = self._compute_edge_mask(salient_edge_set, depth_im, ci, T_obj_camera)
            # Deproject the depth image to a point cloud and estimate per-pixel normals
            point_cloud_im = ci.deproject_to_image(depth_im)
            normal_im = point_cloud_im.normal_cloud_im()

            depth_ims.append(depth_im)
            normal_ims.append(normal_im)
            edge_masks.append(edge_mask)

        return depth_ims, normal_ims, edge_masks
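
For reference, here is a minimal usage sketch of this method. The `EdgeImageGenerator` class name, the `config` dictionary, and the file path are illustrative assumptions rather than part of the source; saving relies only on NumPy and the `.data` attribute that the perception image classes expose (used the same way in the later examples).

import numpy as np

# Hypothetical wrapper: some class that stores self._config and defines generate_images.
generator = EdgeImageGenerator(config)
edge_set = SalientEdgeSet.load('data/object_edges.ses')  # illustrative path

depth_ims, normal_ims, edge_masks = generator.generate_images(edge_set, n_samples=5)

for i, (depth_im, normal_im, edge_mask) in enumerate(zip(depth_ims, normal_ims, edge_masks)):
    # Each perception image wraps a NumPy array in its .data attribute.
    np.save('output/depth_{}.npy'.format(i), depth_im.data)
    np.save('output/normals_{}.npy'.format(i), normal_im.data)
    np.save('output/edge_mask_{}.npy'.format(i), edge_mask.data)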
Example #2
    'elevation': {
        'min': 0.10,
        'max': 10.0,
    },
    'roll': {
        'min': -0.2,
        'max': 0.2,
    },
    'x': {
        'min': -0.01,
        'max': 0.01,
    },
    'y': {
        'min': -0.01,
        'max': 0.01,
    },
    'im_width': 600,
    'im_height': 600
}

urv = UniformPlanarWorksurfaceImageRandomVariable('pawn', scene,
                                                  [RenderMode.COLOR], 'camera',
                                                  cfg)
renders = urv.sample(10, front_and_back=True)

for i, render in enumerate(renders):
    color = render.renders[RenderMode.COLOR]
    color.save('output/random_{}.jpg'.format(i))

v = SceneViewer(scene, raymond_lighting=True)
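
The same random variable can render depth alongside color. The sketch below mirrors the intrinsics recovery from Example #1 and Example #3; it assumes each returned sample exposes the `.camera` attribute used there, that `cfg` carries the `im_width`/`im_height` keys shown above, and that `CameraIntrinsics` has been imported.

import numpy as np

urv = UniformPlanarWorksurfaceImageRandomVariable('pawn', scene,
                                                  [RenderMode.COLOR, RenderMode.DEPTH],
                                                  'camera', cfg)
renders = urv.sample(10, front_and_back=True)

for i, render in enumerate(renders):
    depth = render.renders[RenderMode.DEPTH]
    cs = render.camera
    # Rebuild the sampled pinhole intrinsics, as in the other examples.
    ci = CameraIntrinsics(frame='camera', fx=cs.focal, fy=cs.focal, cx=cs.cx, cy=cs.cy,
                          skew=0.0, height=cfg['im_height'], width=cfg['im_width'])
    np.save('output/depth_{}.npy'.format(i), depth.data)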
Example #3
    def generate_examples(self, salient_edge_set_filename, n_samples=1):
        """Generate RegistrationExamples for evaluating the algorithm.

        Parameters
        ----------
        salient_edge_set_filename : str
            A file containing the salient edge set to generate images of.
        n_samples : int
            The number of samples to generate.

        Returns
        -------
        list of RegistrationExample
            RegistrationExamples pairing each rendered depth image with its
            camera intrinsics and ground-truth T_obj_camera.
        """
        # Compute stable poses of mesh
        salient_edge_set = SalientEdgeSet.load(salient_edge_set_filename)
        mesh = salient_edge_set.mesh

        stp_pose_tfs, probs = mesh.compute_stable_poses()
        probs = probs / sum(probs)

        # Generate n renders
        examples = []
        scene = Scene()
        so = SceneObject(mesh,
                         RigidTransform(from_frame='obj', to_frame='world'))
        scene.add_object('object', so)

        for i in range(n_samples):
            # Sample random stable pose.
            tf_id = np.random.choice(np.arange(len(probs)), p=probs)
            tf = stp_pose_tfs[tf_id]
            T_obj_world = RigidTransform(tf[:3, :3],
                                         tf[:3, 3],
                                         from_frame='obj',
                                         to_frame='world')
            so.T_obj_world = T_obj_world

            # Create the random uniform workspace sampler
            ws_cfg = self._config['worksurface_rv_config']
            uvs = UniformPlanarWorksurfaceImageRandomVariable(
                'object',
                scene, [RenderMode.DEPTH],
                frame='camera',
                config=ws_cfg)

            # Sample and extract the depth image, camera intrinsics, and T_obj_camera
            sample = uvs.sample()
            depth_im = sample.renders[RenderMode.DEPTH]
            cs = sample.camera
            ci = CameraIntrinsics(frame='camera',
                                  fx=cs.focal,
                                  fy=cs.focal,
                                  cx=cs.cx,
                                  cy=cs.cy,
                                  skew=0.0,
                                  height=ws_cfg['im_height'],
                                  width=ws_cfg['im_width'])
            T_obj_camera = cs.T_camera_world.inverse().dot(T_obj_world)
            examples.append(
                RegistrationExample(salient_edge_set_filename, depth_im, ci,
                                    T_obj_camera))

        return examples
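
Each RegistrationExample carries the ground-truth T_obj_camera, so a pose estimate can be scored against it. The sketch below is illustrative only: `generator` stands for an instance of the (unnamed) class defining this method, `estimate_pose` is a hypothetical registration routine, and it is assumed that RegistrationExample exposes its constructor arguments (depth_im, ci, T_obj_camera) as attributes.

import numpy as np

def pose_errors(T_gt, T_est):
    """Rotation (radians) and translation error between two obj->camera poses."""
    T_err = T_gt.inverse().dot(T_est)
    rot_err = np.arccos(np.clip((np.trace(T_err.rotation) - 1.0) / 2.0, -1.0, 1.0))
    trans_err = np.linalg.norm(T_err.translation)
    return rot_err, trans_err

examples = generator.generate_examples('data/object_edges.ses', n_samples=10)  # illustrative path
for ex in examples:
    T_est = estimate_pose(ex.depth_im, ex.ci)  # hypothetical registration routine
    rot_err, trans_err = pose_errors(ex.T_obj_camera, T_est)
    print('rotation error: {:.3f} rad, translation error: {:.4f}'.format(rot_err, trans_err))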
Example #4
#   },
#   'x': {
#       'min' : -0.01,
#       'max' : -0.01,
#   },
#   'y': {
#       'min' : -0.01,
#       'max' : 0.01,
#   },
#   'im_width': 640,
#   'im_height': 480
# }

# num_images: 874 for test dataset, 3730 for train dataset

urv = UniformPlanarWorksurfaceImageRandomVariable(
    'to_render', scene, [RenderMode.COLOR, RenderMode.DEPTH], 'camera', cfg)
renders = urv.sample(args.num_images, front_and_back=True)

# v = SceneViewer(scene, raymond_lighting=True)

camera_intr = None
camera_poses = {}

for i, render in enumerate(renders):
    color = render.renders[RenderMode.COLOR]
    depth = render.renders[RenderMode.DEPTH]
    # print(np.max(depth.data))
    # print(np.min(depth.data))
    # print(1/0)
    # depth_scaled = depth.data/0.2 * 255.
    # depth_to_grayscale = np.stack([depth_scaled]*3, axis=2)
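
    # Hedged continuation sketch: the lines below illustrate one way this loop body
    # is commonly finished, based on the commented-out scaling hint and the
    # camera_intr / camera_poses variables initialized above. The 0.2 m depth range,
    # output paths, and dictionary keys are assumptions, not taken from the source.

    # Save the color render (Example #2 uses the same .save call).
    color.save('output/color_{:06d}.jpg'.format(i))

    # Scale depth into an 8-bit array; 0.2 m as the max range is illustrative.
    depth_scaled = np.clip(depth.data / 0.2 * 255.0, 0.0, 255.0).astype(np.uint8)
    np.save('output/depth_{:06d}.npy'.format(i), depth_scaled)

    # Record the sampled camera pose; intrinsics are shared across samples.
    camera_poses['image_{:06d}'.format(i)] = render.camera.T_camera_world
    if camera_intr is None:
        camera_intr = CameraIntrinsics(frame='camera',
                                       fx=render.camera.focal, fy=render.camera.focal,
                                       cx=render.camera.cx, cy=render.camera.cy,
                                       skew=0.0,
                                       height=cfg['im_height'], width=cfg['im_width'])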