Example #1
    def process_input(self):
        """
        Function to extract data from the input array.
        Input array is the output of the optimization step
        which holds the generated sparse model of the object
        and the relative scene transformations.
        """
        #get the relative scene transformations from input array
        self.scene_tfs = np.empty((0, 4, 4))
        for input_arr in self.input_arrays:
            num_scenes = input_arr['ref'].shape[0]
            out_ts = input_arr['scenes'][:(num_scenes) * 3].reshape(
                (num_scenes, 3))
            out_qs = input_arr['scenes'][(num_scenes) * 3:(num_scenes) *
                                         7].reshape((num_scenes, 4))
            out_tfs = np.asarray([
                tfa.compose(t, tfq.quat2mat(q), np.ones(3))
                for t, q in zip(out_ts, out_qs)
            ])
            self.scene_tfs = np.vstack((self.scene_tfs, out_tfs))

        self.object_model = []
        for obj in self.model_paths:
            #read sparse model from .txt file
            sparse_model = SparseModel().reader(obj)
            #read dense model from .PLY file
            dense_model = o3d.io.read_point_cloud(
                os.path.join(os.path.dirname(obj), "dense.ply"))
            self.object_model.append(
                [sparse_model, np.asarray(dense_model.points)])
        return
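
For reference, the 'scenes' entry consumed above is one flat vector of length 7*N per experiment: N translations (3 values each) followed by N quaternions (4 values each, w-first). A minimal round-trip sketch for a single pose, assuming tfa and tfq are the transforms3d.affines and transforms3d.quaternions aliases used throughout these examples:

    import numpy as np
    import transforms3d.affines as tfa
    import transforms3d.quaternions as tfq

    t = np.array([0.1, 0.0, 0.3])        # translation
    q = np.array([1.0, 0.0, 0.0, 0.0])   # identity quaternion (w, x, y, z)
    #compose(T, R, Z) builds a 4x4 affine from translation, rotation, zoom
    tf = tfa.compose(t, tfq.quat2mat(q), np.ones(3))
    assert tf.shape == (4, 4) and np.allclose(tf[:3, 3], t)
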
Example #2
    def process_input(self):
        """
        Function to extract data from the input array.
        Input array is the output of the optimization step
        which holds the generated sparse model of the object
        and the relative scene transformations.
        """
        #get the relative scene transformations from input array
        out_ts  = self.input_array['scenes'][ :(self.num_scenes)*3].reshape((self.num_scenes, 3))
        out_qs  = self.input_array['scenes'][(self.num_scenes)*3 : (self.num_scenes)*7].reshape((self.num_scenes, 4))
        out_tfs = np.asarray([tfa.compose(t, tfq.quat2mat(q), np.ones(3)) for t,q in zip(out_ts, out_qs)])
        self.scene_tfs = out_tfs

        #read object model from the sparse model file
        self.object_model = SparseModel().reader(self.model_path)
        return
Example #3
 def closest_points(self, transformation):
     pcd_tree = o3d.geometry.KDTreeFlann(self.ply_model)
     # Get sparse model points in scene frame
     sparse_points = np.asarray(self.sparse_model.points)
     sparse_points = np.vstack(
         [sparse_points.transpose(),
          np.ones(sparse_points.shape[0])])
     sparse_points = np.dot(transformation, sparse_points).transpose()
     # Find the closest point in PLY model to each sparse point
     closest_points = []
     for point in sparse_points:
         [_, nn_idx, _] = pcd_tree.search_knn_vector_3d(point[:3], 1)
         nn_point = np.asarray(self.ply_model.points)[nn_idx][0]
         closest_points.append(nn_point)
     # Create PointCloud of nearest neighbors
     nn_points = np.asarray(closest_points)
     nn_points = np.vstack(
         [nn_points.transpose(),
          np.ones(nn_points.shape[0])])
     nn_points = np.dot(np.linalg.inv(transformation),
                        nn_points).transpose()
     nn_pcd = o3d.geometry.PointCloud()
     nn_pcd.points = o3d.utility.Vector3dVector(nn_points[:, :3])
     write_path = os.path.join(os.path.dirname(self.path_to_sparse_model),
                               "gt_sparse.txt")
     SparseModel().writer(np.asarray(nn_pcd.points), write_path, True)
     write_path = os.path.join(os.path.dirname(self.path_to_sparse_model),
                               "gt_cad.ply")
     o3d.io.write_point_cloud(
         write_path,
         self.ply_model.transform(np.linalg.inv(transformation)))
     return
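
The nearest-neighbour queries above follow the standard Open3D KD-tree API, where search_knn_vector_3d returns a (count, indices, squared-distances) tuple. A self-contained sketch on a toy cloud:

    import numpy as np
    import open3d as o3d

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(np.random.rand(100, 3))
    tree = o3d.geometry.KDTreeFlann(pcd)
    #query the single nearest neighbor of a 3D point
    [k, idx, dist2] = tree.search_knn_vector_3d(np.array([0.5, 0.5, 0.5]), 1)
    nearest = np.asarray(pcd.points)[idx[0]]
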
Example #4
    def __init__(self, dataset_dir_path, sparse_model_path, dense_model_path, scene_meta_path, visualize=False):
        """
        Constructor for Annotations class.
        Input arguments:
        dataset_dir_path   - path to root dataset directory
        sparse_model_path  - path to sparse model (*.txt)
        dense_model_path   - path to dense model (*.ply)
        scene_meta_path    - path to scenes' meta info (*.npz)
        visualize          - set 'True' to visualize
        """
        self.dataset_path = dataset_dir_path
        self.input_array  = np.load(scene_meta_path)
        self.visualize    = visualize
        #read sparse model from .txt file
        sparse_model = SparseModel().reader(sparse_model_path)
        #read dense model from .PLY file
        dense_model = o3d.io.read_point_cloud(dense_model_path)
        dense_model = dense_model.voxel_down_sample(voxel_size=0.005)

        #read camera intrinsics matrix from camera.txt in root directory
        self.cam_mat = np.eye(3)
        with open(os.path.join(self.dataset_path, 'camera.txt'), 'r') as file:
            camera_intrinsics = file.readlines()[0].split()
            camera_intrinsics = list(map(float, camera_intrinsics))
        self.cam_mat[0,0] = camera_intrinsics[0]
        self.cam_mat[1,1] = camera_intrinsics[1]
        self.cam_mat[0,2] = camera_intrinsics[2]
        self.cam_mat[1,2] = camera_intrinsics[3]

        #get number of scenes and number of keypoints
        self.num_scenes = int(self.input_array['scenes'].shape[0]/7)
        self.num_keypts = sparse_model.shape[0]

        #paths to each of the scene dirs inside root dir
        self.list_of_scene_dirs = [d for d in os.listdir(self.dataset_path)
                                   if os.path.isdir(os.path.join(self.dataset_path, d))]
        self.list_of_scene_dirs.sort()
        self.list_of_scene_dirs = self.list_of_scene_dirs[:self.num_scenes]
        print("List of scenes: ", self.list_of_scene_dirs)
        print("Number of scenes: ", self.num_scenes)
        print("Number of keypoints: ", self.num_keypts)

        #expect images to be 640x480
        self.width = 640
        self.height = 480

        #bounding-box needs to be scaled up to avoid excessive cropping
        self.bbox_scale = 1.5
        #define a ratio of labeled samples to produce
        self.ratio = 10

        #this is the object model
        self.object_model = [sparse_model, np.asarray(dense_model.points)]
        #these are the relative scene transformations
        self.scene_tfs = []
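
The parsing above implies camera.txt stores fx, fy, cx and cy as the first four whitespace-separated values on its first line; any further values are ignored. A sketch of the resulting intrinsics matrix, with assumed illustrative values:

    import numpy as np

    fx, fy, cx, cy = 615.0, 615.0, 320.0, 240.0   # hypothetical values
    cam_mat = np.eye(3)
    cam_mat[0, 0], cam_mat[1, 1] = fx, fy   # focal lengths
    cam_mat[0, 2], cam_mat[1, 2] = cx, cy   # principal point
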
Example #5
    def define_grasp_point(self, ply_path):
        """
        Function to define grasp pose.
        """
        # create output dir if not exists
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)

        if self.scene_kpts.shape != (1, 3, 2):
            raise Exception("2 3D points must exist to define a grasp pose.")
        if not os.path.exists(ply_path):
            raise Exception("%s path does not exist" % ply_path)
        #get 2 user-defined 3D points
        point_1 = self.scene_kpts.transpose(0, 2, 1)[0, 0]
        point_2 = self.scene_kpts.transpose(0, 2, 1)[0, 1]
        #read point cloud and get normals
        scene_cloud = o3d.io.read_point_cloud(ply_path)
        scene_cloud.estimate_normals()
        scene_cloud.normalize_normals()
        scene_points = scene_cloud.points
        scene_normals = scene_cloud.normals
        scene_tree = o3d.geometry.KDTreeFlann(scene_cloud)
        #get neighbors and find normal direction
        [_, idx, _] = scene_tree.search_knn_vector_3d(point_1, 200)
        normals = np.asarray(scene_normals)[list(idx)]
        normal_dir = normals.mean(0)
        normal_dir = normal_dir / np.linalg.norm(normal_dir)
        #get intersection of the ray through point_2 and the
        #plane through point_1: lambda = (n . p1) / (n . p2)
        lam = np.dot(normal_dir, point_1) / np.dot(normal_dir, point_2)
        point_i = lam * point_2
        vector_x = (point_i - point_1)
        vector_y = np.cross(normal_dir, vector_x)
        #normalize
        vector_x = vector_x / np.linalg.norm(vector_x)
        vector_y = vector_y / np.linalg.norm(vector_y)
        #create rotation matrix
        rot_mat = np.array([vector_x, vector_y, normal_dir])
        tf_mat = tfa.compose(point_1, rot_mat, np.ones(3))

        #save the grasp point
        SparseModel().grasp_writer(
            tf_mat, os.path.join(self.output_dir, "sparse_model.txt"))

        #visualize in open3d
        vis_mesh_list = []
        vis_mesh_list.append(scene_cloud)
        coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
            0.2)
        coordinate_frame.transform(tf_mat)
        vis_mesh_list.append(coordinate_frame)
        o3d.visualization.draw_geometries(vis_mesh_list)
        return True, tf_mat
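
The intersection step solves for the point where the ray from the scene origin through point_2 meets the plane through point_1 with normal n: requiring n.(lam*p2 - p1) = 0 gives lam = (n.p1)/(n.p2). A quick numeric check of that identity:

    import numpy as np

    n  = np.array([0.0, 0.0, 1.0])   # plane normal
    p1 = np.array([0.2, 0.1, 0.5])   # point on the plane
    p2 = np.array([0.3, 0.0, 1.0])   # ray direction (origin through p2)
    lam = np.dot(n, p1) / np.dot(n, p2)
    point_i = lam * p2               # intersection point
    assert np.isclose(np.dot(n, point_i - p1), 0.0)   # lies on the plane
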
Example #6
    def get_true_poses(self):
        """
        Function to find the 3D position of each defined keypoint in the frame of
        every scene origin. Uses the .npz zipped archive to get the relative scene
        transformations, the selection matrix etc.
        Returns a list of (Nx3) numpy arrays, where the i-th array in the list
        holds the 3D keypoint configuration of the object in the i-th scene.
        """
        list_of_poses = []
        # get selected points from input npz array
        ref_keypts = self.input_array['ref']
        # get selection matrix from input npz array
        select_mat = self.input_array['sm']
        # convert selection matrix to a binary matrix
        col_splits = np.hsplit(select_mat, select_mat.shape[1] // 3)
        row_splits = [np.vsplit(col, col.shape[0] // 3) for col in col_splits]
        vis_list = [
            sum(map(lambda x: (x == np.eye(3)).all(), mat))
            for mat in row_splits
        ]
        # binary matrix of shape (num of scenes x num of keypoints)
        vis_mat = np.reshape(vis_list, [self.num_scenes, self.num_keypts])

        for sce_id, visibility in zip(range(len(self.list_of_scene_dirs)),
                                      vis_mat):
            # read true model from .pp file
            tru_model = SparseModel().reader(self.picked_pts, 1 / 1000)
            # partial user-annotated 3D model
            obj_manual = ref_keypts[sce_id].transpose()[np.nonzero(visibility)
                                                        [0]]
            # select corresponding points in true model
            tru_model_part = tru_model[np.nonzero(visibility)[0]]
            # use procrustes analysis to align true model to annotated points
            _, _, tform = procrustes(tru_model_part, obj_manual, False)

            T = tfa.compose(tform['translation'],
                            np.linalg.inv(tform['rotation']), np.ones(3))
            T = np.linalg.inv(T)
            true_object = np.asarray([(T[:3, :3].dot(pt) + T[:3, 3])
                                      for pt in tru_model])
            list_of_poses.append(true_object)
        return list_of_poses
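
The selection-matrix decoding at the top of this function can be verified on a toy case, e.g. one scene with two keypoints of which only the first was annotated:

    import numpy as np

    #toy selection matrix: 1 scene x 2 keypoints, keypoint 0 visible
    select_mat = np.zeros((3, 6))
    select_mat[0:3, 0:3] = np.eye(3)
    col_splits = np.hsplit(select_mat, select_mat.shape[1] // 3)
    row_splits = [np.vsplit(col, col.shape[0] // 3) for col in col_splits]
    vis_list = [sum(map(lambda x: (x == np.eye(3)).all(), mat))
                for mat in row_splits]
    print(np.reshape(vis_list, [1, 2]))   # -> [[1 0]]
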
Example #7
    def __init__(self, sparse_model_path, ply_model_path, pp_file_path,
                 visualize):
        # paths to model files
        self.path_to_sparse_model = sparse_model_path
        self.path_to_ply_model = ply_model_path
        self.path_to_picked_points = pp_file_path
        # visualization flag
        self.visualize = visualize

        # icp max-correspondence-distance
        self.threshold = 0.01  # 1cm distance threshold

        # read sparse model
        sparse_model_array = SparseModel().reader(self.path_to_sparse_model)
        self.sparse_model = o3d.geometry.PointCloud()
        self.sparse_model.points = o3d.utility.Vector3dVector(
            sparse_model_array)

        # read ply model
        self.ply_model = o3d.io.read_point_cloud(self.path_to_ply_model)
        self.ply_model.scale(PLY_SCALE, np.zeros(3))
Example #8
    def process(self):
        # read picked points
        pp_array = SparseModel().reader(self.path_to_picked_points, PP_SCALE)
        pp_model = o3d.geometry.PointCloud()
        pp_model.points = o3d.utility.Vector3dVector(pp_array)
        corr = np.zeros((len(pp_array), 2))
        corr[:, 0] = list(range(len(pp_array)))
        corr[:, 1] = list(range(len(pp_array)))

        # estimate rough transformation using correspondences
        p2p = o3d.registration.TransformationEstimationPointToPoint()
        trans_init = p2p.compute_transformation(
            self.sparse_model, pp_model, o3d.utility.Vector2iVector(corr))
        # point-to-point ICP for refinement
        reg_p2p = o3d.registration.registration_icp(self.sparse_model,
                                                    self.ply_model,
                                                    self.threshold, trans_init)

        # visualize if required
        if self.visualize:
            draw_registration_result(self.sparse_model, self.ply_model,
                                     reg_p2p.transformation)
        return reg_p2p.transformation
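
A usage sketch, assuming the constructor in Example #7 and this process() method belong to the same class (called Aligner here; the actual class name is not shown in these snippets). Note that o3d.registration is the pre-0.13 Open3D namespace; newer releases expose the same classes under o3d.pipelines.registration.

    #hypothetical paths and class name for illustration
    aligner = Aligner("experiment/sparse_model.txt", "models/dense.ply",
                      "models/obj_picked_points.pp", visualize=True)
    tf = aligner.process()   # 4x4 sparse-model-to-PLY-model transformation
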
Example #9
    def compute(self, sparse_model_flag=False):
        """
        Function to compute the sparse model and the relative scene transformations
        through optimization. Output directory will be created if it does not exist.
        Returns a success boolean.
        """
        # create output dir if not exists
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)

        #populate selection matrix from select_vec
        total_kpt_count = len(self.select_vec)
        found_kpt_count = len(np.nonzero(self.select_vec)[0])
        selection_matrix = np.zeros((found_kpt_count * 3, total_kpt_count * 3))
        for idx, nz_idx in enumerate(np.nonzero(self.select_vec)[0]):
            selection_matrix[(idx * 3):(idx * 3) + 3,
                             (nz_idx * 3):(nz_idx * 3) + 3] = np.eye(3)

        computed_vector = []
        success_flag = False
        if sparse_model_flag:
            object_model = SparseModel().reader(self.sparse_model_file)
            success_flag, res = app.geo_constrain.predict(
                object_model, self.scene_kpts.transpose(0, 2, 1),
                self.select_vec)
            scene_t = np.asarray([np.array(i[:3, 3]) for i in res])
            scene_q = np.asarray(
                [tfq.mat2quat(np.array(i[:3, :3])) for i in res])
            computed_vector = np.concatenate(
                (scene_t.flatten(), scene_q.flatten()))
        else:
            #initialize quaternions and translations for each scene
            scene_t_ini = np.array([[0, 0,
                                     0]]).repeat(self.scene_kpts.shape[0],
                                                 axis=0)
            scene_q_ini = np.array([[1, 0, 0,
                                     0]]).repeat(self.scene_kpts.shape[0],
                                                 axis=0)
            scene_P_ini = np.array([[[0, 0,
                                      0]]]).repeat(self.scene_kpts.shape[2],
                                                   axis=0)
            #initialize with known keypoints
            scene_P_ini = self.scene_kpts[0].transpose()[:, np.newaxis, :]

            #main optimization step
            res = app.optimize.predict(self.scene_kpts, scene_t_ini,
                                       scene_q_ini, scene_P_ini,
                                       selection_matrix)

            #extract the generated sparse object model from the optimization output
            len_ts = scene_t_ini[1:].size
            len_qs = scene_q_ini[1:].size
            object_model = res.x[len_ts + len_qs:].reshape(scene_P_ini.shape)
            object_model = object_model.squeeze()
            #save the generated sparse object model
            SparseModel().writer(
                object_model, os.path.join(self.output_dir,
                                           "sparse_model.txt"), True)
            computed_vector = np.concatenate([
                np.zeros(3), res.x[:len_ts],
                tfq.qeye(), res.x[len_ts:len_ts + len_qs]
            ])
            success_flag = res.success

        #save the input and the output from optimization step
        out_fn = os.path.join(self.output_dir, 'saved_meta_data')
        np.savez(out_fn,
                 model=object_model,
                 scenes=computed_vector,
                 ref=self.scene_kpts,
                 sm=selection_matrix)

        if success_flag:
            print("--------\n--------\n--------")
            print("Computed results saved at {}".format(out_fn))
            print("--------\n--------\n--------")

        return success_flag, object_model
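
In the optimization branch, the saved 'scenes' vector therefore holds all N translations first (scene 0 pinned to the origin) followed by all N w-first quaternions (scene 0 pinned to identity), which is exactly the layout Example #2 unpacks. A round-trip sketch, with an assumed scene count:

    import numpy as np

    num_scenes = 4   # assumed; must match the number of scenes in the experiment
    vec = np.load("saved_meta_data.npz")['scenes']   # length 7 * num_scenes
    ts = vec[:num_scenes * 3].reshape((num_scenes, 3))
    qs = vec[num_scenes * 3:num_scenes * 7].reshape((num_scenes, 4))
    #scene 0 is anchored at the origin with identity rotation
    assert np.allclose(ts[0], 0) and np.allclose(qs[0], [1, 0, 0, 0])
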
Example #10
class PartialModel:
    def __init__(self, dataset_path, input_arr_path, input_model_path):
        """
        Constructor for PartialModel class.
        Input arguments:
        dataset_path   - path to root dataset directory
        input_arr_path - path to input npz zipped archive
        input_model_path - path to sparse model file
        """
        self.dataset_path = dataset_path
        self.input_array  = np.load(input_arr_path)
        self.model_path   = input_model_path

        #get number of scenes and number of keypoints
        self.num_scenes = self.input_array['ref'].shape[0]
        self.num_keypts = self.input_array['ref'].shape[2]

        #paths to each of the scene dirs inside root dir
        self.list_of_scene_dirs = [d for d in os.listdir(dataset_path) if os.path.isdir(os.path.join(dataset_path, d))]
        self.list_of_scene_dirs.sort()
        self.list_of_scene_dirs = self.list_of_scene_dirs[:self.num_scenes]
        print("List of scenes: ", self.list_of_scene_dirs)
        print("Number of scenes: ", self.num_scenes)
        print("Number of keypoints: ", self.num_keypts)

        #this is the object model
        self.object_model = []
        #these are the relative scene transformations
        self.scene_tfs = []

    def process_input(self):
        """
        Function to extract data from the input array.
        Input array is the output of the optimization step
        which holds the generated sparse model of the object
        and the relative scene transformations.
        """
        #get the relative scene transformations from input array
        out_ts  = self.input_array['scenes'][ :(self.num_scenes)*3].reshape((self.num_scenes, 3))
        out_qs  = self.input_array['scenes'][(self.num_scenes)*3 : (self.num_scenes)*7].reshape((self.num_scenes, 4))
        out_tfs = np.asarray([tfa.compose(t, tfq.quat2mat(q), np.ones(3)) for t,q in zip(out_ts, out_qs)])
        self.scene_tfs = out_tfs

        #read object model from the sparse model file
        self.object_model = SparseModel().reader(self.model_path)
        return

    def get_fragments_by_radius(self, radius=0.05):
        """
        Function to extract point cloud clusters using a radius search
        around each keypoint of the sparse model, in each scene. Returns the
        list of partial models transformed into the world frame.
        """
        point_cloud_list = []
        #iterate over the scene dirs and their relative scene tfs
        for data_dir_idx, (cur_scene_dir, sce_t) in enumerate(zip(self.list_of_scene_dirs, self.scene_tfs)):
            #load scene point cloud from .PLY file
            cur_ply_path = os.path.join(self.dataset_path, cur_scene_dir, cur_scene_dir + '.ply')
            pcd = o3d.io.read_point_cloud(cur_ply_path)

            #build KDTree
            pcd_tree = o3d.geometry.KDTreeFlann(pcd)

            #get object keypoints in scene frame
            model_points = np.vstack([self.object_model.transpose(),
                                      np.ones(self.object_model.shape[0])])
            model_points = np.dot(sce_t, model_points).transpose()

            fragment = o3d.geometry.PointCloud()
            for keypt in model_points:
                [_, idx, _] = pcd_tree.search_radius_vector_3d(keypt[:3], radius)
                fragment.points.extend(o3d.utility.Vector3dVector(np.asarray(pcd.points)[idx]))
            fragment.transform(np.linalg.inv(sce_t))
            point_cloud_list.append(fragment)
        return point_cloud_list

    def get_regions(self):
        """
        Function to extract point cloud clusters using region-growing clustering
        with keypoints as initial seed points. Returns the
        list of partial models transformed into the world frame.
        """
        point_cloud_list = []

        #initialize region growing class
        reg = RegionGrowing(np.radians(5), 1.0)  #5 degrees in radians

        #iterate over the scene dirs and their relative scene tfs
        for data_dir_idx, (cur_scene_dir, sce_t) in enumerate(zip(self.list_of_scene_dirs, self.scene_tfs)):
            #get object keypoints in scene frame
            model_points = np.vstack([self.object_model.transpose(),
                                      np.ones(self.object_model.shape[0])])
            model_points = np.dot(sce_t, model_points).transpose()

            #load scene point cloud from .PLY file
            cur_ply_path = os.path.join(self.dataset_path, cur_scene_dir, cur_scene_dir + '.ply')
            pcd = o3d.io.read_point_cloud(cur_ply_path)

            #extract regions using model points as initial seeds
            reg.set_input_cloud(pcd)
            reg.box_crop(model_points[:,:3].mean(0), 0.003, 0.2)
            reg.set_seeds(model_points[:,:3])
            regions = reg.extract()

            #transform each region to scene tf and add to list
            point_cloud_list.extend([region.transform(np.linalg.inv(sce_t)) for region in regions])
        return point_cloud_list
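
A usage sketch for PartialModel, with hypothetical paths for illustration:

    #hypothetical dataset and experiment paths
    pm = PartialModel("data/object_ds", "experiment/saved_meta_data.npz",
                      "experiment/sparse_model.txt")
    pm.process_input()   #unpack scene tfs and the sparse model
    fragments = pm.get_fragments_by_radius(radius=0.05)
    o3d.visualization.draw_geometries(fragments)
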
Example #11
def process(input_array, num_out_scenes, output_dir):
    #process input
    scene_kpts = input_array['ref']
    selection_matrix = input_array['sm']

    #get number of scenes and keypoints in the original experiment
    num_scenes = scene_kpts.shape[0]
    num_keypts = scene_kpts.shape[2]

    # convert selection matrix to a binary matrix
    col_splits = np.hsplit(selection_matrix, selection_matrix.shape[1] // 3)
    row_splits = [np.vsplit(col, col.shape[0] // 3) for col in col_splits]
    vis_list = [
        sum(map(lambda x: (x == np.eye(3)).all(), mat)) for mat in row_splits
    ]
    # binary matrix of shape (num of scenes x num of keypoints)
    vis_mat = np.reshape(vis_list, [num_scenes, num_keypts])

    #slice binary matrix
    vis_mat = vis_mat[:num_out_scenes]

    # convert binary matrix back to selection matrix
    select_vec = vis_mat.flatten()
    total_kpt_count = len(select_vec)
    found_kpt_count = len(np.nonzero(select_vec)[0])
    selection_matrix = np.zeros((found_kpt_count * 3, total_kpt_count * 3))
    for idx, nz_idx in enumerate(np.nonzero(select_vec)[0]):
        selection_matrix[(idx * 3):(idx * 3) + 3,
                         (nz_idx * 3):(nz_idx * 3) + 3] = np.eye(3)

    #slice scene keypoints
    scene_kpts = scene_kpts[:num_out_scenes]

    #initialize quaternions and translations for each scene
    scene_t_ini = np.array([[0, 0, 0]]).repeat(scene_kpts.shape[0], axis=0)
    scene_q_ini = np.array([[1, 0, 0, 0]]).repeat(scene_kpts.shape[0], axis=0)
    scene_P_ini = np.array([[[0, 0, 0]]]).repeat(scene_kpts.shape[2], axis=0)
    #initialize with known keypoints
    scene_P_ini = scene_kpts[0].transpose()[:, np.newaxis, :]

    #main optimization step
    res = app.optimize.predict(scene_kpts, scene_t_ini, scene_q_ini,
                               scene_P_ini, selection_matrix)

    #extract the generated sparse object model from the optimization output
    len_ts = scene_t_ini[1:].size
    len_qs = scene_q_ini[1:].size
    object_model = res.x[len_ts + len_qs:].reshape(scene_P_ini.shape)
    object_model = object_model.squeeze()
    #save the generated sparse object model
    SparseModel().writer(object_model,
                         os.path.join(output_dir, "sparse_model.txt"))
    computed_vector = res.x[:(len_ts + len_qs)]
    success_flag = res.success

    #save the input and the output from optimization step
    out_fn = os.path.join(output_dir, 'saved_meta_data')
    np.savez(out_fn,
             model=object_model,
             scenes=computed_vector,
             ref=scene_kpts,
             sm=selection_matrix)

    if success_flag:
        print("--------\n--------\n--------")
        print("Computed results saved at {}".format(out_fn))
        print("--------\n--------\n--------")
    return object_model
Example #12
    #set scale factors
    PLY_SCALE = 1
    PP_SCALE = 1

    path_to_dataset_dir = opt.dataset
    path_to_experiment_dir = opt.experiment

    # Set up paths (hard-coded for now)
    path_to_sparse_model = os.path.join(opt.experiment, "sparse_model.txt")
    path_to_pp_model = os.path.join(opt.dataset, os.path.basename(opt.dataset) + "_picked_points.pp")
    path_to_input_tfs = os.path.join(opt.experiment, "saved_meta_data.npz")
    path_to_output_tfs = os.path.join(opt.experiment, "gt_meta_data.npz")

    # Read sparse model 
    sparse_model_array = SparseModel().reader(path_to_sparse_model)
    sparse_model = o3d.geometry.PointCloud()
    sparse_model.points = o3d.utility.Vector3dVector(sparse_model_array)

    # Read PickedPoints model
    pp_array = SparseModel().reader(path_to_pp_model, PP_SCALE)
    pp_model = o3d.geometry.PointCloud()
    pp_model.points = o3d.utility.Vector3dVector(pp_array)

    # Paths to each of the scene dirs inside root dir
    list_of_scene_dirs = [d for d in os.listdir(path_to_dataset_dir)
                          if os.path.isdir(os.path.join(path_to_dataset_dir, d))]
    list_of_scene_dirs.sort()
    num_scenes = len(list_of_scene_dirs)
    num_keypts = len(pp_model.points)
    print("List of scenes: ", list_of_scene_dirs)