Example #1
def refine_registration(scene1, scene2, trans, voxel_size, max_iteration):
    distance_threshold = voxel_size * 0.4
    result = open3d.registration_icp(
        scene1, scene2, distance_threshold, trans,
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(max_iteration=max_iteration))  # honour the caller's iteration budget instead of a hard-coded 100
    return result
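A hedged usage sketch for the helper above, assuming the legacy (pre-0.9) open3d API used throughout these examples; the input files are hypothetical:

import numpy as np
import open3d

scene1 = open3d.read_point_cloud("scene1.ply")  # hypothetical input
scene2 = open3d.read_point_cloud("scene2.ply")  # hypothetical input
voxel_size = 0.05  # illustrative grid size
result = refine_registration(scene1, scene2, np.eye(4), voxel_size, 100)
print(result.transformation)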
Example #2
    def estimateTransform(self, source, target):
        source = od.read_point_cloud(source)
        target = od.read_point_cloud(target)
        current_transformation = self.base_transform

        for scale in range(len(self.voxel_radius)):
            iterations = self.max_iter[scale]
            radius = self.voxel_radius[scale]

            source_down = od.voxel_down_sample(source, radius)
            target_down = od.voxel_down_sample(target, radius)

            od.estimate_normals(
                source_down,
                od.KDTreeSearchParamHybrid(radius=2 * radius,
                                           max_nn=self.max_nn))
            od.estimate_normals(
                target_down,
                od.KDTreeSearchParamHybrid(radius=2 * radius,
                                           max_nn=self.max_nn))

            result_icp = od.registration_colored_icp(
                source_down, target_down, radius, current_transformation,
                od.ICPConvergenceCriteria(
                    relative_fitness=self.relative_fitness,
                    relative_rmse=self.relative_rmse,
                    max_iteration=iterations))

            current_transformation = result_icp.transformation
            # self.draw_registration_result_original_color(
            # source_down, target_down, current_transformation)

        return current_transformation
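The method above reads its coarse-to-fine schedule from instance attributes; a hedged sketch of the attributes it assumes (the names come from the code, the values are illustrative guesses, not the original author's settings):

import numpy as np

class RegistrationConfig:
    voxel_radius = [0.04, 0.02, 0.01]   # coarse-to-fine downsampling radii
    max_iter = [50, 30, 14]             # colored-ICP iterations per scale
    max_nn = 30                         # neighbours for normal estimation
    base_transform = np.eye(4)          # initial alignment guess
    relative_fitness = 1e-6             # ICP convergence thresholds
    relative_rmse = 1e-6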
Example #3
    def stitch_pointclouds_open3D(self, target, source, threshold = 1, trans_init = np.eye(4), max_iteration = 2000, with_plot = True):
        source = self.check_pointcloud_type(np.array(source))
        target = self.check_pointcloud_type(np.array(target))
        target.paint_uniform_color([0.1, 0.1, 0.7])
        if with_plot:
            self.draw_registration_result(source, target, trans_init)
            
        print("Initial alignment")
        evaluation = open3d.evaluate_registration(source, target, threshold, trans_init)
        print(evaluation)

        print("Apply point-to-point ICP")
        reg_p2p = open3d.registration_icp(source, target, threshold, trans_init,
                open3d.TransformationEstimationPointToPoint(),
                open3d.ICPConvergenceCriteria(max_iteration = max_iteration))
        print("Transformation is:")
        print(reg_p2p.transformation)
        print("")
        if with_plot:
            self.draw_registration_result(source, target, reg_p2p.transformation)
        
        xyz_source = np.asarray(source.points)
        xyz_target = np.asarray(target.points)
        xyz_global = np.concatenate([xyz_source, xyz_target], axis = 0)
        return xyz_global
Example #4
def get_evaluation(pcd_temp_,
                   pcd_scene_,
                   inlier_thres,
                   tf,
                   final_th=0,
                   n_iter=5):  #queue
    tf_pcd = np.eye(4)

    reg_p2p = open3d.registration_icp(
        pcd_temp_, pcd_scene_, inlier_thres, np.eye(4),
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(max_iteration=1))  #5?
    tf = np.matmul(reg_p2p.transformation, tf)
    tf_pcd = np.matmul(reg_p2p.transformation, tf_pcd)
    pcd_temp_.transform(reg_p2p.transformation)

    for i in range(4):
        inlier_thres = reg_p2p.inlier_rmse * 3
        if inlier_thres == 0:
            break  # no inlier residual to refine against; further passes would be no-ops

        reg_p2p = open3d.registration_icp(
            pcd_temp_, pcd_scene_, inlier_thres, np.eye(4),
            open3d.TransformationEstimationPointToPlane(),
            open3d.ICPConvergenceCriteria(max_iteration=1))  #5?
        tf = np.matmul(reg_p2p.transformation, tf)
        tf_pcd = np.matmul(reg_p2p.transformation, tf_pcd)
        pcd_temp_.transform(reg_p2p.transformation)
    inlier_rmse = reg_p2p.inlier_rmse

    ##Calculate fitness with depth_inlier_th
    if (final_th > 0):
        inlier_thres = final_th  #depth_inlier_th*2 #reg_p2p.inlier_rmse*3
        reg_p2p = open3d.registration_icp(
            pcd_temp_, pcd_scene_, inlier_thres, np.eye(4),
            open3d.TransformationEstimationPointToPlane(),
            open3d.ICPConvergenceCriteria(max_iteration=1))  #5?

    if (np.abs(np.linalg.det(tf[:3, :3]) - 1) > 0.001):
        tf[:3, 0] = tf[:3, 0] / np.linalg.norm(tf[:3, 0])
        tf[:3, 1] = tf[:3, 1] / np.linalg.norm(tf[:3, 1])
        tf[:3, 2] = tf[:3, 2] / np.linalg.norm(tf[:3, 2])
    if (np.linalg.det(tf) < 0):
        tf[:3, 2] = -tf[:3, 2]

    return tf, inlier_rmse, tf_pcd, reg_p2p.fitness
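The tail of this function rescales each rotation column independently, which restores unit length but not orthogonality. A hedged SVD-based alternative (not from the original author) that projects onto the nearest proper rotation:

# Sketch: project tf[:3, :3] onto the nearest rotation matrix via SVD.
U, _, Vt = np.linalg.svd(tf[:3, :3])
if np.linalg.det(U @ Vt) < 0:
    U[:, -1] = -U[:, -1]  # flip a column to avoid a reflection
tf[:3, :3] = U @ Vt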
Example #5
def registration_point_to_plane(scene1, scene2, voxel_size):

    # scene1 and 2 are point cloud data
    # voxel_size is grid size

    #draw_registration_result(scene1,scene2,np.identity(4))

    # voxel down sampling
    scene1_down = open3d.voxel_down_sample(scene1, voxel_size)
    scene2_down = open3d.voxel_down_sample(scene2, voxel_size)

    #draw_registration_result(scene1_down,scene2_down,np.identity(4))

    # estimate normal with search radius voxel_size*2.0
    radius_normal = voxel_size * 2.0
    open3d.estimate_normals(
        scene1, open3d.KDTreeSearchParamHybrid(radius=radius_normal,
                                               max_nn=30))
    open3d.estimate_normals(
        scene2, open3d.KDTreeSearchParamHybrid(radius=radius_normal,
                                               max_nn=30))
    open3d.estimate_normals(
        scene1_down,
        open3d.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
    open3d.estimate_normals(
        scene2_down,
        open3d.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))

    # compute FPFH features with search radius voxel_size * 2.0
    radius_feature = voxel_size * 2.0
    scene1_fpfh = open3d.compute_fpfh_feature(
        scene1_down,
        search_param=open3d.KDTreeSearchParamHybrid(radius=radius_feature,
                                                    max_nn=100))
    scene2_fpfh = open3d.compute_fpfh_feature(
        scene2_down,
        search_param=open3d.KDTreeSearchParamHybrid(radius=radius_feature,
                                                    max_nn=100))

    # compute ransac registration
    ransac_result = execute_global_registration(scene1_down, scene2_down,
                                                scene1_fpfh, scene2_fpfh,
                                                voxel_size)
    #draw_registration_result(scene1,scene2,ransac_result.transformation)

    # point-to-plane ICP registration
    distance_threshold = voxel_size * 10.0
    result = open3d.registration_icp(
        scene1, scene2, distance_threshold, ransac_result.transformation,
        open3d.TransformationEstimationPointToPlane(),
        open3d.ICPConvergenceCriteria(max_iteration=1000))

    #draw_registration_result(scene1,scene2,result.transformation)
    print(result)

    return result
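A hedged call sketch for registration_point_to_plane; it assumes execute_global_registration (from the standard open3d global-registration tutorial) is already defined in scope, and the input files are hypothetical:

scene1 = open3d.read_point_cloud("scene1.pcd")  # hypothetical input
scene2 = open3d.read_point_cloud("scene2.pcd")  # hypothetical input
result = registration_point_to_plane(scene1, scene2, voxel_size=0.05)
print(result.fitness, result.inlier_rmse)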
Example #6
def refine_registration(source, target, voxel_size, gross_matching):
    distance_threshold = voxel_size * 0.4
    print(":: Point-to-plane ICP registration is applied on original point")
    print("   clouds to refine the alignment. This time we use a strict")
    print("   distance threshold %.3f." % distance_threshold)
    result = op3.registration_icp(
        source, target, distance_threshold, gross_matching.transformation,
        op3.TransformationEstimationPointToPlane(),
        op3.ICPConvergenceCriteria(max_iteration=2000))
    return result
Example #7
def registration_icp(source,
                     target,
                     threshold,
                     transformation=np.eye(4),
                     **kwargs):
    # call the open3d registration_icp; honour an optional max_iteration
    # keyword instead of silently ignoring **kwargs
    reg_p2p = open3d.registration_icp(
        source, target, threshold, transformation,
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(
            max_iteration=kwargs.get("max_iteration", 30)))
    return reg_p2p.transformation
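With the keyword pass-through above, callers can override the iteration budget; a hedged one-liner (source_pcd and target_pcd are hypothetical open3d point clouds):

T = registration_icp(source_pcd, target_pcd, 0.02, max_iteration=50)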
Example #8
    def register(self, iteration=None, voxel_size=None):
        iteration = 100 if iteration is None else iteration
        voxel_size = 0.01 if voxel_size is None else voxel_size

        source, target = self._prepare(voxel_size=voxel_size)
        result = open3d.registration_icp(
            source,  # points_from_depth
            target,  # points_from_cad
            2 * voxel_size,
            tf.inverse_matrix(self._transform),
            open3d.TransformationEstimationPointToPoint(False),
            open3d.ICPConvergenceCriteria(max_iteration=iteration),
        )
        return tf.inverse_matrix(result.transformation)
Example #9
    def register_iterative(self, iteration=None, voxel_size=None):
        iteration = 100 if iteration is None else iteration
        voxel_size = 0.01 if voxel_size is None else voxel_size

        yield self._transform

        source, target = self._prepare(voxel_size=voxel_size)
        for i in range(iteration):
            result = open3d.registration_icp(
                source,  # points_from_depth
                target,  # points_from_cad
                2 * voxel_size,
                tf.inverse_matrix(self._transform),
                open3d.TransformationEstimationPointToPoint(False),
                open3d.ICPConvergenceCriteria(max_iteration=1),
            )
            print(f"[{i:08d}] fitness={result.fitness:.2g} "
                  f"inlier_rmse={result.inlier_rmse:.2g}")
            self._transform = tf.inverse_matrix(result.transformation)
            yield self._transform
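Because register_iterative is a generator that yields after every single-iteration ICP pass, the caller can interleave visualisation with refinement; a hedged driver sketch (registrar and update_view are hypothetical):

registrar = ...  # an instance of the class above, already initialised
for transform in registrar.register_iterative(iteration=50):
    update_view(transform)  # hypothetical per-step callback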
Example #10
def ICP(source, target):
    """

    :param source: source point cloud
    :param target:  target point cloud
    :return: source pointcloud registered
    """
    start = time.time()
    use_torch = False
    if isinstance(source, torch.Tensor):
        use_torch = True
        source = source.squeeze().cpu().numpy()
    if isinstance(target, torch.Tensor):
        target = target.squeeze().cpu().numpy()

    pcd_target = open3d.PointCloud()
    pcd_target.points = open3d.Vector3dVector(target)

    pcd_source = open3d.PointCloud()
    pcd_source.points = open3d.Vector3dVector(source)


    reg_p2p = open3d.registration_icp(
        pcd_source,
        pcd_target,
        0.1,
        criteria=open3d.ICPConvergenceCriteria(max_iteration=30))

    pcd_source.transform(reg_p2p.transformation)
    source = np.asarray(pcd_source.points)

    if use_torch:
        source = torch.from_numpy(source).cuda()

    end = time.time()
    timings_ICP.update(end - start)
    # print("ellapsed time for a ICP forward pass : ", timings_ICP.value())
    return source
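A hedged usage sketch for the torch round-trip this helper supports; the result comes back via .cuda(), so a CUDA device is assumed:

import torch

src = torch.rand(1, 1024, 3)  # hypothetical (1, N, 3) source batch
tgt = torch.rand(1, 1024, 3)  # hypothetical target batch
registered = ICP(src, tgt)    # returned as a CUDA tensor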
Example #11
	def update_keyframe(self, cloud):
		criteria = open3d.ICPConvergenceCriteria(max_iteration=100)
		reg = open3d.registration_icp(cloud, self.keyframes[-1].cloud, 1.0, self.last_frame_transformation, open3d.TransformationEstimationPointToPlane(), criteria=criteria)

		angle = pyquaternion.Quaternion(matrix=reg.transformation[:3, :3]).degrees
		trans = numpy.linalg.norm(reg.transformation[:3, 3])

		if abs(angle) < self.keyframe_angle_thresh_deg and abs(trans) < self.keyframe_trans_thresh_m:
			self.last_frame_transformation = reg.transformation
			return False

		odom = reg.transformation.dot(self.keyframes[-1].odom)
		self.keyframes.append(Keyframe(len(self.keyframes), cloud, odom))
		self.graph.nodes.append(self.keyframes[-1].node)
		self.vis.add_geometry(self.keyframes[-1].transformed)

		self.last_frame_transformation = numpy.identity(4)

		information = open3d.get_information_matrix_from_point_clouds(self.keyframes[-1].cloud, self.keyframes[-2].cloud, 1.0, reg.transformation)
		edge = open3d.PoseGraphEdge(self.keyframes[-1].id, self.keyframes[-2].id, reg.transformation, information, uncertain=False)

		self.graph.edges.append(edge)

		return True
Example #12
# transform target point cloud
th = np.deg2rad(10.0)
target.transform(np.array([[np.cos(th), -np.sin(th), 0.0, 0.0],
                           [np.sin(th), np.cos(th), 0.0, 0.0],
                           [0.0, 0.0, 1.0, 0.0],
                           [0.0, 0.0, 0.0, 1.0]]))

vis = o3.Visualizer()
vis.create_window()
result = copy.deepcopy(source)
source.paint_uniform_color([1, 0, 0])
target.paint_uniform_color([0, 1, 0])
result.paint_uniform_color([0, 0, 1])
vis.add_geometry(source)
vis.add_geometry(target)
vis.add_geometry(result)
threshold = 0.05
icp_iteration = 100
save_image = False

for i in range(icp_iteration):
    reg_p2p = o3.registration_icp(result, target, threshold,
                np.identity(4), o3.TransformationEstimationPointToPoint(),
                o3.ICPConvergenceCriteria(max_iteration=1))
    result.transform(reg_p2p.transformation)
    vis.update_geometry()
    vis.poll_events()
    vis.update_renderer()
    if save_image:
        vis.capture_screen_image("image_%04d.jpg" % i)
vis.run()
Example #13
def open3d_icp_normal(pa,
                      pb,
                      trans_init=np.eye(4),
                      max_distance=0.02,
                      b_graph=False):
    '''
    (Transformation A->B)

    pa : numpy array
    pb : numpy array

    return: 
        transformation
            - 4x4 numpy array
        result
            - transformation
            - fitness
            - inlier_rmse
            - correspondence_set
    '''

    import open3d as op3
    import copy

    def draw_registration_result(source, target, transformation):
        source_temp = copy.deepcopy(source)
        target_temp = copy.deepcopy(target)
        source_temp.paint_uniform_color([1, 0.706, 0])
        target_temp.paint_uniform_color([0, 0.651, 0.929])
        source_temp.transform(transformation)
        op3.draw_geometries([source_temp, target_temp])

    pc1 = op3.PointCloud()
    pc1.points = op3.Vector3dVector(pa)

    pc2 = op3.PointCloud()
    pc2.points = op3.Vector3dVector(pb)

    op3.estimate_normals(pc1,
                         search_param=op3.KDTreeSearchParamHybrid(radius=10.0,
                                                                  max_nn=30))
    op3.estimate_normals(pc2,
                         search_param=op3.KDTreeSearchParamHybrid(radius=10.0,
                                                                  max_nn=30))

    # op3.draw_geometries([pc1, pc2])

    # max_distance = 0.2
    # trans_init = np.asarray(
    # [[0.862, 0.011, -0.507,  0.5],
    # [-0.139, 0.967, -0.215,  0.7],
    # [0.487, 0.255,  0.835, -1.4],
    # [0.0, 0.0, 0.0, 1.0]])
    # trans_init = np.eye(4)
    # tsl = pb.mean(axis=0) - pa.mean(axis=0)
    # trans_init = TransformationMatrixFromEulerAngleTranslation(euler, tsl)
    # trans_init = transform

    #### # print("Apply point-to-point ICP")
    #### reg_p2p = op3.registration_icp(pc1, pc2, max_distance, trans_init,
    ####         # op3.TransformationEstimationPointToPoint())
    ####         op3.TransformationEstimationPointToPoint(),
    ####         op3.ICPConvergenceCriteria(max_iteration = 2000))
    #### # print(reg_p2p)
    #### # print("Transformation is:")
    #### # print(reg_p2p.transformation)
    #### # print(EulerAngleTranslationFromTransformationMatrix(reg_p2p.transformation))
    #### # print("")

    # print("Apply point-to-plane ICP")
    reg_p2l = op3.registration_icp(
        pc1, pc2, max_distance, trans_init,
        op3.TransformationEstimationPointToPlane(),
        op3.ICPConvergenceCriteria(max_iteration=2000))
    # print(reg_p2l)
    # print("Transformation is:")
    # print(reg_p2l.transformation)
    # print("")

    if b_graph:
        draw_registration_result(pc1, pc2, reg_p2l.transformation)

    return reg_p2l.transformation, reg_p2l
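A hedged call sketch for open3d_icp_normal with synthetic numpy inputs (the offset is illustrative; note the hard-coded normal-estimation radius of 10.0 above, so real inputs should be on a comparable scale):

import numpy as np

pa = np.random.rand(500, 3)            # hypothetical source points
pb = pa + np.array([0.01, 0.0, 0.0])   # hypothetical shifted target
T, result = open3d_icp_normal(pa, pb, max_distance=0.05)
print(T, result.fitness)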
Example #14
    verts_tgt_visi *= scale
    verts_tgt_visi_nhf *= scale

    # do ICP
    source = open3d.PointCloud()
    target = open3d.PointCloud()
    source.points = open3d.Vector3dVector(verts_tgt_visi_nhf)
    target.points = open3d.Vector3dVector(verts_gt_visi)

    reg_p2p = open3d.registration_icp(
        source,
        target,
        10,
        np.identity(4),
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(max_iteration=10000),
    )
    trans_mat = reg_p2p.transformation
    verts_tgt_fit = np.zeros(verts_tgt.shape)
    verts_tgt_visi_fit = np.zeros(verts_tgt_visi.shape)

    for j in range(len(verts_tgt)):
        verts_tgt_fit[j] = np.dot(trans_mat[:3, :3],
                                  verts_tgt[j]) + trans_mat[:3, 3]

    for j in range(len(verts_tgt_visi)):
        verts_tgt_visi_fit[j] = np.dot(trans_mat[:3, :3],
                                       verts_tgt_visi[j]) + trans_mat[:3, 3]

    # compute error
    avergedist_tgt, _, crsp_ind_tgt = knnsearch(verts_tgt_fit, verts_gt)
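The two per-vertex loops above can be collapsed into single matrix products; a hedged vectorised equivalent (np.dot(R, v) applied row by row equals V @ R.T):

verts_tgt_fit = verts_tgt @ trans_mat[:3, :3].T + trans_mat[:3, 3]
verts_tgt_visi_fit = verts_tgt_visi @ trans_mat[:3, :3].T + trans_mat[:3, 3]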
Example #15
  def __getitem__(self, idx):
    drive = self.files[idx][0]
    t0, t1 = self.files[idx][1], self.files[idx][2]
    all_odometry = self.get_video_odometry(drive, [t0, t1])
    positions = [self.odometry_to_positions(odometry) for odometry in all_odometry]
    fname0 = self._get_velodyne_fn(drive, t0)
    fname1 = self._get_velodyne_fn(drive, t1)

    # XYZ and reflectance
    xyzr0 = np.fromfile(fname0, dtype=np.float32).reshape(-1, 4)
    xyzr1 = np.fromfile(fname1, dtype=np.float32).reshape(-1, 4)

    xyz0 = xyzr0[:, :3]
    xyz1 = xyzr1[:, :3]

    coords0 = (xyz0 - xyz0.min(0)) / 0.05
    coords1 = (xyz1 - xyz1.min(0)) / 0.05

    sel0 = ME.utils.sparse_quantize(coords0, return_index=True)
    sel1 = ME.utils.sparse_quantize(coords1, return_index=True)

    xyz0 = xyz0[sel0]
    xyz1 = xyz1[sel1]

    # r0 = xyzr0[:, -1].reshape(-1, 1)
    # r1 = xyzr1[:, -1].reshape(-1, 1)

    # pcd0 = make_open3d_point_cloud(xyz0_t, 0.7 * np.ones((len(xyz0), 3)))
    # pcd1 = make_open3d_point_cloud(xyz1, 0.3 * np.ones((len(xyz1), 3)))

    key = '%d_%d_%d' % (drive, t0, t1)
    filename = self.icp_path + '/' + key + '.npy'
    if key not in kitti_icp_cache:
      if not os.path.exists(filename):
        if self.IS_ODOMETRY:
          M = (self.velo2cam @ positions[0].T @ np.linalg.inv(positions[1].T)
               @ np.linalg.inv(self.velo2cam)).T
        else:
          M = self.get_position_transform(positions[0], positions[1], invert=True).T
        xyz0_t = self.apply_transform(xyz0, M)
        pcd0 = make_open3d_point_cloud(xyz0_t)
        pcd1 = make_open3d_point_cloud(xyz1)
        reg = o3d.registration_icp(pcd0, pcd1, 0.2, np.eye(4),
                                   o3d.TransformationEstimationPointToPoint(),
                                   o3d.ICPConvergenceCriteria(max_iteration=200))
        pcd0.transform(reg.transformation)
        # pcd0.transform(M2) or self.apply_transform(xyz0, M2)
        M2 = M @ reg.transformation
        # o3d.draw_geometries([pcd0, pcd1])
        # write to a file
        np.save(filename, M2)
      else:
        M2 = np.load(filename)
      kitti_icp_cache[key] = M2
    else:
      M2 = kitti_icp_cache[key]

    if self.random_rotation:
      T0 = sample_random_trans(xyz0, self.randg, np.pi / 4)
      T1 = sample_random_trans(xyz1, self.randg, np.pi / 4)
      trans = T1 @ M2 @ np.linalg.inv(T0)

      xyz0 = self.apply_transform(xyz0, T0)
      xyz1 = self.apply_transform(xyz1, T1)
    else:
      trans = M2

    matching_search_voxel_size = self.matching_search_voxel_size
    if self.random_scale and random.random() < 0.95:
      scale = self.min_scale + \
          (self.max_scale - self.min_scale) * random.random()
      matching_search_voxel_size *= scale
      xyz0 = scale * xyz0
      xyz1 = scale * xyz1

    # Voxelization
    coords0 = np.floor(xyz0 / self.voxel_size)
    coords1 = np.floor(xyz1 / self.voxel_size)
    sel0 = ME.utils.sparse_quantize(coords0, return_index=True)
    sel1 = ME.utils.sparse_quantize(coords1, return_index=True)
    coords0, coords1 = coords0[sel0], coords1[sel1]
    # r0, r1 = r0[sel0], r1[sel1]

    pcd0 = make_open3d_point_cloud(xyz0)
    pcd1 = make_open3d_point_cloud(xyz1)

    # Select features and points using the returned voxelized indices
    pcd0.points = o3d.utility.Vector3dVector(np.array(pcd0.points)[sel0])
    pcd1.points = o3d.utility.Vector3dVector(np.array(pcd1.points)[sel1])

    # Get matches
    matches = get_matching_indices(pcd0, pcd1, trans, matching_search_voxel_size)
    if len(matches) < 1000:
      raise ValueError(f"{drive}, {t0}, {t1}")

    feats_train0, feats_train1 = [], []

    feats_train0.append(np.ones((len(sel0), 1)))
    feats_train1.append(np.ones((len(sel1), 1)))

    feats0 = np.hstack(feats_train0)
    feats1 = np.hstack(feats_train1)

    # Get coords
    xyz0 = np.array(pcd0.points)
    xyz1 = np.array(pcd1.points)

    if self.transform:
      coords0, feats0 = self.transform(coords0, feats0)
      coords1, feats1 = self.transform(coords1, feats1)

    return (xyz0, xyz1, coords0, coords1, feats0, feats1, matches, trans)
Example #16
        print([iter, radius, scale])

        print("3-1. Downsample with a voxel size %.2f" % radius)
        # source_down = source
        # target_down = target
        source_down = od.voxel_down_sample(source, radius)
        target_down = od.voxel_down_sample(target, radius)

        print("3-2. Estimate normal.")
        od.estimate_normals(
            source_down, od.KDTreeSearchParamHybrid(radius=2 * radius,
                                                    max_nn=5))
        od.estimate_normals(
            target_down, od.KDTreeSearchParamHybrid(radius=2 * radius,
                                                    max_nn=5))

        print("3-3. Applying colored point cloud registration")
        result_icp = od.registration_colored_icp(
            source_down, target_down, radius, current_transformation,
            od.ICPConvergenceCriteria(relative_fitness=1e-6,
                                      relative_rmse=1e-6,
                                      max_iteration=iter))
        current_transformation = result_icp.transformation
        print("Current Transform: ", current_transformation)
        draw_registration_result_original_color(source_down, target_down,
                                                current_transformation)

    draw_registration_result_original_color(source, target,
                                            current_transformation)
    np.save("transform", current_transformation)
Example #17
    print (Tg)'''
    #Tg[:3, 3] -= offset

    # preprocess
    '''source, _ = geometry.statistical_outlier_removal(source, 20, 2)
    target, _ = geometry.statistical_outlier_removal(target, 20, 2)
    source_for_color = copy.deepcopy(source) # color icp needs more points
    source = geometry.uniform_down_sample(source, round(cloudSize / 10000))'''

    print(source)
    print(target)

    current_transformation = np.identity(4)
    draw_registration_result_original_color(source, target,
                                            current_transformation)
    myCriteria = o3.ICPConvergenceCriteria(1e-8, 1e-8, 100)

    # point to point ICP
    t1 = time.time()
    current_transformation = np.identity(4)
    print("Point-to-point ICP")
    result_icp = o3.registration_icp(source, target, 0.25,
                                     current_transformation,
                                     o3.TransformationEstimationPointToPoint(),
                                     myCriteria)
    t2 = time.time()
    print("Time Consumed: ", t2 - t1)
    print(result_icp)
    print(result_icp.transformation)
    err_t, err_R = eval_err(result_icp.transformation, Tg)
    print("err_t: ", err_t)
Example #18
def icp_registration():
    """
    Execute point-to-point ICP registration (point-to-plane is still a TODO)

    TODO:
        Add point-to-plane ICP option (and update normals)

    Based on http://www.open3d.org/docs/tutorial/Basic/icp_registration.html#point-to-plane-icp
    """
    node = hou.pwd()
    node_geo = node.geometry()
    node_geo_target = node.inputs()[1].geometry()
    threshold = node.parm("threshold").eval()
    transform = node.parm("transform").eval()
    max_iter = node.parm("max_iter").eval()
    single_pass = node.parm("single_pass").eval()

    has_xform_source = bool(node_geo.findGlobalAttrib("xform"))
    has_n_source = bool(node_geo.findPointAttrib("N"))
    has_n_target = bool(node_geo_target.findPointAttrib("N"))

    if not has_xform_source:
        node_geo.addAttrib(hou.attribType.Global, "xform", default_value=(0.0,)*16, create_local_variable=False)

    if not has_n_source or not has_n_target:
        raise hou.NodeError("One of the inputs does not have 'N' attribute.")
    
    trans_init = node_geo.floatListAttribValue("xform")
    trans_init = np.array(trans_init).reshape(4,4).T

    # to numpy (np.frombuffer replaces the deprecated np.fromstring)
    np_pos_str_source = node_geo.pointFloatAttribValuesAsString("P", float_type=hou.numericData.Float32)
    np_pos_source = np.frombuffer(np_pos_str_source, dtype=np.float32).reshape(-1, 3)
    np_n_str_source = node_geo.pointFloatAttribValuesAsString("N", float_type=hou.numericData.Float32)
    np_n_source = np.frombuffer(np_n_str_source, dtype=np.float32).reshape(-1, 3)

    np_pos_str_target = node_geo_target.pointFloatAttribValuesAsString("P", float_type=hou.numericData.Float32)
    np_pos_target = np.frombuffer(np_pos_str_target, dtype=np.float32).reshape(-1, 3)
    np_n_str_target = node_geo_target.pointFloatAttribValuesAsString("N", float_type=hou.numericData.Float32)
    np_n_target = np.frombuffer(np_n_str_target, dtype=np.float32).reshape(-1, 3)

    # to open3d
    source = open3d.PointCloud()
    source.points = open3d.Vector3dVector(np_pos_source.astype(np.float64))
    source.normals = open3d.Vector3dVector(np_n_source.astype(np.float64))

    target = open3d.PointCloud()
    target.points = open3d.Vector3dVector(np_pos_target.astype(np.float64))
    target.normals = open3d.Vector3dVector(np_n_target.astype(np.float64))

    # icp
    #init_evaluation = open3d.evaluate_registration(source, target, threshold, trans_init)

    if single_pass:
        reg_p2l = open3d.registration_icp(source, target, threshold, np.identity(4), open3d.TransformationEstimationPointToPoint(), open3d.ICPConvergenceCriteria(max_iteration=1))
    else:
        reg_p2l = open3d.registration_icp(source, target, threshold, trans_init, open3d.TransformationEstimationPointToPoint(), open3d.ICPConvergenceCriteria(max_iteration=max_iter))

    # print init_evaluation
    # print reg_p2l

    # to houdini
    registration_xform = hou.Matrix4(reg_p2l.transformation)
    registration_xform = registration_xform.transposed()

    if transform:
        node_geo.transform(registration_xform)
    node_geo.setGlobalAttribValue("xform", registration_xform.asTuple())