Example #1
import copy

import cv2
import numpy as np
import open3d


def ppf_icp_registration(source_cloud, target_cloud, n_points=3000, n_iter=100, tolerance=0.001, num_levels=5, scale=0.001):
    """Align the source cloud to the target cloud using ICP from OpenCV's ppf_match_3d module.
    Args:
        source_cloud (open3d.geometry.PointCloud): source Open3D point cloud; assumed to already carry normals
        target_cloud (open3d.geometry.PointCloud): target Open3D point cloud
        For the remaining parameters, see https://docs.opencv.org/master/dc/d9b/classcv_1_1ppf__match__3d_1_1ICP.html
    Returns:
        pose (np.array): 4x4 transformation from the source to the target cloud
        residual (float): the output registration error
    """
    source_cloud = copy.deepcopy(source_cloud)
    source_cloud = source_cloud.voxel_down_sample(scale)
    target_cloud = copy.deepcopy(target_cloud)
    n_source_points = np.shape(source_cloud.points)[0]
    n_target_points = np.shape(target_cloud.points)[0]
    n_sample = np.min([n_source_points, n_target_points, n_points])
    if n_sample == 0:
        return None, 10000
    if n_source_points > n_points:
        source_idxes = np.random.choice(n_source_points, n_sample, replace=False)
        # select_down_sample was renamed select_by_index in Open3D >= 0.10
        source_cloud = source_cloud.select_down_sample(source_idxes)
    if n_target_points > n_points:
        target_idxes = np.random.choice(n_target_points, n_sample, replace=False)
        target_cloud = target_cloud.select_down_sample(target_idxes)
    target_cloud.estimate_normals(search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))

    # the source cloud must already contain normals; only the target's are estimated above
    source_np_cloud = np.concatenate([np.asarray(source_cloud.points), np.asarray(source_cloud.normals)], axis=1).astype(np.float32)
    target_np_cloud = np.concatenate([np.asarray(target_cloud.points), np.asarray(target_cloud.normals)], axis=1).astype(np.float32)
    # "tolerence" is OpenCV's own spelling of this keyword argument
    icp_fnc = cv2.ppf_match_3d_ICP(n_iter, tolerence=tolerance, rejectionScale=2.5, numLevels=num_levels)
    try:
        retval, residual, pose = icp_fnc.registerModelToScene(source_np_cloud, target_np_cloud)
    except Exception:
        return None, 10000
    else:
        return pose, residual
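
A minimal usage sketch for the helper above (continuing from its imports), assuming an Open3D version old enough (<= 0.9) to still provide select_down_sample; the PLY paths are placeholders, and the source normals are estimated here because the helper only estimates the target's:

source = open3d.io.read_point_cloud("source.ply")  # placeholder paths
target = open3d.io.read_point_cloud("target.ply")
source.estimate_normals(
    search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))

pose, residual = ppf_icp_registration(source, target)
if pose is not None:
    print("registration residual:", residual)
    print(pose)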
Example #2
def icp_refinement(pts_tgt, obj_model, rot_pred, tra_pred, cam_K, ren):
    centroid_tgt = np.array([
        np.mean(pts_tgt[:, 0]),
        np.mean(pts_tgt[:, 1]),
        np.mean(pts_tgt[:, 2])
    ])
    if (tra_pred[2] < 300 or tra_pred[2] > 5000):
        # implausible predicted depth: fall back to the target centroid (in mm)
        tra_pred = centroid_tgt * 1000

    # render the object at the predicted pose; the OpenGL renderer is a bit
    # slower but more precise. gpu_rendering, obj_models and obj_order_id are
    # module-level globals in the original script.
    if (gpu_rendering):
        img_init, depth_init = render_obj_gpu(obj_model, rot_pred,
                                              tra_pred / 1000, cam_K, ren)
    else:
        img_init, depth_init = render_obj(obj_models[obj_order_id], rot_pred,
                                          tra_pred / 1000, cam_K, ren)
    init_mask = depth_init > 0
    bbox_init = get_bbox_from_mask(init_mask > 0)
    tf = np.eye(4)
    if (bbox_init[2] - bbox_init[0] < 10 or bbox_init[3] - bbox_init[1] < 10):
        return tf, -1
    if (np.sum(init_mask) < 10):
        return tf, -1
    points_src = np.zeros(
        (bbox_init[2] - bbox_init[0], bbox_init[3] - bbox_init[1], 6),
        np.float32)
    points_src[:, :, :3] = getXYZ(depth_init, cam_K[0, 0], cam_K[1, 1],
                                  cam_K[0, 2], cam_K[1, 2], bbox_init)
    points_src[:, :, 3:] = get_normal(depth_init,
                                      fx=cam_K[0, 0],
                                      fy=cam_K[1, 1],
                                      cx=cam_K[0, 2],
                                      cy=cam_K[1, 2],
                                      refine=True,
                                      bbox=bbox_init)
    points_src = points_src[init_mask[bbox_init[0]:bbox_init[2],
                                      bbox_init[1]:bbox_init[3]] > 0]

    #adjust the initial translation using centroids of visible points
    centroid_src = np.array([
        np.mean(points_src[:, 0]),
        np.mean(points_src[:, 1]),
        np.mean(points_src[:, 2])
    ])
    trans_adjust = centroid_tgt - centroid_src
    tra_pred = tra_pred + trans_adjust * 1000
    points_src[:, :3] += trans_adjust

    icp_fnc = cv2.ppf_match_3d_ICP(100, tolerence=0.05, numLevels=4)  #1cm
    retval, residual, pose = icp_fnc.registerModelToScene(
        points_src.reshape(-1, 6), pts_tgt.reshape(-1, 6))

    # compose the ICP correction with the initial pose estimate
    tf[:3, :3] = rot_pred
    tf[:3, 3] = tra_pred / 1000  # in meters
    tf = np.matmul(pose, tf)
    return tf, residual
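
registerModelToScene expects both clouds as Nx6 float32 arrays (XYZ followed by unit normals) and returns (retval, residual, pose). A self-contained sketch of that contract on a synthetic sphere, independent of the examples above:

import cv2
import numpy as np

rng = np.random.default_rng(0)
pts = rng.normal(size=(2000, 3)).astype(np.float32)
pts /= np.linalg.norm(pts, axis=1, keepdims=True)   # points on the unit sphere
model = np.hstack([pts, pts])                       # on a unit sphere, normal == position
scene = model.copy()
scene[:, :3] += np.float32([0.05, 0.0, 0.0])        # known translation to recover

icp = cv2.ppf_match_3d_ICP(100)
retval, residual, pose = icp.registerModelToScene(model, scene)
print(residual)
print(pose)  # should be close to a pure +x translation of 0.05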
Example #3
def icp_refinement(self, pts_tgt, obj_model, rot_pred, tra_pred):
    tf = np.eye(4)  # defined up front so the early returns below are valid
    centroid_tgt = np.array([
        np.mean(pts_tgt[:, 0]),
        np.mean(pts_tgt[:, 1]),
        np.mean(pts_tgt[:, 2])
    ])
    if (tra_pred[2] < 300 or tra_pred[2] > 5000):
        # when the estimated translation is implausible, fall back to the
        # centroid of the target points as the translation
        tra_pred = centroid_tgt * 1000
    img_init, depth_init = self.render_obj_pyrender(
        obj_model, rot_pred, tra_pred / 1000)
    init_mask = depth_init > 0
    bbox_init = get_bbox_from_mask(init_mask > 0)
    if (bbox_init[2] - bbox_init[0] < 10
            or bbox_init[3] - bbox_init[1] < 10):
        return tf, -1
    if (np.sum(init_mask) < 10):
        return tf, -1
    points_src = np.zeros(
        (bbox_init[2] - bbox_init[0], bbox_init[3] - bbox_init[1], 6),
        np.float32)
    points_src[:, :, :3] = getXYZ(depth_init, self.camK[0, 0],
                                  self.camK[1, 1], self.camK[0, 2],
                                  self.camK[1, 2], bbox_init)
    points_src[:, :, 3:] = get_normal(depth_init,
                                      fx=self.camK[0, 0],
                                      fy=self.camK[1, 1],
                                      cx=self.camK[0, 2],
                                      cy=self.camK[1, 2],
                                      refine=True,
                                      bbox=bbox_init)
    points_src = points_src[init_mask[bbox_init[0]:bbox_init[2],
                                      bbox_init[1]:bbox_init[3]] > 0]
    # adjust the initial translation using centroids of visible points
    centroid_src = np.array([
        np.mean(points_src[:, 0]),
        np.mean(points_src[:, 1]),
        np.mean(points_src[:, 2])
    ])
    trans_adjust = centroid_tgt - centroid_src
    tra_pred = tra_pred + trans_adjust * 1000
    points_src[:, :3] += trans_adjust
    icp_fnc = cv2.ppf_match_3d_ICP(100, tolerence=0.05, numLevels=5)  # 1cm
    retval, residual, pose = icp_fnc.registerModelToScene(
        points_src.reshape(-1, 6), pts_tgt.reshape(-1, 6))
    tf[:3, :3] = rot_pred
    tf[:3, 3] = tra_pred / 1000  # in meters
    tf = np.matmul(pose, tf)
    return tf, residual
Example #4
def computeMatch(scene):

    # pick a height
    height = int(scene.height / 2)
    # pick x coords near front and center
    middle_x = int(scene.width / 2)
    # examine point
    middle, scenePCL = read_depth(middle_x, height, scene)
    # do stuff with middle
    # build an Open3D cloud, then convert it to the Nx6 float32 array
    # (XYZ + normals) the OpenCV surface-matching API expects; loadPLYSimple
    # takes a filename, so it cannot consume the cloud object directly
    pcd = open3d.geometry.PointCloud()
    pcd.points = open3d.utility.Vector3dVector(scenePCL)
    scenePCL = np.asarray(pcd.points, dtype=np.float32)
    retval, scenePCL = cv2.ppf_match_3d.computeNormalsPC3d(
        scenePCL, 10, False, (0.0, 0.0, 0.0))
    rospy.loginfo(scenePCL.shape)
    #o3d.visualization.draw_geometries([pcd])

    #rospy.loginfo(scenePCL.shape)
    #scenePCL = scene
    #xyz_array = ros_numpy.point_cloud2.get_xyz_points(scenePCL)
    #matchPub(xyz_array)

    # Find the matching for the scene we have with our detector
    rospy.loginfo('Matching...')

    resultsDetect = detector.match(scenePCL, 1.0 / 40.0, 0.05)

    print('Performing ICP...')
    icp = cv2.ppf_match_3d_ICP(100)
    # pc (the trained model cloud) and N (number of poses to refine) are
    # globals in the original script; see the setup sketch after this example
    _, results = icp.registerModelToScene(pc, scenePCL, resultsDetect[:N])

    print("Poses: ")
    for i, result in enumerate(results):
        result.printPose()
        print(
            "\n-- Pose to Model Index %d: NumVotes = %d, Residual = %f\n%s\n" %
            (result.modelIndex, result.numVotes, result.residual, result.pose))
        if i == 0:
            pct = cv2.ppf_match_3d.transformPCPose(pc, result.pose)

    rospy.loginfo('Finished reprojecting the matched 3D object')
    point_cloud2 = convert_numpy_2_pointcloud2_color(pct)
    matchPub.publish(point_cloud2)

Example #5
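Examples #5 and #6 are truncated and begin mid-call; both rely on setup defined above the excerpt (Mbox in #5 is a message-box helper from the original script). A plausible reconstruction of that setup, modeled on OpenCV's ppf_icp sample; every value here is an assumption:

import cv2 as cv
import numpy as np

width, height = 100, 100
max_deg = np.pi / 3

cloud = [None] * 3
rotated_cloud = [None] * 3
retval, residual, pose = [None] * 3, [None] * 3, [None] * 3

noise = np.random.normal(0.0, 0.1, (width * height, 3))  # per-point jitter
noise2 = np.random.normal(0.0, 1.0, width * height)      # extra z noise

def rotation(angles):
    # rotation matrix from XYZ Euler angles (radians)
    ax, ay, az = angles
    rx = np.array([[1, 0, 0],
                   [0, np.cos(ax), -np.sin(ax)],
                   [0, np.sin(ax), np.cos(ax)]])
    ry = np.array([[np.cos(ay), 0, np.sin(ay)],
                   [0, 1, 0],
                   [-np.sin(ay), 0, np.cos(ay)]])
    rz = np.array([[np.cos(az), -np.sin(az), 0],
                   [np.sin(az), np.cos(az), 0],
                   [0, 0, 1]])
    return rz @ ry @ rx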
x, y = np.meshgrid(range(-width // 2, width // 2),
                   range(-height // 2, height // 2),
                   sparse=False,
                   indexing='xy')
z = np.zeros((height, width))

cloud[0] = np.dstack((x, y, z)).reshape((-1, 3)).astype(np.float32)
cloud[1] = noise.astype(np.float32) + cloud[0]
cloud[2] = cloud[1]
cloud[2][:, 2] += noise2.astype(np.float32)

R = rotation([
    0,  #np.random.uniform(-max_deg, max_deg),
    np.random.uniform(-max_deg, max_deg),
    0,  #np.random.uniform(-max_deg, max_deg)
])
t = np.zeros((3, 1))
Rt = np.vstack((np.hstack((R, t)), np.array([0, 0, 0, 1]))).astype(np.float32)

icp = cv.ppf_match_3d_ICP(100)

I = np.eye(4)
print("Unaligned error:\t%.6f" % np.linalg.norm(I - Rt))
sprintfStr = "Unaligned error:\t%.6f\n" % np.linalg.norm(I - Rt)
for i in range(3):
    rotated_cloud[i] = np.matmul(Rt[0:3, 0:3], cloud[i].T).T + Rt[:3, 3].T
    retval[i], residual[i], pose[i] = icp.registerModelToScene(
        rotated_cloud[i], cloud[i])
    print("ICP error:\t\t%.6f" % np.linalg.norm(I - np.matmul(pose[0], Rt)))
    sprintfStr += "ICP error:\t\t%.6f\n" % np.linalg.norm(
        I - np.matmul(pose[0], Rt))

Mbox('ICP complete', sprintfStr, 1)
Example #6
x, y = np.meshgrid(
    range(-width//2, width//2),
    range(-height//2, height//2),
    sparse=False, indexing='xy'
)
z = np.zeros((height, width))

cloud[0] = np.dstack((x, y, z)).reshape((-1, 3)).astype(np.float32)
cloud[1] = noise.astype(np.float32) + cloud[0]
cloud[2] = cloud[1]
cloud[2][:, 2] += noise2.astype(np.float32)

R = rotation([
    0, #np.random.uniform(-max_deg, max_deg),
    np.random.uniform(-max_deg, max_deg),
    0, #np.random.uniform(-max_deg, max_deg)
])
t = np.zeros((3, 1))
Rt = np.vstack((
    np.hstack((R, t)),
    np.array([0, 0, 0, 1])
)).astype(np.float32)

icp = cv.ppf_match_3d_ICP(100)

I = np.eye(4)
print("Unaligned error:\t%.6f" % np.linalg.norm(I - Rt))
for i in range(3):
    rotated_cloud[i] = np.matmul(Rt[0:3,0:3], cloud[i].T).T + Rt[:3,3].T
    retval[i], residual[i], pose[i] = icp.registerModelToScene(rotated_cloud[i], cloud[i])
    print("ICP error:\t\t%.6f" % np.linalg.norm(I - np.matmul(pose[0], Rt)))
Example #7
        points_tgt = np.zeros((depth.shape[0], depth.shape[1], 6), np.float32)
        points_tgt[:, :, :3] = to.getXYZ(depth,
                                         fx=cam_K[0, 0],
                                         fy=cam_K[1, 1],
                                         cx=cam_K[0, 2],
                                         cy=cam_K[1, 2])
        points_tgt[:, :, 3:], _ = to.get_normal(depth,
                                                fx=cam_K[0, 0],
                                                fy=cam_K[1, 1],
                                                cx=cam_K[0, 2],
                                                cy=cam_K[1, 2])

        pts_tgt = points_tgt[mask]

        icp_fnc = cv2.ppf_match_3d_ICP(100, tolerence=0.05, numLevels=4)  #1cm
        retval, residual, pose = icp_fnc.registerModelToScene(
            points_src.reshape(-1, 6), pts_tgt.reshape(-1, 6))
        tf_mat = np.matmul(pose, tf_mat)
        simple_xyz = renderer.predict(
            [np.array([vertices]),
             np.array([faces]),
             np.array([tf_mat])])[0]
        depth_r = simple_xyz[:, :, 3]
        mask = np.logical_and(np.abs(depth_r - depth) < depth_th, depth_r > 0)

    # compute a valid bbox, then resize the cropped image and mask to 256x256

    img_masked = np.copy(img)
    img_masked = img_masked / 255
Example #8
def run_estimation(image, image_dep, model, score_threshold, threeD_boxes,
                   model_6, model_9, model_10, model_11, model_61):
    obj_names = []
    obj_poses = []
    obj_confs = []

    image_mask = copy.deepcopy(image)
    image = preprocess_image(image)
    #image_mask = copy.deepcopy(image)

    #cv2.imwrite('/home/sthalham/retnetpose_image.jpg', image)

    if keras.backend.image_data_format() == 'channels_first':
        image = image.transpose((2, 0, 1))

    #with graph.as_default():
    boxes3D, scores, mask = model.predict_on_batch(
        np.expand_dims(image, axis=0))

    for inv_cls in range(scores.shape[2]):

        if inv_cls == 0:
            true_cls = 5
        elif inv_cls == 1:
            true_cls = 8
        elif inv_cls == 2:
            true_cls = 9
        elif inv_cls == 3:
            true_cls = 10
        elif inv_cls == 4:
            true_cls = 21
        #true_cat = inv_cls + 1
        #true_cls = true_cat

        cls_mask = scores[0, :, inv_cls]

        cls_indices = np.where(cls_mask > score_threshold)
        #cls_indices = np.argmax(cls_mask)
        #print(cls_mask[cls_indices])

        cls_img = image
        obj_mask = mask[0, :, inv_cls]
        if inv_cls == 0:
            obj_col = [1, 255, 255]
        elif inv_cls == 1:
            obj_col = [1, 1, 128]
        elif inv_cls == 2:
            obj_col = [255, 255, 1]
        elif inv_cls == 3:
            obj_col = [220, 245, 245]
        elif inv_cls == 4:
            obj_col = [128, 1, 1]
        cls_img = np.where(obj_mask > 0.5, 1, 0)
        cls_img = cls_img.reshape((60, 80)).astype(np.uint8)
        cls_img = np.asarray(
            Pilimage.fromarray(cls_img).resize((640, 480), Pilimage.NEAREST))
        depth_mask = copy.deepcopy(cls_img)
        cls_img = np.repeat(cls_img[:, :, np.newaxis], 3, 2)
        cls_img = cls_img.astype(np.uint8)
        cls_img[:, :, 0] *= obj_col[0]
        cls_img[:, :, 1] *= obj_col[1]
        cls_img[:, :, 2] *= obj_col[2]
        image_mask = np.where(cls_img > 0, cls_img,
                              image_mask.astype(np.uint8))
        #cv2.imwrite('/stefan/mask.png', image_mask)

        if len(cls_indices[0]) < 1:
            continue

        if true_cls == 5:
            name = '006_mustard_bottle'
            pcd_model = model_6
        elif true_cls == 8:
            name = '009_gelatin_box'
            pcd_model = model_9
        elif true_cls == 9:
            name = '010_potted_meat_can'
            pcd_model = model_10
        elif true_cls == 10:
            name = '011_banana'
            pcd_model = model_11
        elif true_cls == 21:
            name = '061_foam_brick'
            pcd_model = model_61
        else:
            continue

        obj_names.append(name)
        #obj_confs.append(np.sum(cls_mask[cls_indices[0]]))
        obj_confs.append(np.sum(cls_mask[cls_indices]))

        k_hyp = len(cls_indices[0])
        #k_hyp = 1
        ori_points = np.ascontiguousarray(
            threeD_boxes[(true_cls), :, :],
            dtype=np.float32)  # .reshape((8, 1, 3))
        K = np.float32([fxkin, 0., cxkin, 0., fykin, cykin, 0., 0.,
                        1.]).reshape(3, 3)

        ##############################
        # pnp
        pose_votes = boxes3D[0, cls_indices, :]
        est_points = np.ascontiguousarray(pose_votes,
                                          dtype=np.float32).reshape(
                                              (int(k_hyp * 8), 1, 2))
        obj_points = np.repeat(ori_points[np.newaxis, :, :], k_hyp, axis=0)
        obj_points = obj_points.reshape((int(k_hyp * 8), 1, 3))
        retval, orvec, otvec, inliers = cv2.solvePnPRansac(
            objectPoints=obj_points,
            imagePoints=est_points,
            cameraMatrix=K,
            distCoeffs=None,
            rvec=None,
            tvec=None,
            useExtrinsicGuess=False,
            iterationsCount=300,
            reprojectionError=5.0,
            confidence=0.99,
            flags=cv2.SOLVEPNP_ITERATIVE)
        R_est, _ = cv2.Rodrigues(orvec)
        t_est = otvec[:, 0]

        if np.sum(depth_mask) > 3000:

            print('--------------------- ICP refinement -------------------')

            print('cls: ', true_cls)
            pcd_img = np.where(depth_mask, image_dep, np.NaN)
            pcd_img = create_point_cloud(pcd_img, fxkin, fykin, cxkin, cykin,
                                         1.0)
            pcd_img = pcd_img[~np.isnan(pcd_img).any(axis=1)]
            # note: legacy Open3D (< 0.8) module-level API
            pcd_crop = open3d.PointCloud()
            pcd_crop.points = open3d.Vector3dVector(pcd_img)
            open3d.estimate_normals(
                pcd_crop,
                search_param=open3d.KDTreeSearchParamHybrid(radius=20.0,
                                                            max_nn=30))

            guess = np.zeros((4, 4), dtype=np.float32)
            guess[:3, :3] = R_est
            guess[:3, 3] = t_est.T * 1000.0
            guess[3, :] = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32).T

            pcd_model = open3d.geometry.voxel_down_sample(pcd_model,
                                                          voxel_size=5.0)
            pcd_crop = open3d.geometry.voxel_down_sample(pcd_crop,
                                                         voxel_size=5.0)
            open3d.estimate_normals(
                pcd_crop,
                search_param=open3d.KDTreeSearchParamHybrid(radius=10.0,
                                                            max_nn=10))
            open3d.estimate_normals(
                pcd_model,
                search_param=open3d.KDTreeSearchParamHybrid(radius=10.0,
                                                            max_nn=10))

            pcd_model.transform(guess)

            # remove model vertices facing away from camera
            points_unfiltered = np.asarray(pcd_model.points)
            last_pcd_temp = []
            for i, normal in enumerate(pcd_model.normals):
                if normal[2] < 0:
                    last_pcd_temp.append(points_unfiltered[i, :])

            pcd_model.points = open3d.Vector3dVector(np.asarray(last_pcd_temp))
            open3d.estimate_normals(
                pcd_model,
                search_param=open3d.KDTreeSearchParamHybrid(radius=20.0,
                                                            max_nn=10))

            # align model with median depth of scene
            mean_crop = np.median(np.array(pcd_crop.points), axis=0)
            mean_model = np.mean(np.array(pcd_model.points), axis=0)
            pcd_crop_filt = copy.deepcopy(pcd_crop)

            pcd_min = guess[2, 3] - 75
            pcd_max = guess[2, 3] + 75
            new_pcd_scene = []
            for i, point in enumerate(pcd_crop.points):
                if point[2] > pcd_min and point[2] < pcd_max:
                    new_pcd_scene.append(point)

            #mean_crop = np.mean(np.array(pcd_crop_filt.points), axis=0)

            print(mean_crop, mean_crop.shape)
            #print(guess[:3, 3], guess[:3, 3].shape)
            #print(mean_crop-guess[:3, 3])
            print('euclid: ', np.linalg.norm((mean_crop - guess[:3, 3]),
                                             ord=2))
            print('num_points: ', len(new_pcd_scene))
            if len(new_pcd_scene) < 50 or np.linalg.norm(
                (mean_crop - guess[:3, 3]), ord=2) > 75:
                print('use pcd mean')
                if len(new_pcd_scene) > 50 and np.linalg.norm(
                    (mean_crop - guess[:3, 3]), ord=2) > 75:
                    print('recalc mean')
                    pcd_crop_filt.points = open3d.Vector3dVector(
                        np.asarray(new_pcd_scene))
                    mean_crop = np.mean(np.array(pcd_crop_filt.points), axis=0)
                pcd_diff = mean_crop - mean_model
                pcd_model.translate(pcd_diff)
                open3d.estimate_normals(
                    pcd_model,
                    search_param=open3d.KDTreeSearchParamHybrid(radius=10.0,
                                                                max_nn=10))
                guess[:3, 3] = mean_crop
            else:
                print('use pose')
                pcd_crop.points = open3d.Vector3dVector(
                    np.asarray(new_pcd_scene))

            open3d.estimate_normals(
                pcd_crop,
                search_param=open3d.KDTreeSearchParamHybrid(radius=20.0,
                                                            max_nn=10))

            #reg_p2p = open3d.registration.registration_icp(pcd_model, pcd_crop, 5.0, np.eye(4),
            #                                                open3d.registration.TransformationEstimationPointToPlane(), open3d.registration.ICPConvergenceCriteria(max_iteration=100))
            reg_icp = cv2.ppf_match_3d_ICP(100, tolerence=0.075, numLevels=4)
            model_points = np.asarray(pcd_model.points, dtype=np.float32)
            model_normals = np.asarray(pcd_model.normals, dtype=np.float32)
            crop_points = np.asarray(pcd_crop.points, dtype=np.float32)
            crop_normals = np.asarray(pcd_crop.normals, dtype=np.float32)
            pcd_source = np.zeros((model_points.shape[0], 6), dtype=np.float32)
            pcd_target = np.zeros((crop_points.shape[0], 6), dtype=np.float32)
            pcd_source[:, :3] = model_points * 0.001
            pcd_source[:, 3:] = model_normals
            pcd_target[:, :3] = crop_points * 0.001
            pcd_target[:, 3:] = crop_normals

            retval, residual, pose = reg_icp.registerModelToScene(
                pcd_source, pcd_target)

            print('residual: ', residual)
            #pcd_model.transform(reg_p2p.transformation)
            guess[:3, 3] = guess[:3, 3] * 0.001
            guess = np.matmul(pose, guess)
            R_est = guess[:3, :3]
            t_est = guess[:3, 3]

            #print('guess: ', guess)

        est_pose = np.zeros((7), dtype=np.float32)
        est_pose[:3] = t_est
        est_pose[3:] = tf3d.quaternions.mat2quat(R_est)
        obj_poses.append(est_pose)

    bridge = CvBridge()

    image_mask_msg = bridge.cv2_to_imgmsg(image_mask)

    mask_pub.publish(image_mask_msg)
    return obj_names, obj_poses, obj_confs
Example #9
def icp_refinement(depth_measured,
                   depth_rendered,
                   object_mask_measured,
                   cam_K,
                   TCO_pred,
                   n_min_points=1000):
    # Inspired from https://github.com/kirumang/Pix2Pose/blob/843effe0097e9982f4b07dd90b04ede2b9ee9294/tools/5_evaluation_bop_icp3d.py#L57

    points_tgt = np.zeros(
        (depth_measured.shape[0], depth_measured.shape[1], 6), np.float32)
    points_tgt[:, :, :3] = getXYZ(depth_measured,
                                  fx=cam_K[0, 0],
                                  fy=cam_K[1, 1],
                                  cx=cam_K[0, 2],
                                  cy=cam_K[1, 2])
    points_tgt[:, :, 3:] = get_normal(depth_measured,
                                      fx=cam_K[0, 0],
                                      fy=cam_K[1, 1],
                                      cx=cam_K[0, 2],
                                      cy=cam_K[1, 2],
                                      refine=True)
    depth_valid = np.logical_and(depth_measured > 0.2, depth_measured < 5)
    depth_valid = np.logical_and(depth_valid, object_mask_measured)
    points_tgt = points_tgt[depth_valid]

    points_src = np.zeros(
        (depth_measured.shape[0], depth_measured.shape[1], 6), np.float32)
    points_src[:, :, :3] = getXYZ(depth_rendered, cam_K[0, 0], cam_K[1, 1],
                                  cam_K[0, 2], cam_K[1, 2])
    points_src[:, :, 3:] = get_normal(depth_rendered,
                                      fx=cam_K[0, 0],
                                      fy=cam_K[1, 1],
                                      cx=cam_K[0, 2],
                                      cy=cam_K[1, 2],
                                      refine=True)
    points_src = points_src[np.logical_and(depth_valid, depth_rendered > 0)]

    if len(points_tgt) < n_min_points or len(points_src) < n_min_points:
        return np.eye(4) * float('nan'), -1

    TCO_pred_refined = TCO_pred.copy()

    # adjust the initial translation using centroids of visible points
    centroid_src = np.mean(points_src[:, :3], axis=0)
    centroid_tgt = np.mean(points_tgt[:, :3], axis=0)
    dists_centroid = centroid_tgt - centroid_src
    TCO_pred_refined[:3, -1] += dists_centroid.reshape(-1)
    points_src[:, :3] += dists_centroid[None]

    # import trimesh
    # print(points_src.shape, points_tgt.shape)
    # trimesh.Trimesh(vertices=points_src[:, :3], normals=points_src[:, 3:]).export(DEBUG_DATA_DIR / 'src.ply')
    # trimesh.Trimesh(vertices=points_tgt[:, :3], normals=points_tgt[:, 3:]).export(DEBUG_DATA_DIR / 'tgt.ply')
    # raise ValueError

    tolerence = 0.05
    icp_fnc = cv2.ppf_match_3d_ICP(100, tolerence=tolerence, numLevels=4)
    retval, residual, pose = icp_fnc.registerModelToScene(
        points_src.reshape(-1, 6), points_tgt.reshape(-1, 6))
    TCO_pred_refined = pose @ TCO_pred_refined
    TCO_pred_refined = torch.tensor(TCO_pred_refined,
                                    dtype=torch.float32).cuda()

    # print(retval, residual, pose)
    # # print(retval, residual)
    # # raise ValueError
    if residual > tolerence or residual < 0:
        retval = -1
    return TCO_pred_refined, retval
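
A hedged call sketch for the function above, assuming the getXYZ/get_normal helpers referenced in the comment are importable and that torch with CUDA is available; every input below is a synthetic placeholder:

import numpy as np

H, W = 480, 640
cam_K = np.array([[572.4, 0.0, 325.3],
                  [0.0, 573.6, 242.0],
                  [0.0, 0.0, 1.0]])  # example pinhole intrinsics
ys, xs = np.mgrid[0:H, 0:W]
# gently curved synthetic depth (meters) so the normals are not degenerate
depth_measured = (1.0 + 1e-6 * ((xs - W / 2) ** 2 +
                                (ys - H / 2) ** 2)).astype(np.float32)
depth_rendered = depth_measured + 0.01  # as if rendered 1 cm too far
object_mask = np.zeros((H, W), bool)
object_mask[140:340, 220:420] = True    # detected object region

TCO_refined, retval = icp_refinement(depth_measured, depth_rendered,
                                     object_mask, cam_K, np.eye(4))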
Example #10
#!/usr/bin/python

import cv2
import numpy as np

model = cv2.ppf_match_3d.loadPLYSimple('model-low.ply', 0)  # 0 = do not load normals
print(model.shape)

scene = cv2.ppf_match_3d.loadPLYSimple('scene-high.ply', 0)  # 0 = do not load normals

icp = cv2.ppf_match_3d_ICP(100, 0.2, 2.0, 8)  # iterations, tolerence, rejectionScale, numLevels

pose = np.array([[1, 0, 0, 0], [0, 1, 0, 14], [0, 0, 1, 0], [0, 0, 0, 1]])

modela = cv2.ppf_match_3d.transformPCPose(model, pose)

retval, residual, posea = icp.registerModelToScene(modela, scene)

print("pose", posea)