Code example #1
0
    def calculate_icp(self):
        """Align every cell's last point cloud to the main (first) cell via ICP.

        NOTE(review): the early ``return`` below disables the whole method
        (debugging leftover?) -- everything after it is dead code. It is kept
        to preserve current behavior; remove it to re-enable the alignment.
        """
        return
        print("calculate")  # was py2 `print` statement; fixed to py3 call
        main_cell = self.cells[0]  # type: ArtRobotCalibration
        for c in self.cells:  # type: ArtRobotCalibration
            if c is main_cell:
                continue
            # Abort the whole pass as soon as one cell has no usable data yet.
            if c.last_pc is None or not c.calibrated:
                return

            # NOTE(review): `transformed_cloud` is not defined in this scope --
            # presumably c.last_pc transformed into the main cell's frame;
            # confirm before re-enabling this code path.
            converged, transf, estimate, fitness = icp(main_cell.last_pc,
                                                       transformed_cloud,
                                                       max_iter=25)

            print(converged)
            print(fitness)
Code example #2
0
File: svm.py  Project: brainvisa/morpho-deepsulci
 def compute_distmatrix(self, sbck):
     """Distance of each searched pattern to the bucket ``sbck`` after ICP.

     For every (filtered bucket, searched pattern) pair, tries each initial
     transform in ``self.transrot_init``, registers the transformed bucket
     onto ``sbck`` with ICP, keeps the initialization with the smallest ICP
     distance, then applies the winning transform to the searched pattern
     before scoring it against ``sbck``.

     Returns a list with one distance per pair.
     """
     # ``sbck`` does not change inside the loop, so build its cloud once.
     pc1 = pcl.PointCloud(np.asarray(sbck, dtype=np.float32))
     X = []
     for bck_filtered, searched_pattern in zip(self.bck_filtered_list,
                                               self.searched_pattern_list):
         # Try different initialization and select the best score
         dist_min = 100.
         for trans in self.transrot_init:
             pc2 = pcl.PointCloud(
                 np.asarray(apply_trans(trans, bck_filtered),
                            dtype=np.float32))
             # first element was previously bound to `bool`, shadowing the
             # builtin -- renamed to `converged`.
             converged, transrot, trans_pc2, d = reg.icp(pc2, pc1)
             if d < dist_min:
                 dist_min = d
                 # Total transform: initialization followed by ICP result.
                 transrot_min = np.dot(transrot, trans)
         trans_searched_pattern = apply_trans(transrot_min,
                                              searched_pattern)
         X.append(distance_data_to_model(trans_searched_pattern, sbck))
     return X
Code example #3
0
def register_icp(cam_points, log_points):
    """Register camera points onto log points with a scale-searched ICP.

    Runs ICP once per candidate scale factor (0.1 .. 0.9), keeps the run
    with the lowest fitness, then plots the raw camera points beside the
    log points overlaid with the best-fitting estimate.
    """
    cam_pc = pcl.PointCloud(cam_points.astype(np.float32))
    log_pc = pcl.PointCloud(log_points.astype(np.float32))

    # best = (converged, transform, estimate, fitness); seeded so the first
    # real fitness always wins.
    best = (None, None, None, float('inf'))
    for scale in np.arange(0.1, 1, 0.1):
        scaled_cam_pc = pcl.PointCloud((scale * cam_points).astype(np.float32))
        run = icp(scaled_cam_pc, log_pc, max_iter=100000)
        if run[3] < best[3]:
            print(scale)
            best = run

    converged, transform, estimate, fitness = best
    print(converged, transform, estimate, fitness)
    result = estimate.to_array()

    fig = plt.figure()

    # Left panel: raw camera points (blue).
    ax = fig.add_subplot(121, projection='3d')
    plt.axis('equal')
    plt.ticklabel_format(useOffset=False)
    ax.plot(cam_points[:, 0], cam_points[:, 1], cam_points[:, 2],
            'bo', markersize=0.5)

    # Right panel: log points (green) with the registered estimate (red).
    ax = fig.add_subplot(122, projection='3d')
    plt.axis('equal')
    plt.ticklabel_format(useOffset=False)
    ax.plot(log_points[:, 0], log_points[:, 1], log_points[:, 2],
            'go', markersize=0.5)
    ax.plot(result[:, 0], result[:, 1], result[:, 2], 'ro', markersize=0.5)

    plt.show()
Code example #4
0
    def testICP(self):
        """ICP must react to iteration count and to each stopping criterion."""
        self.check_algo(icp)

        # Each case: two keyword sets whose resulting transforms must differ,
        # plus the assertion message to show when they do not.
        cases = [
            (dict(max_iter=1), dict(max_iter=2),
             "First and second transformation should be unequal"
             " in this complicated registration."),
            (dict(transformationEpsilon=0), dict(transformationEpsilon=1),
             "Transformations should be unequal"
             " with different stopping criteria."),
            (dict(euclideanFitnessEpsilon=0), dict(euclideanFitnessEpsilon=1),
             "Transformations should be unequal with different"
             " stopping criteria."),
        ]
        for kwargs_a, kwargs_b, message in cases:
            transf1 = icp(self.source, self.target, **kwargs_a)[1]
            transf2 = icp(self.source, self.target, **kwargs_b)[1]
            self.assertFalse(np.allclose(transf1, transf2, 0, 0.1), message)
Code example #5
0
    T_pv_v[3, 3] = 1

    # Bring the homogeneous velodyne points into the 'pv' frame.
    velo_points = np.dot(T_pv_v, velo_points.T)
    #####################################################################################################

    # Source cloud: the transformed scan's xyz rows, as an Nx3 float32 array.
    source_points = velo_points[0:3, :].T
    source_points = source_points.astype(np.float32)
    pc_source = pcl.PointCloud()
    pc_source.from_array(source_points)

    # Target cloud: world points expressed in the camera frame.
    target_points = world_points_cframe[0:3, :].T
    target_points = target_points.astype(np.float32)
    pc_target = pcl.PointCloud()
    pc_target.from_array(target_points)

    # icp returns (converged, transform, estimate, fitness); only the 4x4
    # transform and the fitness score are used here.
    _, T_cam0_v, _, fitness = icp(pc_source, pc_target)
    #TODO: Construct the y values (possibly using Lie algebra)
    # Translation part of the recovered transform.
    y[6 * i] = T_cam0_v[0, 3]
    y[6 * i + 1] = T_cam0_v[1, 3]
    y[6 * i + 2] = T_cam0_v[2, 3]
    #next 3 lines only valid for small rotations
    y[6 * i + 3] = T_cam0_v[2, 1]
    y[6 * i + 4] = T_cam0_v[0, 2]
    y[6 * i + 5] = T_cam0_v[1, 0]

    print('Alignment fitness at time k = ' + str(i) + ': ' + str(fitness))
    print('\tvelo->cam0 translation :\n\n' + str(T_cam0_v))

    # Re-express the scan in the cam0 frame using the ICP result.
    velo_points_cam0frame = np.dot(T_cam0_v, velo_points)

    ###Sanity check
Code example #6
0
# Euler angles (radians) for the elementary rotations about x, y and z.
theta = [0., 0., np.pi]
rot_x = [[1, 0, 0], [0, cos(theta[0]), -sin(theta[0])],
         [0, sin(theta[0]), cos(theta[0])]]
rot_y = [[cos(theta[1]), 0, sin(theta[1])], [0, 1, 0],
         [-sin(theta[1]), 0, cos(theta[1])]]
# BUG FIX: rot_z previously mixed theta[1] into its -sin and cos entries;
# a rotation about z must use theta[2] everywhere, otherwise the result
# is not a rotation matrix at all.
rot_z = [[cos(theta[2]), -sin(theta[2]), 0],
         [sin(theta[2]), cos(theta[2]), 0], [0, 0, 1]]
# Combined rotation: x after y after z.
transform = np.dot(rot_x, np.dot(rot_y, rot_z))

# Random source cloud and a rigidly rotated copy as the registration target.
source = np.random.randn(100, 3)
target = np.dot(transform, source.T).T
source_pc = pcl.PointCloud(source.astype(np.float32))
target_pc = pcl.PointCloud(target.astype(np.float32))

# Recover the rotation by registering source onto target.
converged, transf, estimate, fitness = icp(source_pc, target_pc,
                                           max_iter=10000)
print(converged, transf, estimate, fitness)
result = estimate.to_array()

# Top row: source and target clouds; bottom: source (blue) vs estimate (red).
fig = plt.figure()
for position, cloud in ((221, source), (222, target)):
    ax = fig.add_subplot(position, projection='3d')
    ax.scatter(cloud[:, 0], cloud[:, 1], cloud[:, 2])
ax = fig.add_subplot(212, projection='3d')
ax.scatter(source[:, 0], source[:, 1], source[:, 2], 'b')
ax.scatter(result[:, 0], result[:, 1], result[:, 2], 'r')

plt.show()
Code example #7
0
File: demo_odometry.py  Project: jdmarr/se_project
            -dataset_raw.calib.T_cam0unrect_velo[2, 3],
            color='r')
# (The two lines above are the tail of a scatter call started before this
# excerpt.)

plt.show()

# Source cloud: the scan in the camera frame, xyz rows as an Nx3 float32.
source_points = velo_points_cframe[0:3, :].T
source_points = source_points.astype(np.float32)
pc_source = pcl.PointCloud()
pc_source.from_array(source_points)

# Target cloud: presumably the same scan in the world frame -- confirm
# against where velo_points_wframe is built.
target_points = velo_points_wframe[0:3, :].T
target_points = target_points.astype(np.float32)
pc_target = pcl.PointCloud()
pc_target.from_array(target_points)

# Keep only the 4x4 alignment transform from the ICP result tuple.
_, alignment, _, _ = icp(pc_source, pc_target)

f5 = plt.figure()
ax5 = f5.add_subplot(111, projection='3d')

# Apply the recovered alignment to the homogeneous camera-frame points.
aligned_source = np.dot(alignment, velo_points_cframe)

ax5.scatter(aligned_source[0, :],
            aligned_source[1, :],
            aligned_source[2, :],
            color='r')
#ax3.set_title('Third Velodyne scan (subsampled, cam0 frame)')

# Overlay the second scan (world frame) for visual comparison.
ax5.scatter(second_scan_wframe[0, :], second_scan_wframe[1, :],
            second_scan_wframe[2, :])
Code example #8
0
	velo_points[:,0:3] = dataset.velo[i][velo_range, 0:3]
	# Homogeneous coordinate for every point.
	velo_points[:,3] = np.ones((len(velo_range)))
	#velo_points_pvframe is 4 x npoints
	velo_points_pvframe = np.dot(T_pv_v , velo_points.T)
	##########################################################################################
	# Source cloud: the scan in the 'pv' frame, xyz rows as an Nx3 float32.
	source_points = velo_points_pvframe[0:3,:].T
	source_points = source_points.astype(np.float32)
	pc_source = pcl.PointCloud()
	pc_source.from_array(source_points)

	# Target cloud: world points expressed in the camera frame.
	target_points = world_points_cframe[0:3,:].T
	target_points = target_points.astype(np.float32)
	pc_target = pcl.PointCloud()
	pc_target.from_array(target_points)

	# Only the alignment transform and fitness are used from the ICP tuple.
	_, alignment, _, fitness = icp(pc_source, pc_target)
	# NOTE(review): only the x (index 0) and z (index 2) translations are
	# stored -- presumably motion is treated as planar; confirm against the
	# estimator that consumes y.
	y[2*i] = alignment[0,3]
	y[2*i + 1] = alignment[2,3]
	print('Alignment fitness at time k = ' + str(i) + ': ' + str(fitness))
	print('\tTranslation : [' + str(alignment[0,3]) + ',' + str(alignment[1,3]) + ',' + str(alignment[2,3]) + ']')
	# Pure-translation (x, z) correction built from the ICP result.
	T_cam0_pv = np.identity(4)
	T_cam0_pv[0,3] = alignment[0,3]
	T_cam0_pv[2,3] = alignment[2,3]
	velo_points_cam0frame = np.dot( T_cam0_pv , velo_points_pvframe )

	###Sanity check
	
	if i == 0:
		f2 = plt.figure()
		ax2 = f2.add_subplot(111, projection='3d')
Code example #9
0
File: batch_pose_only.py  Project: jdmarr/se_project
    #store the scan in homogeneous coordinates [x y z 1]
    velo_points[:, 0:3] = dataset.velo[i][velo_range, 0:3]
    velo_points[:, 3] = np.ones((len(velo_range)))

    # Source cloud: the raw scan's xyz columns as float32.
    source_points = velo_points[:, 0:3]
    source_points = source_points.astype(np.float32)
    pc_source = pcl.PointCloud()
    pc_source.from_array(source_points)

    # Target cloud: world points in the velodyne frame.
    target_points = world_points_vframe[0:3, :].T
    target_points = target_points.astype(np.float32)
    pc_target = pcl.PointCloud()
    pc_target.from_array(target_points)

    # ICP yields the inverse of the residual correction (per the variable
    # name); invert it to obtain the residual itself.
    _, T_v_w_resid_inv, _, fitness = icp(pc_source, pc_target)
    T_v_w_resid = invert_transform(T_v_w_resid_inv)

    T_v_w_icp = np.dot(
        T_v_w_resid,
        T_v_w)  #full world->velo transformation after applying icp correction

    #print('Alignment fitness at time k = ' + str(i) + ': ' + str(fitness))
    #print('\tworld->velo residual translation :\n\n' + str(T_v_w_resid))
    """
	if i == 19:
		f2 = plt.figure()
		ax2 = f2.add_subplot(111, projection='3d')


		#ax2.scatter(velo_points[:,0],
Code example #10
0
    # with open('p1_1.pts','w') as file:
    # 	for i in range(temp.shape[0]):
    # 		file.write('{},{},{},{}\n'.format(temp[i,0],temp[i,1],temp[i,2],i1[i]))

    # del temp
    # exit()

    # T, dist = icp(pc1[:,:3], pc2[:,:3])

    # print(T, dist)

    # Build PCL clouds from the xyz columns of both point sets.
    p1 = _pcl.PointCloud(np.float32(pc1[:, :3]))
    p2 = _pcl.PointCloud(np.float32(pc2[:, :3]))
    # Time the ICP registration of p1 onto p2.
    cur = dt.now()
    c, T, p, err = reg.icp(p1, p2, max_iter=1000)
    new_cur = dt.now()
    delt = new_cur - cur
    print('Time Taken: %s' % (str(delt)))

    # Registered copy of p1 as a NumPy array.
    final_p1 = p.to_array()
    print('Error %f' % (err))
    print('Transformation Matrix:')
    print(T)

    # # exit()
    # ms = None
    # print()

    # ms, X, R, t, final_p1 = ICP(pc1[:,:], pc2[:,:])
Code example #11
0
File: get_R_2.py  Project: jdmarr/se_project
    T_dist[2, 3] = z_dist

    # Displace the first scan by the known offset before registration.
    first_scan_dist = np.dot(T_dist, first_scan_wframe)

    # Source cloud: the displaced scan, xyz rows as an Nx3 float32.
    source_points = first_scan_dist[0:3, :].T
    source_points = source_points.astype(np.float32)
    pc_source = pcl.PointCloud()
    pc_source.from_array(source_points)

    # Target cloud: world points in the world frame.
    target_points = world_points_wframe[0:3, :].T
    target_points = target_points.astype(np.float32)
    pc_target = pcl.PointCloud()
    pc_target.from_array(target_points)

    _, T_recovered_inv, _, fitness = icp(
        pc_source,
        pc_target)  #this is the inverse transformation to the one we applied
    fitness_arr[i] = fitness

    #Build the transformation from its inverse
    # For a rigid transform [R | t], the inverse is [R^T | -R^T t].
    T_recovered = np.identity(4)
    T_recovered[0:3, 0:3] = T_recovered_inv[0:3, 0:3].T
    T_recovered[0:3, 3] = -1 * np.dot(T_recovered_inv[0:3, 0:3].T,
                                      T_recovered_inv[0:3, 3])

    # Eigen-decompose the rotation; the rotation axis is the eigenvector
    # whose eigenvalue has the smallest imaginary part (closest to real 1).
    w, v = np.linalg.eig(T_recovered[0:3, 0:3])

    index = np.argmin(np.absolute(w.imag))
    #print(w[index])
    a_recovered = v[:, index].real
    #print(a_recovered)