def generate_loss_2Dplots(self, axis , x_axis_param):
		# Parameters to deal with:
		# axis:					Axis about which to rotate or along which to translate the point cloud: 'X', 'Y' or 'Z'.
		# x_axis_param:			Whether to rotate ('rotation') or translate ('translation') the point cloud.

		template_data = self.templates[self.template_idx,:,:].reshape((1,MAX_NUM_POINT,3))		# Extract the template and reshape it.
		template_data = template_data[:,0:self.NUM_POINT,:]

		loss = []																			# Store the losses.
		if x_axis_param == 'rotation':
			angles = []						# Store the angles.
			# Loop to find loss for various angles from -90 to 90.
			for i in range(-90,91):
				if axis == 'X':
					gt_pose = np.array([[0.0, 0.0, 0.0, i*(np.pi/180), 0.0, 0.0]])			# New poses as per each index.
				if axis == 'Y':
					gt_pose = np.array([[0.0, 0.0, 0.0, 0.0, i*(np.pi/180), 0.0]])			# New poses as per each index.
				if axis == 'Z':
					gt_pose = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, i*(np.pi/180)]])			# New poses as per each index.

				source_data = helper.apply_transformation(template_data,gt_pose)		# Generate Source Data.
				final_pose, TRANSFORMATIONS, loss_i, predicted_data, transformed_source_data, _, _ = self.test_one_case(template_data, source_data)	# Find final transformation by network.
				loss.append(loss_i)
				angles.append(i)

				# helper.display_three_clouds(template_data[0],source_data[0],transformed_source_data,"Results")

			plt.plot(angles, loss, linewidth=6)
			plt.xlabel('Rotation about '+axis+'-axis (in degrees)', fontsize=40)
			plt.ylabel('Error in Pose (L2 Norm)', fontsize=40)
			plt.ylim((-0.5,2))
			plt.tick_params(labelsize=30)
			plt.show()

		if x_axis_param == 'translation':
			position = []						# Store the positions.
			# Loop to find loss for translations from -1.0 to 1.0 along the chosen axis.
			for i in range(-10,11):
				if axis == 'X':
					gt_pose = np.array([[i/10.0, 0.0, 0.0, 0.0, 0.0, 0.0]])			# New poses as per each index.
				if axis == 'Y':
					gt_pose = np.array([[0.0, i/10.0, 0.0, 0.0, 0.0, 0.0]])			# New poses as per each index.
				if axis == 'Z':
					gt_pose = np.array([[0.0, 0.0, i/10.0, 0.0, 0.0, 0.0]])			# New poses as per each index.

				source_data = helper.apply_transformation(template_data,gt_pose)		# Generate Source Data.
				final_pose, TRANSFORMATIONS, loss_i, predicted_data, transformed_source_data, _, _ = self.test_one_case(template_data,source_data,MAX_LOOPS)	# Find final transformation by network.
				loss.append(np.sum(np.square(final_pose[0]-gt_pose[0]))/6)				# Mean squared error between gt pose and predicted pose.
				position.append(i/10.0)

				# helper.display_three_clouds(template_data[0],source_data[0],transformed_source_data,"Results")

			plt.plot(position, loss, linewidth=6)
			plt.ylim((-0.5,2))
			plt.xlabel('Translation along '+axis+'-axis', fontsize=40)
			plt.ylabel('Error in Poses (L2 Norm)', fontsize=40)
			plt.tick_params(labelsize=30)
			plt.show()
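# Every snippet in this file builds its source clouds with helper.apply_transformation.
# A minimal numpy sketch of what such a helper plausibly does, assuming poses are
# [tx, ty, tz, rx, ry, rz] with rotations in radians and an XYZ Euler convention
# (both conventions are assumptions, not confirmed by this code):
import numpy as np

def apply_transformation_sketch(point_clouds, poses):
	# point_clouds: (B, N, 3) array; poses: (B, 6) array.
	transformed = []
	for cloud, pose in zip(point_clouds, poses):
		cx, cy, cz = np.cos(pose[3:6])
		sx, sy, sz = np.sin(pose[3:6])
		Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
		Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
		Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
		R = np.dot(Rz, np.dot(Ry, Rx))		# Rotation order is an assumed convention.
		transformed.append(np.dot(cloud, R.T) + pose[0:3])
	return np.stack(transformed)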
	def generate_stat_data(self, filename):
		eval_poses = helper.read_poses(FLAGS.data_dict, FLAGS.eval_poses)
		template_data = self.templates[self.template_idx,:,:].reshape((1,MAX_NUM_POINT,3))
		TIME, ITR, Trans_Err, Rot_Err = [], [], [], []
		for pose in eval_poses:
			source_data = helper.apply_transformation(template_data, pose.reshape((-1,6)))
			final_pose, _, _, _, _, elapsed_time, itr = self.test_one_case(source_data,template_data)
			translation_error, rotational_error = self.find_errors(pose.reshape((-1,6)), final_pose)
			TIME.append(elapsed_time)
			ITR.append(itr)
			Trans_Err.append(translation_error)
			Rot_Err.append(rotational_error)
		
		TIME_mean, ITR_mean, Trans_Err_mean, Rot_Err_mean = sum(TIME)/len(TIME), sum(ITR)/len(ITR), sum(Trans_Err)/len(Trans_Err), sum(Rot_Err)/len(Rot_Err)
		TIME_var, ITR_var, Trans_Err_var, Rot_Err_var = np.var(np.array(TIME)), np.var(np.array(ITR)), np.var(np.array(Trans_Err)), np.var(np.array(Rot_Err))
		import csv
		with open(filename + '.csv','w') as csvfile:
			csvwriter = csv.writer(csvfile)
			for i in range(len(TIME)):
				csvwriter.writerow([i, TIME[i], ITR[i], Trans_Err[i], Rot_Err[i]])
		with open(filename+'.txt','w') as file:
			file.write("Mean of Time: {}\n".format(TIME_mean))
			file.write("Mean of Iterations: {}\n".format(ITR_mean))
			file.write("Mean of Translation Error: {}\n".format(Trans_Err_mean))
			file.write("Mean of Rotation Error: {}\n\n".format(Rot_Err_mean))

			file.write("Variance in Time: {}\n".format(TIME_var))
			file.write("Variance in Iterations: {}\n".format(ITR_var))
			file.write("Variance in Translation Error: {}\n".format(Trans_Err_var))
			file.write("Variance in Rotation Error: {}\n".format(Rot_Err_var))
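# generate_stat_data calls self.find_errors, which is not defined in this excerpt.
# A plausible sketch, assuming translation error is the L2 distance between the
# translations and rotation error is the residual rotation angle in degrees (these
# definitions are assumptions; transforms3d is used elsewhere in these examples):
import numpy as np
import transforms3d.euler as t3d

def find_errors_sketch(gt_pose, final_pose):
	gt_pose = np.asarray(gt_pose).reshape(-1)
	final_pose = np.asarray(final_pose).reshape(-1)
	translation_error = np.linalg.norm(gt_pose[0:3] - final_pose[0:3])
	R_gt = t3d.euler2mat(gt_pose[3], gt_pose[4], gt_pose[5], 'sxyz')
	R_pred = t3d.euler2mat(final_pose[3], final_pose[4], final_pose[5], 'sxyz')
	# Angle of the residual rotation R_gt * R_pred^T via the trace identity.
	cos_angle = (np.trace(np.dot(R_gt, R_pred.T)) - 1.0) / 2.0
	rotational_error = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))
	return translation_error, rotational_error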
Example #3
    def eval_one_epoch(self, model, poses, templates, BATCH_SIZE=32):
        num_batches = int(poses.shape[0] / BATCH_SIZE)
        total_loss = 0.0
        weight = 4
        with torch.no_grad():
            for batch_idx in range(num_batches):
                start_idx = batch_idx * BATCH_SIZE
                end_idx = (batch_idx + 1) * BATCH_SIZE

                template_data = templates[start_idx:end_idx]
                source_data = helper.apply_transformation(
                    template_data, poses[start_idx:end_idx])

                template_data = template_data - np.mean(
                    template_data, axis=1, keepdims=True)
                source_data = source_data - np.mean(
                    source_data, axis=1, keepdims=True)

                source_data = torch.from_numpy(source_data).cuda().double()
                template_data = torch.from_numpy(template_data).cuda().double()

                loss = model(source_data, template_data, self.maxItr) * weight

                total_loss += loss.item()
        return total_loss / num_batches
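# A hypothetical driver for the torch eval_one_epoch above; `evaluator`, `model`,
# `poses` and `templates` are assumed to be created elsewhere by the training script:
#
#     model.eval()
#     val_loss = evaluator.eval_one_epoch(model, poses, templates, BATCH_SIZE=32)
#     print('Eval mean loss: {:.6f}'.format(val_loss))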
Example #4
def read_data():
    # Output ->
    # source:		Torch tensor on CPU [1xNx3]
    # template:		Torch tensor on CPU [1xNx3]
    template = helper.loadData('train_data')
    template = template[0, 0:1024, :].reshape(1, -1, 3)
    poses = np.array(
        [[0, 0, 0, 30 * (np.pi / 180), 40 * (np.pi / 180), 0 * (np.pi / 180)]])
    source = helper.apply_transformation(template, poses)
    return torch.from_numpy(source), torch.from_numpy(template)
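# A quick shape check for read_data; it needs helper.loadData('train_data') to be
# available, so treat it as a sketch rather than a standalone test:
#
#     source, template = read_data()
#     print(source.shape, template.shape)		# torch.Size([1, 1024, 3]) each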
	def generate_icp_results(self, gt_pose):
		from icp import icp_test
		from scipy.spatial import KDTree
		M_given = self.templates[self.template_idx,:,:]

		S_given = helper.apply_transformation(M_given.reshape((1,-1,3)), gt_pose)[0]
		# S_given = S_given + np.random.normal(0,1,S_given.shape)						# Noisy Data

		M_given = M_given[0:self.NUM_POINT,:]				# template data
		S_given = S_given[0:self.NUM_POINT,:]				# source data

		tree_M = KDTree(M_given)
		tree_M_sampled = KDTree(M_given[0:100,:])

		final_pose, model_data, sensor_data, predicted_data, title, _, _ = icp_test(S_given[0:100,:], M_given, tree_M, M_given[0:100,:], tree_M_sampled, S_given, gt_pose.reshape((1,6)), self.MAX_LOOPS, self.ftol)		
		self.find_errors(gt_pose, final_pose)
		helper.display_three_clouds(model_data, sensor_data, predicted_data, title)
	def generate_results(self, ftol, gt_pose, swap_case):
		template_data = self.templates[self.template_idx,:,:].reshape((1,MAX_NUM_POINT,3))		# Extract the template and reshape it.
		template_data = template_data[:,0:self.NUM_POINT,:]

		source_data = helper.apply_transformation(template_data,gt_pose)		# Generate Source Data.
		# source_data = source_data + np.random.normal(0,0.001,source_data.shape)	# Noisy Data

		if swap_case:
			final_pose, TRANSFORMATIONS, loss_i, predicted_data, transformed_source_data, elapsed_time, itr = self.test_one_case(source_data, template_data)	# Find final transformation by network.
		else:
			final_pose, TRANSFORMATIONS, loss_i, predicted_data, transformed_source_data, elapsed_time, itr = self.test_one_case(template_data, source_data)	# Find final transformation by network.

		if not swap_case:
			title = "Actual T (Red->Green): "
			for i in range(len(gt_pose[0])):
				if i>2:
					title += str(round(gt_pose[0][i]*(180/np.pi),2))
				else:
					title += str(gt_pose[0][i])
				title += ', '
			title += "\nPredicted T (Red->Blue): "
			for i in range(len(final_pose[0])):
				if i>2:
					title += str(round(final_pose[0,i]*(180/np.pi),3))
				else:
					title += str(round(final_pose[0,i],3))
				title += ', '	
		else:
			title = "Predicted Transformation: "
			for i in range(len(final_pose[0])):
				if i>2:
					title += str(round(final_pose[0,i]*(180/np.pi),3))
				else:
					title += str(round(final_pose[0,i],3))
				title += ', '	

		title += '\nElapsed Time: '+str(np.round(elapsed_time*1000,3))+' ms'+' & Iterations: '+str(itr)
		title += ' & Iterative Network'

		self.find_errors(gt_pose, final_pose)
		if swap_case:
			helper.display_three_clouds(source_data[0], template_data[0], transformed_source_data, title)
		else:
			helper.display_three_clouds(template_data[0], source_data[0], transformed_source_data, title)
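# A hypothetical call to generate_results (`tester` stands in for whatever object
# carries these methods; the pose values are arbitrary):
#
#     gt_pose = np.array([[0.5, 0.0, 0.2, 0.0, 0.0, 30*(np.pi/180)]])
#     tester.generate_results(ftol=1e-07, gt_pose=gt_pose, swap_case=False)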
Example #7
def split_template_source(template_data,
                          batch_euler_poses,
                          NUM_POINT,
                          centroid_subtraction_switch,
                          ADD_NOISE,
                          S_RAND_POINTS,
                          SPARSE=1):
    if np.random.random_sample() < S_RAND_POINTS:
        #sparse:
        if SPARSE == 1:
            template_data = template_data[:, :(2 * NUM_POINT), ...]
        elif SPARSE == 2:
            template_data = template_data[:, :(4 * NUM_POINT), ...]

        template_data = helper.select_random_points(template_data,
                                                    (2 * NUM_POINT))
        source_data = template_data[:, NUM_POINT:(2 * NUM_POINT), ...]
        template_data = template_data[:, :NUM_POINT, ...]
    else:
        source_data = template_data[:, :(NUM_POINT), ...]
        template_data = template_data[:, :NUM_POINT, ...]

    source_data = helper.apply_transformation(
        source_data, batch_euler_poses
    )  # Apply the poses on the templates to get source data.

    if centroid_subtraction_switch:
        source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
    # template_data = template_data - np.mean(template_data, axis=1, keepdims=True)

    if np.random.random_sample() < ADD_NOISE:
        source_data = helper.add_noise(source_data)

    # Only choose a limited number of points from the source and template data.
    source_data = source_data[:, 0:NUM_POINT, :]
    template_data = template_data[:, 0:NUM_POINT, :]

    # # To visualize the source and point clouds:
    # if display_ptClouds:
    # 	helper.display_clouds_data(source_data[0])
    # 	helper.display_clouds_data(template_data[0])

    return template_data, source_data
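# A smoke test for split_template_source with random stand-in clouds; it still needs
# the helper module used throughout these examples (for apply_transformation), so
# treat it as a sketch of the expected shapes:
import numpy as np

if __name__ == '__main__':
    rand_clouds = np.random.rand(8, 4096, 3)
    rand_poses = np.random.uniform(-0.5, 0.5, (8, 6))
    tmpl, src = split_template_source(rand_clouds, rand_poses, NUM_POINT=1024,
                                      centroid_subtraction_switch=True,
                                      ADD_NOISE=0.0, S_RAND_POINTS=0.0, SPARSE=1)
    print(tmpl.shape, src.shape)  # (8, 1024, 3) each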
	def generate_loss_3Dplots(self, axis, x_axis_param):
		# Parameters to deal with:
		# axis:					Pair of axes for the sweep: 'XY', 'YZ' or 'XZ'.
		# x_axis_param:			Whether to rotate ('rotation') or translate ('translation') the point cloud.

		template_data = self.templates[self.template_idx,:,:].reshape((1,MAX_NUM_POINT,3))		# Extract the template and reshape it.
		template_data = template_data[:,0:self.NUM_POINT,:]

		loss = []															# Store the losses.
		angles_x = []														# Store angles about the first axis.
		angles_y = []														# Store angles about the second axis.
		if x_axis_param == 'rotation':
			# Loop to find loss for various angles from -90 to 90.
			for i in range(-90,91):
				print('I: {}'.format(i))
				for j in range(-90,91):
					if axis == 'XY':
						gt_pose = np.array([[0.0, 0.0, 0.0, i*(np.pi/180), j*(np.pi/180), 0.0]])			# New poses as per each index.
					if axis == 'YZ':
						gt_pose = np.array([[0.0, 0.0, 0.0, 0.0, i*(np.pi/180), j*(np.pi/180)]])			# New poses as per each index.
					if axis == 'XZ':
						gt_pose = np.array([[0.0, 0.0, 0.0, i*(np.pi/180), 0.0, j*(np.pi/180)]])			# New poses as per each index.

					source_data = helper.apply_transformation(template_data,gt_pose)		# Generate Source Data.
					final_pose, TRANSFORMATIONS, loss_i, predicted_data, transformed_source_data, _, _ = self.test_one_case(template_data, source_data)	# Find final transformation by network.
					loss.append(loss_i)
					angles_x.append(i)
					angles_y.append(j)
					# helper.display_three_clouds(template_data[0],source_data[0],transformed_source_data,"Results")

			fig = plt.figure()

			ax = fig.add_subplot(111,projection='3d')
			ax.scatter(angles_x,angles_y,loss)
			ax.set_xlabel('Rotation Angle about '+axis[0]+'-axis', fontsize=25, labelpad=25)
			ax.set_ylabel('Rotation Angle about '+axis[1]+'-axis', fontsize=25, labelpad=25)
			ax.set_zlabel('Error in Poses (L2 Norm)', fontsize=25, labelpad=25)
			ax.tick_params(labelsize=25)
			plt.show()
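# generate_loss_3Dplots uses projection='3d'; on older matplotlib versions the 3D
# projection only registers after importing the toolkit:
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401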
Example #9
def eval_network(sess, ops, templates, poses, pairs):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    loss_sum = 0  # Total Loss in each batch.
    num_batches = int(poses.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.
    print('Number of batches to be executed: {}'.format(num_batches))

    # Store time taken, no of iterations, translation error and rotation error for registration.
    TIME, ITR, Trans_Err, Rot_Err = [], [], [], []
    idxs_5_5, idxs_10_1, idxs_20_2 = [], [], []

    if FLAGS.use_noise_data:
        print(FLAGS.data_dict)
        templates, sources = helper.read_noise_data(FLAGS.data_dict)
        print(templates.shape, sources.shape)

    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        if FLAGS.use_noise_data:
            template_data = np.copy(templates[fn, :, :]).reshape(
                1, -1, 3)  # As template_data is changing.
            source_data = np.copy(sources[fn, :, :]).reshape(1, -1, 3)
            batch_euler_poses = poses[
                start_idx:end_idx]  # Extract poses for batch training.
        else:
            # template_idx = pairs[fn,1]
            template_data = np.copy(templates[0, :, :]).reshape(
                1, -1, 3)  # As template_data is changing.

            batch_euler_poses = poses[
                start_idx:end_idx]  # Extract poses for batch training.
            source_data = helper.apply_transformation(
                template_data, batch_euler_poses
            )  # Apply the poses on the templates to get source data.

        template_data = template_data[:, 0:NUM_POINT, :]
        source_data = source_data[:, 0:NUM_POINT, :]

        # Just to visualize the data.
        TEMPLATE_DATA = np.copy(
            template_data)  # Store the initial template to visualize results.
        SOURCE_DATA = np.copy(
            source_data)  # Store the initial source to visualize results.

        # Subtract the Centroids from the Point Clouds.
        if centroid_subtraction_switch:
            source_data = source_data - np.mean(
                source_data, axis=1, keepdims=True)
            template_data = template_data - np.mean(
                template_data, axis=1, keepdims=True)

        # To visualize the source and point clouds:
        if display_ptClouds:
            helper.display_clouds_data(source_data[0])
            helper.display_clouds_data(template_data[0])

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize identity matrices of size equal to batch_size

        # previous_pose = np.array([0,0,0,1,0,0,0])
        previous_T = np.eye(4)

        start = time.time()  # Log start time.
        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS):
            for network_itr in range(7):
                # Feed the placeholders of Network19 with template data and source data.
                feed_dict = {
                    ops['source_pointclouds_pl']: source_data,
                    ops['template_pointclouds_pl']: template_data,
                    ops['is_training_pl']: is_training
                }
                predicted_transformation = sess.run(
                    [ops['predicted_transformation']], feed_dict=feed_dict
                )  # Ask the network to predict the pose.

                # Apply the transformation on the source data and multiply it to transformation matrix obtained in previous iteration.
                TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                    predicted_transformation, TRANSFORMATIONS, source_data)

                # Display Results after each iteration.
                if display_poses_in_itr:
                    print(predicted_transformation[0][0, 0:3])
                    print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
                if display_ptClouds_in_itr:
                    helper.display_clouds_data(template_data[0])

            # Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training
            }

            # Ask the network to predict transformation, calculate loss using distance between actual points.
            predicted_transformation = sess.run(
                [ops['predicted_transformation']], feed_dict=feed_dict)

            # Apply the final transformation on the source data and multiply it with the transformation matrix obtained from N-Iterations.
            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            if check_convergenceT(previous_T, TRANSFORMATIONS[0]):
                break
            else:
                previous_T = np.copy(TRANSFORMATIONS[0])
        end = time.time()  # Log end time.

        final_pose = helper.find_final_pose_inv(
            TRANSFORMATIONS
        )  # Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.
        final_pose[0,
                   0:3] = final_pose[0, 0:3] + np.mean(SOURCE_DATA, axis=1)[0]

        translation_error, rotational_error = find_errors(
            batch_euler_poses[0], final_pose[0])

        TIME.append(end - start)
        ITR.append(loop_idx + 1)
        Trans_Err.append(translation_error)
        Rot_Err.append(rotational_error)

        if rotational_error < 20 and translation_error < 0.2:
            if rotational_error < 10 and translation_error < 0.1:
                if rotational_error < 5 and translation_error < 0.05:
                    idxs_5_5.append(fn)
                idxs_10_1.append(fn)
            idxs_20_2.append(fn)

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

        # Display Loss Value.
        # helper.display_three_clouds(TEMPLATE_DATA[0],SOURCE_DATA[0],template_data[0],"")
        print("Batch: {} & time: {}, iteration: {}".format(
            fn, end - start, loop_idx + 1))

    log = {
        'TIME': TIME,
        'ITR': ITR,
        'Trans_Err': Trans_Err,
        'Rot_Err': Rot_Err,
        'idxs_5_5': idxs_5_5,
        'idxs_10_1': idxs_10_1,
        'idxs_20_2': idxs_20_2,
        'num_batches': num_batches
    }

    helper.log_test_results(FLAGS.log_dir, FLAGS.filename, log)
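# eval_network calls check_convergenceT, which is not defined in this excerpt. A
# minimal sketch, assuming convergence means the 4x4 transformation stopped changing
# between iterations (the tolerance value is an assumption):
import numpy as np

def check_convergenceT_sketch(previous_T, current_T, ftol=1e-07):
    # True when every entry of the transformation matrix moved by less than ftol.
    return np.all(np.abs(previous_T - current_T) < ftol)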
Example #10
def eval_one_epoch(sess, ops, eval_writer, templates, poses):
	# Arguments:
	# sess: 		Tensorflow session to handle tensors.
	# ops:			Dictionary for tensors of Network
	# templates:	Training Point Cloud data.
	# poses: 		Training pose data.

	is_training = False
	display_ptClouds = False
	display_poses = False
	display_poses_in_itr = False
	display_ptClouds_in_itr = False

	#templates = helper.shuffle_templates(templates)
	#poses = helper.shuffle_poses(poses)

	loss_sum = 0											# Total Loss in each batch.
	num_batches = int(poses.shape[0]/BATCH_SIZE) 				# Number of batches in an epoch.
	
	for fn in range(num_batches):
		#shuffled_poses = helper.shuffle_poses(poses)

		start_idx = fn*BATCH_SIZE 			# Start index of poses.
		end_idx = (fn+1)*BATCH_SIZE 		# End index of poses.
		
		#template_data = np.copy(templates[start_idx:end_idx])
		template_data = np.copy(templates[0,:,:]).reshape(1,-1,3)
		template_data = np.tile(template_data, (BATCH_SIZE, 1, 1))

		batch_euler_poses = poses[start_idx:end_idx,:]			# Extract poses for batch training.
		source_data = helper.apply_transformation(template_data, batch_euler_poses)		# Apply the poses on the templates to get source data.

		if centroid_subtraction_switch:
			source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
			template_data = template_data - np.mean(template_data, axis=1, keepdims=True)

		if FLAGS.use_partial_data:
			complete_source_data = np.copy(source_data)
			source_data = helper.find_partial_data(complete_source_data)
			template_data = helper.find_partial_data(template_data)

		# Choose random points from point clouds for training.
		if np.random.random_sample()<0.0:
			source_data = helper.select_random_points(source_data, NUM_POINT)						# Disabled here (probability 0.0): source would get different points than the template.
		else:
			source_data = source_data[:,0:NUM_POINT,:]
		if np.random.random_sample()<ADD_NOISE:
			source_data = helper.add_noise(source_data)	

		# Only choose a limited number of points from the source and template data.
		source_data = source_data[:,0:NUM_POINT,:]
		template_data = template_data[:,0:NUM_POINT,:]

		template_voxel = helper.voxelization(template_data, size=32)

		# To visualize the source and point clouds:
		if display_ptClouds:
			helper.display_clouds_data(source_data[0])
			helper.display_clouds_data(template_data[0])

		TRANSFORMATIONS = np.identity(4)				# Initialize identity transformation matrix.
		TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS,BATCH_SIZE,1).reshape(BATCH_SIZE,4,4)		# Initialize identity matrices of size equal to batch_size

		# Iterations for pose refinement.
		for loop_idx in range(MAX_LOOPS-1):
			source_voxel = helper.voxelization(source_data, size=32)
			# 4a
			# Feed the placeholders of Network with template data and source data.
			feed_dict = {ops['source_pointclouds_pl']: source_voxel,
						 ops['template_pointclouds_pl']: template_voxel,
						 ops['is_training_pl']: is_training}
			predicted_transformation = sess.run([ops['predicted_transformation']], feed_dict=feed_dict)		# Ask the network to predict the pose.

			# 4b,4c
			# Apply the transformation on the template data and multiply it to transformation matrix obtained in previous iteration.
			if FLAGS.use_partial_data:
				TRANSFORMATIONS, complete_source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, complete_source_data)
				source_data = helper.find_partial_data(complete_source_data)
			else:
				TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)

			# Display Results after each iteration.
			if display_poses_in_itr:
				print(predicted_transformation[0][0,0:3])
				print(predicted_transformation[0][0,3:7]*(180/np.pi))
			if display_ptClouds_in_itr:
				helper.display_clouds_data(source_data[0])

		source_voxel = helper.voxelization(source_data, size=32)
		# Feed the placeholders of Network with source data and template data obtained from N-Iterations.
		feed_dict = {ops['source_pointclouds_pl']: source_voxel,
					 ops['template_pointclouds_pl']: template_voxel,
					 ops['transformation_pl']: TRANSFORMATIONS,
					 ops['gt_transformation_pl']: helper.pose2mat_inv(batch_euler_poses),
					 ops['is_training_pl']: is_training}

		# Ask the network to predict transformation, calculate loss using distance between actual points.
		summary, step, loss_val, predicted_transformation = sess.run([ops['merged'], ops['step'], ops['loss'], ops['predicted_transformation']], feed_dict=feed_dict)

		eval_writer.add_summary(summary, step)			# Add all the summary to the tensorboard.

		# Apply the final transformation on the template data and multiply it with the transformation matrix obtained from N-Iterations.
		if FLAGS.use_partial_data:
			TRANSFORMATIONS, complete_source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, complete_source_data)
			source_data = helper.find_partial_data(complete_source_data)
		else:
			TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)

		final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)		# Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.

		# Display the ground truth pose and predicted pose for first Point Cloud in batch 
		if display_poses:
			print('Ground Truth Position: {}'.format(batch_euler_poses[0,0:3].tolist()))
			print('Predicted Position: {}'.format(final_pose[0,0:3].tolist()))
			print('Ground Truth Orientation: {}'.format((batch_euler_poses[0,3:6]*(180/np.pi)).tolist()))
			print('Predicted Orientation: {}'.format((final_pose[0,3:6]*(180/np.pi)).tolist()))

		# Display Loss Value.
		print("Batch: {}, Loss: {}\r".format(fn, loss_val),end='')

		# Add loss for each batch.
		loss_sum += loss_val
	print('\n')
	log_string('Eval Mean loss: %f' % (loss_sum/num_batches))		# Store and display mean loss of epoch.
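# helper.voxelization is used above but not shown here. A plausible occupancy-grid
# sketch, assuming the clouds roughly fit inside the cube [-1, 1]^3 (the
# normalization and binary occupancy are assumptions):
import numpy as np

def voxelization_sketch(point_clouds, size=32):
	# point_clouds: (B, N, 3) -> (B, size, size, size) binary occupancy grids.
	voxels = np.zeros((point_clouds.shape[0], size, size, size), dtype=np.float32)
	for b, cloud in enumerate(point_clouds):
		idx = np.clip(((cloud + 1.0) / 2.0 * size).astype(int), 0, size - 1)
		voxels[b, idx[:, 0], idx[:, 1], idx[:, 2]] = 1.0
	return voxels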
def test_one_epoch(sess, ops_L, templates, shuffled_poses, saver, model_path):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops_L:		Dictionary for tensors of Network_L
    # templates:	Training Point Cloud data.
    # shuffled_poses:	Training pose data.
    # saver: 		To restore the weights.
    # model_path: 	Path of log directory.

    saver.restore(sess, model_path)  # Restore the weights of trained network.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False
    swap_case = False

    templates = helper.process_templates('templates')
    template_data = np.zeros((BATCH_SIZE, MAX_NUM_POINT,
                              3))  # Extract Templates for batch training.
    for i in range(BATCH_SIZE):
        template_data[i, :, :] = np.copy(templates[1, :, :])
    batch_euler_poses = shuffled_poses[0].reshape(
        (1, 6))  # Extract poses for batch training.

    # Self defined test case.
    batch_euler_poses[0] = [
        0.5, 0.0, 0.2, 50 * (np.pi / 180), 0 * (np.pi / 180),
        10 * (np.pi / 180)
    ]
    source_data = helper.apply_transformation(
        template_data, batch_euler_poses
    )  # Apply the poses on the templates to get source data.

    # Only chose limited number of points from the source and template data.
    template_data = template_data[:, 0:NUM_POINT, :]
    source_data = source_data[:, 0:NUM_POINT, :]

    if swap_case:
        source_data, template_data = template_data, source_data  # Swap the template and source.
        transformation_template2source = helper.transformation(
            batch_euler_poses)
        transformation_source2template = np.linalg.inv(
            transformation_template2source[0])
        [euler_z, euler_y,
         euler_x] = t3d.mat2euler(transformation_source2template[0:3, 0:3],
                                  'szyx')
        trans_x = transformation_source2template[0, 3]
        trans_y = transformation_source2template[1, 3]
        trans_z = transformation_source2template[2, 3]
        pose_source2template = [
            trans_x, trans_y, trans_z, euler_x * (180 / np.pi),
            euler_y * (180 / np.pi), euler_z * (180 / np.pi)
        ]
        batch_euler_poses[0] = pose_source2template

    TEMPLATE_DATA = np.copy(
        template_data)  # Store the initial template to visualize results.
    SOURCE_DATA = np.copy(
        source_data)  # Store the initial source to visualize results.

    # To visualize the source and point clouds:
    if display_ptClouds:
        helper.display_clouds_data(source_data[0])
        helper.display_clouds_data(template_data[0])

    # Subtract the Centroids from the Point Clouds.
    if centroid_subtraction_switch:
        source_data, template_data, centroid_translation_pose = helper.centroid_subtraction(
            source_data, template_data)

    TRANSFORMATIONS = np.identity(
        4)  # Initialize identity transformation matrix.
    TRANSFORMATIONS = np.matlib.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
        BATCH_SIZE, 4,
        4)  # Initialize identity matrices of size equal to batch_size

    # Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
    feed_dict = {
        ops_L['source_pointclouds_pl']: source_data,
        ops_L['template_pointclouds_pl']: template_data,
        ops_L['is_training_pl']: is_training
    }

    # Ask the network to predict transformation, calculate loss using distance between actual points.
    import time
    start = time.time()
    step, predicted_transformation = sess.run(
        [ops_L['step'], ops_L['predicted_transformation']],
        feed_dict=feed_dict)
    end = time.time()
    print(end - start)

    # Apply the final transformation on the template data and multiply it with the transformation matrix obtained from N-Iterations.
    TRANSFORMATIONS, template_data = helper.transformation_quat2mat(
        predicted_transformation, TRANSFORMATIONS, template_data)

    if centroid_subtraction_switch:  # If the centroid was subtracted, apply the centroid translation back to the point clouds.
        TRANSFORMATIONS, template_data = helper.transformation_quat2mat(
            centroid_translation_pose, TRANSFORMATIONS, template_data)

    final_pose = helper.find_final_pose(TRANSFORMATIONS)

    if not swap_case:
        title = "Actual T (Red->Green): "
        for i in range(len(batch_euler_poses[0])):
            if i > 2:
                title += str(round(batch_euler_poses[0][i] * (180 / np.pi), 2))
            else:
                title += str(batch_euler_poses[0][i])
            title += ', '
        title += "\nPredicted T (Red->Blue): "
        for i in range(len(final_pose[0])):
            if i > 2:
                title += str(round(final_pose[0, i] * (180 / np.pi), 3))
            else:
                title += str(round(final_pose[0, i], 3))
            title += ', '
    else:
        title = "Predicted T (Red->Blue): "
        for i in range(len(final_pose[0])):
            if i > 2:
                title += str(round(final_pose[0, i] * (180 / np.pi), 3))
            else:
                title += str(round(final_pose[0, i], 3))
            title += ', '

    # Display the ground truth pose and predicted pose for first Point Cloud in batch
    if display_poses:
        print('Ground Truth Position: {}'.format(
            batch_euler_poses[0, 0:3].tolist()))
        print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
        print('Ground Truth Orientation: {}'.format(
            (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
        print('Predicted Orientation: {}'.format(
            (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

    helper.display_three_clouds(TEMPLATE_DATA[0], SOURCE_DATA[0],
                                template_data[0], title)

def eval_one_epoch(sess, ops_L, eval_writer, templates, poses):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops_L:		Dictionary for tensors of Network_L
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    #templates = helper.shuffle_templates(templates)
    #poses = helper.shuffle_poses(poses)

    loss_sum = 0  # Total Loss in each batch.
    num_batches = int(poses.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.

    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        template_data = np.copy(templates[2, :, :]).reshape(1, -1, 3)
        template_data = np.tile(template_data, (BATCH_SIZE, 1, 1))
        batch_euler_poses = poses[start_idx:
                                  end_idx]  # Extract poses for batch training.

        source_data = helper.apply_transformation(
            template_data, batch_euler_poses
        )  # Apply the poses on the templates to get source data.

        # Choose random points from point clouds for training.
        if np.random.random_sample() < 0:
            source_data = helper.select_random_points(
                source_data, NUM_POINT
            )  # Disabled here (probability 0): source would get different points than the template.
        else:
            source_data = source_data[:, 0:NUM_POINT, :]

        if np.random.random_sample() < 0:
            source_data = helper.add_noise(
                source_data)  # Disabled here (probability 0): would add noise to the source data.

        # Only choose a limited number of points from the source and template data.
        template_data = template_data[:, 0:NUM_POINT, :]
        source_data = source_data[:, 0:NUM_POINT, :]

        # Subtract the Centroids from the Point Clouds.
        if centroid_subtraction_switch:
            source_data = source_data - np.mean(
                source_data, axis=1, keepdims=True)
            template_data = template_data - np.mean(
                template_data, axis=1, keepdims=True)

        # To visualize the source and point clouds:
        if display_ptClouds:
            helper.display_clouds_data(source_data[0])
            helper.display_clouds_data(template_data[0])

        # Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
        feed_dict = {
            ops_L['source_pointclouds_pl']: source_data,
            ops_L['template_pointclouds_pl']: template_data,
            ops_L['is_training_pl']: is_training
        }

        # Ask the network to predict transformation, calculate loss using distance between actual points.
        summary, step, loss_val, predicted_transformation = sess.run(
            [
                ops_L['merged'], ops_L['step'], ops_L['loss'],
                ops_L['predicted_transformation']
            ],
            feed_dict=feed_dict)
        eval_writer.add_summary(
            summary, step)  # Add all the summary to the tensorboard.

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

        # Display Loss Value.
        print("Batch: {} & Loss: {}\r".format(fn, loss_val), end='')

        # Add loss for each batch.
        loss_sum += loss_val
    print('\n')
    log_string(
        'Eval Mean loss: %f' %
        (loss_sum / num_batches))  # Store and display mean loss of epoch.
Example #13
def run():
    NUM_POINT = FLAGS.num_point

    if not use_noise_data:
        templates = helper.loadData(FLAGS.data_dict)
        pairs = helper.read_pairs(FLAGS.data_dict, FLAGS.pairs_file)
    else:
        templates, sources = helper.read_noise_data(FLAGS.data_dict)
        # templates = helper.loadData(FLAGS.data_dict)
    eval_poses = helper.read_poses(
        FLAGS.data_dict,
        FLAGS.eval_poses)  # Read all the poses data for evaluation.
    eval_poses = eval_poses[0:1, :]
    num_batches = eval_poses.shape[0]

    TIME, ITR, Trans_Err, Rot_Err = [], [], [], []
    idxs_5_5, idxs_10_1, idxs_20_2 = [], [], []

    counter = 0
    for fn, gt_pose in enumerate(eval_poses):
        if fn > 0:
            break
        if not use_noise_data:
            # template_idx = pairs[fn,1]
            template_idx = 0
            M_given = templates[template_idx, :, :]
            S_given = helper.apply_transformation(M_given.reshape((1, -1, 3)),
                                                  gt_pose.reshape((1, 6)))[0]
        else:
            M_given = templates[fn, :, :]
            S_given = sources[fn, :, :]

        # M_given = np.loadtxt('template_car_itr.txt')
        # S_given = np.loadtxt('source_car_itr.txt')
        # helper.display_clouds_data(M_given)
        # helper.display_clouds_data(S_given)

        # To generate point cloud for Xueqian: For CAD model figures
        # gt_pose = np.array([[0.5,0.2,0.4,40*(np.pi/180),20*(np.pi/180),30*(np.pi/180)]])
        # templates = helper.loadData('unseen_data')
        # gt_pose = np.array([[-0.3,-0.7,0.4,-34*(np.pi/180),31*(np.pi/180),-27*(np.pi/180)]])
        # gt_pose = np.array([[0.5929,-0.0643,-0.961,0.4638,-0.3767,-0.6253]])
        # M_given = templates[48,:,:]
        # S_given = helper.apply_transformation(M_given.reshape(1,-1,3),gt_pose)
        # S_given = helper.add_noise(S_given)
        # S_given = S_given[0]

        M_given = M_given[0:NUM_POINT, :]  # template data
        S_given = S_given[0:NUM_POINT, :]  # source data

        tree_M = KDTree(M_given)
        tree_M_sampled = KDTree(M_given[0:100, :])

        final_pose, model_data, sensor_data, predicted_data, _, time_elapsed, itr = icp.icp_test(
            S_given[0:100, :], M_given,
            tree_M, M_given[0:100, :], tree_M_sampled, S_given,
            gt_pose.reshape((1, 6)), 100, FLAGS.threshold)
        translation_error, rotational_error = find_errors(
            gt_pose[0], final_pose[0])
        print(translation_error, rotational_error)

        TIME.append(time_elapsed)
        ITR.append(itr)
        Trans_Err.append(translation_error)
        Rot_Err.append(rotational_error)

        if rotational_error < 20 and translation_error < 0.2:
            if rotational_error < 10 and translation_error < 0.1:
                if rotational_error < 5 and translation_error < 0.05:
                    idxs_5_5.append(fn)
                idxs_10_1.append(fn)
            idxs_20_2.append(fn)

        print('Batch: {}, Iterations: {}, Time: {}'.format(
            counter, itr, time_elapsed))
        # counter += 1

        # helper.display_three_clouds(M_given, S_given, predicted_data, "")
        # np.savetxt('template_piano.txt',M_given)
        # np.savetxt('source_piano.txt',S_given)
        # np.savetxt('predicted_piano.txt',predicted_data)

    log = {
        'TIME': TIME,
        'ITR': ITR,
        'Trans_Err': Trans_Err,
        'Rot_Err': Rot_Err,
        'idxs_5_5': idxs_5_5,
        'idxs_10_1': idxs_10_1,
        'idxs_20_2': idxs_20_2,
        'num_batches': num_batches
    }

    helper.log_test_results(FLAGS.log_dir, FLAGS.filename, log)
Example #14
def test_one_epoch(sess, ops, templates, poses, saver, model_path):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.
    # saver: 		To restore the weights.
    # model_path: 	Path of log directory.

    saver.restore(sess, model_path)  # Restore the weights of trained network.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False
    swap_case = False
    MAX_LOOPS = 4

    template_data = np.zeros((BATCH_SIZE, MAX_NUM_POINT,
                              3))  # Extract Templates for batch training.
    template_data[0] = np.copy(templates[FLAGS.template_idx, :, :])

    batch_euler_poses = poses[0].reshape(
        (1, 6))  # Extract poses for batch training.

    # Define test case.
    batch_euler_poses[0] = [
        0.4, 0.5, 0.1, 10 * (np.pi / 180), 20 * (np.pi / 180),
        20 * (np.pi / 180)
    ]
    source_data = helper.apply_transformation(
        template_data, batch_euler_poses
    )  # Apply the poses on the templates to get source data.

    # Choose random points from point clouds for training.
    if np.random.random_sample() < 0:
        source_data = helper.select_random_points(
            source_data, NUM_POINT
        )  # Disabled here (probability 0): source would get different points than the template.
    else:
        source_data = source_data[:, 0:NUM_POINT, :]
    # Add noise to source point cloud.
    if np.random.random_sample() < 1.0:
        source_data = helper.add_noise(source_data)

    # Only choose limited number of points from the source and template data.
    source_data = source_data[:, 0:NUM_POINT, :]
    template_data = template_data[:, 0:NUM_POINT, :]

    TEMPLATE_DATA = np.copy(
        template_data)  # Store the initial template to visualize results.
    SOURCE_DATA = np.copy(
        source_data)  # Store the initial source to visualize results.

    # Subtract the Centroids from the Point Clouds.
    if centroid_subtraction_switch:
        source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
        template_data = template_data - np.mean(
            template_data, axis=1, keepdims=True)

    # To visualize the source and point clouds:
    if display_ptClouds:
        helper.display_clouds_data(source_data[0])
        helper.display_clouds_data(template_data[0])

    TRANSFORMATIONS = np.identity(
        4)  # Initialize identity transformation matrix.
    TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
        BATCH_SIZE, 4,
        4)  # Initialize identity matrices of size equal to batch_size

    # Store the transformed point clouds after each iteration.
    ITR = np.zeros((MAX_LOOPS, template_data.shape[0], template_data.shape[1],
                    template_data.shape[2]))

    # Iterations for pose refinement.
    for loop_idx in range(MAX_LOOPS - 1):
        # 4a
        # Feed the placeholders of Network with template data and source data.
        feed_dict = {
            ops['source_pointclouds_pl']: source_data,
            ops['template_pointclouds_pl']: template_data,
            ops['is_training_pl']: is_training
        }
        predicted_transformation = sess.run(
            [ops['predicted_transformation']],
            feed_dict=feed_dict)  # Ask the network to predict the pose.
        #print (predicted_transformation[0])

        # 4b,4c
        # Apply the transformation on the template data and multiply it to transformation matrix obtained in previous iteration.
        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        # Display Results after each iteration.
        if display_poses_in_itr:
            print(predicted_transformation[0][0, 0:3])
            print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
        if display_ptClouds_in_itr:
            helper.display_clouds_data(source_data[0])
        ITR[loop_idx, :, :, :] = source_data

    # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
    feed_dict = {
        ops['source_pointclouds_pl']: source_data,
        ops['template_pointclouds_pl']: template_data,
        ops['is_training_pl']: is_training
    }

    # Ask the network to predict transformation, calculate loss using distance between actual points.
    step, predicted_transformation = sess.run(
        [ops['step'], ops['predicted_transformation']], feed_dict=feed_dict)

    # Apply the final transformation on the template data and multiply it with the transformation matrix obtained from N-Iterations.
    TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
        predicted_transformation, TRANSFORMATIONS, source_data)

    final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)
    final_pose[0, 0:3] = final_pose[0, 0:3] + np.mean(SOURCE_DATA, axis=1)[0]

    title = "Actual T (Red->Green): "
    for i in range(len(batch_euler_poses[0])):
        if i > 2:
            title += str(round(batch_euler_poses[0][i] * (180 / np.pi), 2))
        else:
            title += str(batch_euler_poses[0][i])
        title += ', '
    title += "\nPredicted T (Red->Blue): "
    for i in range(len(final_pose[0])):
        if i > 2:
            title += str(round(final_pose[0, i] * (180 / np.pi), 3))
        else:
            title += str(round(final_pose[0, i], 3))
        title += ', '

    # Display the ground truth pose and predicted pose for first Point Cloud in batch
    if display_poses:
        print('Ground Truth Position: {}'.format(
            batch_euler_poses[0, 0:3].tolist()))
        print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
        print('Ground Truth Orientation: {}'.format(
            (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
        print('Predicted Orientation: {}'.format(
            (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

    helper.display_three_clouds(TEMPLATE_DATA[0], SOURCE_DATA[0],
                                source_data[0], title)
Example #15
def train_one_epoch(sess, ops, train_writer, templates, poses):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.
    print(datetime.now())

    is_training = True
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    poses = poses[0:5070, :]
    poses = helper.shuffle_poses(poses)  # Shuffle Poses.

    loss_sum = 0  # Total Loss in each batch.
    num_batches = int(templates.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.

    # Training for each batch.
    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        template_data = np.copy(templates[start_idx:end_idx])

        batch_euler_poses = poses[start_idx:
                                  end_idx]  # Extract poses for batch training.
        if SPARSE_SAMPLING > 0:
            template_data, source_data = helper.split_template_source(
                template_data,
                batch_euler_poses,
                NUM_POINT,
                centroid_subtraction_switch,
                ADD_NOISE,
                S_RAND_POINTS,
                SPARSE=SPARSE_SAMPLING)
        else:
            if template_random_pose:
                template_data = helper.apply_transformation(
                    template_data, batch_euler_poses / 2)
                template_data = template_data - np.mean(
                    template_data, axis=1, keepdims=True)

            source_data = helper.apply_transformation(
                template_data, batch_euler_poses
            )  # Apply the poses on the templates to get source data.

            if centroid_subtraction_switch:
                source_data = source_data - np.mean(
                    source_data, axis=1, keepdims=True)
                # template_data = template_data - np.mean(template_data, axis=1, keepdims=True)

            # Choose random points from point clouds for training.
            if np.random.random_sample() < S_RAND_POINTS:
                template_data = helper.select_random_points(
                    template_data, NUM_POINT)
                source_data = helper.select_random_points(
                    source_data, NUM_POINT
                )  # With probability S_RAND_POINTS the source has different points than the template.
            else:
                source_data = source_data[:, 0:NUM_POINT, :]
            if np.random.random_sample() < ADD_NOISE:
                source_data = helper.add_noise(source_data)

            # Only choose a limited number of points from the source and template data.
            source_data = source_data[:, 0:NUM_POINT, :]
            template_data = template_data[:, 0:NUM_POINT, :]

            # To visualize the source and point clouds:
            if display_ptClouds:
                helper.display_clouds_data(source_data[0])
                helper.display_clouds_data(template_data[0])

        if FLAGS.add_occlusions > 0.0:
            source_data = helper.add_occlusions(source_data,
                                                FLAGS.add_occlusions)

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize identity matrices of size equal to batch_size

        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS - 1):
            # 4a
            # Feed the placeholders of Network with template data and source data.
            if ADD_NOISE_MODEL:
                feed_dict = {
                    ops['source_pointclouds_pl']: source_data,
                    ops['template_pointclouds_pl']: template_data,
                    ops['is_training_pl']: is_training,
                    ops['is_training_pl_1']: False,
                    ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT]),
                    ops['add_noise']: np.zeros([BATCH_SIZE, NUM_POINT, 3])
                }
            else:
                feed_dict = {
                    ops['source_pointclouds_pl']: source_data,
                    ops['template_pointclouds_pl']: template_data,
                    ops['is_training_pl']: is_training,
                    ops['is_training_pl_1']: False,
                    ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT])
                }
            # Train at every iteration, or only on the Nth iteration.
            if bool(FLAGS.train_single):
                summary, step, _, loss_val, predicted_transformation = sess.run(
                    [
                        ops['merged'], ops['step'], ops['train_op'],
                        ops['loss'], ops['predicted_transformation']
                    ],
                    feed_dict=feed_dict)
            else:
                predicted_transformation = sess.run(
                    [ops['predicted_transformation']], feed_dict=feed_dict
                )  # Ask the network to predict the pose.

            # 4b,4c
            # Apply the transformation on the template data and multiply it to transformation matrix obtained in previous iteration.
            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            # Display Results after each iteration.
            if display_poses_in_itr:
                print(predicted_transformation[0][0, 0:3])
                print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
            if display_ptClouds_in_itr:
                helper.display_clouds_data(source_data[0])

        # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
        if ADD_NOISE_MODEL:
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training,
                ops['is_training_pl_1']: False,
                ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT]),
                ops['add_noise']: np.zeros([BATCH_SIZE, NUM_POINT, 3])
            }
        else:
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training,
                ops['is_training_pl_1']: False,
                ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT])
            }

        # Ask the network to predict transformation, calculate loss using distance between actual points, calculate & apply gradients for Network and copy the weights to Network19.
        summary, step, _, loss_val, predicted_transformation = sess.run(
            [
                ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['predicted_transformation']
            ],
            feed_dict=feed_dict)
        train_writer.add_summary(
            summary, step)  # Add all the summary to the tensorboard.

        # Apply the final transformation on the template data and multiply it with the transformation matrix obtained from N-Iterations.
        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        # final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)			# Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))
            # print(batch_euler_poses[0,0:3],batch_euler_poses[0,3:6]*(180/np.pi))
            # print(final_pose[0,0:3],final_pose[0,3:6]*(180/np.pi))

        # Display Loss Value.
        print("Batch: {} & Loss: {}\r".format(fn, loss_val), end='')

        # Add loss for each batch.
        loss_sum += loss_val
    print('\n')
    log_string(
        'Train Mean loss: %f\n' %
        (loss_sum / num_batches))  # Store and display mean loss of epoch.
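# A hypothetical outer loop around train_one_epoch; MAX_EPOCH, sess, ops,
# train_writer, templates and poses are assumed to come from the training script:
#
#     for epoch in range(MAX_EPOCH):
#         log_string('**** EPOCH %03d ****' % epoch)
#         train_one_epoch(sess, ops, train_writer, templates, poses)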
Example #16
    templates = helper.process_templates('multi_model_templates')

    template_idx = 201
    NUM_POINT = 1024
    MAX_LOOPS = 500

    M_given = templates[template_idx, :, :]

    x_trans = 0.5
    y_trans = 0.5
    z_trans = 0.5
    x_rot = 45 * (np.pi / 180)
    y_rot = 45 * (np.pi / 180)
    z_rot = 45 * (np.pi / 180)
    gt_pose = np.array([[x_trans, y_trans, z_trans, x_rot, y_rot,
                         z_rot]])  # Pose: Source to Template

    S_given = helper.apply_transformation(M_given.reshape((1, -1, 3)),
                                          gt_pose)[0]
    S_given = S_given + np.random.normal(0, 1, S_given.shape)

    M_given = M_given[0:NUM_POINT, :]  # template data
    S_given = S_given[0:NUM_POINT, :]  # source data

    tree_M = KDTree(M_given)
    tree_M_sampled = KDTree(M_given[0:100, :])

    final_pose, model_data, sensor_data, predicted_data, title = icp_test(
        S_given[0:100, :], M_given, tree_M, M_given[0:100, :], tree_M_sampled,
        S_given, gt_pose.reshape((1, 6)), MAX_LOOPS, 1e-05)
    helper.display_three_clouds(model_data, sensor_data, predicted_data, title)
def eval_network(sess, ops, templates, poses):
	# Arguments:
	# sess: 		Tensorflow session to handle tensors.
	# ops:			Dictionary for tensors of Network
	# templates:	Training Point Cloud data.
	# poses: 		Training pose data.

	is_training = False
	display_ptClouds = False
	display_poses = False
	display_poses_in_itr = False
	display_ptClouds_in_itr = False

	loss_sum = 0											# Total Loss in each batch.
	# print(int(poses.shape[0]/BATCH_SIZE))
	# print(int(len(templates)/BATCH_SIZE))
	# poses = poses[:1000]
	num_batches = int(poses.shape[0]/BATCH_SIZE) # Number of batches in an epoch.
	indx = np.arange(0,len(templates))
	# Tile the template indices so that every pose has a template, even when there are fewer templates than poses.
	while len(indx)<poses.shape[0]:
		indx = np.concatenate([indx, indx], 0)
	# num_batches = int(len(templates)/BATCH_SIZE)
	# exit()
	print('Number of batches to be executed: {}'.format(num_batches))

	# Store time taken, no of iterations, translation error and rotation error for registration.
	TIME, ITR, Trans_Err, Rot_Err = [], [], [], []
	idxs_25_5, idxs_5_5, idxs_10_1, idxs_20_2 = [], [], [], []

	if FLAGS.use_noise_data:
		print(FLAGS.data_dict)
		templates, sources = helper.read_noise_data(FLAGS.data_dict)
		print(templates.shape, sources.shape)

	TE = np.zeros([MAX_LOOPS+1,num_batches])	# Translation error per iteration.
	RE = np.zeros([MAX_LOOPS+1,num_batches])	# Rotation error per iteration.
	CE = np.zeros([MAX_LOOPS+1,num_batches])	# Convergence error per iteration.
	Failures = []
	ques = np.zeros([MAX_LOOPS,7,num_batches])	# Store the 7 predicted transformation parameters at every iteration.
	for fn in range(num_batches):
		start_idx = fn*BATCH_SIZE 			# Start index of poses.
		end_idx = (fn+1)*BATCH_SIZE 		# End index of poses.
		if SPARSE_SAMPLING>0:
			template_data = np.copy(templates[indx[fn], :, :]).reshape(1, -1, 3)
			batch_euler_poses = poses[start_idx:end_idx]  # Extract poses for batch training.
			template_data,source_data = helper.split_template_source(template_data,batch_euler_poses,NUM_POINT,centroid_subtraction_switch=False,ADD_NOISE=FLAGS.use_noise_data,S_RAND_POINTS=S_RAND_POINTS,SPARSE=SPARSE_SAMPLING)
		else:
			if FLAGS.use_noise_data:
				template_data = np.copy(templates[fn,:,:]).reshape(1,-1,3)				# As template_data is changing.
				source_data = np.copy(sources[fn,:,:]).reshape(1,-1,3)
				batch_euler_poses = poses[start_idx:end_idx]			# Extract poses for batch training.
			else:
				# template_idx = pairs[fn,1]
				template_data = np.copy(templates[indx[fn],:,:]).reshape(1,-1,3)				# As template_data is changing.

				batch_euler_poses = poses[start_idx:end_idx]			# Extract poses for batch training.
				# print(batch_euler_poses)
				if template_random_pose:
					template_data = helper.apply_transformation(template_data, batch_euler_poses / 2)
					template_data = template_data - np.mean(template_data, axis=1, keepdims=True)
				source_data = helper.apply_transformation(template_data, batch_euler_poses)		# Apply the poses on the templates to get source data.

			# SOURCE_DATA = np.copy(source_data[:,0:NUM_POINT,:]) #movement is calculated from initial pose

			if np.random.random_sample()<S_RAND_POINTS:
				template_data = helper.select_random_points(template_data, NUM_POINT)
				source_data = helper.select_random_points(source_data, NUM_POINT)						# With probability S_RAND_POINTS, the source is sampled from different points than the template.
				template_data = template_data[:,0:NUM_POINT,:]
			else:
				source_data = source_data[:,0:NUM_POINT,:]
				template_data = template_data[:,0:NUM_POINT,:]


		# Just to visualize the data.
		TEMPLATE_DATA = np.copy(template_data)				# Store the initial template to visualize results.
		SOURCE_DATA = np.copy(source_data)					# Store the initial source to visualize results.

		# Subtract the Centroids from the Point Clouds.
		if centroid_subtraction_switch:
			T = np.mean(source_data, axis=1)			# Keep the source centroid; presumably get_error uses it to compensate for the subtraction below.
			# print(T)
			# print(np.mean(template_data, axis=1))
			# print(np.mean(source_data, axis=1))
			source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
			# print(np.mean(template_data, axis=1, keepdims=True))
			# template_data = template_data - np.mean(template_data, axis=1, keepdims=True)
		else:
			T=[[0,0,0]]

		if FLAGS.add_occlusions>0.0:
			source_data = helper.add_occlusions(source_data,FLAGS.add_occlusions)

		# exit()
		# To visualize the source and point clouds:
		if display_ptClouds:
			helper.display_clouds_data(source_data[0])
			helper.display_clouds_data(template_data[0])

		TRANSFORMATIONS = np.identity(4)				# Initialize identity transformation matrix.
		TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS,BATCH_SIZE,1).reshape(BATCH_SIZE,4,4)		# Initialize identity matrices of size equal to batch_size.

		# previous_pose = np.array([0,0,0,1,0,0,0])
		previous_T = np.eye(4)

		start = time.time()												# Log start time.
		# Iterations for pose refinement.
		translation_error, rotational_error, final_pose = get_error(TRANSFORMATIONS, SOURCE_DATA,
														batch_euler_poses,T)
		TE[0, fn] = translation_error
		RE[0, fn] = rotational_error
		CE[0, fn] = 1
		print(fn)		# Progress: current batch index.
		for loop_idx in range(MAX_LOOPS):
			# for network_itr in range(7):
			# 	# Feed the placeholders of Network19 with template data and source data.
			# 	feed_dict = {ops['source_pointclouds_pl']: source_data,
			# 				 ops['template_pointclouds_pl']: template_data,
			# 				 ops['is_training_pl']: is_training}
			# 	predicted_transformation = sess.run([ops['predicted_transformation']], feed_dict=feed_dict)		# Ask the network to predict the pose.
            #
			# 	# Apply the transformation on the source data and multiply it to transformation matrix obtained in previous iteration.
			# 	TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)
            #
			# 	# Display Results after each iteration.
			# 	if display_poses_in_itr:
			# 		print(predicted_transformation[0,0:3])
			# 		print(predicted_transformation[0,3:7]*(180/np.pi))
			# 	if display_ptClouds_in_itr:
			# 		helper.display_clouds_data(template_data[0])

			# Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
			feed_dict = {ops['source_pointclouds_pl']: source_data,
						 ops['template_pointclouds_pl']: template_data,
						 ops['is_training_pl']: is_training}

			# Ask the network to predict transformation, calculate loss using distance between actual points.
			predicted_transformation = sess.run([ops['predicted_transformation']], feed_dict=feed_dict)

			# print(predicted_transformation)
			# Apply the final transformation on the source data and multiply it with the transformation matrix obtained from N-Iterations.
			TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)
			translation_error, rotational_error, final_pose = get_error(TRANSFORMATIONS, SOURCE_DATA,
															batch_euler_poses,T)
			ck_con_T,convergence_error = check_convergenceT(previous_T, TRANSFORMATIONS[0])
			ques[loop_idx,:,fn] = np.squeeze(predicted_transformation)
			# print(ques[loop_idx,:,fn])
			TE[loop_idx+1,fn] = translation_error
			RE[loop_idx+1,fn] = rotational_error
			CE[loop_idx+1,fn] = convergence_error

			if ck_con_T:
				# break											# Early stopping is disabled; just log the iteration at which convergence occurred.
				print('converge iteration:', loop_idx)
			previous_T = np.copy(TRANSFORMATIONS[0])			# Always keep the latest transform for the next convergence check.

		end = time.time()													# Log end time.

		# final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)		# Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.
		# final_pose[0,0:3] = final_pose[0,0:3] + np.mean(SOURCE_DATA, axis=1)[0]#\
		# 			 #- np.mean(TEMPLATE_DATA, axis=1)[0]
        #
		# translation_error, rotational_error = find_errors(batch_euler_poses[0], final_pose[0])
		translation_error, rotational_error, final_pose = get_error(TRANSFORMATIONS, SOURCE_DATA, batch_euler_poses,T)

		TIME.append(end-start)
		ITR.append(loop_idx+1)
		Trans_Err.append(translation_error)
		Rot_Err.append(rotational_error)

		# Count registrations that meet progressively tighter (rotation in degrees, translation) success thresholds.
		if rotational_error<20 and translation_error<0.2:
			if rotational_error<10 and translation_error<0.1:
				if rotational_error<5 and translation_error<0.05:
					if rotational_error < 2.5:
						idxs_25_5.append(fn)
					idxs_5_5.append(fn)
				idxs_10_1.append(fn)
			idxs_20_2.append(fn)

		# if rotational_error>20:
		# 	Failures.append(fn)
		# 	helper.display_three_clouds(template_data[0],SOURCE_DATA[0],source_data[0], str(fn)+"_"+str(rotational_error))
		# 	print('added failure image')
		# else:
		# 	print(rotational_error)
		# Display the ground truth pose and predicted pose for first Point Cloud in batch 
		if display_poses:
			print('Ground Truth Position: {}'.format(batch_euler_poses[0,0:3].tolist()))
			print('Predicted Position: {}'.format(final_pose[0,0:3].tolist()))
			print('Ground Truth Orientation: {}'.format((batch_euler_poses[0,3:6]*(180/np.pi)).tolist()))
			print('Predicted Orientation: {}'.format((final_pose[0,3:6]*(180/np.pi)).tolist()))

		# Display Loss Value.
		# helper.display_three_clouds(TEMPLATE_DATA[0],SOURCE_DATA[0],template_data[0],"")
		print("Batch: {} & time: {}, iteration: {}".format(fn, end-start, loop_idx+1))

	plot_iter_graph(TE,FLAGS.log_dir,'translation error')
	plot_iter_graph(RE,FLAGS.log_dir,'rotation error')
	plot_iter_graph(CE,FLAGS.log_dir,'convergence error')

	log = {'TIME': TIME, 'ITR':ITR, 'Trans_Err': Trans_Err, 'Rot_Err': Rot_Err,'idxs_25_5':idxs_25_5, 'idxs_5_5': idxs_5_5, 'idxs_10_1': idxs_10_1, 'idxs_20_2': idxs_20_2, 'num_batches': num_batches}

	helper.log_test_results(FLAGS.log_dir, FLAGS.filename, log)
	hf = h5py.File(os.path.join(FLAGS.log_dir, 'log_data.h5'), 'w')
	hf.create_dataset('TE', data=TE)
	hf.create_dataset('RE', data=RE)
	hf.create_dataset('CE', data=CE)
	hf.close()
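# ---------------------------------------------------------------------------
# NOTE: `check_convergenceT` (used in the refinement loop above) is defined
# elsewhere in the repo. The sketch below is a hypothetical reading of it:
# declare convergence once the accumulated 4x4 transform stops changing
# between iterations, and return that change as the value stored in `CE`.
# ---------------------------------------------------------------------------
import numpy as np

def check_convergenceT_sketch(previous_T, current_T, tol=1e-6):
	convergence_error = np.sum(np.abs(current_T - previous_T))	# Element-wise drift between successive transforms.
	return convergence_error < tol, convergence_error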
Example #18
def eval_one_epoch(sess, ops, eval_writer, poses):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # eval_writer:	Summary writer used to log evaluation results to tensorboard.
    # poses: 		Evaluation pose data (replaced below by poses read from the partial-data file).

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    loss_sum = 0  # Accumulated loss over all batches.
    templates, sources, poses = helper.read_partial_data(
        'train_data', 'partial_data_eval.h5')
    num_batches = int(templates.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.

    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        template_data = np.copy(templates[start_idx:end_idx])
        batch_euler_poses = np.copy(poses[start_idx:end_idx])

        # Check for partial source.
        if np.random.sample() < ADD_PARTIAL:
            source_data = np.copy(sources[start_idx:end_idx])
            source_full_data = helper.apply_transformation(
                template_data, batch_euler_poses)
        else:
            source_data = np.copy(templates[start_idx:end_idx, 0:512])
            source_data = helper.apply_transformation(source_data,
                                                      batch_euler_poses)
            source_full_data = helper.apply_transformation(
                template_data, batch_euler_poses)

        # Check for noise in source.
        if np.random.sample() < ADD_NOISE:
            source_data = helper.add_noise(source_data)

        if centroid_subtraction_switch:
            source_data = source_data - np.mean(
                source_data, axis=1, keepdims=True)
            template_data = template_data - np.mean(
                template_data, axis=1, keepdims=True)
            source_full_data = source_full_data - np.mean(
                source_full_data, axis=1, keepdims=True)

        # Choose only a limited number of points from the template and full source data.
        template_data = template_data[:, 0:NUM_POINT, :]
        source_full_data = source_full_data[:, 0:NUM_POINT, :]

        # To visualize the source and point clouds:
        if display_ptClouds:
            helper.display_clouds_data(source_data[0])
            helper.display_clouds_data(template_data[0])

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize identity matrices of size equal to batch_size

        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS - 1):
            # 4a
            # Feed the placeholders of Network with template data and source data.
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training
            }
            predicted_transformation = sess.run(
                [ops['predicted_transformation']],
                feed_dict=feed_dict)  # Ask the network to predict the pose.

            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            # Display Results after each iteration.
            if display_poses_in_itr:
                print(predicted_transformation[0, 0:3])
                print(predicted_transformation[0, 3:7] * (180 / np.pi))
            if display_ptClouds_in_itr:
                helper.display_clouds_data(source_data[0])

        # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
        feed_dict = {
            ops['source_pointclouds_pl']: source_data,
            ops['template_pointclouds_pl']: template_data,
            ops['full_source_pointclouds_pl']: source_full_data,
            ops['is_training_pl']: is_training
        }

        # Ask the network to predict transformation, calculate loss using distance between actual points.
        summary, step, loss_val, predicted_transformation = sess.run(
            [
                ops['merged'], ops['step'], ops['loss'],
                ops['predicted_transformation']
            ],
            feed_dict=feed_dict)

        eval_writer.add_summary(
            summary, step)  # Add all the summary to the tensorboard.

        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        final_pose = helper.find_final_pose_inv(
            TRANSFORMATIONS
        )  # Find the final pose (translation, orientation as Euler angles in radians) from the transformation matrix.

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

        # Display Loss Value.
        print("Batch: {}, Loss: {}\r".format(fn, loss_val), end='')

        # Add loss for each batch.
        loss_sum += loss_val
    print('\n')
    log_string(
        'Eval Mean loss: %f' %
        (loss_sum / num_batches))  # Store and display mean loss of epoch.
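# ---------------------------------------------------------------------------
# NOTE: `helper.find_final_pose_inv` is not defined in this file. The sketch
# below is a hypothetical re-implementation: invert each accumulated 4x4
# transform and read off the translation plus Euler angles in radians,
# matching the rad->deg conversion done at print time above. The ZYX
# factorization (R = Rz @ Ry @ Rx) is our assumption, not the repo's.
# ---------------------------------------------------------------------------
import numpy as np

def find_final_pose_inv_sketch(TRANSFORMATIONS):
    poses = np.zeros((TRANSFORMATIONS.shape[0], 6))
    for b, T in enumerate(TRANSFORMATIONS):
        T_inv = np.linalg.inv(T)                         # Undo the accumulated source->template motion.
        R, t = T_inv[0:3, 0:3], T_inv[0:3, 3]
        pitch = -np.arcsin(np.clip(R[2, 0], -1.0, 1.0))  # Standard ZYX Euler extraction.
        roll = np.arctan2(R[2, 1], R[2, 2])
        yaw = np.arctan2(R[1, 0], R[0, 0])
        poses[b] = [t[0], t[1], t[2], roll, pitch, yaw]
    return poses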