def test_one_case(self, template_data, source_data):
		# template_data: 				Input Template Data for network
		# source_data:					Input Source Data for network

		display_poses_in_itr = False
		display_ptClouds_in_itr = False
		template = np.copy(template_data)
		source = np.copy(source_data)
		template_check = np.copy(template_data)

		TRANSFORMATIONS = np.identity(4)		# Initialize identity transformation matrix.
		TRANSFORMATIONS = np.matlib.repmat(TRANSFORMATIONS,BATCH_SIZE,1).reshape(BATCH_SIZE,4,4)
		TRANSFORMATIONS_check = np.copy(TRANSFORMATIONS)

		start = time.time()
		# helper.display_three_clouds(template_data[0],source_data[0],source_data[0],"Iteration: 0")
		for loop_idx in range(self.MAX_LOOPS-1):
			# Feed the placeholders of Network19 with template data and source data.
			feed_dict = {self.ops19['source_pointclouds_pl']: source,
						 self.ops19['template_pointclouds_pl']: template,
						 self.ops19['is_training_pl']: self.is_training}
			predicted_transformation = self.sess.run([self.ops19['predicted_transformation']], feed_dict=feed_dict) 		# Ask the network to predict the pose.

			# Apply the transformation on the template data and multiply it to transformation matrix obtained in previous iteration.
			TRANSFORMATIONS, template = helper.transformation_quat2mat(predicted_transformation,TRANSFORMATIONS,template)

			if np.sum(np.abs(template[:,0:100,:] - template_check[:,0:100,:])) < self.ftol:
			# if np.sum(np.dot(np.linalg.inv(TRANSFORMATIONS_check), TRANSFORMATIONS)) < ftol:
				break
			else:
				# TRANSFORMATIONS_check = np.copy(TRANSFORMATIONS)
				template_check = np.copy(template)

			# Display Results after each iteration.
			if display_poses_in_itr:
				print(predicted_transformation[0][0,0:3])		# sess.run returns a list; index the first element.
				print(predicted_transformation[0][0,3:7]*(180/np.pi))
			if display_ptClouds_in_itr:
				helper.display_clouds_data(template[0])
				# transformed_source_data = np.dot(np.linalg.inv(TRANSFORMATIONS[0])[0:3,0:3], source[0].T).T + np.linalg.inv(TRANSFORMATIONS[0])[0:3,3]
				# helper.display_three_clouds(template_data[0], source[0], transformed_source_data, "Iteration: "+str(loop_idx+1))

		# Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
		feed_dict = {self.ops_L['source_pointclouds_pl']: source,
					 self.ops_L['template_pointclouds_pl']: template,
					 self.ops_L['is_training_pl']: self.is_training}

		# Ask the network to predict transformation, calculate loss using distance between actual points.
		step, predicted_transformation = self.sess.run([self.ops_L['step'], self.ops_L['predicted_transformation']], feed_dict=feed_dict)

		end = time.time()
		loss_val = 0

		# Apply the final transformation on the template data and multiply it with the transformation matrix obtained from N-Iterations.
		TRANSFORMATIONS, template = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, template)
		final_pose = helper.find_final_pose(TRANSFORMATIONS)
		transformed_source_data = np.dot(np.linalg.inv(TRANSFORMATIONS[0])[0:3,0:3], source[0].T).T + np.linalg.inv(TRANSFORMATIONS[0])[0:3,3]

		return final_pose, TRANSFORMATIONS, loss_val, template, transformed_source_data, end-start, (loop_idx+1)
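The helper routines used above are defined elsewhere in the repository. As a rough guide only, here is a minimal sketch of what helper.transformation_quat2mat and helper.find_final_pose might do, assuming predicted_transformation carries a (BATCH_SIZE, 7) array laid out as [t_x, t_y, t_z, q_w, q_x, q_y, q_z]; the implementations below are assumptions, not the repository's actual code.

import numpy as np
import transforms3d.quaternions as t3d_q
import transforms3d.euler as t3d_e

def transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, point_clouds):
	# Sketch (assumed): compose the per-iteration pose into the accumulated
	# 4x4 matrices and apply the same rigid transform to the point clouds.
	poses = np.asarray(predicted_transformation).reshape(-1, 7)
	for b in range(poses.shape[0]):
		T = np.eye(4)
		T[0:3, 0:3] = t3d_q.quat2mat(poses[b, 3:7])		# quaternion -> rotation matrix
		T[0:3, 3] = poses[b, 0:3]						# translation
		TRANSFORMATIONS[b] = np.dot(T, TRANSFORMATIONS[b])
		point_clouds[b] = np.dot(T[0:3, 0:3], point_clouds[b].T).T + T[0:3, 3]
	return TRANSFORMATIONS, point_clouds

def find_final_pose(TRANSFORMATIONS):
	# Sketch (assumed): read translation and euler angles (radians here; the
	# callers above convert with 180/pi when printing) out of each matrix.
	final_pose = np.zeros((TRANSFORMATIONS.shape[0], 6))
	for b in range(TRANSFORMATIONS.shape[0]):
		final_pose[b, 0:3] = TRANSFORMATIONS[b][0:3, 3]
		ez, ey, ex = t3d_e.mat2euler(TRANSFORMATIONS[b][0:3, 0:3], 'szyx')
		final_pose[b, 3:6] = [ex, ey, ez]
	return final_pose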
Example No. 2
def eval_network(sess, ops, templates, poses, pairs):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.
    # pairs:		Indices of template/source pairs (unused in this function).

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    loss_sum = 0  # Accumulated loss over the epoch (unused in this function).
    num_batches = int(poses.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.
    print('Number of batches to be executed: {}'.format(num_batches))

    # Store time taken, number of iterations, translation error and rotation error for registration.
    TIME, ITR, Trans_Err, Rot_Err = [], [], [], []
    idxs_5_5, idxs_10_1, idxs_20_2 = [], [], []

    if FLAGS.use_noise_data:
        print(FLAGS.data_dict)
        templates, sources = helper.read_noise_data(FLAGS.data_dict)
        print(templates.shape, sources.shape)

    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        if FLAGS.use_noise_data:
            template_data = np.copy(templates[fn, :, :]).reshape(
                1, -1, 3)  # Copy, since template_data is modified during registration.
            source_data = np.copy(sources[fn, :, :]).reshape(1, -1, 3)
            batch_euler_poses = poses[
                start_idx:end_idx]  # Extract poses for batch training.
        else:
            # template_idx = pairs[fn,1]
            template_data = np.copy(templates[0, :, :]).reshape(
                1, -1, 3)  # Copy, since template_data is modified during registration.

            batch_euler_poses = poses[
                start_idx:end_idx]  # Extract poses for batch training.
            source_data = helper.apply_transformation(
                template_data, batch_euler_poses
            )  # Apply the poses on the templates to get source data.

        template_data = template_data[:, 0:NUM_POINT, :]
        source_data = source_data[:, 0:NUM_POINT, :]

        # Just to visualize the data.
        TEMPLATE_DATA = np.copy(
            template_data)  # Store the initial template to visualize results.
        SOURCE_DATA = np.copy(
            source_data)  # Store the initial source to visualize results.

        # Subtract the Centroids from the Point Clouds.
        if centroid_subtraction_switch:
            source_data = source_data - np.mean(
                source_data, axis=1, keepdims=True)
            template_data = template_data - np.mean(
                template_data, axis=1, keepdims=True)

        # To visualize the source and point clouds:
        if display_ptClouds:
            helper.display_clouds_data(source_data[0])
            helper.display_clouds_data(template_data[0])

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize identity matrices of size equal to batch_size.

        # previous_pose = np.array([0,0,0,1,0,0,0])
        previous_T = np.eye(4)

        start = time.time()  # Log start time.
        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS):
            for network_itr in range(7):
                # Feed the placeholders of Network19 with template data and source data.
                feed_dict = {
                    ops['source_pointclouds_pl']: source_data,
                    ops['template_pointclouds_pl']: template_data,
                    ops['is_training_pl']: is_training
                }
                predicted_transformation = sess.run(
                    [ops['predicted_transformation']], feed_dict=feed_dict
                )  # Ask the network to predict the pose.

                # Apply the transformation on the source data and multiply it to transformation matrix obtained in previous iteration.
                TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                    predicted_transformation, TRANSFORMATIONS, source_data)

                # Display Results after each iteration.
                if display_poses_in_itr:
                    print(predicted_transformation[0][0, 0:3])  # sess.run returns a list; index the first element.
                    print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
                if display_ptClouds_in_itr:
                    helper.display_clouds_data(template_data[0])

            # Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training
            }

            # Ask the network to predict transformation, calculate loss using distance between actual points.
            predicted_transformation = sess.run(
                [ops['predicted_transformation']], feed_dict=feed_dict)

            # Apply the final transformation on the source data and multiply it with the transformation matrix obtained from N-Iterations.
            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            if check_convergenceT(previous_T, TRANSFORMATIONS[0]):
                break
            else:
                previous_T = np.copy(TRANSFORMATIONS[0])
        end = time.time()  # Log end time.

        final_pose = helper.find_final_pose_inv(
            TRANSFORMATIONS
        )  # Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.
        final_pose[0,
                   0:3] = final_pose[0, 0:3] + np.mean(SOURCE_DATA, axis=1)[0]

        translation_error, rotational_error = find_errors(
            batch_euler_poses[0], final_pose[0])

        TIME.append(end - start)
        ITR.append(loop_idx + 1)
        Trans_Err.append(translation_error)
        Rot_Err.append(rotational_error)

        if rotational_error < 20 and translation_error < 0.2:
            if rotational_error < 10 and translation_error < 0.1:
                if rotational_error < 5 and translation_error < 0.05:
                    idxs_5_5.append(fn)
                idxs_10_1.append(fn)
            idxs_20_2.append(fn)

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

        # Display Loss Value.
        # helper.display_three_clouds(TEMPLATE_DATA[0],SOURCE_DATA[0],template_data[0],"")
        print("Batch: {} & time: {}, iteration: {}".format(
            fn, end - start, loop_idx + 1))

    log = {
        'TIME': TIME,
        'ITR': ITR,
        'Trans_Err': Trans_Err,
        'Rot_Err': Rot_Err,
        'idxs_5_5': idxs_5_5,
        'idxs_10_1': idxs_10_1,
        'idxs_20_2': idxs_20_2,
        'num_batches': num_batches
    }

    helper.log_test_results(FLAGS.log_dir, FLAGS.filename, log)
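check_convergenceT and find_errors are also external helpers. A minimal sketch of plausible implementations, assuming convergence means the relative transform between consecutive estimates is close to identity, and that both poses passed to find_errors are [t_x, t_y, t_z, e_x, e_y, e_z] with angles in radians (the print statements above convert both with 180/pi). Note that a later example on this page unpacks check_convergenceT into a (flag, error) pair, so the helper's signature evidently varies between versions.

import numpy as np
import transforms3d.euler as t3d_e

def check_convergenceT(previous_T, current_T, ftol=1e-7):
    # Sketch (assumed): the relative transform between consecutive
    # estimates should be approximately the identity at convergence.
    delta = np.dot(np.linalg.inv(previous_T), current_T)
    return np.sum(np.abs(delta - np.eye(4))) < ftol

def find_errors(gt_pose, final_pose):
    # Sketch (assumed units and pose layout).
    gt = np.asarray(gt_pose, dtype=float)
    pred = np.asarray(final_pose, dtype=float)
    translation_error = np.linalg.norm(gt[0:3] - pred[0:3])
    R_gt = t3d_e.euler2mat(gt[5], gt[4], gt[3], 'szyx')
    R_pred = t3d_e.euler2mat(pred[5], pred[4], pred[3], 'szyx')
    dR = np.dot(R_gt.T, R_pred)  # residual rotation
    rotational_error = np.degrees(np.arccos(np.clip((np.trace(dR) - 1) / 2, -1.0, 1.0)))
    return translation_error, rotational_error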
Example No. 3
def eval_one_epoch(sess, ops, eval_writer, templates, poses):
	# Arguments:
	# sess: 		Tensorflow session to handle tensors.
	# ops:			Dictionary for tensors of Network
	# templates:	Training Point Cloud data.
	# poses: 		Training pose data.

	is_training = False
	display_ptClouds = False
	display_poses = False
	display_poses_in_itr = False
	display_ptClouds_in_itr = False

	#templates = helper.shuffle_templates(templates)
	#poses = helper.shuffle_poses(poses)

	loss_sum = 0											# Accumulated loss over the epoch.
	#poses = poses[0:4000,:]
	num_batches = int(poses.shape[0]/BATCH_SIZE) 				# Number of batches in an epoch.
	#num_batches=2
	
	for fn in range(num_batches):
		#shuffled_poses = helper.shuffle_poses(poses)

		start_idx = fn*BATCH_SIZE 			# Start index of poses.
		end_idx = (fn+1)*BATCH_SIZE 		# End index of poses.
		
		#template_data = np.copy(templates[start_idx:end_idx])
		template_data = np.copy(templates[0,:,:]).reshape(1,-1,3)
		template_data = np.tile(template_data, (BATCH_SIZE, 1, 1))

		batch_euler_poses = poses[start_idx:end_idx,:]			# Extract poses for batch training.
		source_data = helper.apply_transformation(template_data, batch_euler_poses)		# Apply the poses on the templates to get source data.

		if centroid_subtraction_switch:
			source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
			template_data = template_data - np.mean(template_data, axis=1, keepdims=True)

		if FLAGS.use_partial_data:
			complete_source_data = np.copy(source_data)
			source_data = helper.find_partial_data(complete_source_data)
			template_data = helper.find_partial_data(template_data)

		# Choose random points from the point clouds for training.
		if np.random.random_sample()<0.0:		# Disabled here: probability 0.0.
			source_data = helper.select_random_points(source_data, NUM_POINT)						# Source data may then have different points than the template.
		else:
			source_data = source_data[:,0:NUM_POINT,:]
		if np.random.random_sample()<ADD_NOISE:
			source_data = helper.add_noise(source_data)	

		# Only choose a limited number of points from the source and template data.
		source_data = source_data[:,0:NUM_POINT,:]
		template_data = template_data[:,0:NUM_POINT,:]

		template_voxel = helper.voxelization(template_data, size=32)

		# To visualize the source and point clouds:
		if display_ptClouds:
			helper.display_clouds_data(source_data[0])
			helper.display_clouds_data(template_data[0])

		TRANSFORMATIONS = np.identity(4)				# Initialize identity transformation matrix.
		TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS,BATCH_SIZE,1).reshape(BATCH_SIZE,4,4)		# Initialize identity matrices of size equal to batch_size.

		# Iterations for pose refinement.
		for loop_idx in range(MAX_LOOPS-1):
			source_voxel = helper.voxelization(source_data, size=32)
			# 4a
			# Feed the placeholders of Network with template data and source data.
			feed_dict = {ops['source_pointclouds_pl']: source_voxel,
						 ops['template_pointclouds_pl']: template_voxel,
						 ops['is_training_pl']: is_training}
			predicted_transformation = sess.run([ops['predicted_transformation']], feed_dict=feed_dict)		# Ask the network to predict the pose.

			# 4b,4c
			# Apply the transformation on the source data and multiply it to the transformation matrix obtained in the previous iteration.
			if FLAGS.use_partial_data:
				TRANSFORMATIONS, complete_source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, complete_source_data)
				source_data = helper.find_partial_data(complete_source_data)
			else:
				TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)

			# Display Results after each iteration.
			if display_poses_in_itr:
				print(predicted_transformation[0][0,0:3])		# sess.run returns a list; index the first element.
				print(predicted_transformation[0][0,3:7]*(180/np.pi))
			if display_ptClouds_in_itr:
				helper.display_clouds_data(source_data[0])

		source_voxel = helper.voxelization(source_data, size=32)
		# Feed the placeholders of Network with source data and template data obtained from N-Iterations.
		feed_dict = {ops['source_pointclouds_pl']: source_voxel,
					 ops['template_pointclouds_pl']: template_voxel,
					 ops['transformation_pl']: TRANSFORMATIONS,
					 ops['gt_transformation_pl']: helper.pose2mat_inv(batch_euler_poses),
					 ops['is_training_pl']: is_training}

		# Ask the network to predict transformation, calculate loss using distance between actual points.
		summary, step, loss_val, predicted_transformation = sess.run([ops['merged'], ops['step'], ops['loss'], ops['predicted_transformation']], feed_dict=feed_dict)

		eval_writer.add_summary(summary, step)			# Add all the summary to the tensorboard.

		# Apply the final transformation on the source data and multiply it with the transformation matrix obtained from N-Iterations.
		if FLAGS.use_partial_data:
			TRANSFORMATIONS, complete_source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, complete_source_data)
			source_data = helper.find_partial_data(complete_source_data)
		else:
			TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)

		final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)		# Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.

		# Display the ground truth pose and predicted pose for first Point Cloud in batch 
		if display_poses:
			print('Ground Truth Position: {}'.format(batch_euler_poses[0,0:3].tolist()))
			print('Predicted Position: {}'.format(final_pose[0,0:3].tolist()))
			print('Ground Truth Orientation: {}'.format((batch_euler_poses[0,3:6]*(180/np.pi)).tolist()))
			print('Predicted Orientation: {}'.format((final_pose[0,3:6]*(180/np.pi)).tolist()))

		# Display Loss Value.
		print("Batch: {}, Loss: {}\r".format(fn, loss_val),end='')

		# Add loss for each batch.
		loss_sum += loss_val
	print('\n')
	log_string('Eval Mean loss: %f' % (loss_sum/num_batches))		# Store and display mean loss of epoch.
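This variant feeds voxel occupancy grids instead of raw points. helper.voxelization is not shown on this page; a minimal sketch, assuming it normalizes each cloud into a size^3 grid and marks occupied cells (the exact normalization and output shape are assumptions):

import numpy as np

def voxelization(point_clouds, size=32):
	# Sketch (assumed): scale each cloud into [0, size) and set occupied voxels to 1.
	B = point_clouds.shape[0]
	voxels = np.zeros((B, size, size, size), dtype=np.float32)
	for b in range(B):
		pc = point_clouds[b]
		mins = pc.min(axis=0)
		spans = pc.max(axis=0) - mins + 1e-8
		idx = ((pc - mins) / spans * (size - 1)).astype(int)
		voxels[b, idx[:, 0], idx[:, 1], idx[:, 2]] = 1.0
	return voxels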
def test_one_epoch(sess, ops_L, templates, shuffled_poses, saver, model_path):
    # Arguments:
    # sess: 			Tensorflow session to handle tensors.
    # ops_L:			Dictionary for tensors of Network_L
    # templates:		Training Point Cloud data (reloaded from 'templates' below).
    # shuffled_poses:	Training pose data.
    # saver: 			To restore the weights.
    # model_path: 		Path of log directory.

    saver.restore(sess, model_path)  # Restore the weights of trained network.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False
    swap_case = False

    templates = helper.process_templates('templates')
    template_data = np.zeros((BATCH_SIZE, MAX_NUM_POINT,
                              3))  # Extract Templates for batch training.
    for i in range(BATCH_SIZE):
        template_data[i, :, :] = np.copy(templates[1, :, :])
    batch_euler_poses = shuffled_poses[0].reshape(
        (1, 6))  # Extract poses for batch training.

    # Self defined test case.
    batch_euler_poses[0] = [
        0.5, 0.0, 0.2, 50 * (np.pi / 180), 0 * (np.pi / 180),
        10 * (np.pi / 180)
    ]
    source_data = helper.apply_transformation(
        template_data, batch_euler_poses
    )  # Apply the poses on the templates to get source data.

    # Only choose a limited number of points from the source and template data.
    template_data = template_data[:, 0:NUM_POINT, :]
    source_data = source_data[:, 0:NUM_POINT, :]

    if swap_case:
        source_data, template_data = template_data, source_data  # Swap the template and source.
        transformation_template2source = helper.transformation(
            batch_euler_poses)
        transformation_source2template = np.linalg.inv(
            transformation_template2source[0])
        [euler_z, euler_y,
         euler_x] = t3d.mat2euler(transformation_source2template[0:3, 0:3],
                                  'szyx')
        trans_x = transformation_source2template[0, 3]
        trans_y = transformation_source2template[1, 3]
        trans_z = transformation_source2template[2, 3]
        pose_source2template = [
            trans_x, trans_y, trans_z, euler_x * (180 / np.pi),
            euler_y * (180 / np.pi), euler_z * (180 / np.pi)
        ]
        batch_euler_poses[0] = pose_source2template

    TEMPLATE_DATA = np.copy(
        template_data)  # Store the initial template to visualize results.
    SOURCE_DATA = np.copy(
        source_data)  # Store the initial source to visualize results.

    # To visualize the source and point clouds:
    if display_ptClouds:
        helper.display_clouds_data(source_data[0])
        helper.display_clouds_data(template_data[0])

    # Subtract the Centroids from the Point Clouds.
    if centroid_subtraction_switch:
        source_data, template_data, centroid_translation_pose = helper.centroid_subtraction(
            source_data, template_data)

    TRANSFORMATIONS = np.identity(
        4)  # Initialize identity transformation matrix.
    TRANSFORMATIONS = np.matlib.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
        BATCH_SIZE, 4,
        4)  # Initialize identity matrices of size equal to batch_size.

    # Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
    feed_dict = {
        ops_L['source_pointclouds_pl']: source_data,
        ops_L['template_pointclouds_pl']: template_data,
        ops_L['is_training_pl']: is_training
    }

    # Ask the network to predict transformation, calculate loss using distance between actual points.
    import time
    start = time.time()
    step, loss_val, predicted_transformation = sess.run(
        [ops_L['step'], ops_L['loss'], ops_L['predicted_transformation']],
        feed_dict=feed_dict)  # Also fetch the loss so it can be reported below; assumes ops_L exposes a 'loss' tensor like the other networks.
    end = time.time()
    print(end - start)

    # Apply the final transformation on the template data and multiply it with the transformation matrix obtained from N-Iterations.
    TRANSFORMATIONS, template_data = helper.transformation_quat2mat(
        predicted_transformation, TRANSFORMATIONS, template_data)

    if centroid_subtraction_switch:  # If the centroid was subtracted, apply the centroid translation back to the point clouds.
        TRANSFORMATIONS, template_data = helper.transformation_quat2mat(
            centroid_translation_pose, TRANSFORMATIONS, template_data)

    final_pose = helper.find_final_pose(TRANSFORMATIONS)

    if not swap_case:
        title = "Actual T (Red->Green): "
        for i in range(len(batch_euler_poses[0])):
            if i > 2:
                title += str(round(batch_euler_poses[0][i] * (180 / np.pi), 2))
            else:
                title += str(batch_euler_poses[0][i])
            title += ', '
        title += "\nPredicted T (Red->Blue): "
        for i in range(len(final_pose[0])):
            if i > 2:
                title += str(round(final_pose[0, i] * (180 / np.pi), 3))
            else:
                title += str(round(final_pose[0, i], 3))
            title += ', '
    else:
        title = "Predicted T (Red->Blue): "
        for i in range(len(final_pose[0])):
            if i > 2:
                title += str(round(final_pose[0, i] * (180 / np.pi), 3))
            else:
                title += str(round(final_pose[0, i], 3))
            title += ', '

    # Display the ground truth pose and predicted pose for first Point Cloud in batch
    if display_poses:
        print('Ground Truth Position: {}'.format(
            batch_euler_poses[0, 0:3].tolist()))
        print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
        print('Ground Truth Orientation: {}'.format(
            (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
        print('Predicted Orientation: {}'.format(
            (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

    helper.display_three_clouds(TEMPLATE_DATA[0], SOURCE_DATA[0],
                                template_data[0], title)

    print("Loss: {}".format(loss_val))
Example No. 5
def eval_one_epoch(sess, ops, eval_writer, poses):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # eval_writer:	Tensorboard summary writer.
    # poses: 		Evaluation pose data (clouds are loaded from partial_data_eval.h5 below).

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    loss_sum = 0  # Accumulated loss over the epoch.
    templates, sources, poses = helper.read_partial_data(
        'train_data', 'partial_data_eval.h5')
    num_batches = int(templates.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.

    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        template_data = np.copy(templates[start_idx:end_idx])
        batch_euler_poses = np.copy(poses[start_idx:end_idx])

        # Check for partial source.
        if np.random.sample() < ADD_PARTIAL:
            source_data = np.copy(sources[start_idx:end_idx])
            source_full_data = helper.apply_transformation(
                template_data, batch_euler_poses)
        else:
            source_data = np.copy(templates[start_idx:end_idx, 0:512])
            source_data = helper.apply_transformation(source_data,
                                                      batch_euler_poses)
            source_full_data = helper.apply_transformation(
                template_data, batch_euler_poses)

        # Check for noise in source.
        if np.random.sample() < ADD_NOISE:
            source_data = helper.add_noise(source_data)

        if centroid_subtraction_switch:
            source_data = source_data - np.mean(
                source_data, axis=1, keepdims=True)
            template_data = template_data - np.mean(
                template_data, axis=1, keepdims=True)
            source_full_data = source_full_data - np.mean(
                source_full_data, axis=1, keepdims=True)

        # Only choose a limited number of points from the template and full source data.
        template_data = template_data[:, 0:NUM_POINT, :]
        source_full_data = source_full_data[:, 0:NUM_POINT, :]

        # To visualize the source and point clouds:
        if display_ptClouds:
            helper.display_clouds_data(source_data[0])
            helper.display_clouds_data(template_data[0])

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize identity matrices of size equal to batch_size.

        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS - 1):
            # 4a
            # Feed the placeholders of Network with template data and source data.
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training
            }
            predicted_transformation = sess.run(
                [ops['predicted_transformation']],
                feed_dict=feed_dict)  # Ask the network to predict the pose.

            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            # Display Results after each iteration.
            if display_poses_in_itr:
                print(predicted_transformation[0][0, 0:3])  # sess.run returns a list; index the first element.
                print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
            if display_ptClouds_in_itr:
                helper.display_clouds_data(source_data[0])

        # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
        feed_dict = {
            ops['source_pointclouds_pl']: source_data,
            ops['template_pointclouds_pl']: template_data,
            ops['full_source_pointclouds_pl']: source_full_data,
            ops['is_training_pl']: is_training
        }

        # Ask the network to predict transformation, calculate loss using distance between actual points.
        summary, step, loss_val, predicted_transformation = sess.run(
            [
                ops['merged'], ops['step'], ops['loss'],
                ops['predicted_transformation']
            ],
            feed_dict=feed_dict)

        eval_writer.add_summary(
            summary, step)  # Add all the summary to the tensorboard.

        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        final_pose = helper.find_final_pose_inv(
            TRANSFORMATIONS
        )  # Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

        # Display Loss Value.
        print("Batch: {}, Loss: {}\r".format(fn, loss_val), end='')

        # Add loss for each batch.
        loss_sum += loss_val
    print('\n')
    log_string(
        'Eval Mean loss: %f' %
        (loss_sum / num_batches))  # Store and display mean loss of epoch.
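helper.add_noise is applied to the source with probability ADD_NOISE. A minimal sketch, assuming clipped Gaussian jitter, a common point-cloud augmentation; sigma and clip are assumed values:

import numpy as np

def add_noise(source_data, sigma=0.01, clip=0.05):
    # Sketch (assumed): perturb every point with clipped Gaussian noise.
    noise = np.clip(sigma * np.random.randn(*source_data.shape), -clip, clip)
    return source_data + noise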
def eval_network(sess, ops, templates, poses):
	# Arguments:
	# sess: 		Tensorflow session to handle tensors.
	# ops:			Dictionary for tensors of Network
	# templates:	Training Point Cloud data.
	# poses: 		Training pose data.

	is_training = False
	display_ptClouds = False
	display_poses = False
	display_poses_in_itr = False
	display_ptClouds_in_itr = False

	loss_sum = 0											# Accumulated loss over the epoch (unused in this function).
	# print(int(poses.shape[0]/BATCH_SIZE))
	# print(int(len(templates)/BATCH_SIZE))
	# poses = poses[:1000]
	num_batches = int(poses.shape[0]/BATCH_SIZE) # Number of batches in an epoch.
	indx = np.arange(0,len(templates))
	while len(indx)<poses.shape[0]:
		indx = np.concatenate([indx, indx], 0)
	# num_batches = int(len(templates)/BATCH_SIZE)
	# exit()
	print('Number of batches to be executed: {}'.format(num_batches))

	# Store time taken, number of iterations, translation error and rotation error for registration.
	TIME, ITR, Trans_Err, Rot_Err = [], [], [], []
	idxs_25_5, idxs_5_5, idxs_10_1, idxs_20_2 = [], [], [], []

	if FLAGS.use_noise_data:
		print(FLAGS.data_dict)
		templates, sources = helper.read_noise_data(FLAGS.data_dict)
		print(templates.shape, sources.shape)

	TE = np.zeros([MAX_LOOPS+1,num_batches])	# Translation error per iteration.
	RE = np.zeros([MAX_LOOPS+1,num_batches])	# Rotation error per iteration.
	CE = np.zeros([MAX_LOOPS+1,num_batches])	# Convergence error per iteration.
	Failures = []
	ques = np.zeros([MAX_LOOPS,7,num_batches])	# Predicted 7-dof transformation per iteration.
	for fn in range(num_batches):
		start_idx = fn*BATCH_SIZE 			# Start index of poses.
		end_idx = (fn+1)*BATCH_SIZE 		# End index of poses.
		if SPARSE_SAMPLING>0:
			template_data = np.copy(templates[indx[fn], :, :]).reshape(1, -1, 3)
			batch_euler_poses = poses[start_idx:end_idx]  # Extract poses for batch training.
			template_data,source_data = helper.split_template_source(template_data,batch_euler_poses,NUM_POINT,centroid_subtraction_switch=False,ADD_NOISE=FLAGS.use_noise_data,S_RAND_POINTS=S_RAND_POINTS,SPARSE=SPARSE_SAMPLING)
		else:
			if FLAGS.use_noise_data:
				template_data = np.copy(templates[fn,:,:]).reshape(1,-1,3)				# Copy, since template_data is modified during registration.
				source_data = np.copy(sources[fn,:,:]).reshape(1,-1,3)
				batch_euler_poses = poses[start_idx:end_idx]			# Extract poses for batch training.
			else:
				# template_idx = pairs[fn,1]
				template_data = np.copy(templates[indx[fn],:,:]).reshape(1,-1,3)				# Copy, since template_data is modified during registration.

				batch_euler_poses = poses[start_idx:end_idx]			# Extract poses for batch training.
				# print(batch_euler_poses)
				if template_random_pose:
					template_data = helper.apply_transformation(template_data, batch_euler_poses / 2)
					template_data = template_data - np.mean(template_data, axis=1, keepdims=True)
				source_data = helper.apply_transformation(template_data, batch_euler_poses)		# Apply the poses on the templates to get source data.

			# SOURCE_DATA = np.copy(source_data[:,0:NUM_POINT,:]) #movement is calculated from initial pose

			if np.random.random_sample()<S_RAND_POINTS:
				template_data = helper.select_random_points(template_data, NUM_POINT)
				source_data = helper.select_random_points(source_data, NUM_POINT)						# With probability S_RAND_POINTS, source and template are sampled with different points.
				template_data = template_data[:,0:NUM_POINT,:]
			else:
				source_data = source_data[:,0:NUM_POINT,:]
				template_data = template_data[:,0:NUM_POINT,:]


		# Just to visualize the data.
		TEMPLATE_DATA = np.copy(template_data)				# Store the initial template to visualize results.
		SOURCE_DATA = np.copy(source_data)					# Store the initial source to visualize results.

		# Subtract the Centroids from the Point Clouds.
		if centroid_subtraction_switch:
			# print(np.mean(source_data, axis=1, keepdims=True))
			T = np.mean(source_data, axis=1)
			print(T)
			print(np.mean(template_data, axis=1))
			print(np.mean(source_data, axis=1))
			source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
			# print(np.mean(template_data, axis=1, keepdims=True))
			# template_data = template_data - np.mean(template_data, axis=1, keepdims=True)
		else:
			T=[[0,0,0]]

		if FLAGS.add_occlusions>0.0:
			source_data = helper.add_occlusions(source_data,FLAGS.add_occlusions)

		# exit()
		# To visualize the source and point clouds:
		if display_ptClouds:
			helper.display_clouds_data(source_data[0])
			helper.display_clouds_data(template_data[0])

		TRANSFORMATIONS = np.identity(4)				# Initialize identity transformation matrix.
		TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS,BATCH_SIZE,1).reshape(BATCH_SIZE,4,4)		# Initialize identity matrices of size equal to batch_size.

		# previous_pose = np.array([0,0,0,1,0,0,0])
		previous_T = np.eye(4)

		start = time.time()												# Log start time.
		# Iterations for pose refinement.
		translation_error, rotational_error, final_pose = get_error(TRANSFORMATIONS, SOURCE_DATA,
														batch_euler_poses,T)
		TE[0, fn] = translation_error
		RE[0, fn] = rotational_error
		CE[0, fn] = 1
		print(fn)
		for loop_idx in range(MAX_LOOPS):
			# for network_itr in range(7):
			# 	# Feed the placeholders of Network19 with template data and source data.
			# 	feed_dict = {ops['source_pointclouds_pl']: source_data,
			# 				 ops['template_pointclouds_pl']: template_data,
			# 				 ops['is_training_pl']: is_training}
			# 	predicted_transformation = sess.run([ops['predicted_transformation']], feed_dict=feed_dict)		# Ask the network to predict the pose.
            #
			# 	# Apply the transformation on the source data and multiply it to transformation matrix obtained in previous iteration.
			# 	TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)
            #
			# 	# Display Results after each iteration.
			# 	if display_poses_in_itr:
			# 		print(predicted_transformation[0,0:3])
			# 		print(predicted_transformation[0,3:7]*(180/np.pi))
			# 	if display_ptClouds_in_itr:
			# 		helper.display_clouds_data(template_data[0])

			# Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
			feed_dict = {ops['source_pointclouds_pl']: source_data,
						 ops['template_pointclouds_pl']: template_data,
						 ops['is_training_pl']: is_training}

			# Ask the network to predict transformation, calculate loss using distance between actual points.
			predicted_transformation = sess.run([ops['predicted_transformation']], feed_dict=feed_dict)

			# print(predicted_transformation)
			# Apply the final transformation on the source data and multiply it with the transformation matrix obtained from N-Iterations.
			TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)
			translation_error, rotational_error, final_pose = get_error(TRANSFORMATIONS, SOURCE_DATA,
															batch_euler_poses,T)
			ck_con_T,convergence_error = check_convergenceT(previous_T, TRANSFORMATIONS[0])
			ques[loop_idx,:,fn] = np.squeeze(predicted_transformation)
			print(ques[loop_idx,:,fn])
			TE[loop_idx+1,fn] = translation_error
			RE[loop_idx+1,fn] = rotational_error
			CE[loop_idx+1,fn] = convergence_error

			if ck_con_T:
				# break
				print('converge iteration:',loop_idx)
			previous_T = np.copy(TRANSFORMATIONS[0])

		end = time.time()													# Log end time.

		# final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)		# Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.
		# final_pose[0,0:3] = final_pose[0,0:3] + np.mean(SOURCE_DATA, axis=1)[0]#\
		# 			 #- np.mean(TEMPLATE_DATA, axis=1)[0]
        #
		# translation_error, rotational_error = find_errors(batch_euler_poses[0], final_pose[0])
		translation_error, rotational_error, final_pose = get_error(TRANSFORMATIONS, SOURCE_DATA, batch_euler_poses,T)

		TIME.append(end-start)
		ITR.append(loop_idx+1)
		Trans_Err.append(translation_error)
		Rot_Err.append(rotational_error)

		if rotational_error<20 and translation_error<0.2:
			if rotational_error<10 and translation_error<0.1:
				if rotational_error<5 and translation_error<0.05:
					if rotational_error < 2.5:
						idxs_25_5.append(fn)
					idxs_5_5.append(fn)
				idxs_10_1.append(fn)
			idxs_20_2.append(fn)

		# if rotational_error>20:
		# 	Failures.append(fn)
		# 	helper.display_three_clouds(template_data[0],SOURCE_DATA[0],source_data[0], str(fn)+"_"+str(rotational_error))
		# 	print('added failure image')
		# else:
		# 	print(rotational_error)
		# Display the ground truth pose and predicted pose for first Point Cloud in batch 
		if display_poses:
			print('Ground Truth Position: {}'.format(batch_euler_poses[0,0:3].tolist()))
			print('Predicted Position: {}'.format(final_pose[0,0:3].tolist()))
			print('Ground Truth Orientation: {}'.format((batch_euler_poses[0,3:6]*(180/np.pi)).tolist()))
			print('Predicted Orientation: {}'.format((final_pose[0,3:6]*(180/np.pi)).tolist()))

		# Display Loss Value.
		# helper.display_three_clouds(TEMPLATE_DATA[0],SOURCE_DATA[0],template_data[0],"")
		print("Batch: {} & time: {}, iteration: {}".format(fn, end-start, loop_idx+1))

	plot_iter_graph(TE,FLAGS.log_dir,'translation error')
	plot_iter_graph(RE,FLAGS.log_dir,'rotation error')
	plot_iter_graph(CE,FLAGS.log_dir,'convergence error')

	log = {'TIME': TIME, 'ITR':ITR, 'Trans_Err': Trans_Err, 'Rot_Err': Rot_Err,'idxs_25_5':idxs_25_5, 'idxs_5_5': idxs_5_5, 'idxs_10_1': idxs_10_1, 'idxs_20_2': idxs_20_2, 'num_batches': num_batches}

	helper.log_test_results(FLAGS.log_dir, FLAGS.filename, log)
	hf = h5py.File(os.path.join(FLAGS.log_dir, 'log_data.h5'), 'w')
	hf.create_dataset('TE', data=TE)
	hf.create_dataset('RE', data=RE)
	hf.create_dataset('CE', data=CE)
	hf.close()
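get_error is not defined on this page. A sketch consistent with how it is called above, assuming it recovers the pose from the accumulated matrices, adds back the subtracted source centroid T, and compares against the ground truth using the find_final_pose_inv and find_errors helpers referenced elsewhere in these examples:

import numpy as np

def get_error(TRANSFORMATIONS, SOURCE_DATA, batch_euler_poses, T):
	# Sketch (assumed behavior); relies on the repository's helper module.
	final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)
	final_pose[0, 0:3] = final_pose[0, 0:3] + np.asarray(T)[0]	# undo centroid subtraction
	translation_error, rotational_error = find_errors(batch_euler_poses[0], final_pose[0])
	return translation_error, rotational_error, final_pose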
Example No. 7
def train_one_epoch(sess, ops, train_writer, templates, poses):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.
    print(datetime.now())

    is_training = True
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    poses = poses[0:5070, :]
    poses = helper.shuffle_poses(poses)  # Shuffle Poses.

    loss_sum = 0  # Accumulated loss over the epoch.
    num_batches = int(templates.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.

    # Training for each batch.
    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        template_data = np.copy(templates[start_idx:end_idx])

        batch_euler_poses = poses[start_idx:
                                  end_idx]  # Extract poses for batch training.
        if SPARSE_SAMPLING > 0:
            template_data, source_data = helper.split_template_source(
                template_data,
                batch_euler_poses,
                NUM_POINT,
                centroid_subtraction_switch,
                ADD_NOISE,
                S_RAND_POINTS,
                SPARSE=SPARSE_SAMPLING)
        else:
            if template_random_pose:
                template_data = helper.apply_transformation(
                    template_data, batch_euler_poses / 2)
                template_data = template_data - np.mean(
                    template_data, axis=1, keepdims=True)

            source_data = helper.apply_transformation(
                template_data, batch_euler_poses
            )  # Apply the poses on the templates to get source data.

            if centroid_subtraction_switch:
                source_data = source_data - np.mean(
                    source_data, axis=1, keepdims=True)
                # template_data = template_data - np.mean(template_data, axis=1, keepdims=True)

            # Choose random points from the point clouds for training.
            if np.random.random_sample() < S_RAND_POINTS:
                template_data = helper.select_random_points(
                    template_data, NUM_POINT)
                source_data = helper.select_random_points(
                    source_data, NUM_POINT
                )  # With probability S_RAND_POINTS, source and template are sampled with different points.
            else:
                source_data = source_data[:, 0:NUM_POINT, :]
            if np.random.random_sample() < ADD_NOISE:
                source_data = helper.add_noise(source_data)

            # Only choose a limited number of points from the source and template data.
            source_data = source_data[:, 0:NUM_POINT, :]
            template_data = template_data[:, 0:NUM_POINT, :]

            # To visualize the source and point clouds:
            if display_ptClouds:
                helper.display_clouds_data(source_data[0])
                helper.display_clouds_data(template_data[0])

        if FLAGS.add_occlusions > 0.0:
            source_data = helper.add_occlusions(source_data,
                                                FLAGS.add_occlusions)

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize identity matrices of size equal to batch_size.

        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS - 1):
            # 4a
            # Feed the placeholders of Network with template data and source data.
            if ADD_NOISE_MODEL:
                feed_dict = {
                    ops['source_pointclouds_pl']: source_data,
                    ops['template_pointclouds_pl']: template_data,
                    ops['is_training_pl']: is_training,
                    ops['is_training_pl_1']: False,
                    ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT]),
                    ops['add_noise']: np.zeros([BATCH_SIZE, NUM_POINT, 3])
                }
            else:
                feed_dict = {
                    ops['source_pointclouds_pl']: source_data,
                    ops['template_pointclouds_pl']: template_data,
                    ops['is_training_pl']: is_training,
                    ops['is_training_pl_1']: False,
                    ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT])
                }
            if bool(FLAGS.train_single):  # Train on every iteration, or only on the Nth (final) iteration.
                summary, step, _, loss_val, predicted_transformation = sess.run(
                    [
                        ops['merged'], ops['step'], ops['train_op'],
                        ops['loss'], ops['predicted_transformation']
                    ],
                    feed_dict=feed_dict)
            else:
                predicted_transformation = sess.run(
                    [ops['predicted_transformation']], feed_dict=feed_dict
                )  # Ask the network to predict the pose.

            # 4b,4c
            # Apply the transformation on the source data and multiply it to the transformation matrix obtained in the previous iteration.
            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            # Display Results after each iteration.
            if display_poses_in_itr:
                quat_pose = np.asarray(predicted_transformation).reshape(-1, 7)  # sess.run may return a list or an array here depending on the branch above.
                print(quat_pose[0, 0:3])
                print(quat_pose[0, 3:7] * (180 / np.pi))
            if display_ptClouds_in_itr:
                helper.display_clouds_data(source_data[0])

        # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
        if ADD_NOISE_MODEL:
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training,
                ops['is_training_pl_1']: False,
                ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT]),
                ops['add_noise']: np.zeros([BATCH_SIZE, NUM_POINT, 3])
            }
        else:
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training,
                ops['is_training_pl_1']: False,
                ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT])
            }

        # Ask the network to predict transformation, calculate loss using distance between actual points, calculate & apply gradients for Network and copy the weights to Network19.
        summary, step, _, loss_val, predicted_transformation = sess.run(
            [
                ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['predicted_transformation']
            ],
            feed_dict=feed_dict)
        train_writer.add_summary(
            summary, step)  # Add all the summary to the tensorboard.

        # Apply the final transformation on the source data and multiply it with the transformation matrix obtained from N-Iterations.
        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            final_pose = helper.find_final_pose_inv(
                TRANSFORMATIONS
            )  # Find the final pose (translation and euler-angle orientation); computed here because only the display block uses it.
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))
            # print(batch_euler_poses[0,0:3],batch_euler_poses[0,3:6]*(180/np.pi))
            # print(final_pose[0,0:3],final_pose[0,3:6]*(180/np.pi))

        # Display Loss Value.
        print("Batch: {} & Loss: {}\r".format(fn, loss_val), end='')

        # Add loss for each batch.
        loss_sum += loss_val
    print('\n')
    log_string(
        'Train Mean loss: %f\n' %
        (loss_sum / num_batches))  # Store and display mean loss of epoch.
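helper.select_random_points subsamples each cloud to NUM_POINT points. A minimal sketch, assuming uniform sampling without replacement:

import numpy as np

def select_random_points(point_clouds, num_points):
    # Sketch (assumed): draw num_points random points from each cloud.
    B, N, C = point_clouds.shape
    out = np.zeros((B, num_points, C), dtype=point_clouds.dtype)
    for b in range(B):
        idx = np.random.choice(N, num_points, replace=False)
        out[b] = point_clouds[b, idx, :]
    return out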
Example No. 8
def test_one_epoch(sess, ops, templates, poses, saver, model_path):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.
    # saver: 		To restore the weights.
    # model_path: 	Path of log directory.

    saver.restore(sess, model_path)  # Restore the weights of trained network.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False
    swap_case = False
    MAX_LOOPS = 4

    template_data = np.zeros((BATCH_SIZE, MAX_NUM_POINT,
                              3))  # Extract Templates for batch training.
    template_data[0] = np.copy(templates[FLAGS.template_idx, :, :])

    batch_euler_poses = poses[0].reshape(
        (1, 6))  # Extract poses for batch training.

    # Define test case.
    batch_euler_poses[0] = [
        0.4, 0.5, 0.1, 10 * (np.pi / 180), 20 * (np.pi / 180),
        20 * (np.pi / 180)
    ]
    source_data = helper.apply_transformation(
        template_data, batch_euler_poses
    )  # Apply the poses on the templates to get source data.

    # Choose random points from the point clouds for training.
    if np.random.random_sample() < 0:  # Disabled here: probability 0.
        source_data = helper.select_random_points(
            source_data, NUM_POINT
        )  # Source data may then have different points than the template.
    else:
        source_data = source_data[:, 0:NUM_POINT, :]
    # Add noise to source point cloud.
    if np.random.random_sample() < 1.0:  # Always true: noise is always added here.
        source_data = helper.add_noise(source_data)

    # Only choose a limited number of points from the source and template data.
    source_data = source_data[:, 0:NUM_POINT, :]
    template_data = template_data[:, 0:NUM_POINT, :]

    TEMPLATE_DATA = np.copy(
        template_data)  # Store the initial template to visualize results.
    SOURCE_DATA = np.copy(
        source_data)  # Store the initial source to visualize results.

    # Subtract the Centroids from the Point Clouds.
    if centroid_subtraction_switch:
        source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
        template_data = template_data - np.mean(
            template_data, axis=1, keepdims=True)

    # To visualize the source and point clouds:
    if display_ptClouds:
        helper.display_clouds_data(source_data[0])
        helper.display_clouds_data(template_data[0])

    TRANSFORMATIONS = np.identity(
        4)  # Initialize identity transformation matrix.
    TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
        BATCH_SIZE, 4,
        4)  # Initialize identity matrices of size equal to batch_size.

    # Store the transformed point clouds after each iteration.
    ITR = np.zeros((MAX_LOOPS, template_data.shape[0], template_data.shape[1],
                    template_data.shape[2]))

    # Iterations for pose refinement.
    for loop_idx in range(MAX_LOOPS - 1):
        # 4a
        # Feed the placeholders of Network with template data and source data.
        feed_dict = {
            ops['source_pointclouds_pl']: source_data,
            ops['template_pointclouds_pl']: template_data,
            ops['is_training_pl']: is_training
        }
        predicted_transformation = sess.run(
            [ops['predicted_transformation']],
            feed_dict=feed_dict)  # Ask the network to predict the pose.
        #print (predicted_transformation[0])

        # 4b,4c
        # Apply the transformation on the source data and multiply it to the transformation matrix obtained in the previous iteration.
        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        # Display Results after each iteration.
        if display_poses_in_itr:
            print(predicted_transformation[0][0, 0:3])  # sess.run returns a list; index the first element.
            print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
        if display_ptClouds_in_itr:
            helper.display_clouds_data(source_data[0])
        ITR[loop_idx, :, :, :] = source_data

    # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
    feed_dict = {
        ops['source_pointclouds_pl']: source_data,
        ops['template_pointclouds_pl']: template_data,
        ops['is_training_pl']: is_training
    }

    # Ask the network to predict transformation, calculate loss using distance between actual points.
    step, predicted_transformation = sess.run(
        [ops['step'], ops['predicted_transformation']], feed_dict=feed_dict)

    # Apply the final transformation on the source data and multiply it with the transformation matrix obtained from N-Iterations.
    TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
        predicted_transformation, TRANSFORMATIONS, source_data)

    final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)
    final_pose[0, 0:3] = final_pose[0, 0:3] + np.mean(SOURCE_DATA, axis=1)[0]

    title = "Actual T (Red->Green): "
    for i in range(len(batch_euler_poses[0])):
        if i > 2:
            title += str(round(batch_euler_poses[0][i] * (180 / np.pi), 2))
        else:
            title += str(batch_euler_poses[0][i])
        title += ', '
    title += "\nPredicted T (Red->Blue): "
    for i in range(len(final_pose[0])):
        if i > 2:
            title += str(round(final_pose[0, i] * (180 / np.pi), 3))
        else:
            title += str(round(final_pose[0, i], 3))
        title += ', '

    # Display the ground truth pose and predicted pose for first Point Cloud in batch
    if display_poses:
        print('Ground Truth Position: {}'.format(
            batch_euler_poses[0, 0:3].tolist()))
        print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
        print('Ground Truth Orientation: {}'.format(
            (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
        print('Predicted Orientation: {}'.format(
            (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

    helper.display_three_clouds(TEMPLATE_DATA[0], SOURCE_DATA[0],
                                source_data[0], title)
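Nearly every example above builds source clouds with helper.apply_transformation. A minimal sketch, assuming poses are [t_x, t_y, t_z, e_x, e_y, e_z] with angles in radians, an 'szyx' euler convention (the convention used with t3d.mat2euler earlier on this page), and matching batch dimensions:

import numpy as np
import transforms3d.euler as t3d_e

def apply_transformation(point_clouds, euler_poses):
    # Sketch (assumed): rotate then translate each cloud by its 6-dof pose.
    transformed = np.zeros_like(point_clouds)
    for b in range(point_clouds.shape[0]):
        t, angles = euler_poses[b, 0:3], euler_poses[b, 3:6]
        R = t3d_e.euler2mat(angles[2], angles[1], angles[0], 'szyx')  # assumed axis order
        transformed[b] = np.dot(R, point_clouds[b].T).T + t
    return transformed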