# Shared imports assumed by these snippets (the original excerpts also rely on
# module-level globals such as helper, FLAGS, BATCH_SIZE, NUM_POINT, MAX_LOOPS,
# ADD_NOISE, and centroid_subtraction_switch from the surrounding repository).
import time

import numpy as np
import numpy.matlib as npm


def get_error(TRANSFORMATIONS, SOURCE_DATA, batch_euler_poses, T):
	final_pose = helper.find_final_pose_inv(
		TRANSFORMATIONS)  # Find the final pose (translation, orientation as Euler angles) from the transformation matrix.
	final_pose[0, 0:3] = final_pose[0, 0:3] + T[0]  # Add the caller-supplied offset (e.g. the subtracted centroid) back to the position.

	translation_error, rotational_error = find_errors(batch_euler_poses[0], final_pose[0])
	return translation_error, rotational_error, final_pose
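
# find_errors is called above but not defined in this excerpt. A minimal
# sketch of what it could compute, assuming poses are laid out as
# [tx, ty, tz, rx, ry, rz] and both poses use the same angular unit; the
# real helper may instead compare full rotation matrices.
def find_errors_sketch(gt_pose, predicted_pose):
    # Translation error: Euclidean distance between the translation vectors.
    translation_error = np.linalg.norm(gt_pose[0:3] - predicted_pose[0:3])
    # Rotational error: mean absolute difference of the Euler angles.
    rotational_error = np.mean(np.abs(gt_pose[3:6] - predicted_pose[3:6]))
    return translation_error, rotational_error
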
def eval_network(sess, ops, templates, poses, pairs):
    # Arguments:
    # sess: 		TensorFlow session to handle tensors.
    # ops:			Dictionary of network tensors.
    # templates:	Template point cloud data.
    # poses: 		Ground-truth pose data.
    # pairs: 		Template/source index pairs (unused in this function).

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    num_batches = int(poses.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.
    print('Number of batches to be executed: {}'.format(num_batches))

    # Store time taken, no of iterations, translation error and rotation error for registration.
    TIME, ITR, Trans_Err, Rot_Err = [], [], [], []
    idxs_5_5, idxs_10_1, idxs_20_2 = [], [], []

    if FLAGS.use_noise_data:
        print(FLAGS.data_dict)
        templates, sources = helper.read_noise_data(FLAGS.data_dict)
        print(templates.shape, sources.shape)

    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        if FLAGS.use_noise_data:
            template_data = np.copy(templates[fn, :, :]).reshape(
                1, -1, 3)  # Copy so the original template is not modified.
            source_data = np.copy(sources[fn, :, :]).reshape(1, -1, 3)
            batch_euler_poses = poses[
                start_idx:end_idx]  # Extract poses for batch training.
        else:
            template_data = np.copy(templates[0, :, :]).reshape(
                1, -1, 3)  # Copy so the original template is not modified.

            batch_euler_poses = poses[
                start_idx:end_idx]  # Extract poses for batch training.
            source_data = helper.apply_transformation(
                template_data, batch_euler_poses
            )  # Apply the poses on the templates to get source data.

        template_data = template_data[:, 0:NUM_POINT, :]
        source_data = source_data[:, 0:NUM_POINT, :]

        # Just to visualize the data.
        TEMPLATE_DATA = np.copy(
            template_data)  # Store the initial template to visualize results.
        SOURCE_DATA = np.copy(
            source_data)  # Store the initial source to visualize results.

        # Subtract the Centroids from the Point Clouds.
        if centroid_subtraction_switch:
            source_data = source_data - np.mean(
                source_data, axis=1, keepdims=True)
            template_data = template_data - np.mean(
                template_data, axis=1, keepdims=True)

        # To visualize the source and template point clouds:
        if display_ptClouds:
            helper.display_clouds_data(source_data[0])
            helper.display_clouds_data(template_data[0])

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize identity matrices, one per batch element.

        previous_T = np.eye(4)  # Track the previous transform for the convergence check.

        start = time.time()  # Log start time.
        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS):
            for network_itr in range(7):
                # Feed the placeholders of Network19 with template data and source data.
                feed_dict = {
                    ops['source_pointclouds_pl']: source_data,
                    ops['template_pointclouds_pl']: template_data,
                    ops['is_training_pl']: is_training
                }
                predicted_transformation = sess.run(
                    [ops['predicted_transformation']], feed_dict=feed_dict
                )  # Ask the network to predict the pose.

                # Apply the transformation to the source data and compose it with the transformation matrix from the previous iteration.
                TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                    predicted_transformation, TRANSFORMATIONS, source_data)

                # Display Results after each iteration.
                if display_poses_in_itr:
                    print(predicted_transformation[0][0, 0:3])  # sess.run returns a list; index into it first.
                    print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
                if display_ptClouds_in_itr:
                    helper.display_clouds_data(source_data[0])  # The source cloud is what moves each iteration.

            # Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training
            }

            # Ask the network to predict transformation, calculate loss using distance between actual points.
            predicted_transformation = sess.run(
                [ops['predicted_transformation']], feed_dict=feed_dict)

            # Apply the final transformation to the source data and compose it with the transformation matrix from the N iterations.
            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            if check_convergenceT(previous_T, TRANSFORMATIONS[0]):
                break
            else:
                previous_T = np.copy(TRANSFORMATIONS[0])
        end = time.time()  # Log end time.

        final_pose = helper.find_final_pose_inv(
            TRANSFORMATIONS
        )  # Find the final pose (translation, orientation as Euler angles) from the transformation matrix.
        final_pose[0, 0:3] = final_pose[0, 0:3] + np.mean(
            SOURCE_DATA, axis=1)[0]  # Add the subtracted source centroid back.

        translation_error, rotational_error = find_errors(
            batch_euler_poses[0], final_pose[0])

        TIME.append(end - start)
        ITR.append(loop_idx + 1)
        Trans_Err.append(translation_error)
        Rot_Err.append(rotational_error)

        if rotational_error < 20 and translation_error < 0.2:
            if rotational_error < 10 and translation_error < 0.1:
                if rotational_error < 5 and translation_error < 0.05:
                    idxs_5_5.append(fn)
                idxs_10_1.append(fn)
            idxs_20_2.append(fn)
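
        # The nested thresholds above are cumulative: a batch counted in
        # idxs_5_5 also appears in idxs_10_1 and idxs_20_2.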

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

        # Display batch timing info.
        print("Batch: {} & time: {}, iteration: {}".format(
            fn, end - start, loop_idx + 1))

    log = {
        'TIME': TIME,
        'ITR': ITR,
        'Trans_Err': Trans_Err,
        'Rot_Err': Rot_Err,
        'idxs_5_5': idxs_5_5,
        'idxs_10_1': idxs_10_1,
        'idxs_20_2': idxs_20_2,
        'num_batches': num_batches
    }

    helper.log_test_results(FLAGS.log_dir, FLAGS.filename, log)
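
# check_convergenceT is called in eval_network above but not defined in this
# excerpt. A plausible sketch, assuming convergence means two consecutive 4x4
# transforms differ by less than a small tolerance (the threshold value here
# is a guess, not taken from the source):
def check_convergenceT_sketch(previous_T, current_T, tol=1e-4):
    # Converged when the element-wise change between iterations is tiny.
    return np.max(np.abs(previous_T - current_T)) < tol
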
def eval_one_epoch(sess, ops, eval_writer, templates, poses):
	# Arguments:
	# sess: 		TensorFlow session to handle tensors.
	# ops:			Dictionary of network tensors.
	# templates:	Template point cloud data.
	# poses: 		Ground-truth pose data.

	is_training = False
	display_ptClouds = False
	display_poses = False
	display_poses_in_itr = False
	display_ptClouds_in_itr = False

	loss_sum = 0  # Accumulated loss over the epoch.
	num_batches = int(poses.shape[0] / BATCH_SIZE)  # Number of batches in an epoch.

	for fn in range(num_batches):

		start_idx = fn*BATCH_SIZE 			# Start index of poses.
		end_idx = (fn+1)*BATCH_SIZE 		# End index of poses.
		
		template_data = np.copy(templates[0,:,:]).reshape(1,-1,3)
		template_data = np.tile(template_data, (BATCH_SIZE, 1, 1))

		batch_euler_poses = poses[start_idx:end_idx,:]			# Extract poses for this batch.
		source_data = helper.apply_transformation(template_data, batch_euler_poses)		# Apply the poses to the templates to get source data.

		if centroid_subtraction_switch:
			source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
			template_data = template_data - np.mean(template_data, axis=1, keepdims=True)

		if FLAGS.use_partial_data:
			complete_source_data = np.copy(source_data)
			source_data = helper.find_partial_data(complete_source_data)
			template_data = helper.find_partial_data(template_data)

		# Choose random points from the point clouds.
		if np.random.random_sample()<0.0:		# Threshold of 0.0: this branch is disabled and never runs.
			source_data = helper.select_random_points(source_data, NUM_POINT)		# Would give the source different points than the template.
		else:
			source_data = source_data[:,0:NUM_POINT,:]
		if np.random.random_sample()<ADD_NOISE:
			source_data = helper.add_noise(source_data)	

		# Only choose a limited number of points from the source and template data.
		source_data = source_data[:,0:NUM_POINT,:]
		template_data = template_data[:,0:NUM_POINT,:]

		template_voxel = helper.voxelization(template_data, size=32)

		# To visualize the source and template point clouds:
		if display_ptClouds:
			helper.display_clouds_data(source_data[0])
			helper.display_clouds_data(template_data[0])

		TRANSFORMATIONS = np.identity(4)				# Initialize identity transformation matrix.
		TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS,BATCH_SIZE,1).reshape(BATCH_SIZE,4,4)		# Initialize identity matrices, one per batch element.

		# Iterations for pose refinement.
		for loop_idx in range(MAX_LOOPS-1):
			source_voxel = helper.voxelization(source_data, size=32)
			# 4a
			# Feed the placeholders of Network with template data and source data.
			feed_dict = {ops['source_pointclouds_pl']: source_voxel,
						 ops['template_pointclouds_pl']: template_voxel,
						 ops['is_training_pl']: is_training}
			predicted_transformation = sess.run([ops['predicted_transformation']], feed_dict=feed_dict)		# Ask the network to predict the pose.

			# 4b,4c
			# Apply the transformation to the source data and compose it with the transformation matrix from the previous iteration.
			if FLAGS.use_partial_data:
				TRANSFORMATIONS, complete_source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, complete_source_data)
				source_data = helper.find_partial_data(complete_source_data)
			else:
				TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)

			# Display Results after each iteration.
			if display_poses_in_itr:
				print(predicted_transformation[0][0,0:3])	# sess.run returns a list; index into it first.
				print(predicted_transformation[0][0,3:7]*(180/np.pi))
			if display_ptClouds_in_itr:
				helper.display_clouds_data(source_data[0])

		source_voxel = helper.voxelization(source_data, size=32)
		# Feed the placeholders of Network with source data and template data obtained from N-Iterations.
		feed_dict = {ops['source_pointclouds_pl']: source_voxel,
					 ops['template_pointclouds_pl']: template_voxel,
					 ops['transformation_pl']: TRANSFORMATIONS,
					 ops['gt_transformation_pl']: helper.pose2mat_inv(batch_euler_poses),
					 ops['is_training_pl']: is_training}

		# Ask the network to predict transformation, calculate loss using distance between actual points.
		summary, step, loss_val, predicted_transformation = sess.run([ops['merged'], ops['step'], ops['loss'], ops['predicted_transformation']], feed_dict=feed_dict)

		eval_writer.add_summary(summary, step)			# Add all the summary to the tensorboard.

		# Apply the final transformation to the source data and compose it with the transformation matrix from the N iterations.
		if FLAGS.use_partial_data:
			TRANSFORMATIONS, complete_source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, complete_source_data)
			source_data = helper.find_partial_data(complete_source_data)
		else:
			TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)

		final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)		# Find the final pose (translation, orientation as Euler angles) from the transformation matrix.

		# Display the ground truth pose and predicted pose for first Point Cloud in batch 
		if display_poses:
			print('Ground Truth Position: {}'.format(batch_euler_poses[0,0:3].tolist()))
			print('Predicted Position: {}'.format(final_pose[0,0:3].tolist()))
			print('Ground Truth Orientation: {}'.format((batch_euler_poses[0,3:6]*(180/np.pi)).tolist()))
			print('Predicted Orientation: {}'.format((final_pose[0,3:6]*(180/np.pi)).tolist()))

		# Display Loss Value.
		print("Batch: {}, Loss: {}\r".format(fn, loss_val),end='')

		# Add loss for each batch.
		loss_sum += loss_val
	print('\n')
	log_string('Eval Mean loss: %f' % (loss_sum/num_batches))		# Store and display mean loss of epoch.
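
# helper.voxelization is used above (size=32) but not defined in this excerpt.
# A rough sketch of the idea, assuming it converts each point cloud in the
# batch into a binary occupancy grid; the normalization details are
# assumptions, not the repository's actual code:
def voxelization_sketch(point_clouds, size=32):
    voxels = np.zeros((point_clouds.shape[0], size, size, size), dtype=np.float32)
    for b, cloud in enumerate(point_clouds):
        # Scale the cloud into [0, size) along each axis.
        mins = cloud.min(axis=0)
        extents = cloud.max(axis=0) - mins + 1e-8
        idx = ((cloud - mins) / extents * (size - 1)).astype(int)
        voxels[b, idx[:, 0], idx[:, 1], idx[:, 2]] = 1.0  # mark occupied cells
    return voxels
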
def eval_one_epoch(sess, ops, eval_writer, poses):
    # Arguments:
    # sess: 		TensorFlow session to handle tensors.
    # ops:			Dictionary of network tensors.
    # eval_writer:	TensorBoard summary writer.
    # poses: 		Ground-truth pose data (overwritten by the poses loaded from the partial-data file below).

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    loss_sum = 0  # Accumulated loss over the epoch.
    templates, sources, poses = helper.read_partial_data(
        'train_data', 'partial_data_eval.h5')
    num_batches = int(templates.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.

    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        template_data = np.copy(templates[start_idx:end_idx])
        batch_euler_poses = np.copy(poses[start_idx:end_idx])

        # Check for partial source.
        if np.random.sample() < ADD_PARTIAL:
            source_data = np.copy(sources[start_idx:end_idx])
            source_full_data = helper.apply_transformation(
                template_data, batch_euler_poses)
        else:
            source_data = np.copy(templates[start_idx:end_idx, 0:512])
            source_data = helper.apply_transformation(source_data,
                                                      batch_euler_poses)
            source_full_data = helper.apply_transformation(
                template_data, batch_euler_poses)

        # Check for noise in source.
        if np.random.sample() < ADD_NOISE:
            source_data = helper.add_noise(source_data)

        if centroid_subtraction_switch:
            source_data = source_data - np.mean(
                source_data, axis=1, keepdims=True)
            template_data = template_data - np.mean(
                template_data, axis=1, keepdims=True)
            source_full_data = source_full_data - np.mean(
                source_full_data, axis=1, keepdims=True)

        # Only choose a limited number of points from the source and template data.
        template_data = template_data[:, 0:NUM_POINT, :]
        source_full_data = source_full_data[:, 0:NUM_POINT, :]

        # To visualize the source and template point clouds:
        if display_ptClouds:
            helper.display_clouds_data(source_data[0])
            helper.display_clouds_data(template_data[0])

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize identity matrices, one per batch element.

        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS - 1):
            # 4a
            # Feed the placeholders of Network with template data and source data.
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training
            }
            predicted_transformation = sess.run(
                [ops['predicted_transformation']],
                feed_dict=feed_dict)  # Ask the network to predict the pose.

            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            # Display Results after each iteration.
            if display_poses_in_itr:
                print(predicted_transformation[0][0, 0:3])  # sess.run returns a list; index into it first.
                print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
            if display_ptClouds_in_itr:
                helper.display_clouds_data(source_data[0])

        # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
        feed_dict = {
            ops['source_pointclouds_pl']: source_data,
            ops['template_pointclouds_pl']: template_data,
            ops['full_source_pointclouds_pl']: source_full_data,
            ops['is_training_pl']: is_training
        }

        # Ask the network to predict transformation, calculate loss using distance between actual points.
        summary, step, loss_val, predicted_transformation = sess.run(
            [
                ops['merged'], ops['step'], ops['loss'],
                ops['predicted_transformation']
            ],
            feed_dict=feed_dict)

        eval_writer.add_summary(
            summary, step)  # Add all the summary to the tensorboard.

        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        final_pose = helper.find_final_pose_inv(
            TRANSFORMATIONS
        )  # Find the final pose (translation, orientation as Euler angles) from the transformation matrix.

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

        # Display Loss Value.
        print("Batch: {}, Loss: {}\r".format(fn, loss_val), end='')

        # Add loss for each batch.
        loss_sum += loss_val
    print('\n')
    log_string(
        'Eval Mean loss: %f' %
        (loss_sum / num_batches))  # Store and display mean loss of epoch.
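
# helper.transformation_quat2mat is used throughout these snippets but not
# defined here. A hedged sketch of the composition step it appears to perform:
# turn each predicted [tx, ty, tz, quaternion] vector into a 4x4 matrix,
# left-multiply it into the accumulated transform, and move the source cloud.
# The quaternion layout and the quat_to_mat helper below are assumptions, not
# the repository's actual code.
def transformation_quat2mat_sketch(predicted, TRANSFORMATIONS, source_data):
    predicted = np.asarray(predicted).reshape(-1, 7)  # sess.run wraps it in a list
    for b in range(predicted.shape[0]):
        t, q = predicted[b, 0:3], predicted[b, 3:7]
        T = np.eye(4)
        T[0:3, 0:3] = quat_to_mat(q)  # hypothetical quaternion-to-rotation helper
        T[0:3, 3] = t
        TRANSFORMATIONS[b] = np.matmul(T, TRANSFORMATIONS[b])  # compose the poses
        # Apply the incremental transform to the source points (row vectors).
        source_data[b] = np.matmul(source_data[b], T[0:3, 0:3].T) + t
    return TRANSFORMATIONS, source_data
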
def test_one_epoch(sess, ops, templates, poses, saver, model_path):
    # Arguments:
    # sess: 		TensorFlow session to handle tensors.
    # ops:			Dictionary of network tensors.
    # templates:	Template point cloud data.
    # poses: 		Ground-truth pose data.
    # saver: 		To restore the weights of the trained network.
    # model_path: 	Path of the log directory.

    saver.restore(sess, model_path)  # Restore the weights of trained network.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False
    swap_case = False
    MAX_LOOPS = 4

    template_data = np.zeros((BATCH_SIZE, MAX_NUM_POINT,
                              3))  # Extract Templates for batch training.
    template_data[0] = np.copy(templates[FLAGS.template_idx, :, :])

    batch_euler_poses = poses[0].reshape(
        (1, 6))  # Extract poses for batch training.

    # Define test case.
    batch_euler_poses[0] = [
        0.4, 0.5, 0.1, 10 * (np.pi / 180), 20 * (np.pi / 180),
        20 * (np.pi / 180)
    ]
    source_data = helper.apply_transformation(
        template_data, batch_euler_poses
    )  # Apply the poses on the templates to get source data.

    # Choose random points from the point clouds.
    if np.random.random_sample() < 0:  # Threshold of 0: this branch is disabled and never runs.
        source_data = helper.select_random_points(
            source_data, NUM_POINT
        )  # Would give the source different points than the template.
    else:
        source_data = source_data[:, 0:NUM_POINT, :]
    # Add noise to the source point cloud (threshold of 1.0: noise is always added in this test).
    if np.random.random_sample() < 1.0:
        source_data = helper.add_noise(source_data)

    # Only choose a limited number of points from the source and template data.
    source_data = source_data[:, 0:NUM_POINT, :]
    template_data = template_data[:, 0:NUM_POINT, :]

    TEMPLATE_DATA = np.copy(
        template_data)  # Store the initial template to visualize results.
    SOURCE_DATA = np.copy(
        source_data)  # Store the initial source to visualize results.

    # Subtract the Centroids from the Point Clouds.
    if centroid_subtraction_switch:
        source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
        template_data = template_data - np.mean(
            template_data, axis=1, keepdims=True)

    # To visualize the source and template point clouds:
    if display_ptClouds:
        helper.display_clouds_data(source_data[0])
        helper.display_clouds_data(template_data[0])

    TRANSFORMATIONS = np.identity(
        4)  # Initialize identity transformation matrix.
    TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
        BATCH_SIZE, 4,
        4)  # Initialize identity matrices, one per batch element.

    # Store the transformed point clouds after each iteration.
    ITR = np.zeros((MAX_LOOPS, template_data.shape[0], template_data.shape[1],
                    template_data.shape[2]))

    # Iterations for pose refinement.
    for loop_idx in range(MAX_LOOPS - 1):
        # 4a
        # Feed the placeholders of Network with template data and source data.
        feed_dict = {
            ops['source_pointclouds_pl']: source_data,
            ops['template_pointclouds_pl']: template_data,
            ops['is_training_pl']: is_training
        }
        predicted_transformation = sess.run(
            [ops['predicted_transformation']],
            feed_dict=feed_dict)  # Ask the network to predict the pose.

        # 4b,4c
        # Apply the transformation to the source data and compose it with the transformation matrix from the previous iteration.
        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        # Display Results after each iteration.
        if display_poses_in_itr:
            print(predicted_transformation[0][0, 0:3])  # sess.run returns a list; index into it first.
            print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
        if display_ptClouds_in_itr:
            helper.display_clouds_data(source_data[0])
        ITR[loop_idx, :, :, :] = source_data

    # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
    feed_dict = {
        ops['source_pointclouds_pl']: source_data,
        ops['template_pointclouds_pl']: template_data,
        ops['is_training_pl']: is_training
    }

    # Ask the network to predict transformation, calculate loss using distance between actual points.
    step, predicted_transformation = sess.run(
        [ops['step'], ops['predicted_transformation']], feed_dict=feed_dict)

    # Apply the final transformation to the source data and compose it with the transformation matrix from the N iterations.
    TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
        predicted_transformation, TRANSFORMATIONS, source_data)

    final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)
    final_pose[0, 0:3] = final_pose[0, 0:3] + np.mean(SOURCE_DATA, axis=1)[0]

    title = "Actual T (Red->Green): "
    for i in range(len(batch_euler_poses[0])):
        if i > 2:
            title += str(round(batch_euler_poses[0][i] * (180 / np.pi), 2))
        else:
            title += str(batch_euler_poses[0][i])
        title += ', '
    title += "\nPredicted T (Red->Blue): "
    for i in range(len(final_pose[0])):
        if i > 2:
            title += str(round(final_pose[0, i] * (180 / np.pi), 3))
        else:
            title += str(round(final_pose[0, i], 3))
        title += ', '

    # Display the ground truth pose and predicted pose for first Point Cloud in batch
    if display_poses:
        print('Ground Truth Position: {}'.format(
            batch_euler_poses[0, 0:3].tolist()))
        print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
        print('Ground Truth Orientation: {}'.format(
            (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
        print('Predicted Orientation: {}'.format(
            (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

    helper.display_three_clouds(TEMPLATE_DATA[0], SOURCE_DATA[0],
                                source_data[0], title)
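
# helper.apply_transformation generates source clouds from templates and
# [tx, ty, tz, rx, ry, rz] Euler poses. A minimal sketch of the idea; the
# rotation order (XYZ) and the rotate-then-translate convention are
# assumptions, not taken from the repository:
def apply_transformation_sketch(templates, euler_poses):
    transformed = np.copy(templates)
    for b, pose in enumerate(euler_poses):
        rx, ry, rz = pose[3:6]
        # Build a rotation matrix from the Euler angles (radians assumed).
        Rx = np.array([[1, 0, 0],
                       [0, np.cos(rx), -np.sin(rx)],
                       [0, np.sin(rx), np.cos(rx)]])
        Ry = np.array([[np.cos(ry), 0, np.sin(ry)],
                       [0, 1, 0],
                       [-np.sin(ry), 0, np.cos(ry)]])
        Rz = np.array([[np.cos(rz), -np.sin(rz), 0],
                       [np.sin(rz), np.cos(rz), 0],
                       [0, 0, 1]])
        R = Rz @ Ry @ Rx
        transformed[b] = templates[b] @ R.T + pose[0:3]  # rotate, then translate
    return transformed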