Example #1
    def report_chamfer_presampled(self):
        evaluation_ids = self.data_splitter.get_data(
            tf.estimator.ModeKeys.PREDICT)

        point_cloud_dataset = get_point_clouds(self.preprocessed_data_path,
                                               self.cat_id,
                                               self.n_ffd_resamples)
        point_cloud_dataset = point_cloud_dataset.subset(evaluation_ids)

        mesh_dataset = MeshReader(self.preprocessed_data_path).get_dataset(
            self.cat_id)
        mesh_dataset = mesh_dataset.subset(evaluation_ids)
        mesh_dataset.open()

        # Accumulators for predicted clouds and ground truth (points + meshes).
        deformed_predictions = []
        ground_truth_point_cloud = []
        mesh_ground_truth = []

        ffd_dataset = get_template_ffd(self.preprocessed_data_path,
                                       self.cat_id,
                                       edge_length_threshold=None)

        # Unpack per-template FFD data: IDs, deformation basis matrices (b)
        # and control points (p).
        template_ids, bs, ps = zip(*self.get_ffd_data(ffd_dataset))

        with tf.Graph().as_default():
            dataset = get_dataset(self.preprocessed_data_path,
                                  self.cat_id,
                                  self.view_angles,
                                  self.cloud_resamples,
                                  evaluation_ids,
                                  False,
                                  False,
                                  batch_size=len(evaluation_ids))
            features, targets = dataset.make_one_shot_iterator().get_next()
            predictions = self.build_estimator(
                features, targets, tf.estimator.ModeKeys.PREDICT).predictions
            saver = tf.train.Saver()
            with tf.train.MonitoredSession() as sess:
                saver.restore(sess, tf.train.latest_checkpoint(self.model_dir))
                data = sess.run(predictions)
                point_cloud_dataset.open()
                for evaluation_id, prediction_tensor in zip(
                        evaluation_ids, nested_generator(data)):
                    dp = prediction_tensor['deformed_points']
                    probs = prediction_tensor['probs']
                    # Pick the template with the highest predicted probability
                    # and deform it with its predicted control-point offsets.
                    i = np.argmax(probs)
                    predicted_vertices = np.matmul(bs[i], ps[i] + dp[i])
                    deformed_predictions.append(
                        sample_points(predicted_vertices,
                                      self.n_ffd_resamples))
                    ground_truth_point_cloud.append(
                        point_cloud_dataset[evaluation_id])
                    mesh_ground_truth.append(mesh_dataset[evaluation_id])
            chamfer_list, unnorm_chamfer = get_normalized_chamfer(
                mesh_ground_truth, ground_truth_point_cloud,
                deformed_predictions, self.n_ffd_resamples)
            print("The normalized chamfer for test set is " +
                  str(np.mean(chamfer_list)))
            print("The non normalized chamfer for test set is " +
                  str(np.mean(unnorm_chamfer)))
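
The get_normalized_chamfer helper used above is not shown. For reference, a minimal NumPy sketch of the symmetric Chamfer distance between two point clouds, the metric this example reports, might look as follows; the function name, the averaging, and the lack of normalization here are assumptions, not the project's actual implementation.

import numpy as np

def chamfer_distance(a, b):
    # Symmetric Chamfer distance between point clouds a (n, 3) and b (m, 3).
    # Pairwise squared distances, shape (n, m).
    d2 = np.sum((a[:, None, :] - b[None, :, :]) ** 2, axis=-1)
    # Average nearest-neighbour distance in both directions.
    return d2.min(axis=1).mean() + d2.min(axis=0).mean()

a = np.random.randn(1024, 3)
b = np.random.randn(1024, 3)
print(chamfer_distance(a, b))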
Example #2
    def get_dataset(self, mode):
        dataset_ids = self.data_splitter.get_data(mode)
        """For testing purpose
        dataset_ids = [ '10640377f4eb9ecdadceecd3bc8bde14', '115aa37af1a07d24a5a88312547ed863', '1a640c8dffc5d01b8fd30d65663cfd42',
                       '2091ab9f69c77273de2426af5ed9b6a', '2eca5fa283b317c7602717bb378973f1', '383ed236166980209e23d6b6aa619041',
                       '3a8478c9f2c1c043eb81825856d1297f', '425abc480a0b390d7cc46b39c0cc084b',
                       '42de9b896d23244fe6fbd395d87e5106', '446e4145b475eb245751d640a4e334']"""

        # The two positional booleans enable training-only dataset behaviour
        # in get_dataset; the evaluation examples pass False for both.
        dataset = get_dataset(self.preprocessed_data_path,
                              self.cat_id,
                              self.view_angles,
                              self.cloud_resamples,
                              dataset_ids,
                              mode == tf.estimator.ModeKeys.TRAIN,
                              mode == tf.estimator.ModeKeys.TRAIN,
                              batch_size=self.batch_size)
        return dataset
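
get_dataset(mode) has the shape of a tf.estimator input function. A hedged sketch of how it could be wired into training follows; the model_fn stub, the builder object, and the model directory are placeholders for illustration, not part of the examples above.

import tensorflow as tf

def model_fn(features, labels, mode):
    # Stub model_fn for illustration only; in the examples above the real
    # network is created by build_estimator.
    loss = tf.constant(0.0)
    train_op = tf.assign_add(tf.train.get_or_create_global_step(), 1)
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

builder = ...  # hypothetical: the object that defines get_dataset above

estimator = tf.estimator.Estimator(model_fn=model_fn,
                                   model_dir='/tmp/model')  # assumed path
estimator.train(
    input_fn=lambda: builder.get_dataset(tf.estimator.ModeKeys.TRAIN),
    max_steps=1000)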
Example #3
    def visualize_predicted_pointclouds(self):
        evaluation_ids = self.data_splitter.get_data(
            tf.estimator.ModeKeys.PREDICT)

        template_ids, bs, ps = zip(*self.get_ffd_data())

        with tf.Graph().as_default():
            dataset = get_dataset(self.preprocessed_data_path,
                                  self.cat_id,
                                  self.view_angles,
                                  self.cloud_resamples,
                                  evaluation_ids,
                                  False,
                                  False,
                                  batch_size=len(evaluation_ids))
            features, targets = dataset.make_one_shot_iterator().get_next()
            predictions = self.build_estimator(
                features, targets, tf.estimator.ModeKeys.PREDICT).predictions
            saver = tf.train.Saver()
            with tf.train.MonitoredSession() as sess:
                saver.restore(sess, tf.train.latest_checkpoint(self.model_dir))
                data = sess.run([features, predictions])
                for prediction_tensor in nested_generator(data):
                    # Min-max normalize the input image to [0, 1] for display.
                    image = prediction_tensor[0]['image']
                    image -= np.min(image)
                    image /= np.max(image)
                    plt.imshow(image)
                    plt.show()

                    dp = prediction_tensor[1]['deformed_points']
                    probs = prediction_tensor[1]['probs']
                    # Select the most probable template, as in Example #1.
                    i = np.argmax(probs)
                    predicted_vertices = np.matmul(bs[i], ps[i] + dp[i])

                    # Undeformed template point cloud (blue)...
                    visualize_point_cloud(np.matmul(bs[i], ps[i]),
                                          color=(0, 0, 1),
                                          scale_factor=0.01)
                    mlab.show()

                    # ...followed by the deformed prediction (red).
                    visualize_point_cloud(predicted_vertices,
                                          color=(1, 0, 0),
                                          scale_factor=0.01)
                    mlab.show()
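
Both prediction examples decode vertices as np.matmul(bs[i], ps[i] + dp[i]), i.e. free-form deformation: a fixed basis matrix b maps the displaced control points p + dp to mesh vertices. A minimal numeric sketch with assumed shapes:

import numpy as np

n_vertices, n_control_points = 1000, 64  # assumed sizes

# b: fixed deformation basis (e.g. trivariate Bernstein polynomials
# evaluated at the template vertices); p: template control points.
b = np.random.rand(n_vertices, n_control_points)
p = np.random.randn(n_control_points, 3)
dp = 0.1 * np.random.randn(n_control_points, 3)  # predicted offsets

template_vertices = np.matmul(b, p)       # undeformed template
deformed_vertices = np.matmul(b, p + dp)  # network-deformed output
assert deformed_vertices.shape == (n_vertices, 3)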