def test_print(self):
    """Smoke test: the metric-reporting helpers run to completion on small inputs.

    Output is printed, not returned, so this only checks that nothing raises;
    the expected confusion matrices are noted in comments for manual inspection.
    """
    labels = np.array([0, 1, 2, 0])
    predictions = labels
    # Perfect multiclass prediction; expected confusion matrix:
    #   [[2, 0, 0],
    #    [0, 1, 0],
    #    [0, 0, 1]]
    metrics.report_multiclass_metrics(labels, predictions)

    predictions = [[0, 0, 1], [1, 1, 0], [0, 1, 1]]
    labels = [[0, 1, 1], [1, 1, 0], [0, 1, 1]]
    # Expected per-label 2x2 confusion matrices (labels vs. predictions):
    #   label 0: [[2 0]
    #             [0 1]]
    #   label 1: [[0 0]
    #             [1 2]]
    #   label 2: [[1 0]
    #             [0 2]]
    metrics.report_multilabel_confusion_matrix(labels, predictions)

    # Comparing labels against themselves: label 1 is positive in every row,
    # so its matrix collapses to a single cell.
    #   label 0: [[2 0]
    #             [0 1]]
    #   label 1: [[3]]
    #   label 2: [[1 0]
    #             [0 2]]
    metrics.report_multilabel_confusion_matrix(labels, labels)
def eval(self, feed_dict):
    """Run one evaluation pass and print loss plus multiclass metrics.

    Args:
        feed_dict: feed dict used to (re)initialize the dataset iterator.
    """
    print("========= Evaluation =========")
    # Rewind the input pipeline so evaluation starts from the beginning.
    self.tf_session.run(self.dataset_initializer, feed_dict=feed_dict)
    fetches = [
        self._loss_op,
        self._confusion_matrix,
        self._class_scores,
        self._predictions_class_winners,
        self._labels_winners,
    ]
    # The confusion matrix and class scores are fetched but only the loss and
    # the per-example winners are consumed here.
    loss, _cm, _scores, pred_winners, label_winners = self.tf_session.run(fetches)
    print(f'Loss: {loss:.2f}')
    metrics.report_multiclass_metrics(label_winners, pred_winners)
    print("========= Evaluation Complete =========\n\n")
def train(self, feed_dict):
    """Run the training loop, periodically printing loss and multiclass metrics.

    Args:
        feed_dict: feed dict used to (re)initialize the dataset iterator and,
            when summaries are enabled, to evaluate the summary op.

    Side effects: prints progress roughly 20 times over the run and, when
    ``self._write_summary`` is set, writes TensorBoard summaries each step.
    """
    print("========= Training =========")
    # Rewind the input pipeline before training starts.
    _ = self.tf_session.run(self.dataset_initializer, feed_dict=feed_dict)
    # Report ~20 times over the run. Guard with max(1, ...): the original
    # `int(self._max_training_steps / 20)` is 0 for fewer than 20 steps,
    # which made `step % interval` raise ZeroDivisionError.
    report_interval = max(1, self._max_training_steps // 20)
    for step in range(self._max_training_steps):
        _, loss_value, confusion_matrix, class_scores_values, \
            predictions_class_winners_values, labels_winners_values = \
            self.tf_session.run(
                [self._train_op, self._loss_op, self._confusion_matrix,
                 self._class_scores, self._predictions_class_winners,
                 self._labels_winners])
        if self._write_summary:
            # Only evaluate the summary op when it will actually be written;
            # previously it ran every step regardless, wasting a session.run.
            summary_str = self.tf_session.run(self.summary, feed_dict=feed_dict)
            self.summary_writer.add_summary(summary_str, step)
            self.summary_writer.flush()
        if step % report_interval == 0:
            print('\n-----')
            print(f'Step {step}')
            print(f'Loss: {loss_value:.2f}')
            metrics.report_multiclass_metrics(
                labels_winners_values, predictions_class_winners_values)
    print("========= Training Complete =========\n\n")
def test_print(self):
    """Smoke test: report_multiclass_metrics prints without raising.

    Feeds a perfect prediction (predictions identical to labels); the
    expected confusion matrix is diagonal: [[2, 0, 0], [0, 1, 0], [0, 0, 1]].
    """
    labels = np.array([0, 1, 2, 0])
    metrics.report_multiclass_metrics(labels, labels)
def _report_metrics(self, labels, predictions):
    """Delegate metric reporting to the shared metrics helper.

    Args:
        labels: ground-truth class labels.
        predictions: predicted class labels.

    Returns:
        Whatever ``metrics.report_multiclass_metrics`` returns.
    """
    return metrics.report_multiclass_metrics(labels, predictions)