def load(self, path):
    """Restore this pipeline's steps and fitted scores from *path*.

    The step classes are read from ``steps.pickle``; each step is
    allocated without running ``__init__`` (its state comes entirely
    from its own ``load``), then told to load itself from a per-index
    sub-path. Finally the fitted scores are read and kept sorted.
    """
    classes = load_pickle(path, "steps.pickle")
    # Allocate every step first, bypassing __init__ — state is restored
    # by each step's load() call below.
    restored = []
    for cls in classes:
        restored.append(object.__new__(cls))
    self._steps = restored
    for idx, step in enumerate(restored):
        step.load(path + str(idx))
    # Scores are stored unsorted on disk; keep them sorted in memory.
    self._fitted_scores = np.sort(load_numpy_txt(path + "scores"))
def transform(self, x=None):
    """Score stored test predictions and write the metrics to JSON.

    Reads the test file at ``self._test_input_path`` (column 0 = true
    label, column 1 = prediction score, column 2 = predicted label),
    computes binary-classification metrics, and saves them as a dict
    to ``self._test_output_path``.

    Parameters
    ----------
    x : optional
        Must be ``None``; transforming explicit input is not supported.

    Raises
    ------
    NotImplementedError
        If *x* is not ``None``.
    """
    # BUG FIX: was ``if not x`` — that raises ValueError for multi-element
    # numpy arrays and silently treats falsy inputs (empty list, 0) as
    # "use the stored test set". Only ``None`` selects the stored path.
    if x is not None:
        # BUG FIX: was ``raise NotImplemented``, which itself raises
        # TypeError (NotImplemented is not an exception class).
        raise NotImplementedError

    data = load_numpy_txt(self._test_input_path)
    y_true = data[:, 0]
    y_score = data[:, 1]
    y_pred = data[:, 2]
    # Accuracy classification score.
    accuracy = sklearn.metrics.accuracy_score(y_true, y_pred)
    # Compute average precision (AP) from prediction scores.
    avg_precision = sklearn.metrics.average_precision_score(y_true, y_score)
    # Compute the F1 score, also known as balanced F-score or F-measure.
    f1 = sklearn.metrics.f1_score(y_true, y_pred)
    # fbeta = sklearn.metrics.fbeta_score(y_true, y_pred, beta=0.5)  # Compute the F-beta score
    # Compute the Matthews correlation coefficient (MCC) for binary classes.
    mcc = sklearn.metrics.matthews_corrcoef(y_true, y_pred)
    precision = sklearn.metrics.precision_score(y_true, y_pred)  # Compute the precision
    recall = sklearn.metrics.recall_score(y_true, y_pred)  # Compute the recall
    # Compute Area Under the Curve (AUC) from prediction scores.
    auc = sklearn.metrics.roc_auc_score(y_true, y_score)
    res = {
        'accuracy': accuracy,
        'avg_precision': avg_precision,
        'f1': f1,
        'mcc': mcc,
        'precision': precision,
        'recall': recall,
        'auc': auc,
    }
    save_json(res, self._test_output_path)