import opfython.math.general as general


def test_confusion_matrix():
    labels = [1, 1, 2, 2]
    preds = [1, 1, 2, 2]

    c_matrix = general.confusion_matrix(labels, preds)

    assert c_matrix.shape == (2, 2)
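# A hedged companion check (an illustrative addition, not from the original
# suite): one mislabelled sample should yield a single off-diagonal count.
# This assumes opfython's 1-indexed labels, with rows indexing true classes
# and columns indexing predictions.
def test_confusion_matrix_off_diagonal():
    labels = [1, 1, 2, 2]
    preds = [1, 2, 2, 2]

    c_matrix = general.confusion_matrix(labels, preds)

    # True class 1 was predicted as class 2 exactly once
    assert c_matrix[0, 1] == 1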
import pickle

import numpy as np
from sklearn.metrics import classification_report

import opfython.math.general as g
from opfython.models import SupervisedOPF

# If similarity should be used
if use_similarity:
    # Creates a SupervisedOPF with pre-computed distances
    opf = SupervisedOPF(pre_computed_distance=input_sim)

# If similarity should not be used
else:
    # Creates a SupervisedOPF without pre-computed distances
    opf = SupervisedOPF(distance='log_squared_euclidean')

# Fits training data into the classifier
opf.fit(X_train, Y_train, I_train)

# Predicts new data
preds = opf.predict(X_test, I_test)

# Calculates the confusion matrix
c_matrix = g.confusion_matrix(Y_test, preds)

# Calculates the classification report
report = classification_report(Y_test, preds, output_dict=True)

# Saves confusion matrix in a .npy file
np.save(f'outputs/{dataset}_{use_similarity}_{seed}_matrix', c_matrix)

# Opens file to further save
with open(f'outputs/{dataset}_{use_similarity}_{seed}_report.pkl', 'wb') as f:
    # Saves report to a .pkl file
    pickle.dump(report, f)
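# A minimal sketch of the context the script above assumes. All names and
# values here are illustrative (the original elides them), and the
# split_with_index helper is assumed from opfython.stream.splitter; it is
# used here because SupervisedOPF consumes sample indices (I_train / I_test)
# alongside features when distances are pre-computed.
import opfython.stream.loader as loader
import opfython.stream.parser as p
import opfython.stream.splitter as s

# Illustrative placeholders for the variables the script expects
dataset = 'boat'
seed = 0
use_similarity = False
input_sim = None  # path to a pre-computed distance file when use_similarity is True

# Loads a .txt dataset and parses it into features and labels
txt = loader.load_txt(f'data/{dataset}.txt')
X, Y = p.parse_loader(txt)

# Splits the data while keeping the original sample indices
X_train, X_test, Y_train, Y_test, I_train, I_test = s.split_with_index(
    X, Y, percentage=0.8, random_state=seed)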
import numpy as np

import opfython.math.general as g

# Defining array, labels and predictions
array = np.asarray([1.5, 2, 0.5, 1.25, 1.75, 3])
labels = [0, 0, 0, 1, 1, 1, 2]
preds = [0, 0, 1, 1, 0, 1, 2]

# Normalizing the array
norm_array = g.normalize(array)
print(norm_array)

# Calculating the confusion matrix
c_matrix = g.confusion_matrix(labels, preds)
print(c_matrix)

# Calculating OPF-like accuracy
opf_acc = g.opf_accuracy(labels, preds)
print(opf_acc)

# Calculating OPF-like accuracy per label
opf_acc_per_label = g.opf_accuracy_per_label(labels, preds)
print(opf_acc_per_label)

# Calculating purity measure
purity = g.purity(labels, preds)
print(purity)
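# A hedged aside (not part of the original example): plain accuracy on the
# same labels/preds, for contrast with the OPF-like accuracy above, which is
# built from per-class error rates rather than raw hit counts.
plain_acc = np.mean(np.asarray(labels) == np.asarray(preds))
print(plain_acc)  # 5 of 7 predictions match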