def test_model_margine_double(directory, path, version, resize, batch_size,
                              margin1, margin2):
    """Evaluate a saved double-margin siamese model on the money test set.

    Loads the model from ``path``, runs ``test_siamese_margine_double`` with
    the two decision margins, prints accuracy/precision/recall/mF1 plus the
    raw and row-normalized confusion matrix, and persists every metric into
    ``<directory>modelTrained.json`` under a key derived from ``path``.

    Args:
        directory: folder holding ``modelTrained.json`` (must already end with
            a path separator — the filename is concatenated directly).
        path: file path of the serialized model (consumed by ``torch.load``).
        version: version key inside the JSON report.
        resize: image resize passed to ``DataSetPairCreate``.
        batch_size: batch size of the test ``DataLoader``.
        margin1: lower margin of the double-margin decision rule.
        margin2: upper margin of the double-margin decision rule.
    """
    try:
        # Report key = path text after the second "\" with the last 4 chars
        # (a ".pth"-style extension) stripped off.
        index = path.find("\\")
        index = path.find("\\", index + 1)
        key1 = path[index + 1:len(path) - 4]
        print("key1", key1)
    except Exception:  # was a bare except; fall back to a generic key
        key1 = "PerformanceTest"
    print("version", version)
    print("key1", key1)
    print("Margine_1 ", margin1)
    print("Margine_2 ", margin2)

    siamese_test = torch.load(path)
    controlFileCSV()
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()
    pair_test = dataSetPair.pair_money_test
    pair_money_test_loader = DataLoader(pair_test, batch_size, num_workers=0)

    # ------------------------ evaluate on the test set ------------------
    print("Testing on Test set....")
    timeTest, pair_prediction, pair_label = test_siamese_margine_double(
        siamese_test, pair_money_test_loader, margin1, margin2)

    # Label convention (from the comparisons below): 0 = similar, 1 = dissimilar.
    numSimilPredette = np.sum(pair_prediction == 0)
    print("Num Simili predette", numSimilPredette)
    numDissimilPredette = np.sum(pair_prediction == 1)
    print("Num Dissimil predette", numDissimilPredette)
    numSimilReali = np.sum(pair_label == 0)
    print("Num Simili Reali", numSimilReali)
    numDissimilReali = np.sum(pair_label == 1)
    print("Num Dissimil Reali", numDissimilReali)

    # accuracy
    print(pair_prediction[0:10])
    print(pair_label[0:10])
    accuracyTest = accuracy_score(pair_label, pair_prediction)
    print("Accuarcy di test: %0.4f" % accuracyTest)
    # precision
    precisionTest = precision_score(pair_label, pair_prediction)
    print("Precision di test: %0.4f" % precisionTest)
    # recall
    recallTest = recall_score(pair_label, pair_prediction)
    print("Recall di test: %0.4f" % recallTest)
    # mean per-class F1, only meaningful when both precision and recall are non-zero
    if recallTest != 0.0 and precisionTest != 0.0:
        scores_testing = f1_score(pair_label, pair_prediction, average=None)
        scores_testing = scores_testing.mean()
        print("mF1 score di testing: %0.4f" % scores_testing)
    else:
        scores_testing = 0.000
        print("mscoref1", scores_testing)

    # --------------------------------
    print("Classification Report")
    print(classification_report(pair_label, pair_prediction))

    cm = confusion_matrix(pair_label, pair_prediction)
    print("Matrice di confusione \n", cm)
    # Row-normalize: the reshape turns the row sums into a column vector.
    cm = cm / cm.sum(1).reshape(-1, 1)
    print("\n")
    print("Matrice di confusione normalizzata \n", cm)
    tnr, fpr, fnr, tpr = cm.ravel()
    print("\n")
    print("TNR:", tnr)
    print("FPR:", fpr)
    print("FNR:", fnr)
    print("TPR:", tpr)

    # Persist every metric under the path-derived key (was nine copy-pasted calls).
    key = key1
    entry = [
        "accuracyTest", "precisionTest", "recallTest", "f1_score_Test",
        "TNR", "FPR", "FNR", "TPR", "Timetesting"
    ]
    value = [
        accuracyTest, precisionTest, recallTest, scores_testing, tnr, fpr,
        fnr, tpr, timeTest
    ]
    for e, v in zip(entry, value):
        addValueJsonModel(directory + "modelTrained.json", version, key, e, v)
def test_model_margine_dynamik(directory, path, version, resize, batch_size,
                               margine=None):
    """Evaluate a saved siamese model using the learned distance threshold.

    Reads the last ``euclidean_distance_threshold`` stored for ``version`` in
    ``<directory>modelTrained.json``, runs ``test_margine_dynamik`` with it on
    the money test set, prints the standard metrics and writes them back into
    the same JSON report.

    Args:
        directory: folder holding ``modelTrained.json`` (must already end with
            a path separator — the filename is concatenated directly).
        path: file path of the serialized model (consumed by ``torch.load``).
        version: version key inside the JSON report.
        resize: image resize passed to ``DataSetPairCreate``.
        batch_size: batch size of the test ``DataLoader``.
        margine: optional margin forwarded to ``test_margine_dynamik``.
    """
    siamese_test = torch.load(path)
    controlFileCSV()
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()
    pair_test = dataSetPair.pair_money_test
    pair_money_test_loader = DataLoader(pair_test, batch_size, num_workers=0)

    # Threshold estimated during training, persisted by train_margine_dynamik.
    percorso = directory + "modelTrained.json"
    soglia = readJson(percorso, version, "euclidean_distance_threshold", "last")

    # ------------------------ evaluate on the test set ------------------
    print("Testing on Test set....")
    timeTest, pair_prediction, pair_label = test_margine_dynamik(
        siamese_test, pair_money_test_loader, soglia, margine=margine)

    # Label convention: 0 = similar, 1 = dissimilar.
    numSimilPredette = np.sum(pair_prediction == 0)
    print("Num Simili predette", numSimilPredette)
    numDissimilPredette = np.sum(pair_prediction == 1)
    print("Num Dissimil predette", numDissimilPredette)
    numSimilReali = np.sum(pair_label == 0)
    print("Num Simili Reali", numSimilReali)
    numDissimilReali = np.sum(pair_label == 1)
    print("Num Dissimil Reali", numDissimilReali)

    # accuracy
    print(pair_prediction[0:10])
    print(pair_label[0:10])
    accuracyTest = accuracy_score(pair_label, pair_prediction)
    print("Accuarcy di test: %0.4f" % accuracyTest)
    # precision
    precisionTest = precision_score(pair_label, pair_prediction)
    print("Precision di test: %0.4f" % precisionTest)
    # recall
    recallTest = recall_score(pair_label, pair_prediction)
    print("Recall di test: %0.4f" % recallTest)
    # mean per-class F1, only meaningful when both precision and recall are non-zero
    if recallTest != 0.0 and precisionTest != 0.0:
        scores_testing = f1_score(pair_label, pair_prediction, average=None)
        scores_testing = scores_testing.mean()
        print("mF1 score di testing: %0.4f" % scores_testing)
    else:
        scores_testing = 0.000
        print("mscoref1", scores_testing)

    # -------------------------------- persist the metrics (was five copy-pasted calls)
    key = ["accuracy", "precision", "recall", "mf1_score", "time"]
    entry = [
        "accuracyTest", "precisionTest", "recallTest", "f1_score_Test",
        "testing"
    ]
    value = [accuracyTest, precisionTest, recallTest, scores_testing, timeTest]
    for k, e, v in zip(key, entry, value):
        addValueJsonModel(directory + "modelTrained.json", version, k, e, v)
def test_siamese_roc(model, loader_train, loader_valid, directory, version):
    """ROC-based threshold selection for a siamese embedding model.

    For each of the train and validation loaders: computes pairwise embedding
    distances, draws the ROC curve, picks the threshold maximizing
    ``tpr + 1 - fpr`` via ``plot_threshold``, evaluates the thresholded
    predictions and stores thresholds/metrics plus raw confusion-matrix counts
    into ``<directory>\\modelTrained.json``.

    Args:
        model: embedding network; called as ``model(img)``.
        loader_train: DataLoader yielding (img1, img2, label12, label1, label2).
        loader_valid: same structure as ``loader_train``.
        directory: report folder (joined with "\\modelTrained.json").
        version: version key inside the JSON report.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    timer = Timer()
    loader = {'train': loader_train, 'valid': loader_valid}
    for mode in ['train', 'valid']:
        print("Modalita ", mode)
        gt = []        # ground-truth pair labels
        distanze = []  # Euclidean distances between the two embeddings
        for i, batch in enumerate(loader[mode]):
            # img1, img2, label12, label1, label2
            I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
            # The siamese part is trivial: run the embedding net on both inputs.
            phi_i = model(I_i)  # img 1
            phi_j = model(I_j)  # img 2
            print("Output train img1", phi_i.size())
            print("Output train img2", phi_j.size())
            print("Etichetta reale", l_ij)
            labs = l_ij.to('cpu')
            dist = F.pairwise_distance(phi_i, phi_j)
            dist = dist.cpu()
            dist = dist.tolist()
            print("DISTANZE ", dist)
            gt.extend(list(labs))
            distanze.extend(list(dist))

        print("Modalita: " + mode)
        print("Curve ROC")
        fpr, tpr, thresholds = roc_curve(gt, distanze)
        plot_roc(directory, version, fpr, tpr, mode)

        print("Scelta della buona soglia")
        # Youden-style criterion: maximize tpr + (1 - fpr).
        score = tpr + 1 - fpr
        soglia_ottimale = plot_threshold(directory, version, thresholds, score,
                                         mode)
        print("Performance..." + mode)

        # BUG FIX: `distanze` is a plain list; comparing it with a scalar only
        # works when the threshold happens to be a numpy scalar. Go through
        # numpy explicitly for the element-wise comparison.
        predette = np.asarray(distanze) > soglia_ottimale
        cm = confusion_matrix(gt, predette)
        # NOTE: cm is NOT normalized here, so these are raw counts, not rates.
        tnr, fpr, fnr, tpr = cm.ravel()
        print("False Positive Rate: {:0.2f}".format(fpr))
        print("True Positive Rate: {:0.2f}".format(tpr))

        accuracy = accuracy_score(gt, predette)
        precision = precision_score(gt, predette)
        recall = recall_score(gt, predette)
        f1 = f1_score(gt, predette)
        print("Precision: {:0.2f}, Recall: {:0.2f}".format(precision, recall))
        # BUG FIX: this line previously printed `precision` labeled "Accuracy".
        print("Accuracy: {:0.2f} ".format(accuracy))
        print("F1 score: {:0.2f}".format(f1.mean()))

        key = ["threshold", "accuracy", "precision", "recall", "mf1_score"]
        entry = [
            "threshold_" + mode, "accuracy_" + mode, "precision_" + mode,
            "recall_" + mode, "f1_score_" + mode
        ]
        value = [soglia_ottimale, accuracy, precision, recall, f1]
        for i in range(5):
            addValueJsonModel(directory + "\\" + "modelTrained.json", version,
                              key[i], entry[i], value[i])

        # Raw confusion-matrix counts under a single key (was four copy-pasted calls).
        key = "performance_test"
        entry = ["TNR", "FPR", "FNR", "TPR"]
        value = [tnr, fpr, fnr, tpr]
        for e, v in zip(entry, value):
            addValueJsonModel(directory + "\\modelTrained.json", version, key,
                              e, v)
def train_model_margine_dynamik(directory, filename, version, exp_name, name,
                                model, lr, epochs, momentum, batch_size,
                                resize):
    """Train a siamese model with a dynamic margin, then validate it.

    Builds the train/val/test pair loaders, runs ``train_margine_dynamik``,
    records hyper-parameters, losses, accuracies and timing in the model JSON,
    reloads the saved checkpoint (``<exp_name>.pth``) and evaluates it on the
    validation set, persisting the validation metrics as well.

    Args:
        directory: output folder for the version subfolder and JSON report.
        filename: unused here but kept for interface compatibility with callers.
        version: version key inside the JSON report.
        exp_name: experiment name; also the checkpoint file stem.
        name: model name written into the JSON report.
        model: the network to train.
        lr: learning rate.
        epochs: number of training epochs.
        momentum: recorded in the report (optimizer is configured inside
            ``train_margine_dynamik``).
        batch_size: batch size for all three loaders.
        resize: image resize passed to ``DataSetPairCreate``.
    """
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()
    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    pair_money_test_loader = DataLoader(pair_test,
                                        batch_size=batch_size,
                                        num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    # Training: returns model, elapsed time, last losses and accuracies.
    createFolder(directory + "\\" + version)
    writeJsonModelInit1(directory, name, version)
    print("Training...")
    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_margine_dynamik(
        directory,
        version,
        model,
        pair_money_train_loader,
        pair_money_val_loader,
        resize,
        batch_size,
        exp_name,
        lr=lr,
        epochs=epochs)
    print("Time computing", f)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)

    hyperparametr = {
        "indexEpoch": epochs - 1,
        "lr": lr,
        "momentum": momentum,
        "numSampleTrain": len(pair_train)
    }
    contrastiveLoss = {
        "lossTrain": last_loss_train,
        "lossValid": last_loss_val
    }
    accuracy = {"accuracyTrain": last_acc_train, "accuracyValid": last_acc_val}
    time = {"training": f}
    writeJsonModelClass(directory, name, version, hyperparametr, resize,
                        batch_size, contrastiveLoss, accuracy, time)

    # Reload the checkpoint written by the training routine.
    namep = exp_name + ".pth"
    siamese_model = torch.load(namep)

    print("Testing on Validation set")
    timeVal, pair_prediction_val, pair_label_val = test_margine_dynamik(
        siamese_model, pair_money_val_loader)

    # Label convention: 0 = similar, 1 = dissimilar.
    numSimilPredette = np.sum(pair_prediction_val == 0)
    print("Num Simili predette", numSimilPredette)
    numDissimilPredette = np.sum(pair_prediction_val == 1)
    print("Num Dissimil predette", numDissimilPredette)
    numSimilReali = np.sum(pair_label_val == 0)
    print("Num Simili Reali", numSimilReali)
    numDissimilReali = np.sum(pair_label_val == 1)
    print("Num Dissimil Reali", numDissimilReali)

    # accuracy
    print(pair_prediction_val[0:10])
    print(pair_label_val[0:10])
    accuracyVal = accuracy_score(pair_label_val, pair_prediction_val)
    print("Accuarcy di test: %0.4f" % accuracyVal)
    # precision
    precisionVal = precision_score(pair_label_val, pair_prediction_val)
    print("Precision di test: %0.4f" % precisionVal)
    # recall
    recallVal = recall_score(pair_label_val, pair_prediction_val)
    print("Recall di test: %0.4f" % recallVal)
    # mean per-class F1, only meaningful when both precision and recall are non-zero
    if recallVal != 0.0 and precisionVal != 0.0:
        scores_testing_val = f1_score(pair_label_val,
                                      pair_prediction_val,
                                      average=None)
        scores_testing_val = scores_testing_val.mean()
        print("mF1 score di testing: %0.4f" % scores_testing_val)
    else:
        scores_testing_val = 0.000
        print("mscoref1", scores_testing_val)

    # Persist the validation metrics (was five copy-pasted calls).
    key = ["accuracy", "precision", "recall", "mf1_score", "time"]
    entry = [
        "accuracyVal", "precisionVal", "recallVal", "f1_score_Val", "testVal"
    ]
    value = [accuracyVal, precisionVal, recallVal, scores_testing_val, timeVal]
    for k, e, v in zip(key, entry, value):
        addValueJsonModel(directory + "modelTrained.json", version, k, e, v)
def testing_classificazionePair(directory, path, version, resize, batch_size):
    """Test a pair-classification model and store per-class rates.

    Loads the classifier from ``path``, evaluates it on the money test pairs
    with micro-averaged metrics, prints the classification report and the raw
    and row-normalized confusion matrices, derives per-class TPR/TNR/FPR/FNR
    from one-vs-rest counts and writes everything into
    ``<directory>\\modelTrained.json``.

    Args:
        directory: report folder (created if missing, with a version subfolder).
        path: file path of the serialized model (consumed by ``torch.load``).
        version: version key inside the JSON report.
        resize: image resize passed to ``DataSetPairCreate``.
        batch_size: batch size of the test ``DataLoader``.
    """
    model = torch.load(path)
    controlFileCSV()  # original called this twice back-to-back; once suffices
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()
    pair_test = dataSetPair.pair_money_test
    pair_money_test_loader = DataLoader(pair_test, batch_size, num_workers=0)

    createFolder(directory)
    createFolder(directory + "\\" + version)

    timeTest, pair_prediction, pair_label = test_classifierPair(
        model, pair_money_test_loader)

    # Micro-averaged metrics: this is a multi-class pair classification task.
    accuracyTest = accuracy_score(pair_label, pair_prediction)
    print("Accuarcy di test: %0.4f" % accuracyTest)
    precisionTest = precision_score(pair_label, pair_prediction,
                                    average='micro')
    print("Precision di test: %0.4f" % precisionTest)
    recallTest = recall_score(pair_label, pair_prediction, average='micro')
    print("Recall di test: %0.4f" % recallTest)
    if recallTest != 0.0 and precisionTest != 0.0:
        # 'micro' already yields a scalar; no extra averaging step needed.
        scores_testing = f1_score(pair_label, pair_prediction, average='micro')
        print("mF1 score di testing: %0.4f" % scores_testing)
    else:
        scores_testing = 0.000
        print("mscoref1", scores_testing)

    # Persist the scalar metrics (was five copy-pasted calls).
    key = ["accuracy", "precision", "recall", "mf1_score", "time"]
    entry = [
        "accuracyTest_Pair", "precisionTest_Pair", "recallTest_Pair",
        "f1_score_Test_Pair", "testing_Pair"
    ]
    value = [accuracyTest, precisionTest, recallTest, scores_testing, timeTest]
    for k, e, v in zip(key, entry, value):
        addValueJsonModel(directory + "\\modelTrained.json", version, k, e, v)

    print("Classification Report")
    print(classification_report(pair_label, pair_prediction))

    cm = confusion_matrix(pair_label, pair_prediction)
    print("Matrice di confusione \n", cm)
    print("\n")

    # -------- one-vs-rest counts per class ------------------------------
    FP = cm.sum(axis=0) - np.diag(cm)
    FN = cm.sum(axis=1) - np.diag(cm)
    TP = np.diag(cm)
    TN = cm.sum() - (FP + FN + TP)
    FP = FP.astype(float)
    FN = FN.astype(float)
    TP = TP.astype(float)
    TN = TN.astype(float)
    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP / (TP + FN)
    # Specificity or true negative rate
    TNR = TN / (TN + FP)
    # Precision or positive predictive value
    PPV = TP / (TP + FP)
    # Negative predictive value
    NPV = TN / (TN + FN)
    # Fall out or false positive rate
    FPR = FP / (FP + TN)
    # False negative rate
    FNR = FN / (TP + FN)
    # False discovery rate
    FDR = FP / (TP + FP)
    print("\n")
    print("TNR:", TNR)
    print("FPR:", FPR)
    print("FNR:", FNR)
    print("TPR:", TPR)

    # Row-normalize: the reshape turns the row sums into a column vector.
    cm = cm / cm.sum(1).reshape(-1, 1)
    print("\n")
    print("Matrice di confusione normalizzata \n", cm)

    # Persist the per-class rate vectors (was four copy-pasted calls).
    key = "performance_test_Pair"
    entry = ["TNR", "FPR", "FNR", "TPR"]
    value = [list(TNR), list(FPR), list(FNR), list(TPR)]
    for e, v in zip(entry, value):
        addValueJsonModel(directory + "\\modelTrained.json", version, key, e, v)
def train_margine_dynamik(directory,
                          version,
                          model,
                          train_loader,
                          valid_loader,
                          resize,
                          batch_size,
                          exp_name='model_1',
                          lr=0.0001,
                          epochs=10,
                          momentum=0.99,
                          margin=2,
                          logdir='logs',
                          modeLoss=None):
    """Train a siamese network with ContrastiveLoss and a per-epoch threshold.

    Each epoch runs a train pass and a validation pass; pair predictions are
    obtained by thresholding the Euclidean distance between the two embeddings
    with ``euclidean_distance_threshold``, which is re-estimated at the end of
    every epoch via ``aggiusta_soglia`` from the collected distance/label
    histories. Loss/accuracy curves are logged to TensorBoard, plotted to PNG,
    and checkpoints plus the current threshold are saved every epoch.

    Returns:
        (model, elapsed_time_str, last_loss_train, last_loss_val,
         last_acc_train, last_acc_val)

    NOTE(review): `momentum`, `margin` and `modeLoss` are accepted but never
    used below — presumably kept for interface compatibility; confirm.
    """
    criterion = ContrastiveLoss()
    optimizer = Adam(model.parameters(),
                     lr,
                     betas=(0.9, 0.999),
                     weight_decay=0.0004)
    # meters (running averages of loss and accuracy)
    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()
    # TensorBoard writer
    writer = SummaryWriter(join(logdir, exp_name))
    # device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    criterion.to(device)
    # dictionary holding the training and validation loaders
    loader = {'train': train_loader, 'valid': valid_loader}
    # initial decision threshold; refined after every epoch
    euclidean_distance_threshold = 1
    array_accuracy_train = []
    array_accuracy_valid = []
    array_loss_train = []
    array_loss_valid = []
    array_glb_train = []
    array_glb_valid = []
    last_loss_train = 0
    last_loss_val = 0
    last_acc_train = 0
    last_acc_val = 0
    # initialize the global step (counts samples seen during training)
    global_step = 0
    tempo = Timer()
    start = timer()
    soglie = []  # history of thresholds, one per epoch
    for e in range(epochs):
        print("Epoca = ", e)
        print("Euclidean_distance_soglia = ", euclidean_distance_threshold)
        # keep track of euclidean_distance and label history each epoch
        training_euclidean_distance_history = []
        training_label_history = []
        validation_euclidean_distance_history = []
        validation_label_history = []
        # iterate over the two modes: train and valid
        for mode in ['train', 'valid']:
            loss_meter.reset()
            acc_meter.reset()
            model.train() if mode == 'train' else model.eval()
            with torch.set_grad_enabled(
                    mode == 'train'):  # enable gradients only in training
                for i, batch in enumerate(loader[mode]):
                    print("Num batch =", i)
                    # img1, img2, label12, label1, label2
                    I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
                    # the siamese part is trivial:
                    # run the embedding net on both inputs
                    phi_i = model(I_i)  # img 1
                    phi_j = model(I_j)  # img 2
                    print("Output train img1", phi_i.size())
                    print("Output train img2", phi_j.size())
                    print("Etichetta reale", l_ij)
                    l_ij = l_ij.type(torch.LongTensor).to(device)
                    # compute the loss
                    l = criterion(phi_i, phi_j, l_ij)
                    # update the global step:
                    # it holds the number of samples seen during training
                    n = I_i.shape[0]  # number of elements in the batch
                    #print("numero elementi nel batch ",n)
                    global_step += n
                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()
                        # re-run the forward pass with the updated weights
                        phi_i = model(I_i)  # img 1
                        phi_j = model(I_j)  # img 2
                    # Euclidean distance between the two embeddings
                    if mode == 'train':
                        euclidean_distance = F.pairwise_distance(phi_i, phi_j)
                        training_label = euclidean_distance > euclidean_distance_threshold  # 0 if same, 1 if not same (progression)
                        #equals = training_label.int() == l_ij.int()  # 1 if true
                        training_label = training_label.int()
                        acc = accuracy_score(
                            l_ij.to('cpu'),
                            torch.Tensor.numpy(training_label.cpu()))
                        # save euclidean distance and label history
                        euclid_tmp = torch.Tensor.numpy(
                            euclidean_distance.detach().cpu()
                        )  # detach gradient, move to CPU
                        training_euclidean_distance_history.extend(euclid_tmp)
                        label_tmp = torch.Tensor.numpy(l_ij.to('cpu'))
                        training_label_history.extend(label_tmp)
                    elif mode == 'valid':
                        # evaluate validation accuracy using a Euclidean distance threshold
                        euclidean_distance = F.pairwise_distance(phi_i, phi_j)
                        validation_label = euclidean_distance > euclidean_distance_threshold  # 0 if same, 1 if not same
                        #equals = validation_label.int() == l_ij.int()  # 1 if true
                        validation_label = validation_label.int()
                        acc = accuracy_score(
                            l_ij.to('cpu'),
                            torch.Tensor.numpy(validation_label.cpu()))
                        # save euclidean distance and label history
                        euclid_tmp = torch.Tensor.numpy(
                            euclidean_distance.detach().cpu()
                        )  # detach gradient, move to CPU
                        validation_euclidean_distance_history.extend(
                            euclid_tmp)
                        label_tmp = torch.Tensor.numpy(l_ij.cpu())
                        validation_label_history.extend(label_tmp)
                    n = batch[0].shape[0]
                    loss_meter.add(l.item(), n)
                    acc_meter.add(acc, n)
                    # log results iteration by iteration only during training
                    if mode == 'train':
                        writer.add_scalar('loss/train',
                                          loss_meter.value(),
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train',
                                          acc_meter.value(),
                                          global_step=global_step)
            # once the epoch pass is over (for both training and valid,
            # log the final estimates)
            if mode == 'train':
                global_step_train = global_step
                last_loss_train = loss_meter.value()
                last_acc_train = acc_meter.value()
                array_accuracy_train.append(acc_meter.value())
                array_loss_train.append(loss_meter.value())
                array_glb_train.append(global_step)
            else:
                global_step_val = global_step
                last_loss_val = loss_meter.value()
                last_acc_val = acc_meter.value()
                array_accuracy_valid.append(acc_meter.value())
                array_loss_valid.append(loss_meter.value())
                array_glb_valid.append(global_step)
            writer.add_scalar('loss/' + mode,
                              loss_meter.value(),
                              global_step=global_step)
            writer.add_scalar('accuracy/' + mode,
                              acc_meter.value(),
                              global_step=global_step)
        # end of an epoch
        print("Loss TRAIN", array_loss_train)
        print("Losss VALID", array_loss_valid)
        print("Accuracy TRAIN", array_accuracy_train)
        print("Accuracy VALID", array_accuracy_valid)
        print("dim acc train", len(array_accuracy_train))
        print("dim acc valid", len(array_accuracy_valid))
        # accuracy curves (redrawn and saved every epoch)
        plt.figure(figsize=(12, 8))
        plt.plot(array_accuracy_train)
        plt.plot(array_accuracy_valid)
        plt.xlabel('samples')
        plt.ylabel('accuracy')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotAccuracy_' + version + '.png')
        plt.show()
        # loss curves
        plt.figure(figsize=(12, 8))
        plt.plot(array_loss_train)
        plt.plot(array_loss_valid)
        plt.xlabel('samples')
        plt.ylabel('loss')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotLoss_' + version + '.png')
        plt.show()
        # re-estimate the decision threshold from this epoch's histories
        euclidean_distance_threshold = aggiusta_soglia(
            training_label_history, training_euclidean_distance_history,
            validation_label_history, validation_euclidean_distance_history)
        soglie.append(euclidean_distance_threshold)
        saveArray(directory, version, array_loss_train, array_loss_valid,
                  array_accuracy_train, array_accuracy_valid, array_glb_train,
                  array_glb_valid, soglie)
        saveinFileJson(start, directory, version, resize, batch_size, e, lr,
                       momentum, len(train_loader), array_accuracy_train[-1],
                       array_accuracy_valid[-1], array_loss_train[-1],
                       array_loss_valid[-1])
        addValueJsonModel(directory + "//" + "modelTrained.json", version,
                          "euclidean_distance_threshold", "last",
                          euclidean_distance_threshold)
        #writer.add_embedding(phi_i, batch[3], I_i, global_step=global_step, tag=exp_name+'_embedding')
        # save the model weights at the end of a train+valid cycle
        net_save(epochs,
                 model,
                 optimizer,
                 last_loss_train,
                 last_loss_val,
                 last_acc_train,
                 last_acc_val,
                 global_step_train,
                 global_step_val,
                 '%s.pth' % (exp_name + "_dict"),
                 dict_stato_no=True)
        torch.save(model, '%s.pth' % exp_name)
        torch.save(
            model, directory + "//" + version + "//" + '%s.pth' %
            (exp_name + "_" + str(e)))
    f = '{:.7f}'.format(tempo.stop())
    return model, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val
def gaussian_distribution_train_margine_single(directory, version,
                                               train_loader, resize,
                                               batch_size, path):
    """Fit one Gaussian per pair class to the embedding distances.

    Runs the model over ``train_loader``, splits the pairwise Euclidean
    distances by pair label (0 = similar, 1 = dissimilar), estimates mean and
    population standard deviation for each group, writes both means into
    ``<directory>\\modelTrained.json`` and saves a histogram + fitted-Gaussian
    plot under the version subfolder.

    Args:
        directory: report folder (joined with "\\modelTrained.json").
        version: version key inside the JSON report; also the plot subfolder.
        train_loader: DataLoader yielding (img1, img2, label12, label1, label2).
        resize: unused here but kept for interface compatibility with callers.
        batch_size: unused here but kept for interface compatibility.
        path: file path of the serialized model (consumed by ``torch.load``).
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = torch.load(path)
    model.to(device)
    tempo = Timer()
    start = timer()
    array_total_0 = []  # distances of label-0 (similar) pairs
    array_total_1 = []  # distances of label-1 (dissimilar) pairs
    model.eval()
    for i, batch in enumerate(train_loader):
        print("num batch:", i)
        I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
        phi_i = model(I_i)  # img 1
        phi_j = model(I_j)  # img 2
        euclidean_distance = F.pairwise_distance(phi_i, phi_j)
        euclid_tmp = torch.Tensor.numpy(euclidean_distance.detach().cpu())
        labs = l_ij.to('cpu').numpy()
        print(euclid_tmp)
        print(labs)
        distance_1 = [
            distance for distance, label in zip(euclid_tmp, labs)
            if label == 1
        ]
        distance_0 = [
            distance for distance, label in zip(euclid_tmp, labs)
            if label == 0
        ]
        print(distance_1)
        print(distance_0)
        # extend() on an empty list is a no-op, so no guard is needed
        array_total_0.extend(distance_0)
        array_total_1.extend(distance_1)

    print("len_0:", len(array_total_0))
    print("len_1:", len(array_total_1))
    tot_sample = len(array_total_0) + len(array_total_1)
    print("num tot:", tot_sample)
    print("Distribution gaussian_norm")

    # Class 0: mean and population standard deviation
    # (statistics.pstdev == sqrt(sum((x - mu)^2) / n), as hand-coded before).
    mu_0 = statistics.mean(array_total_0)
    print("Media 0:", mu_0)
    sigma_0 = statistics.pstdev(array_total_0, mu_0)
    print("Dev_std_0:", sigma_0)
    # ---------------------------
    # Class 1: mean and population standard deviation
    mu_1 = statistics.mean(array_total_1)
    print("Media_1:", mu_1)
    sigma_1 = statistics.pstdev(array_total_1, mu_1)
    print("Dev_std_1:", sigma_1)

    key = "mediaDistrib"
    entry = "media_0"
    entry1 = "media_1"

    # Fitted normal distributions and evaluation grids for plotting.
    g_0 = norm(mu_0, sigma_0)
    g_1 = norm(mu_1, sigma_1)
    x_0 = np.linspace(0, max(array_total_0), 100)
    x_1 = np.linspace(0, max(array_total_1), 100)

    plt.figure(figsize=(15, 6))
    media_0 = '{:.3f}'.format(mu_0)
    media_1 = '{:.3f}'.format(mu_1)
    # The formatted strings (not the raw floats) are what gets persisted.
    addValueJsonModel(directory + "\\" + "modelTrained.json", version, key,
                      entry, media_0)
    addValueJsonModel(directory + "\\" + "modelTrained.json", version, key,
                      entry1, media_1)
    plt.hist(array_total_0, bins=100, density=True)
    plt.hist(array_total_1, bins=100, density=True)
    plt.plot(x_0, g_0.pdf(x_0))
    plt.plot(x_1, g_1.pdf(x_1))
    plt.grid()
    plt.title("Media_0: " + media_0 + " Media_1: " + media_1)
    plt.legend([
        'Densità Stimata_0', 'Densità Stimata_1', 'Distribuzione Gaussiana_0',
        'Distribuzione Gaussiana_1'
    ])
    plt.savefig(directory + "\\" + version + "\\" +
                'plotDistribution_ofClassifacation.png')
    plt.clf()