Example 1
def test_model_performance(directory, path, version, resize, batch_size):

    siamese_test = torch.load(path)
    controlFileCSV()
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()
    pair_train = dataSetPair.pair_money_train
    pair_val = dataSetPair.pair_money_val
    pair_money_train_loader = DataLoader(pair_train, batch_size, num_workers=0)
    pair_money_valid_loader = DataLoader(pair_val, batch_size, num_workers=0)

    #------------------------ TEST ON THE TEST SET -----------
    print("Performance....")
    #pair_prediction, pair_label, timeTest = test_siamese(siamese_reload, pair_money_test_loader, margin=2 )
    test_siamese_roc(siamese_test, pair_money_train_loader,
                     pair_money_valid_loader, directory, version)
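A minimal usage sketch for the function above. The directory layout, checkpoint name, and hyperparameters are illustrative (borrowed from the Model-6 settings used elsewhere in this file), not prescribed by the source:

if __name__ == "__main__":
    # hypothetical checkpoint path and settings; adjust to your own run
    test_model_performance(directory="Model-6",
                           path="Model-6\\2\\modello6_v2_6.pth",
                           version="2",
                           resize=100,
                           batch_size=16)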
Example 2
def test_model_margine_dynamik(directory,
                               path,
                               version,
                               resize,
                               batch_size,
                               margine=None):

    siamese_test = torch.load(path)
    controlFileCSV()
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()
    pair_test = dataSetPair.pair_money_test
    pair_money_test_loader = DataLoader(pair_test, batch_size, num_workers=0)
    percorso = directory + "modelTrained.json"

    soglia = readJson(percorso, version, "euclidean_distance_threshold",
                      "last")
    #------------------------ TEST ON THE TEST SET -----------
    print("Testing on the test set....")
    #pair_prediction, pair_label, timeTest = test_siamese(siamese_reload, pair_money_test_loader, margin=2 )
    timeTest, pair_prediction, pair_label = test_margine_dynamik(
        siamese_test, pair_money_test_loader, soglia, margine=margine)

    numSimilPredette = np.sum(pair_prediction == 0)
    print("Predicted similar:", numSimilPredette)
    numDissimilPredette = np.sum(pair_prediction == 1)
    print("Predicted dissimilar:", numDissimilPredette)
    numSimilReali = np.sum(pair_label == 0)
    print("Actual similar:", numSimilReali)
    numDissimilReali = np.sum(pair_label == 1)
    print("Actual dissimilar:", numDissimilReali)

    # calculate accuracy
    print(pair_prediction[0:10])
    print(pair_label[0:10])
    accuracyTest = accuracy_score(pair_label, pair_prediction)
    print("Test accuracy: %0.4f" % accuracyTest)
    # calculate precision
    precisionTest = precision_score(pair_label, pair_prediction)
    print("Test precision: %0.4f" % precisionTest)
    # calculate recall
    recallTest = recall_score(pair_label, pair_prediction)
    print("Test recall: %0.4f" % recallTest)
    # calculate F1 score (mean of the per-class F1 scores)
    if recallTest != 0.0 and precisionTest != 0.0:

        scores_testing = f1_score(pair_label, pair_prediction, average=None)
        scores_testing = scores_testing.mean()
        print("Test mF1 score: %0.4f" % scores_testing)

    else:
        scores_testing = 0.000
        print("Test mF1 score:", scores_testing)

    #--------------------------------

    key = ["accuracy", "precision", "recall", "mf1_score", "time"]
    entry = [
        "accuracyTest", "precisionTest", "recallTest", "f1_score_Test",
        "testing"
    ]
    value = [accuracyTest, precisionTest, recallTest, scores_testing, timeTest]
    addValueJsonModel(directory + "modelTrained.json", version, key[0],
                      entry[0], value[0])
    addValueJsonModel(directory + "modelTrained.json", version, key[1],
                      entry[1], value[1])
    addValueJsonModel(directory + "modelTrained.json", version, key[2],
                      entry[2], value[2])
    addValueJsonModel(directory + "modelTrained.json", version, key[3],
                      entry[3], value[3])
    addValueJsonModel(directory + "modelTrained.json", version, key[4],
                      entry[4], value[4])
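The helpers readJson and addValueJsonModel are project utilities that are not shown here. A minimal sketch of what they might look like, assuming modelTrained.json nests values as version -> key -> entry -> value (a schema inferred from the calls above, not confirmed by the source):

import json
import os

def addValueJsonModel(path, version, key, entry, value):
    # load the existing file, or start from an empty dict
    data = {}
    if os.path.exists(path):
        with open(path) as f:
            data = json.load(f)
    # nested structure: data[version][key][entry] = value
    data.setdefault(version, {}).setdefault(key, {})[entry] = value
    with open(path, "w") as f:
        json.dump(data, f, indent=4)

def readJson(path, version, key, entry):
    # return one stored value, e.g. the "last" euclidean_distance_threshold
    with open(path) as f:
        data = json.load(f)
    return data[version][key][entry]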
Example 3
def test_model_margine_double(directory, path, version, resize, batch_size,
                              margin1, margin2):

    try:
        # extract the model name between the second backslash and ".pth"
        index = path.find("\\")
        index = path.find("\\", index + 1)
        key1 = path[index + 1:len(path) - 4]
        print("key1", key1)
    except Exception:
        key1 = "PerformanceTest"

    print("version", version)
    print("key1", key1)
    print("Margine_1 ", margin1)
    print("Margine_2 ", margin2)
    siamese_test = torch.load(path)
    controlFileCSV()
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()
    pair_test = dataSetPair.pair_money_test
    pair_money_test_loader = DataLoader(pair_test, batch_size, num_workers=0)

    #------------------------ TEST ON THE TEST SET -----------
    print("Testing on the test set....")
    #pair_prediction, pair_label, timeTest = test_siamese(siamese_reload, pair_money_test_loader, margin=2 )
    timeTest, pair_prediction, pair_label = test_siamese_margine_double(
        siamese_test, pair_money_test_loader, margin1, margin2)

    numSimilPredette = np.sum(pair_prediction == 0)
    print("Predicted similar:", numSimilPredette)
    numDissimilPredette = np.sum(pair_prediction == 1)
    print("Predicted dissimilar:", numDissimilPredette)
    numSimilReali = np.sum(pair_label == 0)
    print("Actual similar:", numSimilReali)
    numDissimilReali = np.sum(pair_label == 1)
    print("Actual dissimilar:", numDissimilReali)

    # calculate accuracy
    print(pair_prediction[0:10])
    print(pair_label[0:10])
    accuracyTest = accuracy_score(pair_label, pair_prediction)
    print("Test accuracy: %0.4f" % accuracyTest)
    # calculate precision
    precisionTest = precision_score(pair_label, pair_prediction)
    print("Test precision: %0.4f" % precisionTest)
    # calculate recall
    recallTest = recall_score(pair_label, pair_prediction)
    print("Test recall: %0.4f" % recallTest)
    # calculate F1 score (mean of the per-class F1 scores)
    if recallTest != 0.0 and precisionTest != 0.0:

        scores_testing = f1_score(pair_label, pair_prediction, average=None)
        scores_testing = scores_testing.mean()
        print("Test mF1 score: %0.4f" % scores_testing)

    else:
        scores_testing = 0.000
        print("Test mF1 score:", scores_testing)

    #--------------------------------

    print("Classification Report")
    print(classification_report(pair_label, pair_prediction))

    cm = confusion_matrix(pair_label, pair_prediction)
    print("Confusion matrix \n", cm)

    # the reshape turns the row-sum vector into a column vector,
    # so each row is normalized by its own total
    cm = cm / cm.sum(1).reshape(-1, 1)
    print("\n")
    print("Normalized confusion matrix \n", cm)

    tnr, fpr, fnr, tpr = cm.ravel()
    print("\n")
    print("TNR:", tnr)
    print("FPR:", fpr)
    print("FNR:", fnr)
    print("TPR:", tpr)

    key = key1
    entries = [
        "accuracyTest", "precisionTest", "recallTest", "f1_score_Test", "TNR",
        "FPR", "FNR", "TPR", "Timetesting"
    ]
    values = [
        accuracyTest, precisionTest, recallTest, scores_testing, tnr, fpr, fnr,
        tpr, timeTest
    ]
    for e, v in zip(entries, values):
        addValueJsonModel(directory + "modelTrained.json", version, key, e, v)
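For a binary problem, the row-normalized confusion matrix read off with ravel() above yields exactly the four rates, with class 0 (similar) acting as the negative class. A small worked example with toy numbers (not from the source):

import numpy as np

# rows = true class, columns = predicted class; 0 = similar, 1 = dissimilar
cm = np.array([[80, 20],   # 80 similar pairs predicted similar, 20 predicted dissimilar
               [10, 90]])  # 10 dissimilar pairs predicted similar, 90 predicted dissimilar
cm = cm / cm.sum(1).reshape(-1, 1)  # normalize each row by its total
tnr, fpr, fnr, tpr = cm.ravel()
print(tnr, fpr, fnr, tpr)  # 0.8 0.2 0.1 0.9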
Example 4
def train_continue(directory, version, path, exp_name, name, model, lr, epochs,
                   momentum, batch_size, resize, margin, logdir):
    # define the contrastive loss

    print("Continue model")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    siamese_reload = model
    siamese_reload.to(device)
    checkpoint = torch.load(path)

    siamese_reload.load_state_dict(checkpoint['model_state_dict'])
    #optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    lossTrain = checkpoint['lossTrain']
    lossValid = checkpoint['lossValid']

    print('lossTrain', lossTrain)
    print('lossValid', lossValid)
    global_step_train = checkpoint['global_step_train']
    global_step_val = checkpoint['global_step_valid']

    accTrain = checkpoint['accTrain']
    accValid = checkpoint['accValid']
    print('accTrain', accTrain)
    print('accValid', accValid)

    print("epoch %s, lossTrain %s, lossValid %s, accTrain %s, accValid %s, "
          "global_step_train %s, global_step_val %s" %
          (epoch, lossTrain, lossValid, accTrain, accValid,
           global_step_train, global_step_val))

    print(siamese_reload.load_state_dict(checkpoint['model_state_dict']))
    #model(torch.zeros(16,3,28,28)).shape

    # a dictionary containing all model parameters can be accessed via the state_dict method
    state_dict = siamese_reload.state_dict()
    print(state_dict.keys())

    # Print model's state_dict
    print("Model's state_dict:")
    for param_tensor in siamese_reload.state_dict():
        print(param_tensor, "\t",
              siamese_reload.state_dict()[param_tensor].size())

    controlFileCSV()
    #controlFileCSVPair()
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    #pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    #pair_money_test_loader = DataLoader(pair_test, batch_size=1024, num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    criterion = ContrastiveLoss(margin)
    optimizer = SGD(siamese_reload.parameters(), lr, momentum=momentum)
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    #meters
    array_loss_train = []
    array_loss_valid = []
    array_sample_train = []
    array_sample_valid = []
    array_acc_valid = []
    array_acc_train = []
    prediction_train = []
    labels_train = []
    prediction_val = []
    labels_val = []

    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()
    #writer
    writer = SummaryWriter(join(logdir, exp_name))

    criterion.to(device)  # the loss also goes to the device: it holds a parameter (the margin m)
    # dictionary with the training and validation loaders
    loader = {'train': pair_money_train_loader, 'valid': pair_money_val_loader}
    #global_step_train = global_step_train
    #gloabal_step_val = global_step_val

    #lossTrain = lossTrain
    #lossValid = lossValid
    timer = Timer()
    global_step = global_step_val

    for e in range(epochs):
        print("Epoca ", e)
        #iteriamo tra due modalità: train e test
        for mode in ['train', 'valid']:
            """
            if mode =='train':
                loss_meter.inizializza(lossTrain, global_step_train)
                acc_meter.inizializza(accTrain, global_step_train)
                global_step=global_step_train
            else:
                loss_meter.inizializza(lossValid, global_step_val)
                acc_meter.inizializza(accValid, global_step_val)
                global_step = global_step_val
              """

            if mode == 'train':
                siamese_reload.train()
            else:
                siamese_reload.eval()
            with torch.set_grad_enabled(
                    mode == 'train'):  # enable gradients only during training

                for i, batch in enumerate(loader[mode]):
                    I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
                    # img1, img2, label12, label1, label2
                    # the siamese forward pass is straightforward:
                    # run the embedding net on the two inputs
                    phi_i = siamese_reload(I_i)  # img 1
                    phi_j = siamese_reload(I_j)  # img 2

                    # compute the loss
                    l = criterion(phi_i, phi_j, l_ij)

                    d = F.pairwise_distance(phi_i.to('cpu'), phi_j.to('cpu'))
                    labs = l_ij.to('cpu')
                    #print(len(labs))
                    # max(margin - d, 0): if it is zero, the pair is dissimilar
                    tensor = torch.clamp(margin - d, min=0)
                    #print("max",type(tensor))
                    #print("size max tensor ",tensor.size())
                    #print("tentor 1", tensor)

                    for el in tensor:
                        if el <= 2:  # SIMILAR
                            if mode == 'train':
                                prediction_train.append(0)
                            else:
                                prediction_val.append(0)
                        else:  # DISSIMILAR
                            if mode == 'train':
                                prediction_train.append(1)
                            else:
                                prediction_val.append(1)
                    """
                    if mode=='train':
                        array_loss_train.append(l.item())
                    else:
                        array_loss_valid.append(l.item())
                    """
                    # update the global step:
                    # it counts the number of samples seen during training
                    n = I_i.shape[0]  # number of elements in the batch
                    global_step += n

                    if mode == 'train':
                        labels_train.extend(list(labs.numpy()))
                        print("Number of TRAIN predictions ",
                              len(prediction_train))
                        print("Number of TRAIN labels ", len(labels_train))
                        acc = accuracy_score(np.array(labels_train),
                                             np.array(prediction_train))
                        acc_meter.add(acc, n)

                    else:
                        labels_val.extend(list(labs.numpy()))
                        print("Number of VALID predictions ", len(prediction_val))
                        print("Number of VALID labels ", len(labels_val))
                        acc = accuracy_score(np.array(labels_val),
                                             np.array(prediction_val))
                        acc_meter.add(acc, n)

                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                    n = batch[0].shape[0]  # number of elements in the batch
                    loss_meter.add(l.item(), n)

                    if mode == 'train':
                        writer.add_scalar('loss/train',
                                          loss_meter.value(),
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train',
                                          acc_meter.value(),
                                          global_step=global_step)

                    if mode == 'train':
                        lossTrain = loss_meter.value()
                        global_step_train = global_step
                        array_loss_train.append(lossTrain)
                        array_acc_train.append(acc_meter.value())
                        array_sample_train.append(global_step_train)
                        print("TRAIN- Epoca", e)
                        print("GLOBAL STEP TRAIN", global_step_train)
                        print("LOSS TRAIN", lossTrain)
                        print("ACC TRAIN", acc_meter.value())

                    else:
                        lossValid = loss_meter.value()
                        global_step_val = global_step
                        array_loss_valid.append(lossValid)
                        array_acc_valid.append(acc_meter.value())
                        array_sample_valid.append(global_step_val)
                        print("VALID- Epoca", e)
                        print("GLOBAL STEP VALID", global_step_val)
                        print("LOSS VALID", lossValid)
                        print("ACC VALID", acc_meter.value())

            writer.add_scalar('loss/' + mode,
                              loss_meter.value(),
                              global_step=global_step)
            writer.add_scalar('accuracy/' + mode,
                              acc_meter.value(),
                              global_step=global_step)

        # add an embedding; TensorBoard will do the rest.
        # to monitor training qualitatively, at the end of each epoch
        # we log the embedding of the last batch.
        writer.add_embedding(phi_i,
                             batch[3],
                             I_i,
                             global_step=global_step,
                             tag=exp_name + '_embedding')
        # keep only the latest model, overwriting the old ones

        #torch.save(siamese_reload.state_dict(),'%s.pth'%exp_name) # save only the model parameters

        net_save(epochs, siamese_reload, optimizer, lossTrain, lossValid,
                 array_acc_train[-1], array_acc_valid[-1], global_step_train,
                 global_step_val, '%s.pth' % exp_name)
    f = '{:.7f}'.format(timer.stop())

    return siamese_reload, f, array_loss_train, array_loss_valid, array_sample_train, array_sample_valid, array_acc_train, array_acc_valid, labels_train, prediction_train, labels_val, prediction_val
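ContrastiveLoss and AverageValueMeter are project classes that this file does not define. Two minimal sketches consistent with how they are used above (the contrastive loss follows the standard Hadsell et al. formulation with label 0 = similar and 1 = dissimilar, and stores the margin m as a module parameter, which is why criterion.to(device) is needed); these are assumptions, not the author's confirmed implementations:

import torch
import torch.nn as nn
import torch.nn.functional as F

class ContrastiveLoss(nn.Module):
    def __init__(self, margin=2.0):
        super().__init__()
        # the margin is held as a non-trainable parameter, so the module
        # must be moved to the device together with the model
        self.m = nn.Parameter(torch.tensor(float(margin)), requires_grad=False)

    def forward(self, phi_i, phi_j, y):
        # y = 0 for similar pairs, y = 1 for dissimilar pairs
        d = F.pairwise_distance(phi_i, phi_j)
        loss_sim = (1 - y).float() * d.pow(2)
        loss_dissim = y.float() * torch.clamp(self.m - d, min=0).pow(2)
        return 0.5 * (loss_sim + loss_dissim).mean()

class AverageValueMeter:
    # running weighted average, matching the add(value, n) / value() calls above
    def __init__(self):
        self.sum = 0.0
        self.num = 0

    def add(self, value, n):
        self.sum += value * n
        self.num += n

    def value(self):
        return self.sum / self.num if self.num else 0.0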
Example 5
def main(argv):

    # create the file "dataSetJson.json" if it does not exist
    entry = {"nameDB": "Moneys"}
    controlFolder("Dataset")
    creteFileJson("Dataset\\dataSetJson.json", entry)
    data_create = DataSetCreate()

    #name_id = data_create.name_classes_id()
    #list_all_images = data_create.list_all_images()
    #num_tot_files = data_create.num_total_file()

    parser = argparse.ArgumentParser(description="Dataset Money")

    parser.add_argument('--create',
                        help="datasetBase | datasetLarge | datasetPair")
    parser.add_argument(
        '--info', help="dataset | datasetBase | datasetLarge | datasetPair")
    #parser.add_argument('--training', help="1")
    parser.add_argument('--test',
                        help="Name of model [model5 | model6 ]",
                        type=str)

    parser.add_argument('--train',
                        help="Name of model [model5 | model6 ]",
                        type=str)
    parser.add_argument('--v', help="version", type=int)

    parser.add_argument('--progress',
                        help="Name of model [model5 | model6 ]",
                        type=str)
    parser.add_argument('--file', help="name file .pth", type=str)

    parser.add_argument('--e', help="epoche", type=int)
    parser.add_argument('--margine', help="margin", type=int)

    parser.add_argument('--classification',
                        help="[ train | test | continue | demo ]",
                        type=str)

    parser.add_argument('--classtest', help="classTest")
    parser.add_argument('--demo', help="[ model5 | model6 ]")
    parser.add_argument('--pair', help="insert pair id [0 - 13824]", type=int)
    parser.add_argument('--soglia', help="threshold", type=float)
    parser.add_argument('--margin', help="margin", type=float)

    parser.add_argument('--path', help="path of model '.pth'", type=str)

    parser.add_argument('--distribution',
                        help="distribution of the trained model's training data")

    parser.add_argument('--pathModel',
                        help="path of the model to initialize")

    parser.add_argument('--margin1', help="margin 1", type=float)

    parser.add_argument('--margin2', help="margin 2", type=float)

    parser.add_argument('--roc', help="roc")
    argomento = parser.parse_args()
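    # Illustrative invocations (the script name "main.py" is an assumption;
    # the source does not name the entry-point file):
    #   python main.py --create datasetPair
    #   python main.py --train model6 --v 2 --e 20
    #   python main.py --test model6 --v 4
    #   python main.py --classification train --v 1 --e 20
    #   python main.py --demo model6 --v 2 --pair 100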

    #-------- DISTRIBUTION------

    required_together_distrib = ('distribution', 'v')
    if argomento.distribution is not None:
        # --distribution also requires --v
        if not all([getattr(argomento, x) for x in required_together_distrib]):
            raise RuntimeError("Cannot supply --distribution without --v")
        else:

            #------  MODEL 6
            if argomento.distribution == "model6":
                print("DISTRIBUTION model ", argomento.distribution)
                #------       MODEL 6 v 2
                if argomento.v == 2:
                    print("version v2")
                    directory = "Model-6"
                    version = "2"
                    resize = 100
                    batch_size = 16
                    createFolder(directory)
                    createFolder(directory + "\\" + version)
                    dataSetPair = DataSetPairCreate(resize)
                    dataSetPair.controlNormalize()
                    pair_train = dataSetPair.pair_money_train

                    pair_money_train_loader = DataLoader(pair_train,
                                                         batch_size=batch_size,
                                                         num_workers=0,
                                                         shuffle=True)
                    path = directory + "\\" + version + "\\modello6_v2_6.pth"
                    gaussian_distribution_train_margine_single(
                        directory, version, pair_money_train_loader, resize,
                        batch_size, path)
                else:
                    exit(0)

#--------------------------- DEMO -------------------------------

    required_together_demo = ('demo', 'v', 'pair')
    if argomento.demo is not None:
        # --demo also requires --v and --pair
        if not all([getattr(argomento, x) for x in required_together_demo]):
            raise RuntimeError("Cannot supply --demo without --v --pair")
        else:

            #------  MODEL 5
            if argomento.demo == "model5":
                print("Demo model ", argomento.demo)

                if argomento.v == 5:
                    print("version v5")
                    print("model5 v5 ResNet siamese classification SGD")
                    directory = "Model-5\\"
                    path = 'modello5_v5.pth'
                    version = "5"
                    idPair = argomento.pair
                    # check that the given pair id exists
                    resize = 100
                    demo_obj = Demo(directory, version, resize)
                    demo_obj.controlPair(idPair)
                    demo_obj.read_normalize()
                    dizionario = demo_obj.getitem(idPair)

                    siamese_test = torch.load(path)

                    demo_obj.test_demo(dizionario, siamese_test)
                    demo_obj.plottare()

                elif argomento.v == 7:
                    print("version v7")
                    print(
                        "DEMO model5 v7, Marek Net siamese classification SGD")
                    directory = "Model-5\\"
                    version = "7"
                    path = directory + version + "\\" + 'modello5_v7_17.pth'

                    idPair = argomento.pair
                    # check that the given pair id exists
                    resize = 100
                    demo_obj = Demo(directory, version, resize)
                    demo_obj.controlPair(idPair)
                    demo_obj.read_normalize()
                    dizionario = demo_obj.getitem(idPair)

                    siamese_test = torch.load(path)

                    demo_obj.test_demo(dizionario, siamese_test)
                    demo_obj.plottare()

                else:
                    print("Versione del model5 non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --train model5 --v [ 5 | 7 ]\n"
                    )
                    exit(0)

            # --DEMO ---- MODEL 6
            elif argomento.demo == "model6":
                print("Demo model ", argomento.demo)

                #------DEMO---  MODEL 6 v 2
                if argomento.v == 2:
                    print("version v2")
                    print("model6 v2 ResNet, single margine=2.0, soglia=0.92")
                    directory = "Model-6\\"
                    version = "2"
                    path = directory + version + "\\" + 'modello6_v2_6.pth'

                    idPair = argomento.pair
                    resize = 100
                    demo_obj = Demo(directory, version, resize)
                    demo_obj.controlPair(idPair)
                    demo_obj.read_normalize()
                    dizionario = demo_obj.getitem(idPair)

                    siamese_test = torch.load(path)
                    soglia = 0.92
                    dist = demo_obj.test_demo_single_margine(
                        dizionario, siamese_test, soglia)
                    demo_obj.plottare(dist)

                elif argomento.v == 4:
                    print("version v2")
                    print("model6 v4 ResNet, double margine=0.7 e 1.3")
                    directory = "Model-6\\"
                    version = "4"
                    path = directory + version + "\\" + 'modello6_v4_51.pth'

                    idPair = argomento.pair
                    resize = 100
                    demo_obj = Demo(directory, version, resize)
                    demo_obj.controlPair(idPair)
                    demo_obj.read_normalize()
                    dizionario = demo_obj.getitem(idPair)

                    siamese_test = torch.load(path)
                    margin1 = 0.7
                    margin2 = 1.2
                    dist = demo_obj.test_demo_double_margine(
                        dizionario, siamese_test, margin1, margin2)
                    demo_obj.plottare(dist)

                else:
                    print("Versione del model6 non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --train model6 --v [ 2 | 4 ]\n"
                    )
                    exit(0)

            else:
                print("Modello non riconosciuto")
                sys.stderr.write(
                    "Model not acknowledged, try --train [ model5 | model6 ]\n"
                )
                exit(0)

# -------------------- TRAIN ------ DATA CLASSIFICATION ---------------------

    elif argomento.classification == "train":
        required_together = ('classification', 'v', 'e')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification train without --v --e")
        else:
            #-------MODEL MNET
            if argomento.v == 2:
                epochs = 20
                lr = 0.0001
                momentum = 0.9
                batch_size = 16
                resize = 100
                if argomento.e is not None:
                    epochs = argomento.e

                directory = "Classe"
                filename = "//class"
                version = "2"
                exp_name = 'class_2'
                name = 'ModelM'
                model = ModelM()

                classificazione(directory, filename, version, exp_name, name,
                                model, lr, epochs, momentum, batch_size,
                                resize)

            #--------MODEL RESNET
            elif argomento.v == 1:
                print("Resnet")
                lr = 0.0001
                momentum = 0.9
                batch_size = 16
                resize = 256
                if argomento.e is not None:
                    epochs = argomento.e
                directory = "Classe"
                filename = "//class"
                version = "1"
                exp_name = 'class_1'
                name = 'ResNet'

                model = resnet34(pretrained=True)
                resnet_copy = deepcopy(model)

                ### adaptation
                num_class = 5
                resnet_copy.fc = nn.Linear(512, num_class)
                resnet_copy.num_classes = num_class
                print(resnet_copy)

                classificazione(directory, filename, version, exp_name, name,
                                resnet_copy, lr, epochs, momentum, batch_size,
                                resize)

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification train --v [ 1 | 2 ]\n"
                )
                exit(0)

#------------------- TEST ------ DATA CLASSIFICATION

#---- test on the base dataset
    elif argomento.classification == "test":
        required_together = ('classification', 'v')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification test without --v")
        else:
            if argomento.v == 2:
                print("MNet classification version 2")
                directory = "Classe"

                version = "2"
                batch_size = 16
                resize = 100
                name = 'ModelM'

                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//2//class_2_44.pth'

                testing_classificazione(directory, path_dict, version, resize,
                                        batch_size)

            elif argomento.v == 1:
                print("Resnet classification version 1")

                directory = "Classe"

                version = "1"
                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//1//class_1_19.pth'
                name = 'ResNet'
                batch_size = 4
                resize = 256
                testing_classificazione(directory, path_dict, version, resize,
                                        batch_size)

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification test --v [ 1 | 2 ]\n"
                )
                exit(0)

#--------------- TEST on datasetPair with manual classification

    elif argomento.classification == "testPair":
        required_together = ('classification', 'v')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification testPair without --v")
        else:
            if argomento.v == 2:

                directory = "Classe"

                version = "2"
                batch_size = 16
                resize = 100
                name = 'ModelM'

                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//2//class_2_44.pth'

                testing_classificazionePair(directory, path_dict, version,
                                            resize, batch_size)

            elif argomento.v == 1:
                print("Resnet classification version 1")

                directory = "Classe"

                version = "1"
                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//1//class_1_19.pth'
                name = 'ResNet'
                batch_size = 4
                resize = 256
                testing_classificazionePair(directory, path_dict, version,
                                            resize, batch_size)

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification testPair --v [ 1 | 2 ]\n"
                )
                exit(0)

#------------------- CONTINUE ------ DATA CLASSIFICATION

    elif argomento.classification == "continue":
        required_together = ('classification', 'v', 'e')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification continue without --v --e")
        else:

            if argomento.v == 2:
                print("MNet classification continue version 2")
                directory = "Classe"
                exp_name = 'class_2'
                version = "2"
                lr = 0.0001
                momentum = 0.9
                batch_size = 16
                resize = 100

                name = 'ModelM'

                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//2//class_2_19.pth'

                model = torch.load(path_dict)
                epoche_avanza = argomento.e
                continue_classificazione(directory, model, version, exp_name,
                                         name, lr, momentum, resize,
                                         batch_size, epoche_avanza)

            elif argomento.v == 1:
                print("Resnet classification continue version 1")

                directory = "Classe"
                version = "1"
                batch_size = 4
                resize = 256
                lr = 0.0001
                momentum = 0.9
                exp_name = 'class_1'
                name = 'ResNet'
                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//1//class_1_19.pth'

                model = torch.load(path_dict)
                epoche_avanza = argomento.e

                continue_classificazione(directory, model, version, exp_name,
                                         name, lr, momentum, resize,
                                         batch_size, epoche_avanza)

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification continue --v [ 1 | 2 ]\n"
                )
                exit(0)

# --------------- DEMO ------------------ MANUAL CLASSIFICATION
    elif argomento.classification == "demo":

        required_together = ('classification', 'v', 'pair')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification demo without --v --pair")
        else:

            #----- MODEL RESNET
            if argomento.v == 1:
                print("Classification Manual ResNet")
                if argomento.pathModel is not None:
                    path = argomento.pathModel
                else:
                    path = 'Classe\\1\\class_1_19.pth'
                directory = "Classe\\"
                version = "1"
                idPair = argomento.pair

                resize = 256
                demo_obj = Demo(directory, version, resize)
                demo_obj.controlPair(idPair)
                demo_obj.read_normalize()
                dizionario = demo_obj.getitem(idPair)

                class_test = torch.load(path)

                demo_obj.test_demo_order_manual(dizionario, class_test)
                demo_obj.plottare()

            #----- MODEL MNET
            elif argomento.v == 2:
                directory = "Classe\\"
                if argomento.pathModel is not None:
                    path = argomento.pathModel
                else:
                    path = 'Classe\\2\\class_2_44.pth'

                version = "2"
                idPair = argomento.pair
                # check that the given pair id exists
                resize = 100
                demo_obj = Demo(directory, version, resize)
                demo_obj.controlPair(idPair)
                demo_obj.read_normalize()
                dizionario = demo_obj.getitem(idPair)

                class_test = torch.load(path)

                demo_obj.test_demo_order_manual(dizionario, class_test)
                demo_obj.plottare()
                return

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification demo --v [ 1 | 2 ]\n"
                )
                exit(0)

#------------ DATASET CREATION --create

    if argomento.create == "datasetBase":  # creazione dataset di Base
        data_create.create_Dataset_Base()

    elif argomento.create == "datasetLarge":  # creazione dataset di Base e datasetLarge
        data_create.create_Dataset_Large()

    elif argomento.create == "datasetPair":
        # check whether the split dataset is present
        controlFileCSV()
        dataSetPair = DataSetPairCreate()
        dataSetPair.controlNormalize()

#------------- DATASET INFORMATION --info

    data_set_info = argomento.info
    if (data_set_info == "dataset"):
        #oggetto DataSetCreate
        print("Dataset of base\n")
        #lettura da file Dataset\dataSetJson.json
        info = readFileDataset("Dataset\dataSetJson.json", "dataset")
        #info = data_create.info_classes()
        for i in info:
            print(i)
        num = lengthDataset("Dataset\dataSetJson.json", "dataset",
                            "num_images")
        print("Length Dataset of Base = ", num)
        print("\n")

    elif (data_set_info == "datasetBase"):
        print("Dataset Base\n")
        #info = data_create.info_datasetLarge()
        info = readFileDataset("Dataset\dataSetJson.json", "datasetBase")
        for i in info:
            print(i)
        num = lengthDataset("Dataset\dataSetJson.json", "datasetBase",
                            "num_sample")
        print("Length DatasetBase = ", num)

    elif (data_set_info == "datasetLarge"):
        print("Dataset Large\n")
        #info = data_create.info_datasetLarge()
        info = readFileDataset("Dataset\dataSetJson.json", "datasetLarge")
        for i in info:
            print(i)
        num = lengthDataset("Dataset\dataSetJson.json", "datasetLarge",
                            "num_sample")
        print("Length DatasetLarge = ", num)

    elif (data_set_info == "datasetPair"):

        print("DatasetPair\n")
        info = readFileDataset("Dataset\dataSetJson.json", "dataSetPair")
        for i in info:
            print(i)

#-------------- TRAINING PHASE OF MODELS 5 AND 6 --train

    required_together = ('train', 'v', 'e')
    if argomento.train is not None:

        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError("Cannot supply --train without --v --e")
        else:

            #------  MODEL 5
            if argomento.train == "model5":
                if argomento.v == 7:
                    # siamese with transfer learning: the MNet used for
                    # classification, with the classification layer replaced
                    # by a 2-class one
                    # new: initialized from class_2 at epoch 44
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100
                    epochs = 20
                    if argomento.e is not None:
                        epochs = argomento.e
                    batch_size = 4
                    directory = "Model-5"
                    filename = "//5_v7"
                    version = "7"
                    exp_name = 'modello5_v7'
                    name = 'MNet'

                    # initialize the model with the MNet parameters
                    path = "class_2.pth"
                    model = torch.load(path)
                    model_copy = deepcopy(model)
                    fully_connect = model_copy.fc
                    fully = list(fully_connect)
                    fully.pop()
                    model_copy.fc = nn.Sequential(*fully)
                    # adaptation
                    model_copy.fc2 = nn.Sequential(nn.Linear(512, 2))
                    print(model_copy)

                    train_model_class_v1(directory, filename, version,
                                         exp_name, name, model_copy, lr,
                                         epochs, momentum, batch_size, resize)

                elif argomento.v == 5:
                    # siamese with transfer learning: the ResNet used for
                    # classification, with the last layer removed;
                    # a layer is added that takes the concatenation of the
                    # two outputs, plus a 2-class classification layer.
                    # the loss function is cross-entropy for 0/1 classification
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100
                    epochs = 20
                    if argomento.e is not None:
                        epochs = argomento.e
                    batch_size = 4
                    decay = 0.0004
                    directory = "Model-5"
                    filename = "//5_v5"
                    version = "5"
                    exp_name = 'modello5_v5'
                    name = 'ResNet_Class'

                    # initialization
                    model = torch.load("class_1.pth")
                    model_copy = deepcopy(model)

                    ### adaptation
                    num_class = 256
                    model_copy.fc = nn.Linear(512, num_class)
                    model_copy.num_classes = num_class
                    print(model_copy)

                    model_copy.fc2 = nn.Sequential(nn.Linear(512, 2))
                    print(model_copy)

                    train_model_class_v1(directory,
                                         filename,
                                         version,
                                         exp_name,
                                         name,
                                         model_copy,
                                         lr,
                                         epochs,
                                         momentum,
                                         batch_size,
                                         resize,
                                         decay=decay,
                                         modeLoss=None,
                                         dizionario_array=None)

                else:
                    print("Versione non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --train model5 --v [ 5 | 7 ]\n"
                    )
                    exit(0)

                #----- train MODEL 6 siamese
            elif argomento.train == "model6":
                #----- ResNet - single margin
                if argomento.v == 2:
                    # siamese with transfer learning: the ResNet used for
                    # classification, with the 5-class classification layer
                    # removed and a 256-unit one inserted.
                    #
                    # the loss function is the contrastive loss, single margin

                    decay = 0.0004
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100
                    epochs = 20
                    if argomento.e is not None:
                        epochs = argomento.e
                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v2"
                    version = "2"
                    exp_name = 'modello6_v2'
                    name = 'RestNet_Margine'

                    # used for 5-class classification, fine-tuned ResNet34
                    model = torch.load("class_1.pth")
                    model_copy = deepcopy(model)

                    ### adaptation
                    num_class = 256
                    model_copy.fc = nn.Linear(512, num_class)
                    model_copy.num_classes = num_class
                    print(model_copy)

                    train_model_margine(directory,
                                        filename,
                                        version,
                                        exp_name,
                                        name,
                                        model_copy,
                                        lr,
                                        epochs,
                                        momentum,
                                        batch_size,
                                        resize,
                                        decay=decay,
                                        margin=2.0,
                                        soglia=1.0,
                                        modeLoss="single")

                elif argomento.v == 4:
                    # siamese with transfer learning: the ResNet used for
                    # classification, with the 5-class classification layer
                    # removed and a 256-unit one inserted.
                    # the loss function is the contrastive loss, double margin

                    decay = 0.004
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100
                    epochs = 20
                    if argomento.e is not None:
                        epochs = argomento.e
                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v4"
                    version = "4"
                    exp_name = 'modello6_v4'
                    name = 'RestNet_Margine_Double'

                    # used for classification, fine-tuned ResNet
                    model = torch.load("class_1.pth")
                    model_copy = deepcopy(model)
                    # remove the last layer
                    ### adaptation
                    num_class = 256
                    model_copy.fc = nn.Linear(512, num_class)
                    model_copy.num_classes = num_class
                    print(model_copy)
                    margin1 = 0.7
                    margin2 = 1.2
                    if argomento.margin1 is not None:
                        margin1 = argomento.margin1

                    if argomento.margin2 is not None:
                        margin2 = argomento.margin2

                    train_model_margine_double(directory,
                                               filename,
                                               version,
                                               exp_name,
                                               name,
                                               model_copy,
                                               lr,
                                               epochs,
                                               momentum,
                                               batch_size,
                                               resize,
                                               decay=decay,
                                               margin1=margin1,
                                               margin2=margin2,
                                               modeLoss="double")

                else:
                    print("Versione non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --train model6 --v [ 2 | 4 ]\n"
                    )
                    exit(0)

            else:
                print("Modello non riconosciuto ")
                sys.stderr.write(
                    "Model not acknowledged, try --train [model5 | model6 ]\n")
                exit(0)

#-------------- TESTING PHASE --test
    required_together_test = ('test', 'v')

    if argomento.test is not None:
        if not all([getattr(argomento, x) for x in required_together_test]):
            raise RuntimeError("Cannot supply --test without --v")
        else:
            #------ test MODEL 5
            if argomento.test == "model5":

                # ----------model 5 v 5 ---- ResNet
                if argomento.v == 5:
                    print("version", argomento.v)

                    print("model 5 v5 ResNet classi siamese con lr =0.0001 ")
                    directory = "Model-5\\"
                    path = 'modello5_v5.pth'
                    version = "5"

                    batch_size = 16
                    resize = 100

                    test_model_class(directory,
                                     path,
                                     version,
                                     resize,
                                     batch_size,
                                     margine=None)

                elif argomento.v == 7:
                    # ----------model 5 v 7 ---- MNet
                    print("version", argomento.v)

                    print("model 5 v7 MNet classi siamese con lr =0.0001")
                    directory = "Model-5\\"

                    path = 'Model-5\\7\\modello5_v7_17.pth'
                    version = "7"

                    batch_size = 16
                    resize = 100

                    test_model_class(directory,
                                     path,
                                     version,
                                     resize,
                                     batch_size,
                                     margine=None)

                else:
                    print("Versione non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --test model5 --v [ 5 | 7 ]\n"
                    )
                    exit(0)

                #---------- test MODEL 6
            elif argomento.test == "model6":

                #------ model test 6 v 2
                if argomento.v == 2:
                    print("version", argomento.v)
                    print(
                        "model6 v2 test: ResNet siamese, single margin 2.0, threshold 0.92"
                    )
                    directory = "Model-6\\"
                    path = directory + "2\\" + 'modello6_v2_6.pth'
                    version = "2"
                    soglia = 0.92
                    if argomento.soglia is not None:
                        soglia = argomento.soglia
                    batch_size = 16
                    resize = 100
                    print("Soglia", soglia)
                    test_model_margine(directory,
                                       path,
                                       version,
                                       resize,
                                       batch_size,
                                       margine=soglia)

                    #-------- model test 6 v 4
                elif argomento.v == 4:
                    print("version", argomento.v)
                    print(
                        "model6 v4 test: ResNet siamese, double margins 0.7 and 1.2, 52 epochs"
                    )
                    directory = "Model-6\\"
                    path = directory + "4\\" + 'modello6_v4_51.pth'
                    version = "4"
                    margin1 = 0.7
                    margin2 = 1.2
                    batch_size = 16
                    resize = 100

                    test_model_margine_double(directory, path, version, resize,
                                              batch_size, margin1, margin2)
                else:
                    print("Versione non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --test model6 --v [ 2 | 4 ]\n"
                    )
                    exit(0)

            else:
                print("Modello non riconosciuto")
                sys.stderr.write(
                    "Model not acknowledged, try --test [model5 | model6 ]\n")
                exit(0)

# ---------------------PERFORMANCE

    if argomento.roc is not None:
        print(argomento.roc)
        print(argomento.v)

        #  PERFORMANCE MODEL 6 V 2
        if argomento.roc == "model6":
            # model test 6 v 2
            if argomento.v == 2:
                print("version", argomento.v)
                print(
                    "model6 v2 test: ResNet siamese, single margin 2.0, threshold 1.0")
                directory = "Model-6"
                version = "2"
                path = directory + "\\" + version + '\\modello6_v2_6.pth'

                batch_size = 16
                resize = 100

                test_model_performance(directory, path, version, resize,
                                       batch_size)


#-------------- CONTINUE PHASE --progress
    required_together_continue = ('progress', 'v', 'e')
    # --progress also requires --v and --e
    if argomento.progress is not None:
        if not all([getattr(argomento, x)
                    for x in required_together_continue]):
            raise RuntimeError("Cannot supply --progress without --v --e")
        else:

            #----- MODEL 6
            if argomento.progress == "model6":
                print("model", argomento.progress)

                # model continue 6 v 2
                if argomento.v == 2:
                    # siamese with transfer learning: the ResNet used for
                    # classification, with the 5-class classification layer
                    # removed and a 256-unit one inserted.
                    # the loss function is the contrastive loss, single margin

                    decay = 0.0004
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100

                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v2"
                    version = "2"
                    exp_name = 'modello6_v2'
                    name = 'RestNet_Margine_Single'
                    if argomento.pathModel is not None:
                        path = argomento.pathModel
                    else:
                        path = 'Model-6//2//modello6_v2_13.pth'
                    model = torch.load(path)

                    epoche_avanza = argomento.e
                    continue_model_margine_single(directory,
                                                  filename,
                                                  version,
                                                  exp_name,
                                                  name,
                                                  model,
                                                  lr,
                                                  epoche_avanza,
                                                  momentum,
                                                  batch_size,
                                                  resize,
                                                  decay=decay,
                                                  margin1=2.0,
                                                  soglia=0.92,
                                                  modeLoss="single")

                # model continue 6 v 4
                elif argomento.v == 4:
                    # siamese with transfer learning: the ResNet used for
                    # classification, with the 5-class classification layer
                    # removed and a 256-unit one inserted.
                    # the loss function is the contrastive loss, double margin

                    decay = 0.004
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100

                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v4"
                    version = "4"
                    exp_name = 'modello6_v4'
                    name = 'RestNet_Margine_Double'
                    if argomento.pathModel is not None:
                        path = argomento.pathModel
                    else:
                        path = 'Model-6//4//modello6_v4_56.pth'
                    model = torch.load(path)

                    margin1 = 0.7
                    margin2 = 1.2
                    if argomento.margin1 is not None:
                        margin1 = argomento.margin1

                    if argomento.margin2 is not None:
                        margin2 = argomento.margin2
                    epoche_avanza = argomento.e
                    continue_model_margine_double(directory,
                                                  filename,
                                                  version,
                                                  exp_name,
                                                  name,
                                                  model,
                                                  lr,
                                                  epoche_avanza,
                                                  momentum,
                                                  batch_size,
                                                  resize,
                                                  decay=decay,
                                                  margin1=margin1,
                                                  margin2=margin2,
                                                  modeLoss="double")

                # model continue 6 v 6
                elif argomento.v == 6:
                    # same double-margin setup as v4, resumed from a v4
                    # checkpoint with a higher learning rate and weight decay

                    decay = 0.02
                    lr = 0.001
                    momentum = 0.9
                    resize = 100

                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v6"
                    version = "6"
                    exp_name = 'modello6_v6'
                    name = 'RestNet_Margine_Double'
                    if argomento.pathModel is not None:
                        path = argomento.pathModel
                    else:
                        path = 'Model-6//4//modello6_v4_51.pth'
                    model = torch.load(path)

                    margin1 = 0.7
                    margin2 = 1.2
                    if argomento.margin1 is not None:
                        margin1 = argomento.margin1

                    if argomento.margin2 is not None:
                        margin2 = argomento.margin2
                    epoche_avanza = argomento.e
                    continue_model_margine_double(directory,
                                                  filename,
                                                  version,
                                                  exp_name,
                                                  name,
                                                  model,
                                                  lr,
                                                  epoche_avanza,
                                                  momentum,
                                                  batch_size,
                                                  resize,
                                                  decay=decay,
                                                  margin1=margin1,
                                                  margin2=margin2,
                                                  modeLoss="double")
                else:
                    print("Version not recognized")
                    sys.stderr.write(
                        "Version not recognized, try --progress model6 --v [ 2 | 4 | 6 ]\n"
                    )
                    sys.exit(1)
            else:
                print("Model not recognized")
                sys.stderr.write(
                    "Model not recognized, try --progress [ model6 ]\n")
                sys.exit(1)
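The single- and double-margin contrastive losses selected by the --v flag above are defined elsewhere in the project and are not part of this listing. As a minimal sketch (all names below are assumptions, not taken from the source): the single-margin variant is the standard contrastive loss, while a double-margin variant penalizes similar pairs only beyond margin1 and dissimilar pairs only inside margin2, matching the margin1/margin2 CLI flags.

import torch
import torch.nn.functional as F

def contrastive_loss_double(emb1, emb2, label, margin1=0.7, margin2=1.2):
    # Hypothetical double-margin contrastive loss; label == 0 marks
    # similar pairs, label == 1 dissimilar pairs, as in this listing.
    dist = F.pairwise_distance(emb1, emb2)
    # similar pairs are penalized only when farther apart than margin1
    loss_sim = (1 - label) * torch.clamp(dist - margin1, min=0.0) ** 2
    # dissimilar pairs are penalized only when closer than margin2
    loss_dis = label * torch.clamp(margin2 - dist, min=0.0) ** 2
    return (loss_sim + loss_dis).mean()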
Esempio n. 6
0
def train_model(directory,
                filename,
                version,
                exp_name,
                name,
                model,
                lr,
                epochs,
                momentum,
                batch_size,
                resize,
                modeLoss=None):

    warnings.filterwarnings('always')

    controlFileCSV()
    controlFileCSVPair()

    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    pair_money_test_loader = DataLoader(pair_test,
                                        batch_size=batch_size,
                                        num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    siamese_money = model  # the model to train
    # training
    # returns: model, training time, loss on train, loss on validation
    print("Training...")

    siamese_money, timeTraining, array_loss_train, array_loss_val, array_sample_train, array_sample_valid, array_acc_train, array_acc_valid, labels_train, prediction_train, labels_val, prediction_val = train_siamese(
        siamese_money,
        pair_money_train_loader,
        pair_money_val_loader,
        exp_name,
        lr=lr,
        epochs=epochs,
        modeLoss=modeLoss)

    print("time Training\n ", timeTraining)
    print("Loss last on train", array_loss_train[-1])
    print("Loss last on valid\n", array_loss_val[-1])

    print("Array sample last on train", array_sample_train[-1])
    print("Array sample on last  valid\n", array_sample_valid[-1])

    print("lunghezza array accuracy TRAIN", len(array_acc_train))
    print("Lunghezza array SAMPLE train", len(array_sample_train))

    print("lunghezza array accuracy VALID", len(array_acc_valid))
    print("Lunghezza array SAMPLE VALID", len(array_sample_valid))

    # create the directory if it does not already exist
    createFolder(directory)
    # plot loss and accuracy curves
    plotLoss(directory, filename, array_loss_train, array_loss_val,
             array_sample_train, array_sample_valid)
    plotAccuracy(directory, filename, array_acc_train, array_acc_valid,
                 array_sample_train, array_sample_valid)

    #------------------------ EVALUATE ON TRAINING DATA -----------
    #device = "cuda" if torch.cuda.is_available() else "cpu"
    #siamese_money.to(device)
    print("Score on dataTrain...")
    #pair_predictionTrain, pair_labelTrain , timeTrain = test_siamese(siamese_money, pair_money_train_loader, margin=2 )
    #calculate Accuracy
    accuracyTrain = accuracy_score(labels_train, prediction_train)
    print("Train accuracy of last batch: %0.4f" % accuracyTrain)
    #calculate Precision
    precisionTrain = precision_score(labels_train, prediction_train)
    print("Train precision of last batch: %0.4f" % precisionTrain)
    #calculate Recall
    recallTrain = recall_score(labels_train, prediction_train)
    print("Train recall of last batch: %0.4f" % recallTrain)

    if recallTrain != 0.0 and precisionTrain != 0.0:
        #calculate F1 score
        scores_training = f1_score(labels_train,
                                   prediction_train,
                                   average=None)
        scores_training = scores_training.mean()
        print("F1 score of last batch of train: %0.4f" % scores_training)
    else:
        scores_training = 0.000
        print("F1 score of last batch of train: %0.4f" % scores_training)

    #------------------------ EVALUATE ON VALIDATION DATA -----------
    print("Score on dataValid...")
    #pair_predictionValid, pair_labelValid , timeValid = test_siamese(siamese_money, pair_money_val_loader, margin=2 )
    #calculate Accuracy
    accuracyValid = accuracy_score(labels_val, prediction_val)
    print("Validation accuracy: %0.4f" % accuracyValid)
    #calculate Precision
    precisionValid = precision_score(labels_val, prediction_val)
    print("Validation precision: %0.4f" % precisionValid)
    #calculate Recall
    recallValid = recall_score(labels_val, prediction_val)
    print("Validation recall: %0.4f" % recallValid)
    #calculate F1 score
    if recallValid != 0.0 and precisionValid != 0.0:

        scores_valid = f1_score(labels_val, prediction_val, average=None)
        scores_valid = scores_valid.mean()
        print("Validation mF1 score: %0.4f" % scores_valid)

    else:
        scores_valid = 0.00
        print("Validation mF1 score: %0.4f" % scores_valid)
    """ QUESTO VA FATTO IN FASE DI TESTING UTILIZZANDO I DATI DEL TEST E IL COMANDO RELOAD 
    #------------------------TESTARE SU DATI DEL TEST
    print("Testing on dataTest....")
    pair_prediction, pair_label, timeTest = test_siamese(siamese_money, pair_money_test_loader, margin=2 )
        #calculate Accuracy
    accuracyTest = accuracy_score(pair_label, pair_prediction)
    print("Accuarcy di test: %0.4f"% accuracyTest)
        #calculate Precision
    precisionTest = precision_score(pair_label, pair_prediction)
    print("Precision di test: %0.4f"% precisionTest)
        #calculate Recall
    recallTest = recall_score(pair_label, pair_prediction)
    print("Recall di test: %0.4f"% recallTest)
        #calculate F1 score
    scores_testing = f1_score(pair_label,pair_prediction, average=None)
    print("F1 score di testing: %0.4f"% scores_testing)
    """

    #-------------------------

    hyperparametr = {
        "indexEpoch": epochs - 1,
        "lr": lr,
        "momentum": momentum,
        "numSampleTrain": len(pair_train)
    }
    contrastiveLoss = {
        "lossTrain": array_loss_train[-1],
        "lossValid": array_loss_val[-1]
    }
    accuracy = {"accuracyTrain": accuracyTrain, "accuracyValid": accuracyValid}
    precision = {
        "precisionTrain": precisionTrain,
        "precisionValid": precisionValid
    }
    recall = {"recallTrain": recallTrain, "recallValid": recallValid}
    f1score = {
        "f1_score_Train": scores_training,
        "f1_score_Valid": scores_valid
    }
    time = {"training": timeTraining}

    writeJsonModel(directory, name, version, hyperparametr, batch_size,
                   contrastiveLoss, accuracy, f1score, precision, recall, time)
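train_siamese above already returns 0/1 pair predictions alongside the labels; its implementation is not part of this listing. A minimal sketch of how such predictions could be derived from the two embeddings and a Euclidean-distance threshold (the function name and the 0.92 default are assumptions; 0.92 mirrors the soglia value used in the continue phase earlier):

import torch.nn.functional as F

def predict_pairs(emb1, emb2, threshold=0.92):
    # Hypothetical pair prediction: 0 = similar, 1 = dissimilar.
    # Pairs whose embedding distance exceeds the threshold are
    # predicted dissimilar, matching the label convention used by
    # the metric printouts above.
    dist = F.pairwise_distance(emb1, emb2)
    return (dist > threshold).long()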
Esempio n. 7
0
def testing_classificazionePair(directory, path, version, resize, batch_size):
    # directory "Classe

    model = torch.load(path)
    controlFileCSV()

    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()
    pair_test = dataSetPair.pair_money_test
    pair_money_test_loader = DataLoader(pair_test, batch_size, num_workers=0)

    createFolder(directory)
    createFolder(directory + "\\" + version)

    timeTest, pair_prediction, pair_label = test_classifierPair(
        model, pair_money_test_loader)
    
    accuracyTest = accuracy_score(pair_label, pair_prediction)
    print("Test accuracy: %0.4f" % accuracyTest)
    #calculate Precision
    precisionTest = precision_score(pair_label, pair_prediction, average='micro')
    print("Test precision: %0.4f" % precisionTest)
    #calculate Recall
    recallTest = recall_score(pair_label, pair_prediction, average='micro')
    print("Test recall: %0.4f" % recallTest)
    #calculate F1 score
    if recallTest != 0.0 and precisionTest != 0.0:
        # average='micro' already returns a scalar score
        scores_testing = f1_score(pair_label, pair_prediction, average='micro')
        print("Test mF1 score: %0.4f" % scores_testing)
    else:
        scores_testing = 0.000
        print("Test mF1 score: %0.4f" % scores_testing)

    key=["accuracy","precision","recall","mf1_score","time"]
    entry=["accuracyTest_Pair","precisionTest_Pair","recallTest_Pair","f1_score_Test_Pair","testing_Pair"]
    value=[accuracyTest,precisionTest,recallTest,scores_testing,timeTest]
    addValueJsonModel(directory+"\\modelTrained.json",version, key[0] ,entry[0], value[0])
    addValueJsonModel(directory+"\\modelTrained.json",version, key[1] ,entry[1], value[1])
    addValueJsonModel(directory+"\\modelTrained.json",version, key[2] ,entry[2], value[2])
    addValueJsonModel(directory+"\\modelTrained.json",version, key[3] ,entry[3], value[3])
    addValueJsonModel(directory+"\\modelTrained.json",version, key[4] ,entry[4], value[4])
    
    print("Classification Report")
    print(classification_report(pair_label, pair_prediction))
    
    cm = confusion_matrix(pair_label, pair_prediction)
    print("Matrice di confusione \n",cm)
    print("\n")
    #"--------
    FP = cm.sum(axis=0) - np.diag(cm)  
    FN = cm.sum(axis=1) - np.diag(cm)
    TP = np.diag(cm)
    TN = cm.sum() - (FP + FN + TP)

    FP = FP.astype(float)
    FN = FN.astype(float)
    TP = TP.astype(float)
    TN = TN.astype(float)


    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP/(TP+FN)
    # Specificity or true negative rate
    TNR = TN/(TN+FP) 
    # Precision or positive predictive value
    PPV = TP/(TP+FP)
    # Negative predictive value
    NPV = TN/(TN+FN)
    # Fall out or false positive rate
    FPR = FP/(FP+TN)
    # False negative rate
    FNR = FN/(TP+FN)
    # False discovery rate
    FDR = FP/(TP+FP)
    print("\n")
    print("TNR:",TNR)
    print("FPR:",FPR)
    print("FNR:",FNR)
    print("TPR:",TPR)
    
    #----------------

    # normalize each row; the reshape turns the row-sum vector into a column vector
    cm = cm / cm.sum(1).reshape(-1, 1)
    print("\n")
    print("Normalized confusion matrix \n", cm)
    """
    tnr, fpr, fnr, tpr = cm.ravel()
    print("\n")
    print("TNR:",tnr)
    print("FPR:",fpr)
    print("FNR:",fnr)
    print("TPR:",tpr)
    """
    key = "performance_test_Pair"
    entry=["TNR","FPR","FNR","TPR"]
    value=[list(TNR), list(FPR), list(FNR), list(TPR)]
    addValueJsonModel(directory+"\\modelTrained.json",version, key ,entry[0], value[0])
    addValueJsonModel(directory+"\\modelTrained.json",version, key ,entry[1], value[1])
    addValueJsonModel(directory+"\\modelTrained.json",version, key ,entry[2], value[2])
    addValueJsonModel(directory+"\\modelTrained.json",version, key ,entry[3], value[3])
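The per-class rates above follow directly from the confusion-matrix identities. A small self-contained check on a hypothetical 2x2 matrix (counts chosen purely for illustration):

import numpy as np

# hypothetical confusion matrix: rows = true class, cols = predicted class
cm = np.array([[50, 10],
               [ 5, 35]])

FP = cm.sum(axis=0) - np.diag(cm)   # [ 5, 10]
FN = cm.sum(axis=1) - np.diag(cm)   # [10,  5]
TP = np.diag(cm)                    # [50, 35]
TN = cm.sum() - (FP + FN + TP)      # [35, 50]

TPR = TP / (TP + FN)   # per-class recall:      [0.8333, 0.8750]
TNR = TN / (TN + FP)   # per-class specificity: [0.8750, 0.8333]

print("TPR:", TPR)
print("TNR:", TNR)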