Esempio n. 1
0
def classificazione(directory,filename, version,exp_name,name, model,lr, epochs,  momentum, batch_size, resize):
    """Train and evaluate a classification model.

    Builds the normalized train/validation/test datasets, trains `model`
    with `train_class`, writes hyperparameters and final metrics to the
    model's JSON summary, then reloads the trained weights from
    "<exp_name>.pth".

    Note: `filename` is accepted for signature compatibility but unused here.
    """
    print("Classificazione")
    # Ensure the base CSV files exist before building the datasets.
    controlFileCSVBase()

    dataSetClass = DatasetClassi(resize)
    dataSetClass.controlNormalize()

    train = dataSetClass.dataset_train_norm
    validation = dataSetClass.dataset_valid_norm
    test = dataSetClass.dataset_test_norm
    print("Numeri campioni", len(train))

    # Output folders: "<directory>" and "<directory>\<version>".
    createFolder(directory)
    createFolder(directory + "\\" + version)
    writeJsonModelInit1(directory, name, version)

    money_train_loader = DataLoader(train, batch_size=batch_size, num_workers=0, shuffle=True)
    money_test_loader = DataLoader(test, batch_size=batch_size, num_workers=0)
    money_val_loader = DataLoader(validation, batch_size=batch_size, num_workers=0)
    print("Numero di batch", len(money_train_loader))

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_class(
        directory, version, model, money_train_loader, money_val_loader,
        resize, batch_size, exp_name, lr=lr, epochs=epochs)
    print("Time computing", f)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)

    hyperparametr = {"indexEpoch":epochs-1,"lr":lr, "momentum" : momentum, "batchSize":batch_size }
    contrastiveLoss = {"lossTrain": last_loss_train, "lossValid":last_loss_val}
    accuracy = {"accuracyTrain":last_acc_train , "accuracyValid":last_acc_val }
    time = {"training": f}

    writeJsonModelClass(directory,name,version, hyperparametr,resize,batch_size, contrastiveLoss, accuracy ,time)

    # Reload the trained model that train_class saved as "<exp_name>.pth".
    path = exp_name + ".pth"
    model_test = torch.load(path)
    
    """
Esempio n. 2
0
def train_model_margine_dynamik(directory, filename, version, exp_name, name,
                                model, lr, epochs, momentum, batch_size,
                                resize):
    """Train a siamese model with a dynamic margin and validate it.

    Builds the pair datasets, trains via `train_margine_dynamik`, stores
    the training summary, then reloads "<exp_name>.pth" and prints/records
    accuracy, precision, recall and mean F1 on the validation pairs.

    Note: `filename` is accepted for signature compatibility but unused.
    """
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    pair_money_test_loader = DataLoader(pair_test,
                                        batch_size=batch_size,
                                        num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    # training: returns model, training time, train/val loss, train/val accuracy
    createFolder(directory + "\\" + version)
    writeJsonModelInit1(directory, name, version)

    print("Training...")

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_margine_dynamik(
        directory,
        version,
        model,
        pair_money_train_loader,
        pair_money_val_loader,
        resize,
        batch_size,
        exp_name,
        lr=lr,
        epochs=epochs)

    print("Time computing", f)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)

    hyperparametr = {
        "indexEpoch": epochs - 1,
        "lr": lr,
        "momentum": momentum,
        "numSampleTrain": len(pair_train)
    }
    contrastiveLoss = {
        "lossTrain": last_loss_train,
        "lossValid": last_loss_val
    }
    accuracy = {"accuracyTrain": last_acc_train, "accuracyValid": last_acc_val}
    time = {"training": f}

    writeJsonModelClass(directory, name, version, hyperparametr, resize,
                        batch_size, contrastiveLoss, accuracy, time)

    # Reload the weights written by the training routine.
    namep = exp_name + ".pth"
    siamese_model = torch.load(namep)

    print("Testing on Validation set")

    timeVal, pair_prediction_val, pair_label_val = test_margine_dynamik(
        siamese_model, pair_money_val_loader)

    # Label convention here: 0 = similar pair, 1 = dissimilar pair.
    numSimilPredette = np.sum(pair_prediction_val == 0)
    print("Num Simili predette", numSimilPredette)
    numDissimilPredette = np.sum(pair_prediction_val == 1)
    print("Num Dissimil predette", numDissimilPredette)
    numSimilReali = np.sum(pair_label_val == 0)
    print("Num Simili Reali", numSimilReali)
    numDissimilReali = np.sum(pair_label_val == 1)
    print("Num Dissimil Reali", numDissimilReali)

    # calculate Accuracy
    print(pair_prediction_val[0:10])
    print(pair_label_val[0:10])
    accuracyVal = accuracy_score(pair_label_val, pair_prediction_val)
    print("Accuarcy di test: %0.4f" % accuracyVal)
    # calculate Precision
    precisionVal = precision_score(pair_label_val, pair_prediction_val)
    print("Precision di test: %0.4f" % precisionVal)
    # calculate Recall
    recallVal = recall_score(pair_label_val, pair_prediction_val)
    print("Recall di test: %0.4f" % recallVal)
    # calculate mean F1 score; skipped when precision or recall is exactly 0,
    # in which case the score is reported as 0.0.
    if recallVal != 0.0 and precisionVal != 0.0:
        scores_testing_val = f1_score(pair_label_val,
                                      pair_prediction_val,
                                      average=None)
        scores_testing_val = scores_testing_val.mean()
        print("mF1 score di testing: %0.4f" % scores_testing_val)
    else:
        scores_testing_val = 0.000
        print("mscoref1", scores_testing_val)

    # Persist the validation metrics.
    # NOTE(review): the path is built as directory + "modelTrained.json" with
    # no separator, while other routines use directory + "//" + "modelTrained.json";
    # confirm `directory` already ends with a separator here.
    metrics = [
        ("accuracy", "accuracyVal", accuracyVal),
        ("precision", "precisionVal", precisionVal),
        ("recall", "recallVal", recallVal),
        ("mf1_score", "f1_score_Val", scores_testing_val),
        ("time", "testVal", timeVal),
    ]
    for key, entry, value in metrics:
        addValueJsonModel(directory + "modelTrained.json", version, key,
                          entry, value)
Esempio n. 3
0
def train_model_margine_double(directory,
                               filename,
                               version,
                               exp_name,
                               name,
                               model,
                               lr,
                               epochs,
                               momentum,
                               batch_size,
                               resize,
                               decay=None,
                               margin1=None,
                               margin2=None,
                               modeLoss=None):
    """Train a siamese network with a double-margin contrastive loss.

    Builds the pair datasets, trains `model` via `train_siamese_margin_double`,
    and records hyperparameters, final losses, accuracies and training time
    in the model's JSON summary.

    Note: `filename` is accepted for signature compatibility but unused.
    """
    # directory es "Model-6"
    createFolder(directory)
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    pair_money_test_loader = DataLoader(pair_test,
                                        batch_size=batch_size,
                                        num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    siamese_money = model  # modello

    createFolder(directory + "\\" + version)
    createFolder(directory + "\\" + version + "\\" + "Metod2")

    writeJsonModelInit1(directory, name, version)

    print("Training...")

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_siamese_margin_double(
        directory,
        version,
        siamese_money,
        pair_money_train_loader,
        pair_money_val_loader,
        resize,
        batch_size,
        exp_name=exp_name,
        lr=lr,
        epochs=epochs,
        momentum=momentum,
        margin1=margin1,
        margin2=margin2,
        logdir='logs',
        decay=decay,
        modeLoss=modeLoss)

    print("Time computing", f)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)

    # Record "decay" only when it was actually used; insertion order mirrors
    # the original dict literals so the serialized JSON keeps its key order.
    hyperparametr = {"indexEpoch": epochs - 1, "lr": lr}
    if decay is not None:
        hyperparametr["decay"] = decay
    hyperparametr["momentum"] = momentum
    hyperparametr["numSampleTrain"] = len(pair_train)

    contrastiveLoss = {
        "lossTrain": last_loss_train,
        "lossValid": last_loss_val
    }
    accuracy = {"accuracyTrain": last_acc_train, "accuracyValid": last_acc_val}
    time = {"training": f}

    # BUG FIX: `resize` was previously passed twice to writeJsonModelClass;
    # every other call site in this file passes it once
    # (directory, name, version, hyperparametr, resize, batch_size, ...).
    writeJsonModelClass(directory, name, version, hyperparametr, resize,
                        batch_size, contrastiveLoss, accuracy, time)
Esempio n. 4
0
def continue_classificazione(directory,model,version,exp_name,name,lr, momentum,resize,batch_size, epoche_avanza):
    """Resume classification training for `epoche_avanza` more epochs.

    Restores the last epoch index, elapsed training time and per-epoch
    metric arrays from the version's JSON files, hands them to `train_class`
    via `dizionario` so the run continues where it stopped, then rewrites
    the summary JSON with the cumulative results.
    """
    createFolder(directory)
    createFolder(directory + "\\" + version)
    controlFileCSVBase()

    dataSetClass = DatasetClassi(resize)
    dataSetClass.controlNormalize()

    train = dataSetClass.dataset_train_norm
    validation = dataSetClass.dataset_valid_norm
    test = dataSetClass.dataset_test_norm

    money_train_loader = DataLoader(train, batch_size=batch_size, num_workers=0, shuffle=True)
    money_test_loader = DataLoader(test, batch_size=batch_size, num_workers=0)
    money_val_loader = DataLoader(validation, batch_size=batch_size, num_workers=0)

    # Defaults used when the JSON files do not contain the expected keys.
    # BUG FIX: tempoTrain was previously unbound (NameError below) when the
    # "time"/"training" entry was missing from modelTrained.json.
    epoche_fatte = 0
    tempoTrain = 0
    array_loss_train = []
    array_loss_valid = []
    array_accuracy_train = []
    array_accuracy_valid = []
    array_glb_train = []
    array_glb_valid = []

    percorso1 = directory + "//" + "modelTrained.json"
    with open(percorso1, "r") as file:
        data = json.load(file)

    obj = data.get(version)
    if obj is not None:
        para = obj.get("hyperparametr")
        if para is not None and para.get("indexEpoch") is not None:
            epoche_fatte = para["indexEpoch"]
        tempo = obj.get("time")
        if tempo is not None and tempo.get("training") is not None:
            tempoTrain = tempo["training"]

    percorso2 = directory + "//" + version + "//" + "value_arrays.json"
    with open(percorso2, "r") as file2:
        data2 = json.load(file2)

    # Restore each metric array when present (None-valued keys are ignored,
    # matching the original behavior).
    if data2.get("array_loss_train") is not None:
        array_loss_train = data2["array_loss_train"]
    if data2.get("array_loss_valid") is not None:
        array_loss_valid = data2["array_loss_valid"]
    if data2.get("array_accuracy_train") is not None:
        array_accuracy_train = data2["array_accuracy_train"]
    if data2.get("array_accuracy_valid") is not None:
        array_accuracy_valid = data2["array_accuracy_valid"]
    if data2.get("array_glb_train") is not None:
        array_glb_train = data2["array_glb_train"]
    if data2.get("array_glb_valid") is not None:
        array_glb_valid = data2["array_glb_valid"]

    print("Indice epoca gia fatta: ", epoche_fatte)
    print("Epoche avanza :", epoche_avanza)
    print("Array loss train ", array_loss_train)
    print("Array loss valid", array_loss_valid)

    print("Array accuracy train", array_accuracy_train)
    print("Array accuracy valid", array_accuracy_valid)

    print("Array glb train", array_glb_train)
    print("array glb valid ", array_glb_valid)

    # epoche_fatte is the *index* of the last completed epoch, hence the +1.
    epochs = epoche_fatte + epoche_avanza + 1
    dizionario_array = {"epoche_fatte": epoche_fatte, "epoche_avanza": epoche_avanza,
                        "l_train": array_loss_train, "l_valid": array_loss_valid,
                        "a_train": array_accuracy_train, "a_valid": array_accuracy_valid,
                        "g_train": array_glb_train, "g_valid": array_glb_valid,
                        "tempoTrain": tempoTrain}

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_class(
        directory, version, model, money_train_loader, money_val_loader,
        resize, batch_size, exp_name, lr=lr, epochs=epochs, dizionario=dizionario_array)

    print("Time computing", f)
    # Cumulative training time = previous run + this run.
    tempo1 = float(tempoTrain)
    tempo2 = float(f)
    f = str(tempo1 + tempo2)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)

    hyperparametr = {"indexEpoch": epochs - 1, "lr": lr, "momentum": momentum, "batchSize": batch_size}
    loss = {"lossTrain": last_loss_train, "lossValid": last_loss_val}
    accuracy = {"accuracyTrain": last_acc_train, "accuracyValid": last_acc_val}
    time = {"training": f}

    writeJsonModelClass(directory, name, version, hyperparametr, resize, batch_size, loss, accuracy, time)
    
         
Esempio n. 5
0
def continue_model_class( directory,filename,version, exp_name,name,model,path_dict, lr,epoche_avanza ,momentum,batch_size,resize,decay=None,margin=1.5,soglia=0.8, modeLoss="single"):
    """Resume siamese-margin training for `epoche_avanza` more epochs.

    Restores the last epoch index and per-epoch metric arrays from the
    version's JSON files and hands them to `train_siamese_margine` via
    `dizionario` so the run continues where it stopped; the summary JSON
    is then rewritten.

    Note: `filename` and `path_dict` (e.g. 'modello6_v3_dict.pth') are
    accepted for signature compatibility but not used in this body.
    """
    #exp_name='modello6_v3'
    #path_dict= 'modello6_v3_dict.pth'
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train, batch_size=batch_size, num_workers=0, shuffle=True)
    pair_money_test_loader = DataLoader(pair_test, batch_size=batch_size, num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation, batch_size=batch_size, num_workers=0)

    # Defaults used when the JSON files do not contain the expected keys.
    # BUG FIX: the original initialized array_acc_train/array_acc_valid but
    # the code below reads array_accuracy_train/array_accuracy_valid, which
    # raised NameError whenever those JSON keys were absent.
    epoche_fatte = 0
    array_loss_train = []
    array_loss_valid = []
    array_accuracy_train = []
    array_accuracy_valid = []
    array_glb_train = []
    array_glb_valid = []

    percorso1 = directory + "//" + "modelTrained.json"
    with open(percorso1, "r") as file:
        data = json.load(file)

    obj = data.get(version)
    if obj is not None:
        para = obj.get("hyperparametr")
        if para is not None and para.get("indexEpoch") is not None:
            epoche_fatte = para["indexEpoch"]

    percorso2 = directory + "//" + version + "//" + "value_arrays.json"
    with open(percorso2, "r") as file2:
        data2 = json.load(file2)

    # Restore each metric array when present (None-valued keys are ignored,
    # matching the original behavior).
    if data2.get("array_loss_train") is not None:
        array_loss_train = data2["array_loss_train"]
    if data2.get("array_loss_valid") is not None:
        array_loss_valid = data2["array_loss_valid"]
    if data2.get("array_accuracy_train") is not None:
        array_accuracy_train = data2["array_accuracy_train"]
    if data2.get("array_accuracy_valid") is not None:
        array_accuracy_valid = data2["array_accuracy_valid"]
    if data2.get("array_glb_train") is not None:
        array_glb_train = data2["array_glb_train"]
    if data2.get("array_glb_valid") is not None:
        array_glb_valid = data2["array_glb_valid"]

    print("Indice epoca gia fatta: ", epoche_fatte)
    print("Epoche avanza :", epoche_avanza)
    print("Array loss train ", array_loss_train)
    print("Array loss valid", array_loss_valid)

    print("Array accuracy train", array_accuracy_train)
    print("Array accuracy valid", array_accuracy_valid)

    print("Array glb train", array_glb_train)
    print("array glb valid ", array_glb_valid)

    # epoche_fatte is the *index* of the last completed epoch, hence the +1.
    epochs = epoche_fatte + epoche_avanza + 1
    dizionario_array = {"epoche_fatte": epoche_fatte, "epoche_avanza": epoche_avanza,
                        "l_train": array_loss_train, "l_valid": array_loss_valid,
                        "a_train": array_accuracy_train, "a_valid": array_accuracy_valid,
                        "g_train": array_glb_train, "g_valid": array_glb_valid}

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_siamese_margine(
        directory, version, model, pair_money_train_loader, pair_money_val_loader,
        resize, batch_size, exp_name=exp_name, lr=lr, epochs=epochs,
        momentum=momentum, margin=margin, soglia=soglia, logdir="logs",
        decay=decay, modeLoss=modeLoss, dizionario=dizionario_array)

    print("Time computing", f)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)

    # Record "decay" only when it was actually used; insertion order mirrors
    # the original dict literals so the serialized JSON keeps its key order.
    hyperparametr = {"indexEpoch": epochs - 1, "lr": lr}
    if decay is not None:
        hyperparametr["decay"] = decay
    hyperparametr["momentum"] = momentum
    hyperparametr["numSampleTrain"] = len(pair_train)

    contrastiveLoss = {"lossTrain": last_loss_train, "lossValid": last_loss_val}
    accuracy = {"accuracyTrain": last_acc_train, "accuracyValid": last_acc_val}
    time = {"training": f}

    writeJsonModelClass(directory, name, version, hyperparametr, resize, batch_size, contrastiveLoss, accuracy, time)
    
    """
Esempio n. 6
0
def continue_model_margine_double( directory,filename,version, exp_name,name,model, lr,epoche_avanza ,momentum,batch_size,resize,decay=None,margin1=0.8,margin2=1.2, modeLoss="double"):
    """Resume double-margin siamese training for `epoche_avanza` more epochs.

    Restores the last epoch index, training time, per-epoch metric arrays and
    the "Metod2" validation-performance arrays from the version's JSON files,
    hands them to `train_siamese_margin_double` via `dizionario` so the run
    continues where it stopped, then rewrites the summary JSON.

    Note: `filename` is accepted for signature compatibility but unused.
    """
    createFolder(directory + "\\" + version)
    createFolder(directory + "\\" + version + "\\" + "Metod2")
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train, batch_size=batch_size, num_workers=0, shuffle=True)
    pair_money_test_loader = DataLoader(pair_test, batch_size=batch_size, num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation, batch_size=batch_size, num_workers=0)

    # Defaults used when the JSON files do not contain the expected keys.
    # BUG FIX: the original initialized array_acc_train/array_acc_valid but
    # the code below reads array_accuracy_train/array_accuracy_valid; it also
    # left tempoTrain and all the Metod2 "_2" arrays unbound when their keys
    # were missing, which raised NameError at the dizionario_array build.
    epoche_fatte = 0
    tempoTrain = 0
    array_loss_train = []
    array_loss_valid = []
    array_accuracy_train = []
    array_accuracy_valid = []
    array_glb_train = []
    array_glb_valid = []
    array_acc_valid_2 = []
    array_f1_valid_2 = []
    array_recall_valid_2 = []
    array_precision_valid_2 = []
    array_tp_valid_2 = []
    array_fp_valid_2 = []
    array_glb_valid_2 = []

    percorso1 = directory + "//" + "modelTrained.json"
    with open(percorso1, "r") as file:
        data = json.load(file)

    obj = data.get(version)
    if obj is not None:
        para = obj.get("hyperparametr")
        if para is not None and para.get("indexEpoch") is not None:
            epoche_fatte = para["indexEpoch"]
        tempo = obj.get("time")
        if tempo is not None and tempo.get("training") is not None:
            tempoTrain = tempo["training"]

    percorso2 = directory + "//" + version + "//" + "value_arrays.json"
    with open(percorso2, "r") as file2:
        data2 = json.load(file2)

    # Restore each metric array when present (None-valued keys are ignored,
    # matching the original behavior).
    if data2.get("array_loss_train") is not None:
        array_loss_train = data2["array_loss_train"]
    if data2.get("array_loss_valid") is not None:
        array_loss_valid = data2["array_loss_valid"]
    if data2.get("array_accuracy_train") is not None:
        array_accuracy_train = data2["array_accuracy_train"]
    if data2.get("array_accuracy_valid") is not None:
        array_accuracy_valid = data2["array_accuracy_valid"]
    if data2.get("array_glb_train") is not None:
        array_glb_train = data2["array_glb_train"]
    if data2.get("array_glb_valid") is not None:
        array_glb_valid = data2["array_glb_valid"]

    # Per-epoch validation performance computed with the second method.
    percorso3 = directory + "//" + version + "//" + "Metod2" + "//" + "value_performance.json"
    with open(percorso3, "r") as file3:
        data3 = json.load(file3)

    if data3.get("array_acc_valid") is not None:
        array_acc_valid_2 = data3["array_acc_valid"]
    if data3.get("array_f1_valid") is not None:
        array_f1_valid_2 = data3["array_f1_valid"]
    if data3.get("array_recall_valid") is not None:
        array_recall_valid_2 = data3["array_recall_valid"]
    if data3.get("array_precision_valid") is not None:
        array_precision_valid_2 = data3["array_precision_valid"]
    if data3.get("array_tp_valid") is not None:
        array_tp_valid_2 = data3["array_tp_valid"]
    if data3.get("array_fp_valid") is not None:
        array_fp_valid_2 = data3["array_fp_valid"]
    if data3.get("gl_step_valid") is not None:
        array_glb_valid_2 = data3["gl_step_valid"]

    print("Indice epoca gia fatta: ", epoche_fatte)
    print("Epoche avanza :", epoche_avanza)
    print("Array loss train ", array_loss_train)
    print("Array loss valid", array_loss_valid)

    print("Array accuracy train", array_accuracy_train)
    print("Array accuracy valid", array_accuracy_valid)

    print("Array glb train", array_glb_train)
    print("array glb valid ", array_glb_valid)

    # epoche_fatte is the *index* of the last completed epoch, hence the +1.
    epochs = epoche_fatte + epoche_avanza + 1
    dizionario_array = {"epoche_fatte": epoche_fatte, "epoche_avanza": epoche_avanza,
                        "l_train": array_loss_train, "l_valid": array_loss_valid,
                        "a_train": array_accuracy_train, "a_valid": array_accuracy_valid,
                        "g_train": array_glb_train, "g_valid": array_glb_valid,
                        "tempoTrain": tempoTrain,
                        "array_acc_valid_2": array_acc_valid_2,
                        "array_f1_valid_2": array_f1_valid_2,
                        "array_recall_valid_2": array_recall_valid_2,
                        "array_precision_valid_2": array_precision_valid_2,
                        "array_tp_valid_2": array_tp_valid_2,
                        "array_fp_valid_2": array_fp_valid_2,
                        "array_glb_valid_2": array_glb_valid_2}

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_siamese_margin_double(
        directory, version, model, pair_money_train_loader, pair_money_val_loader,
        resize, batch_size, exp_name=exp_name, lr=lr, epochs=epochs,
        momentum=momentum, margin1=margin1, margin2=margin2, logdir="logs",
        decay=decay, modeLoss=modeLoss, dizionario=dizionario_array)

    print("Time computing", f)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)

    # Record "decay" only when it was actually used; insertion order mirrors
    # the original dict literals so the serialized JSON keeps its key order.
    hyperparametr = {"indexEpoch": epochs - 1, "lr": lr}
    if decay is not None:
        hyperparametr["decay"] = decay
    hyperparametr["momentum"] = momentum
    hyperparametr["numSampleTrain"] = len(pair_train)
    hyperparametr["margin1"] = margin1
    hyperparametr["margin2"] = margin2

    contrastiveLoss = {"lossTrain": last_loss_train, "lossValid": last_loss_val}
    accuracy = {"accuracyTrain": last_acc_train, "accuracyValid": last_acc_val}

    time = {"training": f}

    writeJsonModelClass(directory, name, version, hyperparametr, resize, batch_size, contrastiveLoss, accuracy, time)