# Example no. 1
def train_siamese_diff(directory,
                       version,
                       embedding_net,
                       train_loader,
                       valid_loader,
                       resize,
                       batch_size,
                       margin1,
                       margin2,
                       exp_name='model_1',
                       lr=0.01,
                       epochs=10,
                       momentum=0.99,
                       logdir='logs',
                       decay=None,
                       modeLoss=None,
                       dizionario_array=None):
    """Train a siamese network with a contrastive-style loss.

    The pair label is predicted from the argmax of |phi_i - phi_j| over the
    (assumed 2-dimensional) embedding difference — TODO confirm the embedding
    size is 2, as el[0]/el[1] are printed explicitly.

    Args:
        directory, version: components of the paths used for saved plots and
            per-epoch model checkpoints.
        embedding_net: the shared branch of the siamese network.
        train_loader, valid_loader: DataLoaders yielding
            (img1, img2, pair_label, label1, label2) batches.
        resize, batch_size: recorded in the JSON summary only.
        margin1, margin2: margins passed to ContrastiveLossDouble when
            modeLoss == "due".
        exp_name: tag for TensorBoard runs and checkpoint file names.
        lr, epochs, momentum, decay: SGD hyper-parameters.
        logdir: TensorBoard log directory.
        modeLoss: loss variant selector ("norm", "double", "soglia", "due",
            or None for the plain ContrastiveLoss).
        dizionario_array: optional dict of previous training state used to
            resume (accuracy/loss histories, global steps, last epoch index).

    Returns:
        (embedding_net, elapsed_time_str, last_loss_train, last_loss_val,
         last_acc_train, last_acc_val)
    """
    print("lr", lr)
    print("momentum", momentum)
    print("decay", decay)
    print("margin1", margin1)
    print("margine2", margin2)

    # Select the contrastive-loss variant.
    if modeLoss is not None:
        if modeLoss == "norm":
            print("Loss mode Norm margin = 0.5")
            criterion = ContrastiveLossNorm()
        elif modeLoss == "double":
            print("Loss mode Double margin m1= 0.3 , m2 =0.7")
            criterion = ContrastiveLossDouble()
        elif modeLoss == "soglia":
            print("Loss mode Soglia")
            criterion = ContrastiveLoss()
        elif modeLoss == "due":
            print("Loss mode due")
            criterion = ContrastiveLossDouble(margin1, margin2)
        else:
            # Fix: an unrecognized modeLoss used to leave `criterion`
            # undefined, raising NameError at criterion.to(device).
            print("Unknown modeLoss, falling back to ContrastiveLoss")
            criterion = ContrastiveLoss()
    else:
        print("Loss mode margine=2")
        criterion = ContrastiveLoss()

    if decay is not None:
        print("Weight_Decay", decay)
        optimizer = SGD(embedding_net.parameters(),
                        lr,
                        momentum=momentum,
                        weight_decay=decay)
    else:
        optimizer = SGD(embedding_net.parameters(), lr, momentum=momentum)

    # Running meters for loss and accuracy.
    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()

    # TensorBoard writer.
    writer = SummaryWriter(join(logdir, exp_name))
    # The loss is moved to the device too, since it holds a parameter (m).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    embedding_net.to(device)
    criterion.to(device)
    # Dictionary with the training and validation loaders.
    loader = {'train': train_loader, 'valid': valid_loader}
    last_loss_train = 0
    last_loss_val = 0
    last_acc_train = 0
    last_acc_val = 0

    array_accuracy_train = []
    array_accuracy_valid = []
    array_loss_train = []
    array_loss_valid = []
    array_glb_train = []
    array_glb_valid = []
    tempo = Timer()
    global_step = 0
    start_epoca = 0

    # Resume from a previous run if a state dictionary was supplied.
    if dizionario_array is not None:
        print("Inizializza")
        array_accuracy_train = dizionario_array["a_train"]
        array_accuracy_valid = dizionario_array["a_valid"]
        array_loss_train = dizionario_array["l_train"]
        array_loss_valid = dizionario_array["l_valid"]
        array_glb_train = dizionario_array["g_train"]
        array_glb_valid = dizionario_array["g_valid"]
        global_step = dizionario_array["g_valid"][-1]
        # Index of the epoch to start from.
        start_epoca = dizionario_array["epoche_fatte"] + 1

    print("global step", global_step)
    print("a_acc_train", array_accuracy_train)
    print("a_acc_valid", array_accuracy_valid)
    print("loss_train", array_loss_train)
    print("loss_valid", array_loss_valid)
    print("glb_train", array_glb_train)
    print("glb_valid", array_glb_valid)
    print("epoca_start_indice ", start_epoca)

    start = timer()

    print("Num epoche", epochs)

    for e in range(start_epoca, epochs):
        print("Epoca ", e)

        array_total_0 = []
        array_total_1 = []
        # Alternate between the two phases: train and validation.
        for mode in ['train', 'valid']:
            print("Mode ", mode)
            loss_meter.reset()
            acc_meter.reset()
            embedding_net.train() if mode == 'train' else embedding_net.eval()
            # Gradients are enabled only during training.
            with torch.set_grad_enabled(mode == 'train'):

                for i, batch in enumerate(loader[mode]):

                    distance_1 = []
                    distance_0 = []

                    # batch = (img1, img2, pair_label, label1, label2)
                    I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
                    # The siamese forward is simply the embedding net applied
                    # to both inputs.
                    phi_i = embedding_net(I_i)  # img 1
                    phi_j = embedding_net(I_j)  # img 2

                    print("Output train img1", phi_i.size())
                    print("Output train img2", phi_j.size())
                    print("Etichetta reale", l_ij)

                    # Compute the loss.
                    l = criterion(phi_i, phi_j, l_ij)

                    # global_step counts the number of samples seen so far.
                    n = I_i.shape[0]  # number of elements in the batch
                    print("Num elemnti nel batch ", n)
                    global_step += n

                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                    dist = F.pairwise_distance(phi_i, phi_j)
                    dist = dist.detach().cpu()
                    dist = dist.tolist()
                    print("DISTANZE ", dist)

                    # Fix: use the tensors already on `device` instead of
                    # forcing .cuda(), which crashed on CPU-only machines.
                    res = torch.abs(phi_i - phi_j)
                    res = res.detach().cpu()

                    labs = l_ij.to('cpu')
                    label = []
                    lab_batch_predette = []
                    # `idx` avoids shadowing the batch index `i` above.
                    for idx, el in enumerate(res):
                        print("Tipo", type(el))
                        print("DIm", el.size())
                        print("posiz 0", el[0])
                        print("posizione 1", el[1])
                        # Predicted class = argmax over the components.
                        result, indice = torch.max(el, 0)
                        indice = indice.item()
                        print("PREDETTA di un smaple", indice)
                        labelv = l_ij[idx].to('cpu')
                        labelv = labelv.item()

                        print(" REALE di un sample", labelv)

                        if labelv == indice:
                            print("Corretta R - P", labelv, indice)

                        else:
                            print("Scorretta R - P", labelv, indice)

                        lab_batch_predette.append(indice)

                    label.extend(list(labs.numpy()))
                    print("Predette", lab_batch_predette)
                    print("Reali", label)

                    acc = accuracy_score(np.array(label),
                                         np.array(lab_batch_predette))
                    n = batch[0].shape[0]  # number of elements in the batch
                    loss_meter.add(l.item(), n)
                    acc_meter.add(acc, n)

                    if mode == 'train':

                        writer.add_scalar('loss/train',
                                          loss_meter.value(),
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train',
                                          acc_meter.value(),
                                          global_step=global_step)

                        # Collect the pairwise distances per ground-truth
                        # class for the distribution plot.
                        distance_1 = [
                            distance
                            for distance, label in zip(dist, labs.numpy())
                            if label == 1
                        ]
                        distance_0 = [
                            distance
                            for distance, label in zip(dist, labs.numpy())
                            if label == 0
                        ]
                        if len(distance_0) != 0:
                            array_total_0.extend(distance_0)
                        if len(distance_1) != 0:
                            array_total_1.extend(distance_1)

            if mode == 'train':
                global_step_train = global_step
                last_loss_train = loss_meter.value()
                last_acc_train = acc_meter.value()

                array_accuracy_train.append(acc_meter.value())
                array_loss_train.append(loss_meter.value())
                array_glb_train.append(global_step)
            else:
                global_step_val = global_step
                last_loss_val = loss_meter.value()
                last_acc_val = acc_meter.value()

                array_accuracy_valid.append(acc_meter.value())
                array_loss_valid.append(loss_meter.value())
                array_glb_valid.append(global_step)

            writer.add_scalar('loss/' + mode,
                              loss_meter.value(),
                              global_step=global_step)
            # Fix: the accuracy was logged under 'loss/<mode>', clobbering
            # the loss curve in TensorBoard.
            writer.add_scalar('accuracy/' + mode,
                              acc_meter.value(),
                              global_step=global_step)

        print("Loss TRAIN", array_loss_train)
        print("Losss VALID", array_loss_valid)
        print("Accuracy TRAIN", array_accuracy_train)
        print("Accuracy VALID", array_accuracy_valid)
        print("dim acc train", len(array_accuracy_train))
        print("dim acc valid", len(array_accuracy_valid))
        # Accuracy curves.
        figure = plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_accuracy_train)
        plt.plot(array_glb_valid, array_accuracy_valid)
        plt.xlabel('samples')
        plt.ylabel('accuracy')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotAccuracy_' + version + '.png')
        plt.clf()
        plt.close(figure)

        # Loss curves.
        figure = plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_loss_train)
        plt.plot(array_glb_valid, array_loss_valid)
        plt.xlabel('samples')
        plt.ylabel('loss')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotLoss_' + version + '.png')
        plt.clf()
        plt.close(figure)

        # Add an embedding of the last batch; TensorBoard does the rest.
        writer.add_embedding(phi_i,
                             batch[3],
                             I_i,
                             global_step=global_step,
                             tag=exp_name + '_embedding')
        # Keep only the latest model, overwriting old ones.
        torch.save(embedding_net, '%s.pth' % exp_name)
        # Also keep the model at this specific epoch.
        torch.save(
            embedding_net, directory + "//" + version + "//" + '%s.pth' %
            (exp_name + "_" + str(e)))

        # Save the model state as a dictionary as well.
        net_save(epochs, embedding_net, optimizer, last_loss_train,
                 last_loss_val, last_acc_train, last_acc_val,
                 global_step_train, global_step_val,
                 '%s.pth' % (exp_name + "_dict"))
        # After every epoch, plot the distance distribution.
        print("lungezza array_total_0 ", len(array_total_0))
        print("lunghezza array_total_1", len(array_total_1))

        saveArray(directory, version, array_loss_train, array_loss_valid,
                  array_accuracy_train, array_accuracy_valid, array_glb_train,
                  array_glb_valid)

        saveinFileJson(start, directory, version, resize, batch_size, e, lr,
                       momentum, len(train_loader), array_accuracy_train[-1],
                       array_accuracy_valid[-1], array_loss_train[-1],
                       array_loss_valid[-1])

        draw_distribution(directory, version, e, array_total_0, array_total_1)

    f = '{:.7f}'.format(tempo.stop())

    return embedding_net, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val
# Example no. 2
def train_siamese_margin_double(directory, version, embedding_net,
                                train_loader, valid_loader, resize,
                                batch_size, exp_name='model', lr=0.01,
                                epochs=10, momentum=0.99, margin1=0.8,
                                margin2=1.3, logdir='logs', decay=None,
                                modeLoss=None, dizionario=None):
    """Train a siamese network with a double-margin contrastive loss.

    Pairs with distance <= margin1 are predicted as class 0, pairs with
    distance >= margin2 as class 1; in between, the nearest margin decides.

    Args:
        directory, version: components of the paths for plots/checkpoints.
        embedding_net: the shared branch of the siamese network.
        train_loader, valid_loader: DataLoaders yielding
            (img1, img2, pair_label, label1, label2) batches.
        resize, batch_size: recorded in the JSON summary only.
        exp_name: tag for TensorBoard runs and checkpoint file names.
        lr, epochs, momentum, decay: SGD hyper-parameters.
        margin1, margin2: the two decision/loss margins.
        logdir: TensorBoard log directory.
        modeLoss: "double" selects ContrastiveLossDouble; any other value
            now also falls back to ContrastiveLossDouble (see fix below).
        dizionario: optional dict of previous training state used to resume.

    Returns:
        (embedding_net, elapsed_time, last_loss_train, last_loss_val,
         last_acc_train, last_acc_val)
    """
    print("lr", lr)
    print("momentum", momentum)
    print("decay", decay)
    print("margin1", margin1)
    print("margin2", margin2)

    # Select the contrastive loss.
    if modeLoss is not None and modeLoss == "double":
        print("Loss mode Double margin ")
        criterion = ContrastiveLossDouble(margin1, margin2)
    else:
        # Fix: `criterion` used to be defined only when modeLoss == "double";
        # any other value (including the default None) crashed with a
        # NameError at criterion.to(device). Default to the double-margin
        # loss, which is what this trainer is built around.
        criterion = ContrastiveLossDouble(margin1, margin2)

    if decay is not None:
        print("Weight_Decay", decay)
        optimizer = SGD(embedding_net.parameters(), lr, momentum=momentum,
                        weight_decay=decay)
    else:
        optimizer = SGD(embedding_net.parameters(), lr, momentum=momentum)

    # Running meters for loss and accuracy.
    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()
    # TensorBoard writer.
    writer = SummaryWriter(join(logdir, exp_name))
    # The loss is moved to the device too, since it holds a parameter (m).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    embedding_net.to(device)
    criterion.to(device)
    # Dictionary with the training and validation loaders.
    loader = {
        'train': train_loader,
        'valid': valid_loader
    }

    last_loss_train = 0
    last_loss_val = 0
    last_acc_train = 0
    last_acc_val = 0

    array_accuracy_train = []
    array_accuracy_valid = []
    accuracy_metodo2_validation = []

    array_f1_valid = []
    array_recall_valid = []
    array_precision_valid = []
    tp_valid = []
    fp_valid = []

    array_loss_train = []
    array_loss_valid = []
    array_glb_train = []
    array_glb_valid = []
    tempo = Timer()
    global_step = 0
    start_epoca = 0
    tempoTrain = 0

    # Resume from a previous run if a state dictionary was supplied.
    if dizionario is not None:
        print("Inizializza")
        array_accuracy_train = dizionario["a_train"]
        array_accuracy_valid = dizionario["a_valid"]
        accuracy_metodo2_validation = dizionario["array_acc_valid_2"]
        array_loss_train = dizionario["l_train"]
        array_loss_valid = dizionario["l_valid"]
        array_glb_train = dizionario["g_train"]
        array_glb_valid = dizionario["g_valid"]
        global_step = dizionario["g_valid"][-1]
        # Index of the epoch to start from.
        start_epoca = dizionario["epoche_fatte"] + 1
        tempoTrain = dizionario["tempoTrain"]
        array_f1_valid = dizionario["array_f1_valid_2"]
        array_recall_valid = dizionario["array_recall_valid_2"]
        array_precision_valid = dizionario["array_precision_valid_2"]
        tp_valid = dizionario["array_tp_valid_2"]
        fp_valid = dizionario["array_fp_valid_2"]

    print("global step", global_step)
    print("a_acc_train", array_accuracy_train)
    print("a_acc_valid", array_accuracy_valid)
    print("loss_train", array_loss_train)
    print("loss_valid", array_loss_valid)
    print("glb_train", array_glb_train)
    print("glb_valid", array_glb_valid)
    print("%d, %d, %d, %d, %d, %d" % (len(array_accuracy_train),
                                      len(array_accuracy_valid),
                                      len(array_loss_train),
                                      len(array_loss_valid),
                                      len(array_glb_train),
                                      len(array_glb_valid)))
    print("epoca_start_indice ", start_epoca)

    # Offset the timer by the time already spent in previous runs.
    start = timer() + tempoTrain

    print("Num epoche", epochs)

    for e in range(start_epoca, epochs):
        print("Epoca= ", e)

        array_total_0 = []
        array_total_1 = []

        distanze_validation = []
        label_reali_validation = []

        # Alternate between the two phases: train and validation.
        for mode in ['train', 'valid']:
            loss_meter.reset()
            acc_meter.reset()
            embedding_net.train() if mode == 'train' else embedding_net.eval()
            # Gradients are enabled only during training.
            with torch.set_grad_enabled(mode == 'train'):
                for i, batch in enumerate(loader[mode]):
                    distance_1 = []
                    distance_0 = []

                    I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
                    # The siamese forward is simply the embedding net applied
                    # to both inputs.
                    phi_i = embedding_net(I_i)
                    phi_j = embedding_net(I_j)

                    # Compute the loss.
                    l = criterion(phi_i, phi_j, l_ij)

                    # global_step counts the number of samples seen so far.
                    n = I_i.shape[0]  # number of elements in the batch
                    global_step += n

                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                    dist = F.pairwise_distance(phi_i, phi_j)
                    dist = dist.detach().cpu()
                    dist = dist.tolist()

                    pred = []
                    label = []
                    labs = l_ij.to('cpu')
                    print("epoca %d, mode %s" % (e, mode))

                    # Keep validation distances/labels for the "method 2"
                    # evaluation below.
                    if mode == 'valid':
                        distanze_validation.extend(dist)
                        label_reali_validation.extend(list(labs.numpy()))

                    # Threshold rule: <= margin1 -> class 0, >= margin2 ->
                    # class 1, otherwise assign the class of the nearest
                    # margin.
                    for j in dist:
                        if j <= margin1:
                            pred.append(0)
                        elif j >= margin2:
                            pred.append(1)
                        else:
                            if abs(j - margin1) <= abs(j - margin2):
                                pred.append(0)
                            else:
                                pred.append(1)

                    label.extend(list(labs.numpy()))

                    acc = accuracy_score(np.array(label), np.array(pred))
                    n = batch[0].shape[0]  # number of elements in the batch
                    loss_meter.add(l.item(), n)
                    acc_meter.add(acc, n)

                    if mode == 'train':
                        writer.add_scalar('loss/train', loss_meter.value(),
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train', acc_meter.value(),
                                          global_step=global_step)

                        # Collect the pairwise distances per ground-truth
                        # class for the distribution plot.
                        distance_1 = [distance for distance, label
                                      in zip(dist, labs.numpy())
                                      if label == 1]
                        distance_0 = [distance for distance, label
                                      in zip(dist, labs.numpy())
                                      if label == 0]
                        if len(distance_0) != 0:
                            array_total_0.extend(distance_0)
                        if len(distance_1) != 0:
                            array_total_1.extend(distance_1)

            if mode == 'train':
                global_step_train = global_step
                last_loss_train = loss_meter.value()
                last_acc_train = acc_meter.value()

                array_accuracy_train.append(acc_meter.value())
                array_loss_train.append(loss_meter.value())
                array_glb_train.append(global_step)
            else:
                global_step_val = global_step
                last_loss_val = loss_meter.value()
                last_acc_val = acc_meter.value()

                array_accuracy_valid.append(acc_meter.value())
                array_loss_valid.append(loss_meter.value())
                array_glb_valid.append(global_step)

            writer.add_scalar('loss/' + mode, loss_meter.value(),
                              global_step=global_step)
            # Fix: the accuracy was logged under 'loss/<mode>', clobbering
            # the loss curve in TensorBoard.
            writer.add_scalar('accuracy/' + mode, acc_meter.value(),
                              global_step=global_step)

        # Add an embedding of the last batch; TensorBoard does the rest.
        writer.add_embedding(phi_i, batch[3], I_i, global_step=global_step,
                             tag=exp_name + '_embedding')
        # Keep only the latest model, overwriting old ones.
        torch.save(embedding_net, '%s.pth' % exp_name)
        # Also keep the model at this specific epoch.
        torch.save(embedding_net, directory + "//" + version + "//" +
                   '%s.pth' % (exp_name + "_" + str(e)))

        # Save the model state as a dictionary as well.
        net_save(epochs, embedding_net, optimizer, last_loss_train,
                 last_loss_val, last_acc_train, last_acc_val,
                 global_step_train, global_step_val,
                 '%s.pth' % (exp_name + "_dict"))
        # After every epoch, plot the distance distribution.
        print("lungezza array_total_0 ", len(array_total_0))
        print("lunghezza array_total_1", len(array_total_1))

        saveArray(directory, version, array_loss_train, array_loss_valid,
                  array_accuracy_train, array_accuracy_valid,
                  array_glb_train, array_glb_valid)

        saveinFileJson(start, directory, version, resize, batch_size, e, lr,
                       momentum, decay, len(train_loader),
                       array_accuracy_train[-1], array_accuracy_valid[-1],
                       array_loss_train[-1], array_loss_valid[-1],
                       margin1, margin2)

        draw_distribution(directory, version, e, array_total_0, array_total_1)

        # "Method 2" evaluation on the collected validation distances.
        accuracy_metodo2, f1, recall, precision, tp, fp = \
            calculate_pred_label_metod2(directory, version, e, array_total_0,
                                        array_total_1, array_glb_valid,
                                        label_reali_validation,
                                        distanze_validation,
                                        accuracy_metodo2_validation)
        array_f1_valid.append(f1)
        array_recall_valid.append(recall)
        array_precision_valid.append(precision)
        fp_valid.append(fp)
        tp_valid.append(tp)
        accuracy_metodo2_validation = accuracy_metodo2
        print("accuracy_metodo2_validation: ",
              len(accuracy_metodo2_validation))
        saveArray_metod2(directory, version, accuracy_metodo2_validation,
                         array_f1_valid, array_recall_valid,
                         array_precision_valid, tp_valid, fp_valid,
                         array_glb_valid)

        print("dim acc train", len(array_accuracy_train))
        print("dim acc valid", len(array_accuracy_valid))
        # Accuracy curves.
        figure = plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_accuracy_train)
        plt.plot(array_glb_valid, array_accuracy_valid)
        plt.xlabel('samples')
        plt.ylabel('accuracy')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotAccuracy_' + version + '.png')
        plt.clf()
        plt.close(figure)

        # Loss curves.
        figure = plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_loss_train)
        plt.plot(array_glb_valid, array_loss_valid)
        plt.xlabel('samples')
        plt.ylabel('loss')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotLoss_' + version + '.png')
        plt.clf()
        plt.close(figure)

    # Total elapsed time, including time from resumed runs.
    f = (tempo.stop()) + tempoTrain

    return embedding_net, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val
# Example no. 3
def train_siamese(embedding_net,
                  train_loader,
                  valid_loader,
                  exp_name='model_1',
                  lr=0.01,
                  epochs=10,
                  momentum=0.99,
                  margin=2,
                  logdir='logs',
                  modeLoss=None):
    """Train a siamese network with a contrastive loss, predicting pair
    labels by thresholding the pairwise distance (mode-dependent helper).

    Args:
        embedding_net: the shared branch of the siamese network.
        train_loader, valid_loader: DataLoaders yielding
            (img1, img2, pair_label, label1, label2) batches.
        exp_name: tag for TensorBoard runs and the saved state-dict name.
        lr, epochs, momentum: SGD hyper-parameters.
        margin: unused here beyond the signature — the thresholds passed to
            the modeLoss* helpers are hard-coded; kept for compatibility.
        logdir: TensorBoard log directory.
        modeLoss: loss/threshold variant ("norm", "double", "soglia",
            or None for the plain ContrastiveLoss + modeLossTrad).

    Returns:
        (embedding_net, elapsed_time_str, array_loss_train, array_loss_valid,
         array_sample_train, array_sample_valid, array_acc_train,
         array_acc_valid, labels_train, prediction_train, labels_val,
         prediction_val)
    """
    # Select the contrastive-loss variant.
    if modeLoss is not None:
        if modeLoss == "norm":
            print("Loss mode Norm margin = 0.5")
            criterion = ContrastiveLossNorm()
        elif modeLoss == "double":
            print("Loss mode Norm & Double margin m1= 0.3 , m2 =0.7")
            criterion = ContrastiveLossDouble()
        elif modeLoss == "soglia":
            print("Loss mode Soglia")
            criterion = ContrastiveLoss()
        else:
            # Fix: an unrecognized modeLoss used to leave `criterion`
            # undefined, raising NameError at criterion.to(device).
            print("Unknown modeLoss, falling back to ContrastiveLoss")
            criterion = ContrastiveLoss()
    else:
        print("Loss mode margine=2")
        criterion = ContrastiveLoss()

    optimizer = SGD(embedding_net.parameters(), lr, momentum=momentum)

    # History arrays and last-batch predictions/labels (also returned).
    array_loss_train = []
    array_loss_valid = []
    array_sample_train = []
    array_sample_valid = []
    array_acc_valid = []
    array_acc_train = []
    prediction_train = []
    labels_train = []
    prediction_val = []
    labels_val = []

    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()
    # TensorBoard writer.
    writer = SummaryWriter(join(logdir, exp_name))
    # The loss is moved to the device too, since it holds a parameter (m).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    embedding_net.to(device)
    criterion.to(device)
    # Dictionary with the training and validation loaders.
    loader = {'train': train_loader, 'valid': valid_loader}
    global_step = 0
    lossTrain = 0
    lossValid = 0
    timer = Timer()
    for e in range(epochs):
        print("Epoca ", e)
        # Alternate between the two phases: train and validation.
        for mode in ['train', 'valid']:
            print("Mode ", mode)
            loss_meter.reset()
            acc_meter.reset()
            embedding_net.train() if mode == 'train' else embedding_net.eval()
            # Gradients are enabled only during training.
            with torch.set_grad_enabled(mode == 'train'):

                for i, batch in enumerate(loader[mode]):
                    # batch = (img1, img2, pair_label, label1, label2)
                    I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
                    # The siamese forward is simply the embedding net applied
                    # to both inputs.
                    phi_i = embedding_net(I_i)  # img 1
                    phi_j = embedding_net(I_j)  # img 2
                    print("Etichetta reale", l_ij)

                    # Compute the loss.
                    l = criterion(phi_i, phi_j, l_ij)

                    prediction_train = []
                    labels_train = []
                    prediction_val = []
                    labels_val = []

                    d = F.pairwise_distance(phi_i.to('cpu'), phi_j.to('cpu'))
                    print(type(d))
                    print(d.size())

                    labs = l_ij.to('cpu')
                    # Turn distances into predicted labels with the
                    # mode-specific (hard-coded) thresholds.
                    if modeLoss is not None:
                        if modeLoss == "double":
                            listaEtichette = modeLossDouble(0.3, 0.7, d)
                        elif modeLoss == "norm":
                            listaEtichette = modeLossNorm(0.5, d)
                        elif modeLoss == "soglia":
                            listaEtichette = modeLossSoglia(0.5, d)
                        else:
                            # Fix: unknown modeLoss left `listaEtichette`
                            # undefined; fall back to the default rule.
                            listaEtichette = modeLossTrad(0.8, d)
                    else:
                        listaEtichette = modeLossTrad(0.8, d)

                    if mode == 'train':
                        prediction_train = listaEtichette
                    else:
                        prediction_val = listaEtichette

                    # global_step counts the number of samples seen so far.
                    n = I_i.shape[0]  # number of elements in the batch
                    global_step += n

                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                    n = batch[0].shape[0]  # number of elements in the batch

                    valore = l.item()
                    loss_meter.add(valore, n)
                    print(valore, n)

                    if mode == 'train':
                        labels_train = labs.numpy()
                        print("Lunghezza predette TRAIN ",
                              len(prediction_train))
                        print("Lunghezza vere TRAIN ", len(labels_train))
                        acc = accuracy_score(np.array(labels_train),
                                             np.array(prediction_train))
                        acc_meter.add(acc, n)

                    else:
                        labels_val = labs.numpy()
                        print("Lunghezza predette VALID ", len(prediction_val))
                        print("Lunghezza vere VALID ", len(labels_val))
                        acc = accuracy_score(np.array(labels_val),
                                             np.array(prediction_val))
                        acc_meter.add(acc, n)

                    if mode == 'train':
                        l_m_v = loss_meter.value()
                        print(l_m_v)
                        writer.add_scalar('loss/train',
                                          l_m_v,
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train',
                                          acc_meter.value(),
                                          global_step=global_step)

                    if mode == 'train':
                        # Fix: lossTrain was never assigned (the assignment
                        # was commented out), so net_save always stored 0
                        # for the training loss.
                        lossTrain = loss_meter.value()
                        global_step_train = global_step
                        array_loss_train.append(l_m_v)
                        array_acc_train.append(acc_meter.value())
                        array_sample_train.append(global_step_train)
                        print("TRAIN- Epoca", e)
                        print("GLOBAL STEP TRAIN", global_step_train)
                        print("LOSS TRAIN", l_m_v)
                        print("ACC TRAIN", acc_meter.value())

                    else:

                        lossValid = loss_meter.value()
                        global_step_val = global_step
                        array_loss_valid.append(lossValid)
                        array_acc_valid.append(acc_meter.value())
                        array_sample_valid.append(global_step_val)
                        print("VALID- Epoca", e)
                        print("GLOBAL STEP VALID", global_step_val)
                        print("LOSS VALID", lossValid)
                        print("ACC VALID", acc_meter.value())

            writer.add_scalar('loss/' + mode,
                              loss_meter.value(),
                              global_step=global_step)
            writer.add_scalar('accuracy/' + mode,
                              acc_meter.value(),
                              global_step=global_step)

        # To monitor training qualitatively, log the embedding of the last
        # batch at the end of every epoch; TensorBoard does the rest.
        writer.add_embedding(phi_i,
                             batch[3],
                             I_i,
                             global_step=global_step,
                             tag=exp_name + '_embedding')

        # Save the model state (as a dictionary), overwriting old files.
        net_save(epochs, embedding_net.state_dict(), optimizer, lossTrain,
                 lossValid, array_acc_train[-1], array_acc_valid[-1],
                 global_step_train, global_step_val, '%s.pth' % exp_name)
    f = '{:.7f}'.format(timer.stop())

    return embedding_net, f, array_loss_train, array_loss_valid, array_sample_train, array_sample_valid, array_acc_train, array_acc_valid, labels_train, prediction_train, labels_val, prediction_val
# Example no. 4
def train_continue(directory, version, path, exp_name, name, model, lr, epochs,
                   momentum, batch_size, resize, margin, logdir):
    """Resume contrastive-loss training of a siamese network from a checkpoint.

    Loads the model and optimizer state saved by ``net_save`` from ``path``,
    rebuilds the money-pair train/validation loaders, then trains for
    ``epochs`` more epochs, logging losses/accuracies to TensorBoard and
    overwriting ``<exp_name>.pth`` at the end of every epoch.

    Parameters
    ----------
    directory, version : experiment bookkeeping values (``version`` is unused
        here; ``directory`` is only self-assigned).
    path : checkpoint file to resume from.
    exp_name : run name for the TensorBoard log dir and the saved model file.
    name : unused parameter, kept for interface compatibility.
    model : the siamese embedding network to reload and train.
    lr, epochs, momentum, batch_size : SGD/training hyperparameters.
    resize : image resize value forwarded to ``DataSetPairCreate``.
    margin : contrastive-loss margin, also reused as the distance threshold
        when turning pairwise distances into similar/dissimilar predictions.
    logdir : root directory for TensorBoard logs.

    Returns
    -------
    tuple -- (model, formatted elapsed time, train/valid loss histories,
    train/valid sample counters, train/valid accuracy histories, and the
    accumulated label/prediction lists for both splits).
    """
    # define the contrastive loss

    print("Continue model")
    directory = directory  # NOTE(review): self-assignment, has no effect
    resize = resize  # NOTE(review): self-assignment, has no effect
    device = "cuda" if torch.cuda.is_available() else "cpu"
    siamese_reload = model
    siamese_reload.to(device)
    checkpoint = torch.load(path)

    siamese_reload.load_state_dict(checkpoint['model_state_dict'])
    #optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    lossTrain = checkpoint['lossTrain']
    lossValid = checkpoint['lossValid']

    print('lossTrain', lossTrain)
    print('lossValid', lossValid)
    global_step_train = checkpoint['global_step_train']
    global_step_val = checkpoint['global_step_valid']

    accTrain = checkpoint['accTrain']
    accValid = checkpoint['accValid']
    print('accTrain', accTrain)
    print('accValid', accValid)

    # NOTE(review): print() does not %-format its first argument; the template
    # string and the values are printed one after another as-is.
    print(
        "Epoca %s , lossTrain %s , lossValid ,accTarin, accValid, global_step_train %s , global_step_val %s",
        epoch, lossTrain, lossValid, accTrain, accValid, global_step_train,
        global_step_val)

    # the state dict is loaded a second time here only to print the result
    print(siamese_reload.load_state_dict(checkpoint['model_state_dict']))
    #model(torch.zeros(16,3,28,28)).shape

    # a dictionary with all model parameters is available via state_dict()
    state_dict = siamese_reload.state_dict()
    print(state_dict.keys())

    # Print model's state_dict
    print("Model's state_dict:")
    for param_tensor in siamese_reload.state_dict():
        print(param_tensor, "\t",
              siamese_reload.state_dict()[param_tensor].size())

    controlFileCSV()
    #controlFileCSVPair()
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    #pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    #pair_money_test_loader = DataLoader(pair_test, batch_size=1024, num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    criterion = ContrastiveLoss(margin)
    optimizer = SGD(siamese_reload.parameters(), lr, momentum=momentum)
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    #meters
    array_loss_train = []
    array_loss_valid = []
    array_sample_train = []
    array_sample_valid = []
    array_acc_valid = []
    array_acc_train = []
    prediction_train = []
    labels_train = []
    prediction_val = []
    labels_val = []

    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()
    #writer
    writer = SummaryWriter(join(logdir, exp_name))

    criterion.to(
        device
    )  # the loss must also be moved to the device since it holds a parameter (m)
    # dictionary holding the training and validation loaders
    loader = {'train': pair_money_train_loader, 'valid': pair_money_val_loader}
    #global_step_train = global_step_train
    #gloabal_step_val = global_step_val

    #lossTrain = lossTrain
    #lossValid = lossValid
    # NOTE(review): this local shadows any module-level `timer` callable
    timer = Timer()
    # resume the global sample counter from the validation checkpoint value
    global_step = global_step_val

    for e in range(epochs):
        print("Epoca ", e)
        # alternate between the two modes: train and valid
        # NOTE(review): loss_meter/acc_meter are never reset, so train and
        # valid statistics accumulate into the same running averages.
        for mode in ['train', 'valid']:
            """
            if mode =='train':
                loss_meter.inizializza(lossTrain, global_step_train)
                acc_meter.inizializza(accTrain, global_step_train)
                global_step=global_step_train
            else:
                loss_meter.inizializza(lossValid, global_step_val)
                acc_meter.inizializza(accValid, global_step_val)
                global_step = global_step_val
              """

            siamese_reload.train() if mode == 'train' else siamese_reload.eval(
            )
            with torch.set_grad_enabled(
                    mode == 'train'):  # enable gradients only in training

                for i, batch in enumerate(loader[mode]):
                    I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
                    #img1, img2, label12, label1, label2
                    # the siamese forward pass is simple:
                    # run the embedding net on the two inputs
                    phi_i = siamese_reload(I_i)  #img 1
                    phi_j = siamese_reload(I_j)  #img2

                    # compute the loss
                    l = criterion(phi_i, phi_j, l_ij)

                    d = F.pairwise_distance(phi_i.to('cpu'), phi_j.to('cpu'))
                    labs = l_ij.to('cpu')
                    #print(len(labs))
                    tensor = torch.clamp(
                        margin - d, min=0
                    )  # element-wise max(margin - d, 0); zero means the pair is dissimilar
                    #print("max",type(tensor))
                    #print("size max tensor ",tensor.size())
                    #print("tentor 1", tensor)

                    # NOTE(review): the clamp output is bounded above by
                    # `margin`, so whenever margin <= 2 every element satisfies
                    # el <= 2 and ALL pairs are predicted similar -- confirm
                    # this threshold is intended.
                    for el in tensor:
                        if el <= 2:  # SIMILAR
                            if mode == 'train':
                                prediction_train.append(0)
                            else:
                                prediction_val.append(0)
                        else:  # DISSIMILAR
                            if mode == 'train':

                                prediction_train.append(1)
                            else:
                                prediction_val.append(1)
                    """
                    if mode=='train':
                        array_loss_train.append(l.item())
                    else:
                        array_loss_valid.append(l.item())
                    """
                    # update the global_step:
                    # it holds the number of samples seen during training
                    n = I_i.shape[0]  # number of elements in the batch
                    global_step += n

                    if mode == 'train':
                        labels_train.extend(list(labs.numpy()))
                        print("Lunghezza predette TRAIN ",
                              len(prediction_train))
                        print("Lunghezza vere TRAIN ", len(labels_train))
                        # accuracy over ALL predictions accumulated so far,
                        # not only the current batch
                        acc = accuracy_score(np.array(labels_train),
                                             np.array(prediction_train))
                        acc_meter.add(acc, n)

                    else:
                        labels_val.extend(list(labs.numpy()))
                        print("Lunghezza predette VALID ", len(prediction_val))
                        print("Lunghezza vere VALID ", len(labels_val))
                        acc = accuracy_score(np.array(labels_val),
                                             np.array(prediction_val))
                        acc_meter.add(acc, n)

                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                    n = batch[0].shape[0]  # number of elements in the batch
                    loss_meter.add(l.item(), n)

                    if mode == 'train':
                        writer.add_scalar('loss/train',
                                          loss_meter.value(),
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train',
                                          acc_meter.value(),
                                          global_step=global_step)

                    if mode == 'train':
                        lossTrain = loss_meter.value()
                        global_step_train = global_step
                        array_loss_train.append(lossTrain)
                        array_acc_train.append(acc_meter.value())
                        array_sample_train.append(global_step_train)
                        print("TRAIN- Epoca", e)
                        print("GLOBAL STEP TRAIN", global_step_train)
                        print("LOSS TRAIN", lossTrain)
                        print("ACC TRAIN", acc_meter.value())

                    else:
                        lossValid = loss_meter.value()
                        global_step_val = global_step
                        array_loss_valid.append(lossValid)
                        array_acc_valid.append(acc_meter.value())
                        array_sample_valid.append(global_step_val)
                        print("VALID- Epoca", e)
                        print("GLOBAL STEP VALID", global_step_val)
                        print("LOSS VALID", lossValid)
                        print("ACC VALID", acc_meter.value())

            writer.add_scalar('loss/' + mode,
                              loss_meter.value(),
                              global_step=global_step)
            writer.add_scalar('accuracy/' + mode,
                              acc_meter.value(),
                              global_step=global_step)

        # add an embedding; TensorBoard does the rest.
        # To monitor training qualitatively, at the end of each epoch we log
        # the embedding of the last processed batch.
        writer.add_embedding(phi_i,
                             batch[3],
                             I_i,
                             global_step=global_step,
                             tag=exp_name + '_embedding')
        # keep only the latest model, overwriting older ones

        #torch.save(embedding_net.state_dict(),'%s.pth'%exp_name) # save the model parameters

        # NOTE(review): the full model object is passed here, while
        # train_siamese_diff passes state_dict() -- confirm which format
        # net_save expects.
        net_save(epochs, siamese_reload, optimizer, lossTrain, lossValid,
                 array_acc_train[-1], array_acc_valid[-1], global_step_train,
                 global_step_val, '%s.pth' % exp_name)
    f = '{:.7f}'.format(timer.stop())

    return siamese_reload, f, array_loss_train, array_loss_valid, array_sample_train, array_sample_valid, array_acc_train, array_acc_valid, labels_train, prediction_train, labels_val, prediction_val
# Ejemplo n.º 5
# 0
def train_siamese_class(directory,
                        version,
                        model,
                        train_loader,
                        valid_loader,
                        resize,
                        batch_size,
                        exp_name='model_1',
                        decay=None,
                        lr=0.0001,
                        epochs=10,
                        momentum=0.99,
                        logdir='logs',
                        dizionario=None):
    """Train a siamese network as a binary pair classifier.

    Each image of a pair goes through the shared branch ``model``; the two
    feature maps are concatenated and passed to ``model.fc2``, and the result
    is optimized with cross-entropy against the pair label.  Training can
    resume from the history dictionary ``dizionario``; per-epoch curves are
    saved under ``directory`` and checkpoints are written every epoch.

    Parameters
    ----------
    directory, version : output directory and version tag for plots/checkpoints.
    model : siamese branch network exposing an ``fc2`` classification head.
    train_loader, valid_loader : loaders yielding
        (img1, img2, pair_label, label1, label2) batches.
    resize, batch_size : bookkeeping values written to the JSON summary.
    exp_name : run name for TensorBoard and the saved model files.
    decay : optional SGD weight decay.
    lr, epochs, momentum : SGD hyperparameters.
    logdir : TensorBoard log root.
    dizionario : optional dict with keys "a_train", "a_valid", "l_train",
        "l_valid", "g_train", "g_valid", "epoche_fatte" to resume from.

    Returns
    -------
    (model, formatted elapsed time, last train/valid loss,
    last train/valid accuracy)
    """
    print("momonetum", momentum)
    print("lr", lr)
    criterion = nn.CrossEntropyLoss()
    if not decay is None:
        print("Weight_Decay", decay)
        optimizer = SGD(model.parameters(),
                        lr,
                        momentum=momentum,
                        weight_decay=decay)
    else:
        optimizer = SGD(model.parameters(), lr, momentum=momentum)

    #meters
    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()
    #writer
    writer = SummaryWriter(join(logdir, exp_name))
    #device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    criterion.to(device)
    # dictionary holding the training and validation loaders
    loader = {'train': train_loader, 'valid': valid_loader}

    global_step = 0
    last_loss_train = 0
    last_loss_val = 0
    last_acc_train = 0
    last_acc_val = 0

    array_accuracy_train = []
    array_accuracy_valid = []
    array_loss_train = []
    array_loss_valid = []
    array_glb_train = []
    array_glb_valid = []
    tempo = Timer()
    global_step = 0  # NOTE(review): duplicate of the initialization above
    start_epoca = 0

    if dizionario is not None:
        # resume histories and counters from a previous run
        array_accuracy_train = dizionario["a_train"]
        array_accuracy_valid = dizionario["a_valid"]
        array_loss_train = dizionario["l_train"]
        array_loss_valid = dizionario["l_valid"]
        array_glb_train = dizionario["g_train"]
        array_glb_valid = dizionario["g_valid"]
        global_step = dizionario["g_valid"][-1]
        start_epoca = dizionario["epoche_fatte"] + 1  # index of first epoch to run

    print("global step", global_step)
    print("a_acc_train", array_accuracy_train)
    print("a_acc_valid", array_accuracy_valid)
    print("loss_train", array_loss_train)
    print("loss_valid", array_loss_valid)
    print("glb_train", array_glb_train)
    print("glb_valid", array_glb_valid)
    print("epoca_start_indice ", start_epoca)

    start = timer()
    print("Num epoche", epochs)

    for e in range(start_epoca, epochs):
        print("Epoca= ", e)
        # alternate between the two modes: train and valid
        for mode in ['train', 'valid']:
            loss_meter.reset()
            acc_meter.reset()
            model.train() if mode == 'train' else model.eval()
            with torch.set_grad_enabled(
                    mode == 'train'):  # enable gradients only in training

                for i, batch in enumerate(loader[mode]):
                    print("Num batch =", i)
                    I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
                    #img1, img2, label12, label1, label2
                    # the siamese forward pass is simple:
                    # run the embedding net on the two inputs

                    # outputs of the two SNN branches
                    phi_i = model(I_i)  #img1
                    phi_j = model(I_j)  #img2

                    # concatenation of the feature maps
                    f = torch.cat((phi_i, phi_j), 1)

                    # final classifier output
                    output = model.fc2(f)

                    # predicted label (argmax over the two classes)
                    label_pred = output.to('cpu').max(1)[1]

                    l_ij = l_ij.type(torch.LongTensor).to(device)
                    # compute the loss
                    l = criterion(output, l_ij)

                    # update the global_step:
                    # it holds the number of samples seen during training
                    n = I_i.shape[0]  # number of elements in the batch
                    #print("numero elementi nel batch ",n)
                    global_step += n

                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                    acc = accuracy_score(l_ij.to('cpu'), label_pred)
                    n = batch[0].shape[0]
                    loss_meter.add(l.item(), n)
                    acc_meter.add(acc, n)
                    # log per-iteration results only during training
                    if mode == 'train':
                        writer.add_scalar('loss/train',
                                          loss_meter.value(),
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train',
                                          acc_meter.value(),
                                          global_step=global_step)
                    # once the epoch ends (train or valid), log the final estimates

            if mode == 'train':
                global_step_train = global_step
                last_loss_train = loss_meter.value()
                last_acc_train = acc_meter.value()

                array_accuracy_train.append(acc_meter.value())
                array_loss_train.append(loss_meter.value())
                array_glb_train.append(global_step)

            else:
                global_step_val = global_step
                last_loss_val = loss_meter.value()
                last_acc_val = acc_meter.value()

                array_accuracy_valid.append(acc_meter.value())
                array_loss_valid.append(loss_meter.value())
                array_glb_valid.append(global_step)

            writer.add_scalar('loss/' + mode,
                              loss_meter.value(),
                              global_step=global_step)
            writer.add_scalar('accuracy/' + mode,
                              acc_meter.value(),
                              global_step=global_step)

        print("Loss TRAIN", array_loss_train)
        print("Losss VALID", array_loss_valid)
        print("Accuracy TRAIN", array_accuracy_train)
        print("Accuracy VALID", array_accuracy_valid)
        print("dim acc train", len(array_accuracy_train))
        print("dim acc valid", len(array_accuracy_valid))
        figure = plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_accuracy_train)
        plt.plot(array_glb_valid, array_accuracy_valid)
        plt.xlabel('samples')
        plt.ylabel('accuracy')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotAccuracy_' + version + '.png')
        plt.clf()
        plt.close(figure)
        #plt.show()

        figure = plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_loss_train)
        plt.plot(array_glb_valid, array_loss_valid)
        plt.xlabel('samples')
        plt.ylabel('loss')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotLoss_' + version + '.png')
        #plt.show()
        plt.clf()
        plt.close(figure)

        #writer.add_embedding(phi_i, batch[3], I_i, global_step=global_step, tag=exp_name+'_embedding')
        # save the model weights at the end of each train+valid cycle

        torch.save(model, '%s.pth' % exp_name)
        torch.save(
            model, directory + "//" + version + "//" + '%s.pth' %
            (exp_name + "_" + str(e)))

        net_save(epochs, model, optimizer, last_loss_train, last_loss_val,
                 last_acc_train, last_acc_val, global_step_train,
                 global_step_val, '%s.pth' % (exp_name + "_dict"))

        saveArray(directory, version, array_loss_train, array_loss_valid,
                  array_accuracy_train, array_accuracy_valid, array_glb_train,
                  array_glb_valid)

        saveinFileJson(start, directory, version, resize, batch_size, e, lr,
                       momentum, len(train_loader), array_accuracy_train[-1],
                       array_accuracy_valid[-1], array_loss_train[-1],
                       array_loss_valid[-1])

    f = '{:.7f}'.format(tempo.stop())

    return model, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val
# Ejemplo n.º 6
# 0
def train_siamese_distrib_margine(directory,
                                  version,
                                  model,
                                  train_loader,
                                  valid_loader,
                                  resize,
                                  batch_size,
                                  exp_name='model_1',
                                  margine=2,
                                  decay=None,
                                  lr=0.0001,
                                  epochs=10,
                                  momentum=0.99,
                                  logdir='logs',
                                  dizionario_array=None,
                                  modeLoss=None):
    """Train a siamese network with a contrastive loss, classifying each
    pair by thresholding the embedding distance at ``margine``.

    A pair is predicted dissimilar (1) when the euclidean distance between
    the two branch embeddings exceeds ``margine``, similar (0) otherwise.
    Training can resume from ``dizionario_array``; accuracy/loss curves are
    plotted, histories saved via ``saveArray``/``saveinFileJson``, and a
    checkpoint is written at the end of every epoch.

    Fixes relative to the original:
      * ``margine`` followed keyword defaults without one of its own
        (SyntaxError) -- it now defaults to 2 in the same position.
      * ``criterion`` was only bound when ``modeLoss == "single"`` (NameError
        otherwise) -- it is now always ``ContrastiveLoss(margine)``.
      * predictions were built as ``[array > margine].int()`` (AttributeError
        on a list, and ``.numpy()`` later) -- now a numpy int array.

    Parameters
    ----------
    directory, version : output directory and version tag for plots/checkpoints.
    model : siamese embedding network.
    train_loader, valid_loader : loaders yielding
        (img1, img2, pair_label, label1, label2) batches.
    resize, batch_size : bookkeeping values written to the JSON summary.
    exp_name : run name for TensorBoard and the saved model files.
    margine : float -- contrastive margin and distance threshold (default 2).
    decay : optional SGD weight decay.
    lr, epochs, momentum : SGD hyperparameters.
    logdir : TensorBoard log root.
    dizionario_array : optional dict ("a_train", "a_valid", "l_train",
        "l_valid", "g_train", "g_valid", "optimizer", "epoche_fatte",
        "epoche_avanza") to resume from.
    modeLoss : loss selector; every value now yields ContrastiveLoss(margine).

    Returns
    -------
    (model, formatted elapsed time, last train/valid loss,
    last train/valid accuracy)
    """
    print("momonetum", momentum)
    print("lr", lr)

    # Originally only the modeLoss == "single" branch defined the loss,
    # leaving `criterion` unbound otherwise; bind it unconditionally.
    criterion = ContrastiveLoss(margine)

    if not decay is None:
        print("Weight_Decay", decay)
        optimizer = SGD(model.parameters(),
                        lr,
                        momentum=momentum,
                        weight_decay=decay)
    else:
        optimizer = SGD(model.parameters(), lr, momentum=momentum)

    if not dizionario_array is None:
        optimizer.load_state_dict(dizionario_array["optimizer"])
    #meters
    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()
    #writer
    writer = SummaryWriter(join(logdir, exp_name))
    #device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    criterion.to(device)  # the loss holds a parameter (the margin), move it too
    # dictionary holding the training and validation loaders
    loader = {'train': train_loader, 'valid': valid_loader}

    if not dizionario_array is None:
        # resume histories and counters from a previous run
        array_accuracy_train = dizionario_array["a_train"]
        array_accuracy_valid = dizionario_array["a_valid"]
        array_loss_train = dizionario_array["l_train"]
        array_loss_valid = dizionario_array["l_valid"]
        array_glb_train = dizionario_array["g_train"]
        array_glb_valid = dizionario_array["g_valid"]
        global_step = array_glb_valid[-1]
        last_loss_train = array_loss_train[-1]
        last_loss_val = array_loss_valid[-1]
        last_acc_train = array_accuracy_train[-1]
        last_acc_val = array_accuracy_valid[-1]
        epoche_fatte = dizionario_array["epoche_fatte"]
        epoche_avanza = dizionario_array["epoche_avanza"]

    else:
        array_accuracy_train = []
        array_accuracy_valid = []
        array_loss_train = []
        array_loss_valid = []
        array_glb_train = []
        array_glb_valid = []
        global_step = 0
        last_loss_train = 0
        last_loss_val = 0
        last_acc_train = 0
        last_acc_val = 0
    # initialize the timers

    tempo = Timer()
    start = timer()

    for e in range(epochs):
        print("Epoca= ", e)
        # alternate between the two modes: train and valid
        for mode in ['train', 'valid']:
            loss_meter.reset()
            acc_meter.reset()
            model.train() if mode == 'train' else model.eval()
            with torch.set_grad_enabled(
                    mode == 'train'):  # enable gradients only in training

                for i, batch in enumerate(loader[mode]):
                    print("Num batch =", i)
                    I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
                    #img1, img2, label12, label1, label2
                    # run the embedding net on the two inputs
                    phi_i = model(I_i)  #img 1
                    phi_j = model(I_j)  #img2

                    print("Output train img1", phi_i.size())
                    print("Output train img2", phi_j.size())
                    euclidean_distance = F.pairwise_distance(phi_i, phi_j)

                    euclid_tmp = torch.Tensor.numpy(
                        euclidean_distance.detach().cpu())  # distances
                    labs = l_ij.to('cpu').numpy()  # ground-truth labels

                    # BUGFIX: predicted label = 1 (dissimilar) when the
                    # distance exceeds the margin; int numpy array so it can
                    # be fed straight to accuracy_score
                    etichette_predette = (euclid_tmp > margine).astype(int)
                    print(etichette_predette)

                    # compute the loss
                    l = criterion(phi_i, phi_j, l_ij)

                    # update the global_step:
                    # it holds the number of samples seen during training
                    n = I_i.shape[0]  # number of elements in the batch
                    global_step += n

                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                    acc = accuracy_score(labs, etichette_predette)
                    n = batch[0].shape[0]
                    loss_meter.add(l.item(), n)
                    acc_meter.add(acc, n)
                    # log per-iteration results only during training
                    if mode == 'train':
                        writer.add_scalar('loss/train',
                                          loss_meter.value(),
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train',
                                          acc_meter.value(),
                                          global_step=global_step)
                    # once the epoch ends (train or valid), log the final estimates

            if mode == 'train':
                global_step_train = global_step
                last_loss_train = loss_meter.value()
                last_acc_train = acc_meter.value()

                array_accuracy_train.append(acc_meter.value())
                array_loss_train.append(loss_meter.value())
                array_glb_train.append(global_step)

            else:
                global_step_val = global_step
                last_loss_val = loss_meter.value()
                last_acc_val = acc_meter.value()

                array_accuracy_valid.append(acc_meter.value())
                array_loss_valid.append(loss_meter.value())
                array_glb_valid.append(global_step)

            writer.add_scalar('loss/' + mode,
                              loss_meter.value(),
                              global_step=global_step)
            writer.add_scalar('accuracy/' + mode,
                              acc_meter.value(),
                              global_step=global_step)

        print("Loss TRAIN", array_loss_train)
        print("Losss VALID", array_loss_valid)
        print("Accuracy TRAIN", array_accuracy_train)
        print("Accuracy VALID", array_accuracy_valid)
        print("dim acc train", len(array_accuracy_train))
        print("dim acc valid", len(array_accuracy_valid))
        plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_accuracy_train)
        plt.plot(array_glb_valid, array_accuracy_valid)
        plt.xlabel('samples')
        plt.ylabel('accuracy')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotAccuracy_' + version + '.png')
        # NOTE(review): show() can block with interactive backends; the
        # sibling trainers use clf()/close() instead
        plt.show()

        plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_loss_train)
        plt.plot(array_glb_valid, array_loss_valid)
        plt.xlabel('samples')
        plt.ylabel('loss')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotLoss_' + version + '.png')
        plt.show()

        saveArray(directory, version, array_loss_train, array_loss_valid,
                  array_accuracy_train, array_accuracy_valid, array_glb_train,
                  array_glb_valid)

        saveinFileJson(start, directory, version, resize, batch_size, e, lr,
                       momentum, len(train_loader), array_accuracy_train[-1],
                       array_accuracy_valid[-1], array_loss_train[-1],
                       array_loss_valid[-1])

        #writer.add_embedding(phi_i, batch[3], I_i, global_step=global_step, tag=exp_name+'_embedding')
        # save the model weights at the end of each train+valid cycle
        net_save(epochs, model, optimizer, last_loss_train, last_loss_val,
                 last_acc_train, last_acc_val, global_step_train,
                 global_step_val, '%s.pth' % (exp_name + "_dict"))
        torch.save(model, '%s.pth' % exp_name)
        torch.save(
            model, directory + "//" + version + "//" + '%s.pth' %
            (exp_name + "_" + str(e)))
    f = '{:.7f}'.format(tempo.stop())
    return model, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val
# Ejemplo n.º 7
# 0
def train_class(directory,
                version,
                model,
                train_loader,
                valid_loader,
                resize,
                batch_size,
                exp_name='experiment',
                lr=0.01,
                epochs=10,
                momentum=0.99,
                logdir='logs',
                dizionario=None):
    """Train a plain image classifier with cross-entropy and SGD.

    Batches are dicts with keys 'image' and 'label'.  Training can resume
    from the history dictionary ``dizionario``; per-epoch accuracy/loss
    curves are saved under ``directory`` and checkpoints written every epoch.

    Parameters
    ----------
    directory, version : output directory and version tag for plots/checkpoints.
    model : the classification network.
    train_loader, valid_loader : loaders yielding {'image': ..., 'label': ...}.
    resize, batch_size : bookkeeping values written to the JSON summary.
    exp_name : run name for TensorBoard and the saved model files.
    lr, epochs, momentum : SGD hyperparameters.
    logdir : TensorBoard log root.
    dizionario : optional dict with keys "a_train", "a_valid", "l_train",
        "l_valid", "g_train", "g_valid", "epoche_fatte" to resume from.

    Returns
    -------
    (model, formatted elapsed time, last train/valid loss,
    last train/valid accuracy)
    """
    print("Taining classifacation")
    criterion = nn.CrossEntropyLoss()
    optimizer = SGD(model.parameters(), lr, momentum=momentum)
    #meters
    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()
    #writer
    writer = SummaryWriter(join(logdir, exp_name))
    #device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # dictionary holding the training and validation loaders
    loader = {'train': train_loader, 'valid': valid_loader}

    array_accuracy_train = []
    array_accuracy_valid = []
    array_loss_train = []
    array_loss_valid = []
    array_glb_train = []
    array_glb_valid = []

    last_loss_train = 0
    last_loss_val = 0
    last_acc_train = 0
    last_acc_val = 0
    # initialize the global step
    global_step = 0
    tempo = Timer()
    start = timer()
    start_epoca = 0

    if dizionario is not None:
        print("Inizializza")
        # resume histories and counters from a previous run
        array_accuracy_train = dizionario["a_train"]
        array_accuracy_valid = dizionario["a_valid"]
        array_loss_train = dizionario["l_train"]
        array_loss_valid = dizionario["l_valid"]
        array_glb_train = dizionario["g_train"]
        array_glb_valid = dizionario["g_valid"]
        global_step = dizionario["g_valid"][-1]
        start_epoca = dizionario["epoche_fatte"] + 1  # index of first epoch to run

    print("global step", global_step)
    print("a_acc_train", array_accuracy_train)
    print("a_acc_valid", array_accuracy_valid)
    print("loss_train", array_loss_train)
    print("loss_valid", array_loss_valid)
    print("glb_train", array_glb_train)
    print("glb_valid", array_glb_valid)
    print("epoca_start_indice ", start_epoca)
    start = timer()  # NOTE(review): duplicate of the assignment above

    print("Num epoche", epochs)

    for e in range(start_epoca, epochs):
        print("Epoca= ", e)
        # alternate between the two modes: train and valid
        for mode in ['train', 'valid']:
            loss_meter.reset()
            acc_meter.reset()
            model.train() if mode == 'train' else model.eval()
            with torch.set_grad_enabled(
                    mode == 'train'):  # enable gradients only in training

                for i, batch in enumerate(loader[mode]):
                    print(batch['label'])

                    #x, y = [b.to(device) for b in batch]
                    x = batch['image'].to(
                        device)  # move tensors to the chosen device
                    y = batch['label'].to(device)
                    output = model(x)

                    # update the global_step:
                    # it holds the number of samples seen during training
                    n = x.shape[0]  # number of elements in the batch
                    print("numero elementi nel batch ", n)
                    global_step += n

                    l = criterion(output, y)

                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                    print("Etichette predette", output.to('cpu').max(1)[1])
                    acc = accuracy_score(y.to('cpu'),
                                         output.to('cpu').max(1)[1])

                    loss_meter.add(l.item(), n)
                    acc_meter.add(acc, n)
                    # log per-iteration results only during training
                    if mode == 'train':
                        writer.add_scalar('loss/train',
                                          loss_meter.value(),
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train',
                                          acc_meter.value(),
                                          global_step=global_step)
                        print("Accuracy Train=", acc_meter.value())
                    # once the epoch ends (train or valid), log the final estimates

            if mode == 'train':
                global_step_train = global_step
                last_loss_train = loss_meter.value()
                last_acc_train = acc_meter.value()
                print("Accuracy Train=", acc_meter.value())
                array_accuracy_train.append(acc_meter.value())
                array_loss_train.append(loss_meter.value())
                array_glb_train.append(global_step)

            else:
                global_step_val = global_step
                last_loss_val = loss_meter.value()
                last_acc_val = acc_meter.value()
                print("Accuracy Valid=", acc_meter.value())
                array_accuracy_valid.append(acc_meter.value())
                array_loss_valid.append(loss_meter.value())
                array_glb_valid.append(global_step)

            writer.add_scalar('loss/' + mode,
                              loss_meter.value(),
                              global_step=global_step)
            writer.add_scalar('accuracy/' + mode,
                              acc_meter.value(),
                              global_step=global_step)

        print("Loss TRAIN", array_loss_train)
        print("Losss VALID", array_loss_valid)
        print("Accuracy TRAIN", array_accuracy_train)
        print("Accuracy VALID", array_accuracy_valid)
        print("dim acc train", len(array_accuracy_train))
        print("dim acc valid", len(array_accuracy_valid))
        figure = plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_accuracy_train)
        plt.plot(array_glb_valid, array_accuracy_valid)
        plt.xlabel('samples')
        plt.ylabel('accuracy')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotAccuracy_' + version + '.png')
        plt.clf()
        plt.close(figure)

        figure = plt.figure(figsize=(12, 8))
        plt.plot(array_glb_train, array_loss_train)
        plt.plot(array_glb_valid, array_loss_valid)
        plt.xlabel('samples')
        plt.ylabel('loss')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotLoss_' + version + '.png')
        plt.clf()
        plt.close(figure)

        # save the model weights at the end of each train+valid cycle
        net_save(epochs,
                 model,
                 optimizer,
                 last_loss_train,
                 last_loss_val,
                 last_acc_train,
                 last_acc_val,
                 global_step_train,
                 global_step_val,
                 '%s.pth' % (exp_name + "_dict"),
                 dict_stato_no=True)

        # save the model weights at the end of each train+valid cycle
        torch.save(
            model, directory + "//" + version + "//" + '%s.pth' %
            (exp_name + "_" + str(e)))
        torch.save(model, '%s.pth' % (exp_name))

        saveArray(directory, version, array_loss_train, array_loss_valid,
                  array_accuracy_train, array_accuracy_valid, array_glb_train,
                  array_glb_valid)

        saveinFileJson(start, directory, version, resize, batch_size, e, lr,
                       momentum, len(train_loader), array_accuracy_train[-1],
                       array_accuracy_valid[-1], array_loss_train[-1],
                       array_loss_valid[-1])

    f = '{:.7f}'.format(tempo.stop())
    return model, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val
# Ejemplo n.º 8
# 0
def train_margine_dynamik(directory,
                          version,
                          model,
                          train_loader,
                          valid_loader,
                          resize,
                          batch_size,
                          exp_name='model_1',
                          lr=0.0001,
                          epochs=10,
                          momentum=0.99,
                          margin=2,
                          logdir='logs',
                          modeLoss=None):
    """Train a siamese ``model`` with ContrastiveLoss and a dynamically
    adjusted Euclidean-distance threshold.

    Per batch, the two images of a pair are embedded, the contrastive
    loss is computed on the pair label, and (in train mode) the weights
    are updated.  Pair distances are then binarized against
    ``euclidean_distance_threshold`` (distance > threshold -> predicted
    "not same") to compute accuracy.  At the end of every epoch the
    collected distance/label histories are passed to ``aggiusta_soglia``,
    which returns the threshold used for the next epoch.

    Side effects per epoch: TensorBoard logging, accuracy/loss plots
    saved under ``directory``, arrays and JSON summaries saved via
    ``saveArray``/``saveinFileJson``/``addValueJsonModel``, and model
    checkpoints written via ``net_save`` and ``torch.save``.

    NOTE(review): ``resize`` and ``batch_size`` are only recorded in the
    JSON summary; ``margin`` and ``modeLoss`` are accepted but never
    used — the criterion is always ``ContrastiveLoss()`` with defaults.

    Returns:
        tuple: (model, elapsed_time_str, last_loss_train, last_loss_val,
        last_acc_train, last_acc_val)
    """

    criterion = ContrastiveLoss()
    # Adam with fixed betas and a small weight decay; lr passed positionally.
    optimizer = Adam(model.parameters(),
                     lr,
                     betas=(0.9, 0.999),
                     weight_decay=0.0004)
    # meters accumulating running averages of loss and accuracy
    loss_meter = AverageValueMeter()
    acc_meter = AverageValueMeter()
    # TensorBoard writer rooted at logdir/exp_name
    writer = SummaryWriter(join(logdir, exp_name))
    # pick GPU when available
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    criterion.to(device)
    # dictionary mapping each phase name to its DataLoader
    loader = {'train': train_loader, 'valid': valid_loader}

    # initial distance threshold; re-estimated at the end of every epoch
    euclidean_distance_threshold = 1

    # per-epoch metric histories (one entry appended per epoch per phase)
    array_accuracy_train = []
    array_accuracy_valid = []
    array_loss_train = []
    array_loss_valid = []
    array_glb_train = []
    array_glb_valid = []

    last_loss_train = 0
    last_loss_val = 0
    last_acc_train = 0
    last_acc_val = 0
    # global step: cumulative number of samples seen (train + valid)
    global_step = 0
    tempo = Timer()
    start = timer()

    # history of the thresholds chosen by aggiusta_soglia, one per epoch
    soglie = []

    for e in range(epochs):
        print("Epoca = ", e)
        print("Euclidean_distance_soglia = ", euclidean_distance_threshold)
        # keep track of euclidean_distance and label history each epoch
        training_euclidean_distance_history = []
        training_label_history = []
        validation_euclidean_distance_history = []
        validation_label_history = []

        # iterate over the two phases: train then valid
        for mode in ['train', 'valid']:
            loss_meter.reset()
            acc_meter.reset()
            model.train() if mode == 'train' else model.eval()
            with torch.set_grad_enabled(
                    mode == 'train'):  # enable gradients only while training

                for i, batch in enumerate(loader[mode]):
                    print("Num batch =", i)
                    I_i, I_j, l_ij, _, _ = [b.to(device) for b in batch]
                    # batch layout: img1, img2, pair_label, label1, label2
                    # the siamese forward is trivial:
                    # run the embedding network on both inputs
                    phi_i = model(I_i)  # embedding of image 1
                    phi_j = model(I_j)  # embedding of image 2

                    print("Output train img1", phi_i.size())
                    print("Output train img2", phi_j.size())
                    print("Etichetta reale", l_ij)
                    l_ij = l_ij.type(torch.LongTensor).to(device)

                    # contrastive loss on the pair
                    l = criterion(phi_i, phi_j, l_ij)

                    # advance the global step by the number of samples
                    # seen in this batch
                    n = I_i.shape[0]  # number of elements in the batch
                    #print("numero elementi nel batch ",n)
                    global_step += n

                    if mode == 'train':
                        l.backward()
                        optimizer.step()
                        optimizer.zero_grad()

                    # second forward pass: in train mode this recomputes the
                    # embeddings with the just-updated weights so the
                    # distances below reflect the current model
                    phi_i = model(I_i)  # embedding of image 1
                    phi_j = model(I_j)  # embedding of image 2
                    # Euclidean distance between the two embeddings
                    if mode == 'train':
                        euclidean_distance = F.pairwise_distance(phi_i, phi_j)
                        training_label = euclidean_distance > euclidean_distance_threshold  # 0 if same, 1 if not same (progression)
                        #equals = training_label.int() == l_ij.int() # 1 if true

                        training_label = training_label.int()
                        acc = accuracy_score(
                            l_ij.to('cpu'),
                            torch.Tensor.numpy(training_label.cpu()))

                        # save euclidean distance and label history
                        euclid_tmp = torch.Tensor.numpy(
                            euclidean_distance.detach().cpu()
                        )  # detach gradient, move to CPU
                        training_euclidean_distance_history.extend(euclid_tmp)

                        label_tmp = torch.Tensor.numpy(l_ij.to('cpu'))
                        training_label_history.extend(label_tmp)

                    elif mode == 'valid':

                        # evaluate validation accuracy using a Euclidean distance threshold
                        euclidean_distance = F.pairwise_distance(phi_i, phi_j)
                        validation_label = euclidean_distance > euclidean_distance_threshold  # 0 if same, 1 if not same
                        #equals = validation_label.int() == l_ij.int() # 1 if true

                        validation_label = validation_label.int()
                        acc = accuracy_score(
                            l_ij.to('cpu'),
                            torch.Tensor.numpy(validation_label.cpu()))

                        # save euclidean distance and label history
                        euclid_tmp = torch.Tensor.numpy(
                            euclidean_distance.detach().cpu()
                        )  # detach gradient, move to CPU
                        validation_euclidean_distance_history.extend(
                            euclid_tmp)

                        label_tmp = torch.Tensor.numpy(l_ij.cpu())
                        validation_label_history.extend(label_tmp)

                    n = batch[0].shape[0]
                    loss_meter.add(l.item(), n)
                    acc_meter.add(acc, n)
                    # log per-iteration results only during training
                    if mode == 'train':
                        writer.add_scalar('loss/train',
                                          loss_meter.value(),
                                          global_step=global_step)
                        writer.add_scalar('accuracy/train',
                                          acc_meter.value(),
                                          global_step=global_step)
                    # once the epoch is over (train or valid) log final estimates

            if mode == 'train':
                global_step_train = global_step
                last_loss_train = loss_meter.value()
                last_acc_train = acc_meter.value()

                array_accuracy_train.append(acc_meter.value())
                array_loss_train.append(loss_meter.value())
                array_glb_train.append(global_step)

            else:
                global_step_val = global_step
                last_loss_val = loss_meter.value()
                last_acc_val = acc_meter.value()

                array_accuracy_valid.append(acc_meter.value())
                array_loss_valid.append(loss_meter.value())
                array_glb_valid.append(global_step)

            writer.add_scalar('loss/' + mode,
                              loss_meter.value(),
                              global_step=global_step)
            writer.add_scalar('accuracy/' + mode,
                              acc_meter.value(),
                              global_step=global_step)

        # end of one epoch

        print("Loss TRAIN", array_loss_train)
        print("Losss VALID", array_loss_valid)
        print("Accuracy TRAIN", array_accuracy_train)
        print("Accuracy VALID", array_accuracy_valid)
        print("dim acc train", len(array_accuracy_train))
        print("dim acc valid", len(array_accuracy_valid))
        plt.figure(figsize=(12, 8))
        plt.plot(array_accuracy_train)
        plt.plot(array_accuracy_valid)
        plt.xlabel('samples')
        plt.ylabel('accuracy')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotAccuracy_' + version + '.png')
        plt.show()

        plt.figure(figsize=(12, 8))
        plt.plot(array_loss_train)
        plt.plot(array_loss_valid)
        plt.xlabel('samples')
        plt.ylabel('loss')
        plt.grid()
        plt.legend(['Training', 'Valid'])
        plt.savefig(directory + '//plotLoss_' + version + '.png')
        plt.show()

        # re-estimate the distance threshold from this epoch's histories
        euclidean_distance_threshold = aggiusta_soglia(
            training_label_history, training_euclidean_distance_history,
            validation_label_history, validation_euclidean_distance_history)
        soglie.append(euclidean_distance_threshold)

        saveArray(directory, version, array_loss_train, array_loss_valid,
                  array_accuracy_train, array_accuracy_valid, array_glb_train,
                  array_glb_valid, soglie)

        saveinFileJson(start, directory, version, resize, batch_size, e, lr,
                       momentum, len(train_loader), array_accuracy_train[-1],
                       array_accuracy_valid[-1], array_loss_train[-1],
                       array_loss_valid[-1])
        addValueJsonModel(directory + "//" + "modelTrained.json", version,
                          "euclidean_distance_threshold", "last",
                          euclidean_distance_threshold)
        #writer.add_embedding(phi_i, batch[3], I_i, global_step=global_step, tag=exp_name+'_embedding')
        # checkpoint the model weights at the end of each train+valid cycle
        net_save(epochs,
                 model,
                 optimizer,
                 last_loss_train,
                 last_loss_val,
                 last_acc_train,
                 last_acc_val,
                 global_step_train,
                 global_step_val,
                 '%s.pth' % (exp_name + "_dict"),
                 dict_stato_no=True)
        torch.save(model, '%s.pth' % exp_name)
        torch.save(
            model, directory + "//" + version + "//" + '%s.pth' %
            (exp_name + "_" + str(e)))

    f = '{:.7f}'.format(tempo.stop())
    return model, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val