def train_fully_supervised(model, n_epochs, train_loader, val_loader, criterion,
                           optimizer, scheduler, auto_lr, save_folder, model_name,
                           benchmark=False, save_all_ep=True, save_best=False,
                           device='cpu', num_classes=21):
    """
        A complete training of fully supervised model. 
        save_folder : Path to save the model, the courb of losses,metric...
        benchmark : enable or disable backends.cudnn 
        save_all_ep : if True, the model is saved at each epoch in save_folder
        scheduler : if True, the model will apply a lr scheduler during training
        auto_lr : Auto lr finder 
    """
    torch.backends.cudnn.benchmark = benchmark

    if auto_lr:
        print('Auto finder for the Learning rate')
        lr_finder = LRFinder(model,
                             optimizer,
                             criterion,
                             memory_cache=False,
                             cache_dir='/tmp',
                             device=device)
        lr_finder.range_test(train_loader,
                             start_lr=10e-5,
                             end_lr=10,
                             num_iter=100)
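        # Note: with the torch-lr-finder package, lr_finder.plot() and lr_finder.reset()
        # are usually called after range_test() to inspect the loss-vs-lr curve and to
        # restore the model/optimizer to their pre-sweep state; they are not called here,
        # so the weights keep whatever state the range test left them in.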

    if scheduler:
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer, lambda x: (1 - x / (len(train_loader) * n_epochs))**0.9)
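        # The lambda above is the polynomial ('poly') decay with power 0.9 that is common
        # in semantic segmentation. It is written in terms of the total number of
        # iterations (len(train_loader) * n_epochs), while lr_scheduler.step() is called
        # once per epoch below, so the learning rate decays much more slowly than a
        # per-iteration poly schedule would.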

    loss_test = []
    loss_train = []
    iou_train = []
    iou_test = []
    accuracy_train = []
    accuracy_test = []
    model.to(device)
    for ep in range(n_epochs):
        print("EPOCH", ep)
        model.train()
        state = step_train_supervised(model, train_loader=train_loader, criterion=criterion,
                                      optimizer=optimizer, device=device, num_classes=num_classes)
        iou = state.metrics['mean IoU']
        acc = state.metrics['accuracy']
        loss = state.metrics['CE Loss']
        loss_train.append(loss)
        iou_train.append(iou)
        accuracy_train.append(acc)
        print('TRAIN - EP:', ep, 'iou:', iou, 'Accuracy:', acc, 'Loss CE',
              loss)
        if scheduler:
            lr_scheduler.step()
        #Eval model
        model.eval()
        with torch.no_grad():
            state = eval_model(model,
                               val_loader,
                               device=device,
                               num_classes=num_classes)
            iou = state.metrics['mean IoU']
            acc = state.metrics['accuracy']
            loss = state.metrics['CE Loss']
            loss_test.append(loss)
            iou_test.append(iou)
            accuracy_test.append(acc)
            print('TEST - EP:', ep, 'iou:', iou, 'Accuracy:', acc, 'Loss CE',
                  loss)

        ## Save model
        U.save_model(model,
                     save_all_ep,
                     save_best,
                     save_folder,
                     model_name,
                     ep=ep,
                     iou=iou,
                     iou_test=iou_test)

    U.save_curves(path=save_folder, loss_train=loss_train, iou_train=iou_train,
                  accuracy_train=accuracy_train, loss_test=loss_test,
                  iou_test=iou_test, accuracy_test=accuracy_test)
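
# Minimal usage sketch for train_fully_supervised (all names below are illustrative
# placeholders, not part of this module):
#
#   model = MySegmentationNet(num_classes=21)          # any nn.Module producing per-pixel logits
#   optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
#   criterion = torch.nn.CrossEntropyLoss(ignore_index=255)
#   train_fully_supervised(model, 50, train_loader, val_loader, criterion, optimizer,
#                          scheduler=True, auto_lr=False, save_folder='./runs',
#                          model_name='baseline', device='cuda')
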
def train_rot_equiv(model, n_epochs, train_loader_sup, train_dataset_unsup, val_loader,
                    criterion_supervised, optimizer, scheduler, Loss, gamma, batch_size,
                    iter_every, save_folder, model_name, benchmark=False, angle_max=30,
                    size_img=520, eval_every=5, save_all_ep=True,
                    dataroot_voc='~/data/voc2012', save_best=False, rot_cpu=False,
                    device='cpu', num_classes=21):
    """
        A complete training of rotation equivariance supervised model. 
        save_folder : Path to save the model, the courb of losses,metric...
        benchmark : enable or disable backends.cudnn 
        Loss : Loss for unsupervised training 'KL' 'CE' 'L1' or 'MSE'
        gamma : float btwn [0,1] -> Balancing two losses loss_sup*gamma + (1-gamma)*loss_unsup
        save_all_ep : if True, the model is saved at each epoch in save_folder
        scheduler : if True, the model will apply a lr scheduler during training
        eval_every : Eval Model with different input image angle every n step
        size_img : size of image during evaluation
        angle_max : max angle rotation for input images
    """
    torch.backends.cudnn.benchmark = benchmark
    if scheduler:
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer, lambda x: (1 - x /
                                  (len(train_loader_sup) * n_epochs))**0.9)
    criterion_unsupervised = U.get_criterion(Loss)
    iou_train = []
    iou_test = []
    combine_loss_train = []
    combine_loss_test = []
    loss_train_unsup = []
    loss_train_sup = []
    loss_test_unsup = []
    loss_test_sup = []
    equiv_accuracy_train = []
    equiv_accuracy_test = []
    accuracy_test = []
    accuracy_train = []
    for ep in range(n_epochs):
        train_loader_equiv = torch.utils.data.DataLoader(train_dataset_unsup, batch_size=batch_size,
                                                         shuffle=True, drop_last=True)
        print("EPOCH", ep)
        # TRAINING
        d = train_step_rot_equiv(model, train_loader_sup, train_loader_equiv,
                                 criterion_supervised, criterion_unsupervised,
                                 optimizer, gamma, Loss, rot_cpu=rot_cpu, device=device,
                                 angle_max=angle_max, num_classes=num_classes,
                                 iter_every=iter_every)
        if scheduler:
            lr_scheduler.step()
        combine_loss_train.append(d['loss'])
        loss_train_unsup.append(d['loss_equiv'])
        loss_train_sup.append(d['loss_sup'])
        equiv_accuracy_train.append(d['equiv_acc'])
        iou_train.append(d['iou_train'])
        accuracy_train.append(d['accuracy_train'])
        print('TRAIN - EP:', ep, 'iou:', d['iou_train'], 'Accuracy:', d['accuracy_train'],
              'Loss sup:', d['loss_sup'], 'Loss equiv:', d['loss_equiv'],
              'Combine Loss:', d['loss'], 'Equivariance Accuracy:', d['equiv_acc'])
        # EVALUATION
        model.eval()
        with torch.no_grad():
            state = eval_model(model,
                               val_loader,
                               device=device,
                               num_classes=num_classes)
            iou = state.metrics['mean IoU']
            acc = state.metrics['accuracy']
            loss = state.metrics['CE Loss']
            loss_test_sup.append(loss)
            iou_test.append(iou)
            accuracy_test.append(acc)
            print('TEST - EP:', ep, 'iou:', iou, 'Accuracy:', acc, 'Loss CE',
                  loss)
            # SAVING MODEL
            U.save_model(model,
                         save_all_ep,
                         save_best,
                         save_folder,
                         model_name,
                         ep=ep,
                         iou=iou,
                         iou_test=iou_test)

            if ep % eval_every == 0:  # Eval loss equiv and equivariance accuracy for the validation dataset
                equiv_acc, m_loss_equiv = U.eval_accuracy_equiv(model, val_loader,
                                                                criterion=criterion_unsupervised,
                                                                nclass=21, device=device,
                                                                Loss=Loss, plot=False,
                                                                angle_max=angle_max,
                                                                random_angle=False)
                loss_test_unsup.append(m_loss_equiv)
                equiv_accuracy_test.append(equiv_acc)
                """  
                print('VOC Dataset Train')
                _ = eval_model_all_angle(model,size_img,dataroot_voc,train=True,device=device,num_classes=num_classes)
                print('VOC Dataset Val')
                _ = eval_model_all_angle(model,size_img,dataroot_voc,train=False,device=device,num_classes=num_classes)
                ## Save model"""


    U.save_curves(path=save_folder, combine_loss_train=combine_loss_train,
                  loss_train_sup=loss_train_sup, loss_train_unsup=loss_train_unsup,
                  iou_train=iou_train, accuracy_train=accuracy_train,
                  equiv_accuracy_train=equiv_accuracy_train,
                  combine_loss_test=combine_loss_test, loss_test_unsup=loss_test_unsup,
                  equiv_accuracy_test=equiv_accuracy_test, loss_test_sup=loss_test_sup,
                  iou_test=iou_test, accuracy_test=accuracy_test)
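
# Rough sketch of the rotation-equivariance consistency term that the externally
# defined train_step_rot_equiv revolves around: the prediction on a rotated image is
# compared with the rotated prediction on the original image. TF.rotate and the names
# below are illustrative; the repo's implementation may differ in angle sampling,
# masking of rotation borders and reduction.
#
#   import torchvision.transforms.functional as TF
#   def rot_equiv_consistency(model, x, angle, criterion_unsup):
#       pred_of_rot = model(TF.rotate(x, angle))     # f(rot(x))
#       rot_of_pred = TF.rotate(model(x), angle)     # rot(f(x))
#       return criterion_unsup(pred_of_rot, rot_of_pred)
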
def train_scale_equiv(model, n_epochs, train_loader_sup, train_dataset_unsup, val_loader,
                      criterion_supervised, optimizer, scheduler, Loss, gamma, batch_size,
                      save_folder, model_name, benchmark=False, angle_max=30, size_img=520,
                      scale_factor=(0.5, 1.2), save_all_ep=True,
                      dataroot_voc='~/data/voc2012', save_best=False, device='cpu',
                      num_classes=21):
    """
        A complete training of rotation equivariance supervised model. 
        save_folder : Path to save the model, the courb of losses,metric...
        benchmark : enable or disable backends.cudnn 
        Loss : Loss for unsupervised training 'KL' 'CE' 'L1' or 'MSE'
        gamma : float btwn [0,1] -> Balancing two losses loss_sup*gamma + (1-gamma)*loss_unsup
        save_all_ep : if True, the model is saved at each epoch in save_folder
        scheduler : if True, the model will apply a lr scheduler during training
        eval_every : Eval Model with different input image angle every n step
        size_img : size of image during evaluation
        scale_factor : scale between min*size_img and max*size_img
    """
    torch.backends.cudnn.benchmark = benchmark
    if scheduler:
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer, lambda x: (1 - x /
                                  (len(train_loader_sup) * n_epochs))**0.9)
    criterion_unsupervised = U.get_criterion(Loss)
    print('Criterion Unsupervised', criterion_unsupervised)
    iou_train = []
    iou_test = []
    combine_loss_train = []
    combine_loss_test = []
    loss_train_unsup = []
    loss_train_sup = []
    loss_test_unsup = []
    loss_test_sup = []
    equiv_accuracy_train = []
    equiv_accuracy_test = []
    accuracy_test = []
    accuracy_train = []
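    # Anomaly detection makes autograd report the forward operation that produced a
    # NaN/Inf gradient; it adds significant overhead and is usually disabled outside debugging.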
    torch.autograd.set_detect_anomaly(True)
    for ep in range(n_epochs):
        train_loader_equiv = torch.utils.data.DataLoader(train_dataset_unsup, batch_size=batch_size,
                                                         shuffle=True, drop_last=True)
        print("EPOCH", ep)
        # TRAINING
        d = train_step_scale_equiv(model, train_loader_sup, train_loader_equiv,
                                   criterion_supervised, criterion_unsupervised,
                                   optimizer, gamma, Loss, device, size_img=size_img,
                                   scale_factor=scale_factor)
        if scheduler:
            lr_scheduler.step()
        combine_loss_train.append(d['loss'])
        loss_train_unsup.append(d['loss_equiv'])
        loss_train_sup.append(d['loss_sup'])
        equiv_accuracy_train.append(d['equiv_acc'])
        iou_train.append(d['iou_train'])
        accuracy_train.append(d['accuracy_train'])
        print('TRAIN - EP:', ep, 'iou:', d['iou_train'], 'Accuracy:', d['accuracy_train'],
              'Loss sup:', d['loss_sup'], 'Loss equiv:', d['loss_equiv'],
              'Combine Loss:', d['loss'], 'Equivariance Accuracy:', d['equiv_acc'])
        # EVALUATION
        model.eval()
        with torch.no_grad():
            state = eval_model(model,
                               val_loader,
                               device=device,
                               num_classes=num_classes)
            iou = state.metrics['mean IoU']
            acc = state.metrics['accuracy']
            loss = state.metrics['CE Loss']
            loss_test_sup.append(loss)
            iou_test.append(iou)
            accuracy_test.append(acc)
            print('TEST - EP:', ep, 'iou:', iou, 'Accuracy:', acc, 'Loss CE',
                  loss)

    U.save_curves(path=save_folder, combine_loss_train=combine_loss_train,
                  loss_train_sup=loss_train_sup, loss_train_unsup=loss_train_unsup,
                  iou_train=iou_train, accuracy_train=accuracy_train,
                  equiv_accuracy_train=equiv_accuracy_train,
                  combine_loss_test=combine_loss_test, loss_test_unsup=loss_test_unsup,
                  equiv_accuracy_test=equiv_accuracy_test, loss_test_sup=loss_test_sup,
                  iou_test=iou_test, accuracy_test=accuracy_test)
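
# The scale-equivariance analogue of the rotation term above typically uses resizing
# (e.g. torch.nn.functional.interpolate) in place of rotation, comparing the prediction
# on a rescaled image with the rescaled prediction; see train_step_scale_equiv, which is
# defined elsewhere in the repository.
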
Example no. 4
import torch  # required below for torch.load
from torchvision import transforms, datasets
import numpy as np

from matplotlib import pyplot as plt

import time
import sys
sys.path.append('../python_scripts/')
from utils import save_curves_no_smooth as save_curves

from sys import argv
# the experiment id and the epoch number are passed on the command line
exp = argv[1]
ep = argv[2]
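# Example invocation (the script name is a placeholder; only the experiment id and the
# epoch number are read here):
#   python plot_curves.py 3 20
# -> loads ../saving_model/exp3/exp3_epoch_20.pth.tar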

# define a CNN
from networks import alexnet_pytorch as Net

epoch = '_epoch_' + ep
model_path = '../saving_model/exp' + exp + '/exp' + exp + epoch + '.pth.tar'

checkpoint = torch.load(model_path)
print('Model loaded from the following path:')
print(model_path)
print('Model keys:')
print(checkpoint.keys())

save_curves(checkpoint, './')