Example 1
def finetune_objective(root=None,
                       previous_task_model_path=None,
                       train_data_path=None,
                       test_data_path=None,
                       exp_dir=None,
                       reg_lambda=None,
                       norm=None,
                       epochs=None,
                       lr=None,
                       test_iter=None,
                       batch=None):

    root, previous_task_model_path, train_data_path, test_data_path, exp_dir, reg_lambda, norm, epochs, lr, test_iter, batch = fill_default_vals(
        root, previous_task_model_path, train_data_path, test_data_path,
        exp_dir, reg_lambda, norm, epochs, lr, test_iter, batch)
    # fill in default values for any arguments left as None (root, paths, hyper-parameters)

    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)

    'DEFINE TRAINING'

    #define LOSS
    criterion = sherlock_model_utils.Fact_Euclidian_Loss()

    print('Using epochs:', epochs, 'Criterion:', type(criterion))

    # in case you have a checkpoint saved in the resume path, the training will start from there
    resume_path = exp_dir + '/checkpoint.pth.tar'

    'LOAD YOUR DATASET INFORMATION'
    r"""

    df_train and df_test are dataframes holding:
        - image_links: relative path to each image
        - NLP_links:   relative path to the NLP representation of each image
        - SPO: fact representation (S: Subject, P: Predicate, O: Object)
        - id: fact label (each fact has its own unique label)
        - w_s, w_p, w_o: booleans indicating whether the fact represented in the image has a Subject (w_s), Predicate (w_p), Object (w_o)

    """

    df_train = pd.read_csv(train_data_path)
    df_test = pd.read_csv(test_data_path)
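
    # Illustrative sketch only (not from the repo): the CSVs are expected to
    # expose the columns documented in the docstring above. The values below
    # are made up purely to show the layout:
    #
    #   image_links,NLP_links,SPO,id,w_s,w_p,w_o
    #   images/0001.jpg,nlp/0001.npy,"<person,riding,horse>",17,1,1,1
    #   images/0002.jpg,nlp/0002.npy,"<dog,,>",42,1,0,0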
    ##################################################################################################################

    'MAKE YOUR DATASET'
    # your dataloader will hold
    # images, NLP, ws, wp, wo, labels = data

    train_dt = dataset_utils.Cst_Dataset(
        df_train['image_links'],
        df_train['NLP_links'],
        df_train['id'],
        df_train['w_s'],
        df_train['w_p'],
        df_train['w_o'],
        root,
        image_loader=dataset_utils.pil_loader,
        NLP_feat_loader=dataset_utils.NLP_loader,
        transform=transforms.Compose([
            transforms.Scale((256, 256)),
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])  #per-channel mean/std
        ]))
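
    # note: transforms.Scale and transforms.RandomSizedCrop are the legacy
    # torchvision names for today's Resize and RandomResizedCrop; this code
    # targets the older torchvision API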

    test_dt = dataset_utils.Cst_Dataset(
        df_test['image_links'],
        df_test['NLP_links'],
        df_test['id'],
        df_test['w_s'],
        df_test['w_p'],
        df_test['w_o'],
        root,
        image_loader=dataset_utils.pil_loader,
        NLP_feat_loader=dataset_utils.NLP_loader,
        transform=transforms.Compose([
            transforms.Scale((256, 256)),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])  #per-channel mean/std 
        ]))

    # Make your dataset accessible in batches
    dset_loaders = {
        'train':
        torch.utils.data.DataLoader(train_dt,
                                    batch_size=batch,
                                    shuffle=True,
                                    num_workers=4),
        'val':
        torch.utils.data.DataLoader(test_dt,
                                    batch_size=batch,
                                    shuffle=True,
                                    num_workers=4)
    }
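
    # A minimal sketch (an assumption mirroring the comment above, not code
    # from this repo) of how one batch from these loaders is unpacked:
    #
    #   for images, NLP, ws, wp, wo, labels in dset_loaders['train']:
    #       # images: (batch, 3, 224, 224) float tensors after the transform
    #       # NLP:    the language-side features loaded by NLP_loader
    #       # ws/wp/wo: per-sample subject/predicate/object presence flags
    #       ...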

    use_gpu = torch.cuda.is_available()

    print('Training will be done using GPU: %s' % use_gpu)

    #loading previous model
    objective_init_file = exp_dir + '/init_model.pth.tar'

    if os.path.isfile(resume_path):
        checkpoint = torch.load(resume_path)
        Sherlock_Net = checkpoint['model']

        print("=> loaded checkpoint '{}' (epoch {})".format(
            resume_path, checkpoint['epoch']))
    else:
        checkpoint = []
        if os.path.isfile(objective_init_file):
            Sherlock_Net = torch.load(objective_init_file)
        else:
            print('Loading previous task model')
            Sherlock_Net = torch.load(previous_task_model_path)
            if use_gpu:
                Sherlock_Net = Sherlock_Net.cuda()
            #initialize reg_params to zero
            #no entries are created for frozen layers
            reg_params = Objective_based_Training.initialize_reg_params(
                Sherlock_Net, train_eval_utils.freeze_layers)
            Sherlock_Net.reg_params = reg_params
            Sherlock_Net = objective_based_caller.update_objective_based_weights(
                dset_loaders, Sherlock_Net, len(df_train), use_gpu, norm)
            torch.save(Sherlock_Net, objective_init_file)

        Sherlock_Net.reg_params['lambda'] = float(reg_lambda)

    del df_train, df_test
    'SET LEARNING RATE AND WEIGHT DECAY'
    params = train_eval_utils.set_param_learning_rate(Sherlock_Net, lr)

    #TRAIN
    objective_based_caller.finetune_objective(dset_loaders, Sherlock_Net,
                                              params, criterion, epochs,
                                              exp_dir, checkpoint, lr,
                                              test_iter)
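

# A minimal, self-contained sketch of the importance-weighted penalty that
# reg_params and reg_lambda feed into above. The per-parameter keys 'omega'
# (importance) and 'init_val' (value after the previous task) are assumptions
# about the structure built by Objective_based_Training.initialize_reg_params,
# not the repo's actual implementation.
def importance_penalty_sketch(model, reg_params, reg_lambda):
    penalty = 0.0
    for param in model.parameters():
        if param in reg_params:  # frozen layers have no entry
            omega = reg_params[param]['omega']          # per-weight importance
            theta_star = reg_params[param]['init_val']  # previous-task value
            penalty = penalty + (omega * (param - theta_star) ** 2).sum()
    return reg_lambda * penalty
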
def update_objective_based_weights(root,
                                   model_ft,
                                   batch,
                                   use_gpu,
                                   norm='L2',
                                   reg_sets=['data_info/B1_train.csv']):

    #reg_params=Objective_based_Training.initialize_reg_params(model_ft)
    #model_ft.reg_params=reg_params
    #====================get data loaders============
    dset_loaders = []
    for data_path in reg_sets:

        df_data = pd.read_csv(root + data_path)

        ##################################################################################################################

        'MAKE YOUR DATASET'
        # your dataloader will hold
        # images, NLP, ws, wp, wo, labels = data

        data_dt = dataset_utils.Cst_Dataset(
            df_data['image_links'],
            df_data['NLP_links'],
            df_data['id'],
            df_data['w_s'],
            df_data['w_p'],
            df_data['w_o'],
            root,
            image_loader=dataset_utils.pil_loader,
            NLP_feat_loader=dataset_utils.NLP_loader,
            transform=transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(
                    [0.485, 0.456, 0.406],
                    [0.229, 0.224, 0.225])  #per-channel mean/std 
            ]))

        # Make your dataset accessible in batches
        dset_loader = torch.utils.data.DataLoader(data_dt,
                                                  batch_size=batch,
                                                  shuffle=False,
                                                  num_workers=4)

        dset_loaders.append(dset_loader)

    #==============================================

    optimizer_ft = Objective_based_Training.Objective_After_SGD(
        model_ft.parameters(), lr=0.0001, momentum=0.9)
    #exp_dir='/esat/monkey/raljundi/pytorch/CUB11f_hebbian_finetuned'

    if norm == 'L2':
        print('********************objective with L2 norm***************')
        model_ft = compute_importance_l2(model_ft, optimizer_ft,
                                         train_eval_utils.Cst_exp_lr_scheduler,
                                         dset_loaders, use_gpu)
    else:
        model_ft = compute_importance(model_ft, optimizer_ft,
                                      train_eval_utils.Cst_exp_lr_scheduler,
                                      dset_loaders, use_gpu)

    return model_ft
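

# A minimal sketch (an assumption, not the repo's code) of what an L2-norm
# importance pass like compute_importance_l2 does: for each batch, take the
# gradient of the squared L2 norm of the model output with respect to every
# parameter; the accumulated gradient magnitudes become the per-weight
# importances stored in model.reg_params.
def compute_importance_l2_sketch(model, loader):
    for data in loader:
        images = data[0]
        model.zero_grad()
        output = model(images)
        loss = output.norm(2) ** 2  # squared L2 norm of the output
        loss.backward()             # param.grad now holds d||f(x)||^2 / dtheta
        # accumulate param.grad.abs() into each parameter's importance here
    return model
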
def finetune_elastic(root=None,
                     previous_task_model_path=None,
                     train_data_path=None,
                     test_data_path=None,
                     exp_dir=None,
                     reg_lambda=None,
                     epochs=None,
                     lr=None,
                     test_iter=None,
                     batch=None,
                     use_multiple_gpu=None):
    
    'DEFINE TRAINING PARAMETERS'
    root, previous_task_model_path, train_data_path, test_data_path, exp_dir, reg_lambda, epochs, lr, test_iter, batch = fill_default_vals(
        root, previous_task_model_path, train_data_path, test_data_path,
        exp_dir, reg_lambda, epochs, lr, test_iter, batch)
    
    #make your save directory
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)
 
    #define LOSS
    criterion = sherlock_model_utils.Fact_Euclidian_Loss()
    print('Criterion:', type(criterion))

    # in case you have a checkpoint saved in the resume path, the training will start from there
    resume_path = exp_dir + '/checkpoint.pth.tar'


    'LOAD YOUR DATASET INFORMATION'
    r"""

    df_train and df_test are dataframes holding:
        - image_links: relative path to each image
        - NLP_links:   relative path to the NLP representation of each image
        - SPO: fact representation (S: Subject, P: Predicate, O: Object)
        - id: fact label (each fact has its own unique label)
        - w_s, w_p, w_o: booleans indicating whether the fact represented in the image has a Subject (w_s), Predicate (w_p), Object (w_o)

    """
    df_train = pd.read_csv(train_data_path)
    df_test = pd.read_csv(test_data_path)
    
    ##################################################################################################################

    'MAKE YOUR DATASET'
    # your dataloader will hold
    # images, NLP, ws, wp, wo, labels = data 

    train_dt = dataset_utils.Cst_Dataset(
        df_train['image_links'],
        df_train['NLP_links'],
        df_train['id'],
        df_train['w_s'],
        df_train['w_p'],
        df_train['w_o'],
        root,
        image_loader=dataset_utils.pil_loader,
        NLP_feat_loader=dataset_utils.NLP_loader,
        transform=transforms.Compose([
            transforms.Scale(256),
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])  #per-channel mean/std
        ]))


    test_dt = dataset_utils.Cst_Dataset(
        df_test['image_links'],
        df_test['NLP_links'],
        df_test['id'],
        df_test['w_s'],
        df_test['w_p'],
        df_test['w_o'],
        root,
        image_loader=dataset_utils.pil_loader,
        NLP_feat_loader=dataset_utils.NLP_loader,
        transform=transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])  #per-channel mean/std
        ]))




    del df_train, df_test

    # Make your dataset accessible in batches
    dset_loaders = {
        'train':
        torch.utils.data.DataLoader(train_dt,
                                    batch_size=batch,
                                    shuffle=True,
                                    num_workers=4),
        'val':
        torch.utils.data.DataLoader(test_dt,
                                    batch_size=batch,
                                    shuffle=True,
                                    num_workers=4)
    }


    use_gpu = torch.cuda.is_available()
    print('Training will be done using GPU: %s' % use_gpu)
    
    ##################################################################################################################
    
    'LOAD YOUR INITIAL MODEL'
    
    #loading previous model   
    Sherlock_Net = []
    
    if not os.path.isfile(resume_path):
        checkpoint = []

        #TRAINING ON TASK 1
        if not os.path.isfile(previous_task_model_path):
            #build your Sherlock Net from scratch
            Sherlock_Net = sherlock_model_utils.build_Sherlock_Net()

            #initialize Sherlock Net with VGG16 params
            Sherlock_Net = sherlock_model_utils.initialize_from_VGG(Sherlock_Net, use_gpu)
                 
        
        #FINETUNING
        else:
            #import the model from the previous task
            print('Loading model from a previous task')
            Sherlock_Net = torch.load(previous_task_model_path)

        #set model on GPU
        if use_gpu:
            Sherlock_Net = Sherlock_Net.cuda()
            
    else:
        checkpoint = torch.load(resume_path)
        Sherlock_Net = checkpoint['model']
        print("=> loaded checkpoint '{}' (epoch {})"
                 .format(resume_path, checkpoint['epoch']))

            

    'SET LEARNING RATE AND WEIGHT DECAY'
    params = train_eval_utils.set_param_learning_rate(Sherlock_Net, lr)

    'TRAIN'
    reg_lambda = float(reg_lambda)
    elastic_caller.finetune_elastic(dset_loaders, Sherlock_Net, params,
                                    criterion, epochs, exp_dir, checkpoint,
                                    lr, test_iter, reg_lambda,
                                    use_multiple_gpu)
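

# For contrast with finetune_objective above: finetune_elastic passes a single
# scalar reg_lambda straight to elastic_caller, in the style of an EWC-like
# quadratic pull toward the previous-task weights. A minimal sketch under that
# assumption; `fisher` and `theta_star` are hypothetical names, not the repo's.
def elastic_penalty_sketch(model, fisher, theta_star, reg_lambda):
    penalty = 0.0
    for name, param in model.named_parameters():
        if name in fisher:  # only parameters with an importance estimate
            penalty = penalty + (fisher[name] * (param - theta_star[name]) ** 2).sum()
    return reg_lambda * penalty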