import copy
import os
import time
from shutil import copyfile

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader, SequentialSampler
from torchvision.transforms import ToTensor  # assumed source of ToTensor

# Project-local helpers assumed importable from the surrounding package:
# History, validate, train_valid_loaders, do_epoch, create_model,
# create_huge_data_set, create_encoding_deconding_dict, LRPolicy,
# load_model_weights.


def test(model, test_dataset, batch_size, use_gpu=True):
    sampler = SequentialSampler(test_dataset)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=batch_size,
                                              sampler=sampler)
    score, loss = validate(model, test_loader, use_gpu=use_gpu)
    return score
def main_program(path_data, nb_row_per_classe, use_gpu, do_training, do_testing,
                 nb_epoch, batch_size, learning_rate, path_save_model):
    # Label encoding and decoding dicts
    enc_dict, dec_dict = create_encoding_deconding_dict(path_data)

    # Datasets
    size_image_train = 224
    data_train = create_huge_data_set(path_data, nb_rows=nb_row_per_classe,
                                      size_image=size_image_train,
                                      encoding_dict=enc_dict)
    data_valid = create_huge_data_set(path_data, nb_rows=100,
                                      size_image=size_image_train,
                                      skip_rows=range(1, nb_row_per_classe),
                                      encoding_dict=enc_dict)

    # Model
    model = create_model(use_gpu)
    if use_gpu:
        model.cuda()

    # Loss
    criterion = nn.CrossEntropyLoss()

    # Optimizer
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)

    # LR scheduler
    scheduler = LambdaLR(optimizer, lr_lambda=LRPolicy(start_lr=learning_rate))

    # Data loaders
    train_loader = DataLoader(data_train, batch_size=batch_size, shuffle=True)
    valid_loader = DataLoader(data_valid, batch_size=batch_size)

    # Train
    if do_training:
        train_model(model, train_loader, valid_loader, nb_epoch, scheduler,
                    optimizer, criterion, use_gpu, path_save=path_save_model)
        score = validate(model, valid_loader, use_gpu=use_gpu)[0]
        print(score)

    # Test: reload the best weights and evaluate them
    if do_testing:
        model_final, history = load_model_weights(model, path_save_model,
                                                  type="best", use_gpu=use_gpu,
                                                  get_history=True)
        history.display()
        score = validate(model_final, valid_loader, use_gpu=use_gpu)[0]
        print(score)
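# Hedged usage sketch: one way main_program might be invoked. Every path and
# hyperparameter below is a placeholder, not a value taken from the project,
# and run_example itself is a hypothetical helper.
def run_example():
    main_program(path_data="data/",                        # placeholder data dir
                 nb_row_per_classe=1000,
                 use_gpu=torch.cuda.is_available(),
                 do_training=True,
                 do_testing=True,
                 nb_epoch=10,
                 batch_size=32,
                 learning_rate=0.01,
                 path_save_model="models/checkpoint.pth")  # placeholder path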
def train_model(model, train_loader, val_loader, n_epoch, scheduler, optimizer,
                criterion, use_gpu=False, path_save=None,
                path_start_from_existing_model=None):
    if path_start_from_existing_model is not None:
        # Load the full training state from an existing checkpoint
        checkpoint = torch.load(path_start_from_existing_model)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        next_epoch = checkpoint['epoch'] + 1
        loss = checkpoint['loss']
        history = checkpoint["history"]
        best_acc = checkpoint["best_acc"]
        best_model_weights = checkpoint["best_model_weights"]
        scheduler.load_state_dict(checkpoint["lr_scheduler_state"])
        print("Model loaded for training")
    else:
        # Initialize so the checkpoint save below never sees an unbound name
        best_model_weights = copy.deepcopy(model.state_dict())
        history = History()
        next_epoch = 0
        best_acc = 0
        print("No model loaded for training")

    # Training
    for epoch in range(next_epoch, n_epoch):
        model.train()
        for j, batch in enumerate(train_loader):
            inputs, targets = batch
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()

            optimizer.zero_grad()
            output = model(inputs)
            loss = criterion(output, targets)
            loss.backward()
            optimizer.step()

        # Since PyTorch 1.1 the scheduler steps after the optimizer steps
        scheduler.step()

        train_acc, train_loss = validate(model, train_loader, use_gpu)
        val_acc, val_loss = validate(model, val_loader, use_gpu)

        # Current LR
        for param_group in optimizer.param_groups:
            current_lr = param_group["lr"]

        history.save(train_acc, val_acc, train_loss, val_loss, current_lr)
        print('Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - '
              'Train loss: {:.4f} - Val loss: {:.4f}'
              .format(epoch, train_acc, val_acc, train_loss, val_loss))

        # Best model so far
        if val_acc > best_acc:
            best_acc = val_acc
            best_model_weights = copy.deepcopy(model.state_dict())

        # Checkpoint save
        if path_save is not None:
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': loss,
                "history": history,
                "best_acc": best_acc,
                "best_model_weights": best_model_weights,
                "lr_scheduler_state": scheduler.state_dict()
            }, path_save)
            print("Epoch {} saved".format(epoch))
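# Minimal sketch of resuming an interrupted run with train_model. It assumes
# the caller rebuilds the same model, optimizer and scheduler as the original
# run; resume_training and checkpoint_path are hypothetical names, not part
# of the project.
def resume_training(model, train_loader, valid_loader, scheduler, optimizer,
                    criterion, checkpoint_path, n_epoch=20, use_gpu=True):
    # train_model restores the epoch counter, history, best accuracy and
    # scheduler state from the checkpoint before continuing, and keeps
    # checkpointing to the same file.
    train_model(model, train_loader, valid_loader, n_epoch, scheduler,
                optimizer, criterion, use_gpu=use_gpu,
                path_save=checkpoint_path,
                path_start_from_existing_model=checkpoint_path)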
def train(model, optimizer, dataset, n_epoch, batch_size, use_gpu=True,
          scheduler=None, criterion=None, pruner=None,
          best_result_save_path=None, batch_count=None, should_validate=True):
    history = History()

    if criterion is None:
        criterion = torch.nn.CrossEntropyLoss()

    # If someone provides a transform upstream, chances are they want that
    # transform used, so do not silently override it.
    if dataset.transform is None:
        dataset.transform = ToTensor()

    train_loader, val_loader = train_valid_loaders(dataset, batch_size=batch_size)

    highest_score = 0.0
    for i in range(n_epoch):
        start = time.time()
        do_epoch(criterion, model, optimizer, scheduler, train_loader, use_gpu,
                 pruner=pruner, count=batch_count)
        end = time.time()

        if should_validate:
            train_acc, train_loss = validate(model, train_loader, use_gpu)
            val_acc, val_loss = validate(model, val_loader, use_gpu)
            train_time = end - start
            history.save(train_acc, val_acc, train_loss, val_loss,
                         optimizer.param_groups[0]['lr'], train_time)
            print('Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - '
                  'Train loss: {:.4f} - Val loss: {:.4f} - Training time: {:.2f}s'
                  .format(i, train_acc, val_acc, train_loss, val_loss, train_time))

            if best_result_save_path is not None and val_acc > highest_score:
                highest_score = val_acc
                # Keep a backup of the previous best weights
                if os.path.isfile(best_result_save_path):
                    copyfile(best_result_save_path, best_result_save_path + ".old")
                basedir = os.path.dirname(best_result_save_path)
                if basedir and not os.path.exists(basedir):
                    os.makedirs(basedir)
                torch.save(model.state_dict(), best_result_save_path)

    return history
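# Hedged usage sketch for train: the SGD settings and save path below are
# placeholders, and train_example itself is a hypothetical helper, not part
# of the project.
def train_example(model, dataset):
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    # Trains for a few epochs and keeps the best validation weights on disk,
    # backing up any previous best as "<path>.old".
    return train(model, optimizer, dataset, n_epoch=5, batch_size=32,
                 use_gpu=torch.cuda.is_available(),
                 best_result_save_path="results/best_model.pt")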
def train_model(model, train_loader, val_loader, n_epoch, scheduler, optimizer,
                criterion, use_gpu=False, path_save=None):
    '''
    Simplified training loop without checkpointing: trains for n_epoch epochs
    and returns the history and the trained model.

    :param model: network to train
    :param train_loader: DataLoader over the training set
    :param val_loader: DataLoader over the validation set
    :param n_epoch: number of epochs
    :param scheduler: learning-rate scheduler
    :param optimizer: optimizer
    :param criterion: loss function
    :param use_gpu: run on the GPU if True
    :param path_save: save path (currently unused)
    :return: (history, model)
    '''
    history = History()
    next_epoch = 0

    # Training
    for epoch in range(next_epoch, n_epoch):
        model.train()
        for j, batch in enumerate(train_loader):
            inputs, targets = batch
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()

            optimizer.zero_grad()
            output = model(inputs)
            loss = criterion(output, targets)
            loss.backward()
            optimizer.step()

        # Since PyTorch 1.1 the scheduler steps after the optimizer steps
        scheduler.step()

        train_acc, train_loss = validate(model, train_loader, use_gpu)
        val_acc, val_loss = validate(model, val_loader, use_gpu)

        # Current LR
        for param_group in optimizer.param_groups:
            current_lr = param_group["lr"]

        history.save(train_acc, val_acc, train_loss, val_loss, current_lr)
        print('Epoch {} - Train acc: {:.2f} - Val acc: {:.2f} - '
              'Train loss: {:.4f} - Val loss: {:.4f}'
              .format(epoch, train_acc, val_acc, train_loss, val_loss))

    return history, model
def test(model, dataset, batch_size, use_gpu=True):
    # Note: train_valid_loaders splits the dataset; this evaluates on the
    # first (train) split it returns and discards the validation loader.
    loader, _ = train_valid_loaders(dataset, batch_size)
    return validate(model, loader, use_gpu)