import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader

# `device` is referenced throughout but never defined in this file; it is
# assumed to be selected as follows.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def main():
    # params: training configuration; inference overrides the batch size below.
    config = {'learning_rate': 0.0001, 'nr_epochs': 500, 'batch_size': 8}
    batch_size = 1  # inference runs one sample at a time
    # The checkpoint name is derived from the numeric config entries.
    model_name = stringify(
        ["model_"] + [str(k) + '_' + str(config[k]) for k in config
                      if isinstance(config[k], (int, float))]) + '.pt'
    model_path = ''
    data_path = ''

    train_dataset = dataset()
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_dataset = dataset()
    val_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    # Restore the trained weights and switch to evaluation mode.
    model_inference = model_def()
    model_inference.load_state_dict(
        torch.load(os.path.join(model_path, model_name), map_location=device))
    model_inference.eval()

    labels = np.array([])
    outputs = np.array([])
    with torch.no_grad():  # no gradients are needed during inference
        for batch_idx, (data, target) in enumerate(val_loader):
            prob = model_inference.forward_inference(data)
            labels = np.append(labels, target.cpu().numpy())
            outputs = np.append(outputs, prob.cpu().numpy())
    print(labels)
    print(outputs)
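# `stringify` is used throughout to build metric and checkpoint names from a
# list of string fragments, but its definition is not part of this file. A
# minimal sketch consistent with the call sites, assuming the fragments are
# simply joined with underscores; the real helper may differ.
def stringify(parts):
    """Join name fragments into one underscore-separated identifier."""
    return '_'.join(str(p) for p in parts)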
def train(config, model_def, dataset, experiment):
    print("Device selected: ", device)
    data_path = ''
    model_path = ''
    learning_rate = config['learning_rate']
    nr_epochs = config['nr_epochs']
    batch_size = config['batch_size']
    print(stringify([str(k) + '_' + str(config[k]) for k in config]))

    train_dataset = dataset()
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_dataset = dataset()
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    model_inference = model_def().to(device)
    criterion = F.binary_cross_entropy_with_logits
    optimizer = optim.Adam(model_inference.parameters(), lr=learning_rate)

    for epoch in range(nr_epochs + 1):
        # One pass over the training set, then one over the held-out set.
        epoch_loss = train_epoch(epoch, model_inference, train_loader,
                                 optimizer, criterion, verbose=False)
        if experiment is not None:
            experiment.log_metric(
                stringify(["train_loss_"] +
                          [str(config[k]) for k in config
                           if isinstance(config[k], (int, float))]),
                epoch_loss, epoch=epoch)
        else:
            print(
                stringify(["train_loss_"] +
                          [str(k) + '_' + str(config[k]) for k in config]),
                epoch_loss)

        epoch_loss = evaluate_epoch(epoch, model_inference, test_loader,
                                    criterion, verbose=False)
        if experiment is not None:
            experiment.log_metric(
                stringify(["val_loss_"] +
                          [str(config[k]) for k in config
                           if isinstance(config[k], (int, float))]),
                epoch_loss)
        else:
            print(
                stringify(["test_loss_"] +
                          [str(k) + '_' + str(config[k]) for k in config]),
                epoch_loss)

        accuracy = calculate_accuracy(model_inference, test_loader)
        if experiment is not None:
            experiment.log_metric(
                stringify(["accuracy"] +
                          [str(config[k]) for k in config
                           if isinstance(config[k], (int, float))]),
                accuracy)
        else:
            print(
                stringify(["accuracy"] +
                          [str(k) + '_' + str(config[k]) for k in config]),
                accuracy)

    # Persist the final weights under a name derived from the numeric config.
    torch.save(
        model_inference.state_dict(),
        os.path.join(
            model_path,
            stringify(["model_"] +
                      [str(k) + '_' + str(config[k]) for k in config
                       if isinstance(config[k], (int, float))]) + '.pt'))
    print("Model written to disk")
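# `train_epoch` and `evaluate_epoch` are called above but not defined in this
# file. Minimal sketches of what they might look like, assuming the model
# returns raw logits and the loaders yield (data, target) pairs; the actual
# helpers may differ.
def train_epoch(epoch, model, loader, optimizer, criterion, verbose=False):
    """Run one optimization pass over `loader`; return the mean batch loss."""
    model.train()
    total_loss = 0.0
    for data, target in loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = criterion(model(data), target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    if verbose:
        print("Epoch %d: train loss %.4f" % (epoch, total_loss / len(loader)))
    return total_loss / len(loader)


def evaluate_epoch(epoch, model, loader, criterion, verbose=False):
    """Compute the mean batch loss over `loader` without updating weights."""
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            total_loss += criterion(model(data), target).item()
    if verbose:
        print("Epoch %d: val loss %.4f" % (epoch, total_loss / len(loader)))
    return total_loss / len(loader)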
def train(config, model_def, dataset, experiment):
    print("Device selected: ", device)
    # Local paths kept for reference; the cluster scratch locations below are
    # the ones actually used.
    # data_path = 'temp/dataset'
    # model_path = 'temp/saved_models'
    data_path = '/mnt/nfs/scratch1/vkeshav/vision/dataset'
    model_path = '/mnt/nfs/scratch1/vkeshav/vision/saved_models'
    learning_rate = config['learning_rate']
    nr_epochs = config['nr_epochs']
    batch_size = config['batch_size']
    print(stringify([str(k) + '_' + str(config[k]) for k in config]))

    train_dataset = dataset(data_path, split='train')
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_dataset = dataset(data_path, split='test')
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    model_inference = model_def().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model_inference.parameters(), lr=learning_rate)

    for epoch in range(nr_epochs + 1):
        epoch_loss = train_epoch(epoch, model_inference, train_loader,
                                 optimizer, criterion, verbose=False)
        if experiment is not None:
            experiment.log_metric(
                stringify(["train_loss_"] +
                          [str(config[k]) for k in config
                           if isinstance(config[k], (int, float))]),
                epoch_loss, epoch=epoch)
        else:
            print(
                stringify(["train_loss_"] +
                          [str(k) + '_' + str(config[k]) for k in config]),
                epoch_loss)

        epoch_loss = evaluate_epoch(epoch, model_inference, test_loader,
                                    criterion, verbose=False)
        if experiment is not None:
            experiment.log_metric(
                stringify(["val_loss_"] +
                          [str(config[k]) for k in config
                           if isinstance(config[k], (int, float))]),
                epoch_loss)
        else:
            print(
                stringify(["test_loss_"] +
                          [str(k) + '_' + str(config[k]) for k in config]),
                epoch_loss)

        # This variant evaluates one accuracy per prediction path.
        accuracies = calculate_accuracy(model_inference, test_loader)
        if experiment is not None:
            for i, accuracy in enumerate(accuracies, start=1):
                experiment.log_metric(
                    stringify(["accuracy_path%d" % i] +
                              [str(config[k]) for k in config
                               if isinstance(config[k], (int, float))]),
                    accuracy)
        else:
            print(
                stringify(["accuracy"] +
                          [str(k) + '_' + str(config[k]) for k in config]),
                np.mean(accuracies))

    # Persist the final weights under a name derived from the numeric config.
    torch.save(
        model_inference.state_dict(),
        os.path.join(
            model_path,
            stringify(["model_"] +
                      [str(k) + '_' + str(config[k]) for k in config
                       if isinstance(config[k], (int, float))]) + '.pt'))
    print("Model written to disk")
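# `calculate_accuracy` is also external to this file. The second `train`
# variant above expects it to return one accuracy per prediction path (four
# in this setup), while the first treats it as a scalar. A minimal sketch of
# the four-path variant, assuming `forward_inference` returns a sequence of
# four logit tensors of shape (batch, num_classes) and integer class targets,
# consistent with the CrossEntropyLoss used above; the real helper may differ.
def calculate_accuracy(model, loader):
    """Return per-path top-1 accuracies over `loader` as a NumPy array."""
    model.eval()
    correct = np.zeros(4)
    total = 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            outputs = model.forward_inference(data)  # assumed: 4 logit tensors
            for i, out in enumerate(outputs):
                correct[i] += (out.argmax(dim=1) == target).sum().item()
            total += target.size(0)
    return correct / total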