def train(opts):
    """Train a Model on the dataset at ``opts.data``, validating each epoch.

    Expects ``opts`` to provide: data, bs, lr, epoch, save_every, output, size.
    Checkpoints are written to ``opts.output`` every ``opts.save_every`` epochs.
    """
    # Select device
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # Define model
    model = Model().to(device)

    # Define dataloaders
    train_loader, val_loader = split_trainval(opts.data, opts.bs)

    # Define loss
    loss_criter = nn.L1Loss().to(device)

    # Define optimizer and LR schedule (decay LR by 10x at the halfway point).
    optimizer = Adam(model.parameters(), lr=opts.lr, weight_decay=1e-6)
    # max(1, ...) guards against step_size=0 when opts.epoch == 1, which would
    # otherwise make StepLR decay the learning rate on every single step.
    scheduler = StepLR(optimizer, step_size=max(1, opts.epoch // 2), gamma=0.1)

    # Ensure the checkpoint directory exists before the first torch.save.
    os.makedirs(opts.output, exist_ok=True)

    # Training loop
    for epoch in range(opts.epoch):
        # --- Train cycle ---
        running_loss = 0.0
        model.train()
        for batch_num, (inputs, labels) in enumerate(train_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            loss = loss_criter(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Weight by batch size so the epoch average is a per-sample mean
            # even when the last batch is smaller.
            running_loss += loss.item() * inputs.size(0)
            print(f'epoch num {epoch:02d} batch num {batch_num:04d} '
                  f'train loss {loss.item():02.04f}', end='\r')
        epoch_loss = running_loss / len(train_loader.dataset)

        # --- Val cycle ---
        running_loss = 0.0
        model.eval()
        with torch.no_grad():  # no autograd bookkeeping during evaluation
            for inputs, labels in val_loader:
                inputs = inputs.to(device)
                labels = labels.to(device)
                outputs = model(inputs)
                loss = loss_criter(outputs, labels)
                running_loss += loss.item() * inputs.size(0)
        epoch_val_loss = running_loss / len(val_loader.dataset)

        print(f'\n\nepoch num {epoch:02d} train loss {epoch_loss:02.04f} '
              f'val loss {epoch_val_loss:02.04f}')
        scheduler.step()

        # Periodic checkpoint; filename encodes size, epoch progress and LR.
        if (epoch + 1) % opts.save_every == 0:
            torch.save(
                model.state_dict(),
                os.path.join(
                    opts.output,
                    f'checkpoint_size{opts.size}_e{epoch+1}of{opts.epoch}_lr{opts.lr:.01E}.pth'))
def main(opts):
    """Run inference with the weights at ``opts.weights`` over the test set
    found in ``opts.input`` and save the predictions as ``ytest.npy`` there."""
    # Pick the GPU when available, otherwise fall back to CPU.
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # Restore the trained model and switch it to inference mode.
    model = Model().to(device)
    model.load_state_dict(torch.load(opts.weights))
    model.eval()

    # Define dataloader
    test_loader = get_test(opts.input)

    predictions = []
    with torch.no_grad():  # inference only: skip autograd entirely
        for batch in test_loader:
            batch = batch.to(device)
            output = model(batch)
            # Move to host memory and drop singleton dims before collecting.
            predictions.append(output.cpu().detach().numpy().squeeze())

    np.save(os.path.join(opts.input, 'ytest.npy'), np.array(predictions))