# TensorBoard writer for scalar metrics. The global step counters are advanced
# by the configured batch size each iteration, so they count samples seen
# rather than batches.
writer = SummaryWriter()
global_train_step = 0
global_val_step = 0
# Loop over epochs
for epoch in range(max_epochs):
    tqdm.write("Epoch: {}".format(epoch))
    # One progress bar per epoch, sized in samples (len(train_dataset)),
    # matching the per-batch update(params['batch_size']) below.
    progress_bar = tqdm(total=len(train_dataset), leave=True, position=0)
    # Training
    for local_batch, local_labels in training_generator:
        # Transfer to GPU
        local_batch, local_labels = local_batch.to(device), local_labels.to(
            device)
        # model.step returns (loss, yhat); presumably it runs the forward
        # pass, loss computation and optimizer update in one call, with yhat
        # being raw logits — TODO confirm against the model class.
        loss, yhat = model.step(local_batch, local_labels)
        yhat = model.softmax(yhat)
        # Predicted class = argmax over dim 1; torch.max returns
        # (values, indices), so [1] selects the class indices.
        yhat = torch.max(yhat, 1)[1].cpu()
        acc = pred_acc(local_labels.cpu(), yhat)
        writer.add_scalar('Train/Accuracy', acc, global_train_step)
        writer.add_scalar('Train/Loss', loss, global_train_step)
        # NOTE(review): the counter and progress bar advance by the full
        # configured batch size even when the last batch of an epoch is
        # smaller, so both can overshoot slightly — confirm whether the
        # loader drops the last partial batch.
        global_train_step += params['batch_size']
        progress_bar.update(params['batch_size'])
        progress_bar.set_postfix(loss=loss)
    tqdm.write("Running validation...")
    # Validation
    y_pred = []
    y_true = []
    # Disable autograd for the validation pass (equivalent to torch.no_grad()).
    # The with-suite continues beyond this chunk.
    with torch.set_grad_enabled(False):