# NOTE(review): this chunk is the tail of a per-batch training loop followed by
# epoch-level bookkeeping and the validation pass. The enclosing `for epoch`
# / `for batch` headers are outside this view and the original indentation was
# lost, so the nesting below is a best-effort reconstruction — confirm against
# the full file.

# Calculate and log other batch metrics.
# Per-batch pixel accuracy (%): fraction of pixels where the predicted change
# mask equals the label mask, out of batch_size * patch_size^2 pixels.
cd_corrects = (100 *
               (cd_preds.squeeze().byte() == labels.squeeze().byte()).sum() /
               (labels.size()[0] * (opt.patch_size**2)))

# Binary precision/recall/F1/support over the flattened pixel masks;
# pos_label=1 treats the "changed" class as the positive class.
cd_train_report = prfs(labels.data.cpu().numpy().flatten(),
                       cd_preds.data.cpu().numpy().flatten(),
                       average='binary',
                       pos_label=1)

# Fold this batch's loss, accuracy, P/R/F1 report and current LR into the
# running training metrics.
# NOTE(review): scheduler.get_lr() is deprecated in newer PyTorch in favour of
# scheduler.get_last_lr() — confirm against the torch version pinned here.
train_metrics = set_metrics(train_metrics,
                            cd_loss,
                            cd_corrects,
                            cd_train_report,
                            scheduler.get_lr())

# log the batch mean metrics
mean_train_metrics = get_mean_metrics(train_metrics)

for k, v in mean_train_metrics.items():
    writer.add_scalars(str(k), {'train': v}, total_step)

# clear batch variables from memory
del batch_img1, batch_img2, labels

# Epoch-level bookkeeping: advance the LR schedule once per epoch and log the
# running mean training metrics.
scheduler.step()
logging.info("EPOCH {} TRAIN METRICS".format(epoch) + str(mean_train_metrics))

"""
Begin Validation
"""
# Switch to eval mode (freezes dropout / batch-norm statistics) and disable
# autograd bookkeeping for the whole validation pass.
model.eval()
with torch.no_grad():
    # NOTE(review): a `for batch_img1, batch_img2, labels in <val_loader>:`
    # header appears to be missing from this chunk — the batch tensors are
    # re-bound per validation batch below; verify against the full file.

    # Move the validation batch to the target device.
    # NOTE(review): autograd.Variable is a legacy no-op wrapper on modern
    # PyTorch; plain tensors would suffice.
    batch_img1 = autograd.Variable(batch_img1).float().to(dev)
    batch_img2 = autograd.Variable(batch_img2).float().to(dev)
    labels = autograd.Variable(labels).long().to(dev)

    # Get predictions and calculate loss
    cd_preds = model(batch_img1, batch_img2)
    cd_preds = cd_preds[-1]  # keep only the final output head
    _, cd_preds = torch.max(cd_preds, 1)  # argmax over the class dimension

    # Calculate and log other batch metrics (same formula as training above).
    cd_corrects = (100 *
                   (cd_preds.squeeze().byte() == labels.squeeze().byte()).sum() /
                   (labels.size()[0] * (opt.patch_size**2)))

    cd_val_report = prfs(labels.data.cpu().numpy().flatten(),
                         cd_preds.data.cpu().numpy().flatten(),
                         average='binary',
                         pos_label=1)

    # NOTE(review): reads `val_metrics` but assigns `test_metrics`, so
    # `val_metrics` itself is never updated across batches — verify this is
    # intentional; the naming is inconsistent with the training branch above.
    test_metrics = set_test_metrics(val_metrics,
                                    cd_corrects,
                                    cd_val_report)

    # log the batch mean metrics
    mean_test_metrics = get_mean_metrics(test_metrics)

    # clear batch variables from memory
    del batch_img1, batch_img2, labels

# NOTE(review): validation results use print() while training uses
# logging.info — consider unifying; also no TensorBoard scalars are written
# for the validation pass, unlike training.
print("EPOCH VALIDATION METRICS", mean_test_metrics)