        outputs_cpu = outputs.data.cpu().numpy()

        # accumulate the validation L1 loss
        lossL1 = criterionL1(outputs, targets)
        L1val_accum += lossL1.item()

        # for the first validation batch of each epoch, write denormalized
        # output and target images for visual inspection
        if i == 0:
            input_ndarray = inputs_cpu.cpu().numpy()[0]
            # magnitude of the freestream velocity, used to undo the
            # normalization of the target fields
            v_norm = (np.max(np.abs(input_ndarray[0, :, :])) ** 2 +
                      np.max(np.abs(input_ndarray[1, :, :])) ** 2) ** 0.5

            outputs_denormalized = data.denormalize(outputs_cpu[0], v_norm)
            targets_denormalized = data.denormalize(
                targets_cpu.cpu().numpy()[0], v_norm)
            utils.makeDirs(["results_train"])
            utils.imageOut("results_train/epoch{}_{}".format(epoch, i),
                           outputs_denormalized, targets_denormalized,
                           saveTargets=True)

    # data for graph plotting: average the accumulated losses over the epoch
    L1_accum /= len(trainLoader)
    L1val_accum /= len(valiLoader)
    if saveL1:
        if epoch == 0:
            utils.resetLog(prefix + "L1.txt")
            utils.resetLog(prefix + "L1val.txt")
        utils.log(prefix + "L1.txt", "{} ".format(L1_accum), False)
        utils.log(prefix + "L1val.txt", "{} ".format(L1val_accum), False)

# training finished, store the trained model weights
torch.save(netG.state_dict(), prefix + "modelG")
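# --- illustration only, not part of the training script --------------------
# A minimal sketch of what a denormalization helper like data.denormalize
# could look like, assuming the target fields were made dimensionless during
# preprocessing by dividing the pressure channel (index 0) by v_norm**2 and
# the two velocity channels (indices 1, 2) by v_norm; the actual helper in
# the dataset code may differ in detail.
def denormalize_sketch(fields, v_norm):
    """Undo the freestream-based normalization of a [3, H, W] numpy array."""
    out = fields.copy()
    out[0, :, :] *= v_norm ** 2  # pressure scales with the dynamic pressure
    out[1, :, :] *= v_norm       # x-velocity component
    out[2, :, :] *= v_norm       # y-velocity component
    return out
# ----------------------------------------------------------------------------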
    # move the denormalized fields to the GPU for the comparison
    targets_denormalized_comp, outputs_denormalized_comp = \
        targets_denormalized_comp.float().cuda(), \
        outputs_denormalized_comp.float().cuda()

    outputs_dn.data.resize_as_(outputs_denormalized_comp).copy_(
        outputs_denormalized_comp)
    targets_dn.data.resize_as_(targets_denormalized_comp).copy_(
        targets_denormalized_comp)

    # accumulate the loss on the denormalized fields
    loss_dn = criterionLoss(outputs_dn, targets_dn)
    Lossval_dn_accum += loss_dn.item()

    # write output image, note - this is currently overwritten for multiple models
    os.chdir("./results_test/")
    utils.imageOut("%04d" % (i), outputs_cpu, targets_cpu,
                   normalize=False, saveMontage=True)  # write normalized with error
    os.chdir("../")

# average the accumulated errors over the test set and log them
log(lf, "\n")
Lossval_accum /= len(testLoader)
lossPer_p_accum /= len(testLoader)
lossPer_v_accum /= len(testLoader)
lossPer_accum /= len(testLoader)
Lossval_dn_accum /= len(testLoader)
log(lf, "Loss percentage (p, v, combined): %f %%  %f %%  %f %%" %
    (lossPer_p_accum * 100, lossPer_v_accum * 100, lossPer_accum * 100))
log(lf, "Loss error: %f" % (Lossval_accum))
log(lf, "Denormalized error: %f" % (Lossval_dn_accum))
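# --- illustration only, not part of the test script ------------------------
# A sketch of how the per-channel "loss percentage" values averaged above
# could be accumulated inside the test loop: the relative L1 error of each
# field, sum(|prediction - target|) / sum(|target|). The channel layout
# (pressure in channel 0, velocity components in channels 1 and 2) is an
# assumption based on the denormalization above, and numpy is assumed to be
# imported as np, as elsewhere in these scripts.
def relative_l1_errors_sketch(outputs_cpu, targets_cpu):
    """Return (pressure, velocity, combined) relative L1 errors for [3, H, W] arrays."""
    err = np.abs(outputs_cpu - targets_cpu)
    loss_per_p = np.sum(err[0]) / np.sum(np.abs(targets_cpu[0]))
    loss_per_v = np.sum(err[1:]) / np.sum(np.abs(targets_cpu[1:]))
    loss_per = np.sum(err) / np.sum(np.abs(targets_cpu))
    return loss_per_p, loss_per_v, loss_per
# ----------------------------------------------------------------------------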