f"RMSE {value[1]:6.4f}\n" f"PSNR {value[2]:6.2f}\n" f"SSIM {value[3]:6.4f}\n" f"LPIPS {value[4]:6.4f}\n" f"GMSD {value[5]:6.4f}\n") else: images = torch.cat([bicubic, sr], dim=-1) vutils.save_image(lr, os.path.join("tests", f"lr_{filename}")) vutils.save_image(bicubic, os.path.join("tests", f"bicubic_{filename}")) vutils.save_image(sr, os.path.join("tests", f"sr_{filename}")) vutils.save_image(images, os.path.join("tests", f"compare_{filename}"), padding=10) if __name__ == "__main__": print("##################################################\n") print("Run Testing Engine.\n") create_folder("tests") logger.info("TestingEngine:") print("\tAPI version .......... 0.2.0") print("\tBuild ................ 2021.04.15") print("##################################################\n") main() logger.info( "Test single image performance evaluation completed successfully.\n")
"url used to set up distributed training. (Default: `tcp://59.110.31.55:12345`)" ) parser.add_argument("--dist-backend", default="nccl", type=str, help="Distributed backend. (Default: `nccl`)") parser.add_argument("--seed", default=None, type=int, help="Seed for initializing training.") parser.add_argument("--gpu", default=None, type=int, help="GPU id to use.") parser.add_argument( "--multiprocessing-distributed", action="store_true", help="Use multi-processing distributed training to launch " "N processes per node, which has N GPUs. This is the " "fastest way to use PyTorch for either single node or " "multi node data parallel training.") args = parser.parse_args() create_folder("runs") create_folder("weights") logger.info("TrainEngine:") logger.info("\tAPI version .......... 0.3.0") logger.info("\tBuild ................ 2021.06.15") main(args) logger.info("All training has been completed successfully.\n")
writer.add_scalar("Train/G Loss", g_loss.item(), iters) writer.add_scalar("Train/Pixel Loss", pixel_loss.item(), iters) writer.add_scalar("Train/Content Loss", content_loss.item(), iters) writer.add_scalar("Train/Adversarial Loss", adversarial_loss.item(), iters) # Output results every 100 batches. if i % 100 == 0: progress.display(i) # Save image every 300 batches. if iters % 300 == 0: vutils.save_image(hr.detach(), os.path.join("runs", "hr", f"GAN_{iters}.bmp")) vutils.save_image(sr.detach(), os.path.join("runs", "sr", f"GAN_{iters}.bmp")) if __name__ == "__main__": print("##################################################\n") print("Run Training Engine.\n") create_folder("runs") create_folder("runs/hr") create_folder("runs/sr") create_folder("weights") logger.info("TrainingEngine:") print("\tAPI version .......... 0.2.0") print("\tBuild ................ 2021.04.15") print("##################################################\n") main() logger.info("All training has been completed successfully.\n")
                                                   bottom_image_width))(tensor2pil(bottom_image)))
            # 4. Combine the bottom zone with the upper zone.
            final_image = np.concatenate((top_image, bottom_image))

            # Save the side-by-side comparison video.
            compare_writer.write(final_image)

            if args.view:
                # Display the video while it is being processed; press "q" to quit.
                cv2.imshow("LR video convert SR video ", final_image)
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break

            # Read the next frame.
            success, raw_frame = video_capture.read()


if __name__ == "__main__":
    print("##################################################\n")
    print("Run SR Engine.\n")

    create_folder("videos")

    logger.info("SREngine:")
    print("\tAPI version .......... 0.2.0")
    print("\tBuild ................ 2021.04.15")
    print("##################################################\n")

    main()

    logger.info("Super-resolution video completed successfully.\n")
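# NOTE: `tensor2pil` above is assumed to convert a CHW tensor in [0, 1] back to a
# PIL image so torchvision transforms can resize it. A minimal sketch of that helper:
import torchvision.transforms as transforms


def tensor2pil(tensor):
    # Drop the batch dimension (if present) and map the tensor to a PIL.Image.
    return transforms.ToPILImage()(tensor.squeeze(0).cpu())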
        images = torch.cat([bicubic, sr, hr], -1)
        vutils.save_image(images, os.path.join("benchmarks", f"{i + 1}.bmp"), padding=10)

    print(f"Performance average results:\n")
    print(f"indicator Score\n")
    print(f"--------- -----\n")
    print(f"MSE      {total_mse_value / len(dataloader):6.4f}\n"
          f"RMSE     {total_rmse_value / len(dataloader):6.4f}\n"
          f"PSNR     {total_psnr_value / len(dataloader):6.2f}\n"
          f"SSIM     {total_ssim_value / len(dataloader):6.4f}\n"
          f"LPIPS    {total_lpips_value / len(dataloader):6.4f}\n"
          f"GMSD     {total_gmsd_value / len(dataloader):6.4f}")


if __name__ == "__main__":
    print("##################################################\n")
    print("Run Testing Engine.\n")

    create_folder("benchmarks")

    logger.info("TestingEngine:")
    print("\tAPI version .......... 0.2.0")
    print("\tBuild ................ 2021.04.15")
    print("##################################################\n")

    main()

    logger.info("Test dataset performance evaluation completed successfully.")
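# NOTE: the running totals averaged above are assumed to be accumulated one batch
# at a time inside the evaluation loop. A sketch of that pattern (`model` and
# `dataloader` come from earlier in the script; the metric calls here are
# illustrative placeholders, not the repository's exact implementations):
import math

import torch

total_mse_value = 0.0
total_psnr_value = 0.0
for i, (lr, bicubic, hr) in enumerate(dataloader):
    with torch.no_grad():
        sr = model(lr)
    mse = torch.nn.functional.mse_loss(sr, hr).item()
    total_mse_value += mse
    # PSNR for images normalized to [0, 1]: 10 * log10(1 / MSE).
    total_psnr_value += 10 * math.log10(1.0 / max(mse, 1e-10))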