def main_worker(gpu, args):
    args.gpu = gpu

    if args.gpu is not None:
        logger.info(f"Use GPU: {args.gpu} for testing.")

    model = configure(args)
    if not torch.cuda.is_available():
        logger.warning("Using CPU, this will be slow.")
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)

    # Set eval mode.
    model.eval()

    cudnn.benchmark = True

    # Get the image filename.
    filename = os.path.basename(args.lr)

    # Read the low-resolution image and build its bicubic baseline.
    lr = Image.open(args.lr)
    bicubic = transforms.Resize((lr.size[1] * args.upscale_factor,
                                 lr.size[0] * args.upscale_factor),
                                Mode.BICUBIC)(lr)
    lr = process_image(lr, args.gpu)
    bicubic = process_image(bicubic, args.gpu)

    with torch.no_grad():
        sr = model(lr)

    if args.hr:
        hr = process_image(Image.open(args.hr), args.gpu)
        vutils.save_image(hr, os.path.join("test", f"hr_{filename}"))
        images = torch.cat([bicubic, sr, hr], dim=-1)

        value = iqa(sr, hr, args.gpu)

        print("Performance average results:\n")
        print("indicator Score\n")
        print("--------- -----\n")
        print(f"MSE       {value[0]:6.4f}\n"
              f"RMSE      {value[1]:6.4f}\n"
              f"PSNR      {value[2]:6.2f}\n"
              f"SSIM      {value[3]:6.4f}\n"
              f"LPIPS     {value[4]:6.4f}\n"
              f"GMSD      {value[5]:6.4f}\n")
    else:
        images = torch.cat([bicubic, sr], dim=-1)

    vutils.save_image(lr, os.path.join("test", f"lr_{filename}"))
    vutils.save_image(bicubic, os.path.join("test", f"bicubic_{filename}"))
    vutils.save_image(sr, os.path.join("test", f"sr_{filename}"))
    vutils.save_image(images, os.path.join("test", f"compare_{filename}"),
                      padding=10)
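# A hedged sketch, not part of the original script: a minimal argparse entry
# point for the single-image worker above, assuming that worker lives in its
# own script. The flag names are assumptions inferred from the attributes the
# worker reads (args.lr, args.hr, args.upscale_factor, args.gpu); configure(args)
# may require additional attributes (e.g. architecture/weights) not shown here.
def example_single_image_main():
    import argparse

    parser = argparse.ArgumentParser(description="Single-image SR test (sketch).")
    parser.add_argument("--lr", type=str, required=True,
                        help="Path to the low-resolution input image.")
    parser.add_argument("--hr", type=str, default=None,
                        help="Optional path to the ground-truth HR image.")
    parser.add_argument("--upscale-factor", type=int, default=4)
    parser.add_argument("--gpu", type=int, default=None)
    args = parser.parse_args()

    # The worker writes its outputs under "test/".
    os.makedirs("test", exist_ok=True)
    main_worker(args.gpu, args)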
def inference(lr, hr, model, model_path, gpu: int = None):
    model.load_state_dict(torch.load(model_path)["state_dict"])

    if gpu is not None:
        torch.cuda.set_device(gpu)
        model = model.cuda(gpu)

    # Set eval mode.
    model.eval()

    with torch.no_grad():
        sr = model(lr)

    value = iqa(sr, hr, gpu)

    return value
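# A hedged usage sketch, not part of the original script: one way to call
# `inference` on a single LR/HR pair. The image paths and checkpoint path are
# placeholders, not files guaranteed by the repository; `value` follows the
# same ordering used elsewhere (MSE, RMSE, PSNR, SSIM, LPIPS, GMSD).
def example_inference_call(model, gpu: int = 0):
    lr = process_image(Image.open("examples/lr.png"), gpu)
    hr = process_image(Image.open("examples/hr.png"), gpu)
    value = inference(lr, hr, model, model_path="weights/model-best.pth", gpu=gpu)
    print(f"PSNR: {value[2]:6.2f} SSIM: {value[3]:6.4f}")
    return value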
def main_worker(gpu, ngpus_per_node, args):
    global total_mse_value, total_rmse_value, total_psnr_value
    global total_ssim_value, total_lpips_value, total_gmsd_value

    args.gpu = gpu

    if args.gpu is not None:
        logger.info(f"Use GPU: {args.gpu} for testing.")

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes.
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    model = configure(args)

    if not torch.cuda.is_available():
        logger.warning("Using CPU, this will be slow.")
    elif args.distributed:
        # For multiprocessing distributed, the DistributedDataParallel
        # constructor should always set the single device scope, otherwise
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have.
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = nn.parallel.DistributedDataParallel(module=model,
                                                        device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to
            # all available GPUs if device_ids are not set.
            model = nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs.
        if args.arch.startswith("alexnet") or args.arch.startswith("vgg"):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    logger.info("Load testing dataset.")
    # Build the test dataset and dataloader.
    dataset = BaseTestDataset(root=os.path.join(args.data, "test"),
                              image_size=args.image_size,
                              upscale_factor=args.upscale_factor)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=args.batch_size,
                                             pin_memory=True,
                                             num_workers=args.workers)
    logger.info(f"Dataset information:\n"
                f"\tPath: {os.getcwd()}/{args.data}/test\n"
                f"\tNumber of samples: {len(dataset)}\n"
                f"\tNumber of batches: {len(dataloader)}\n"
                f"\tShuffle: False\n"
                f"\tSampler: None\n"
                f"\tWorkers: {args.workers}")

    cudnn.benchmark = True

    # Set eval mode.
    model.eval()

    # Start evaluating model performance.
    progress_bar = tqdm(enumerate(dataloader), total=len(dataloader))
    for i, (lr, bicubic, hr) in progress_bar:
        # Move data to the selected device.
        if args.gpu is not None:
            lr = lr.cuda(args.gpu, non_blocking=True)
            bicubic = bicubic.cuda(args.gpu, non_blocking=True)
            hr = hr.cuda(args.gpu, non_blocking=True)

        with torch.no_grad():
            sr = model(lr)

        # Evaluate performance.
        value = iqa(sr, hr, args.gpu)
        total_mse_value += value[0]
        total_rmse_value += value[1]
        total_psnr_value += value[2]
        total_ssim_value += value[3]
        total_lpips_value += value[4]
        total_gmsd_value += value[5]

        progress_bar.set_description(f"[{i + 1}/{len(dataloader)}] "
                                     f"PSNR: {total_psnr_value / (i + 1):6.2f} "
                                     f"SSIM: {total_ssim_value / (i + 1):6.4f}")

        images = torch.cat([bicubic, sr, hr], dim=-1)
        vutils.save_image(images,
                          os.path.join("benchmark", f"{i + 1}.bmp"),
                          padding=10)

    print("Performance average results:\n")
    print("indicator Score\n")
    print("--------- -----\n")
    print(f"MSE       {total_mse_value / len(dataloader):6.4f}\n"
          f"RMSE      {total_rmse_value / len(dataloader):6.4f}\n"
          f"PSNR      {total_psnr_value / len(dataloader):6.2f}\n"
          f"SSIM      {total_ssim_value / len(dataloader):6.4f}\n"
          f"LPIPS     {total_lpips_value / len(dataloader):6.4f}\n"
          f"GMSD      {total_gmsd_value / len(dataloader):6.4f}")
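# A hedged sketch, not part of the original script: one way to launch the
# distributed worker above with torch.multiprocessing.spawn. The attribute
# names mirror what the worker reads (args.multiprocessing_distributed,
# args.world_size, args.gpu); an actual launcher may differ.
def example_spawn_benchmark(args):
    import torch.multiprocessing as mp

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # One process per GPU; world_size becomes the total process count.
        args.world_size = ngpus_per_node * args.world_size
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Single-process fallback on the requested (or default) GPU.
        main_worker(args.gpu, ngpus_per_node, args)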
def main_worker(gpu, args):
    global total_mse_value, total_rmse_value, total_psnr_value
    global total_ssim_value, total_lpips_value, total_gmsd_value

    args.gpu = gpu

    if args.gpu is not None:
        logger.info(f"Use GPU: {args.gpu} for testing.")

    model = configure(args)
    if not torch.cuda.is_available():
        logger.warning("Using CPU, this will be slow.")
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)

    logger.info("Load testing dataset.")
    # Build the test dataset and dataloader.
    dataset = BaseTestDataset(os.path.join(args.data, "test"),
                              args.image_size,
                              args.upscale_factor)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=args.batch_size,
                                             pin_memory=True,
                                             num_workers=args.workers)
    logger.info(f"Dataset information:\n"
                f"\tPath: {os.getcwd()}/{args.data}/test\n"
                f"\tNumber of samples: {len(dataset)}\n"
                f"\tNumber of batches: {len(dataloader)}\n"
                f"\tShuffle: False\n"
                f"\tSampler: None\n"
                f"\tWorkers: {args.workers}")

    cudnn.benchmark = True

    # Set eval mode.
    model.eval()

    with torch.no_grad():
        # Start evaluating model performance.
        progress_bar = tqdm(enumerate(dataloader), total=len(dataloader))
        for i, (lr, bicubic, hr) in progress_bar:
            # Move data to the selected device.
            if args.gpu is not None:
                lr = lr.cuda(args.gpu, non_blocking=True)
                bicubic = bicubic.cuda(args.gpu, non_blocking=True)
                hr = hr.cuda(args.gpu, non_blocking=True)

            sr = model(lr)

            # Evaluate performance.
            value = iqa(sr, hr, args.gpu)
            total_mse_value += value[0]
            total_rmse_value += value[1]
            total_psnr_value += value[2]
            total_ssim_value += value[3]
            total_lpips_value += value[4]
            total_gmsd_value += value[5]

            progress_bar.set_description(f"[{i + 1}/{len(dataloader)}] "
                                         f"PSNR: {total_psnr_value / (i + 1):6.2f} "
                                         f"SSIM: {total_ssim_value / (i + 1):6.4f}")

            images = torch.cat([bicubic, sr, hr], dim=-1)
            vutils.save_image(images,
                              os.path.join("benchmarks", f"{i + 1}.bmp"),
                              padding=10)

    print("Performance average results:\n")
    print("indicator Score\n")
    print("--------- -----\n")
    print(f"MSE       {total_mse_value / len(dataloader):6.4f}\n"
          f"RMSE      {total_rmse_value / len(dataloader):6.4f}\n"
          f"PSNR      {total_psnr_value / len(dataloader):6.2f}\n"
          f"SSIM      {total_ssim_value / len(dataloader):6.4f}\n"
          f"LPIPS     {total_lpips_value / len(dataloader):6.4f}\n"
          f"GMSD      {total_gmsd_value / len(dataloader):6.4f}")