def get_model(name, in_channels, out_channels, **kwargs):
    """Factory returning a model instance by name."""
    if name == "unet":
        return Unet(in_channels, out_channels)
    if name == "baby-unet":
        return BabyUnet(in_channels, out_channels)
    if name == "dncnn":
        return DnCNN(in_channels, out_channels)
    if name == "convolution":
        return SingleConvolution(in_channels, out_channels, kwargs["width"])
    # Fail loudly instead of silently returning None for an unknown name.
    raise ValueError("unknown model name: {}".format(name))
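# Usage sketch for the factory above (the model name and the channel/width
# values here are illustrative assumptions, not settings from this repository):
model = get_model("convolution", in_channels=1, out_channels=1, width=64)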
# image_types = ['test_mix']
run_dir = args_test.pretrain_dir + f'/{args_test.model}'

# Restore the training-time arguments saved alongside the pretrained run.
with open(run_dir + '/args.txt') as args_file:
    args = Namespace(**json.load(args_file))
pprint(args)

if args_test.no_cuda:
    test_dir = run_dir + '/benchmark_cpu'
else:
    test_dir = run_dir + '/benchmark_gpu'
mkdirs(test_dir)

# Rebuild the network with the same architecture hyperparameters it was trained with.
if args_test.model == 'dncnn':
    model = DnCNN(depth=args.depth, n_channels=args.width, image_channels=1,
                  use_bnorm=True, kernel_size=3)
elif args_test.model == 'n2n':
    model = UnetN2N(args.in_channels, args.out_channels)
if args.debug:
    print(model)
    print(module_size(model))

# Load the final checkpoint and switch to evaluation mode for benchmarking.
model.load_state_dict(
    torch.load(run_dir + f'/checkpoints/model_epoch{args.epochs}.pth',
               map_location='cpu'))
model = model.to(device)
model.eval()
logger = {}
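# Sketch only (an assumption, not code from this repository): with the weights
# restored and model.eval() set, a benchmark pass would typically run under
# torch.no_grad(); the input shape below is hypothetical.
with torch.no_grad():
    dummy = torch.randn(1, 1, 64, 64, device=device)
    denoised = model(dummy)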
def total_parameters(model):
    # The two lines below are reconstructed from the call site
    # total_parameters(model); only the return statement appears in the original.
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    return sum([np.prod(p.size()) for p in model_parameters])


if __name__ == "__main__":
    print("torch.cuda.is_available() =", torch.cuda.is_available())
    print("torch.cuda.device_count() =", torch.cuda.device_count())
    print("torch.cuda.device('cuda') =", torch.cuda.device('cuda'))
    print("torch.cuda.current_device() =", torch.cuda.current_device())
    print()

    epochs = 20
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # CDNet with a DnCNN backbone, restored from a saved checkpoint.
    model = CDNet(
        in_planes=1,
        bbone=DnCNN(),
    )
    checkpoint = torch.load('./results/models/cdnet-30.pth')
    model.load_state_dict(checkpoint['model'])
    model.to(device)
    print("total parameters: {}".format(total_parameters(model)))

    # Evaluate across rounds, raising the noise level by 0.1 each round.
    noise = 0
    test_scores = []
    for epoch in range(epochs):
        noise += 0.1
        test_dataset = CIRCLEDataset(count=1000, noise=2,
test_dataset = DnCNNDataset(count=1000, noise=2, random_noise=False, debug=False)

train_dataloader = torch_data.DataLoader(train_dataset, num_workers=0, batch_size=32)
val_dataloader = torch_data.DataLoader(val_dataset, num_workers=0, batch_size=32)
test_dataloader = torch_data.DataLoader(test_dataset, num_workers=0, batch_size=32)

model = DnCNN()
model.to(device)
print("total parameters: {}".format(total_parameters(model)))

optimizer = torch.optim.Adam(lr=0.005, weight_decay=1e-3, params=model.parameters())
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10, verbose=True)
criterion = SSE()

train_meta = []
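# Minimal training-loop sketch (an assumption about how the objects above fit
# together, not code from this repository). The (clean, noisy) batch ordering,
# the epoch count, and stepping ReduceLROnPlateau on the validation loss are
# all assumptions made for illustration.
for epoch in range(50):
    model.train()
    for clean, noisy in train_dataloader:
        clean, noisy = clean.to(device), noisy.to(device)
        optimizer.zero_grad()
        loss = criterion(model(noisy), clean)
        loss.backward()
        optimizer.step()

    model.eval()
    val_loss = 0.0
    with torch.no_grad():
        for clean, noisy in val_dataloader:
            clean, noisy = clean.to(device), noisy.to(device)
            val_loss += criterion(model(noisy), clean).item()
    # ReduceLROnPlateau lowers the learning rate when the monitored metric stalls.
    scheduler.step(val_loss)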