# --- Training setup for the decomposition model ---
# Resume from the newest checkpoint saved with matconvnet-style naming
# (model_%03d.pth); findLastCheckpoint returns 0 when none exists.
initial_epoch = findLastCheckpoint(save_dir=save_dir)  # load the last model in matconvnet style
# FIX: removed a leftover debug override (`initial_epoch = 11`) that clobbered
# the checkpoint search result, forcing a load of model_011.pth regardless of
# what was actually saved (and crashing when that file is absent).
if initial_epoch > 0:
    print('resuming by loading epoch %03d' % initial_epoch)
    decompose_model.load_state_dict(
        torch.load(os.path.join(save_dir, 'model_%03d.pth' % initial_epoch)))
    # model = torch.load(os.path.join(save_dir, 'model_%03d.pth' % initial_epoch))
# criterion = nn.MSELoss(reduction = 'sum')  # PyTorch 0.4.1
# criterion = sum_squared_error()
criterion = nn.MSELoss()
if cuda:
    decompose_model = decompose_model.cuda()
    # compose_model = compose_model.cuda()
    # device_ids = [0]
    # model = nn.DataParallel(model, device_ids=device_ids).cuda()
    # criterion = criterion.cuda()
optimizer_decompose = optim.Adam(decompose_model.parameters(), lr=args.lr)
# Decay LR by 0.2 at epochs 30/60/90.
scheduler_decompose = MultiStepLR(optimizer_decompose, milestones=[30, 60, 90], gamma=0.2)  # learning rates
# optimizer_compose = optim.Adam(compose_model.parameters(), lr=args.lr)
# scheduler_compose = MultiStepLR(optimizer_compose, milestones=[30, 60, 90], gamma=0.2)  # learning rates
for epoch in range(initial_epoch, n_epoch):
    decompose_model.train()
    # compose_model.train()
# --- Training setup: L1 reconstruction loss on random 100x100 crops ---
criterion = nn.L1Loss()  # training loss
# NOTE(review): `chk` appears unused in this fragment — presumably a metric
# criterion used later in the (unseen) loop body; confirm before removing.
chk = nn.MSELoss()
# Random 100x100 crops, converted to [0, 1] CHW tensors.
transform = transforms.Compose(
    [transforms.RandomCrop(100),
     transforms.ToTensor()])
trainset = datasets.ImageFolder(root=args.train_data, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size,
                                          shuffle=True, num_workers=0)
if cuda:
    model = model.cuda()
    pre_model = pre_model.cuda()
    # device_ids = [0]
    # model = nn.DataParallel(model, device_ids=device_ids).cuda()
    # criterion = criterion.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Decay LR by 0.2 at epochs 700/1400/2100.
scheduler = MultiStepLR(optimizer, milestones=[700, 1400, 2100], gamma=0.2)  # learning rates
for epoch in range(initial_epoch, n_epoch):
    # NOTE(review): calling step(epoch) at the START of the epoch with an
    # explicit epoch index is the pre-1.1 PyTorch scheduler idiom; newer
    # versions deprecate the argument and expect step() after optimizer.step().
    scheduler.step(epoch)  # step to the learning rate in this epoch
    # xs = dg.datagenerator(data_dir=args.train_data)
    # xs = xs.astype('float32') / 255.0
    # xs = torch.from_numpy(xs.transpose((0, 3, 1, 2)))  # tensor of the clean patches, NXCXHXW
    # # DDataset = DenoisingDataset(xs, sigma)
log('load trained model') # params = model.state_dict() # print(params.values()) # print(params.keys()) # # for key, value in params.items(): # print(key) # parameter name # print(params['dncnn.12.running_mean']) # print(model.state_dict()) model.eval() # evaluation mode # model.train() if torch.cuda.is_available(): model = model.cuda() if not os.path.exists(args.result_dir): os.mkdir(args.result_dir) for set_cur in args.set_names: if not os.path.exists(os.path.join(args.result_dir, set_cur)): os.mkdir(os.path.join(args.result_dir, set_cur)) psnrs = [] ssims = [] for im in os.listdir(os.path.join(args.set_dir, set_cur)): if im.endswith(".jpg") or im.endswith(".bmp") or im.endswith(".png"): x = np.array(imread(os.path.join(args.set_dir, set_cur, im)), dtype=np.float32)/255.0
# --- Training setup: train `model` with a frozen pre-trained DnCNN (`low_model`) as guidance ---
low_model = DnCNN()
low_model.load_state_dict(torch.load(os.path.join(args.low_model_dir, args.low_model_name)))
initial_epoch = findLastCheckpoint(save_dir=save_dir)  # load the last model in matconvnet style
if initial_epoch > 0:
    print('resuming by loading epoch %03d' % initial_epoch)
    # NOTE(review): the "resuming" message is printed but both load calls below
    # are commented out, so the trained weights are NOT actually restored —
    # confirm whether this is intentional (training restarts from the current
    # `model` weights at the found epoch index).
    # model.load_state_dict(torch.load(os.path.join(save_dir, 'model_%03d.pth' % initial_epoch)))
    # model = torch.load(os.path.join(save_dir, 'model_%03d.pth' % initial_epoch))
model.train()     # network being optimized
low_model.eval()  # frozen guidance network
# criterion = nn.MSELoss(reduction = 'sum')  # PyTorch 0.4.1
# criterion = sum_squared_error()
criterion = nn.MSELoss()
# NOTE(review): torch.FloatTensor(sizes) allocates an UNINITIALIZED buffer of
# shape (batch_size, 1, 40, 40); assumes it is filled before being read —
# verify in the (unseen) loop body.
Edge_enhance = torch.FloatTensor(args.batch_size, 1, 40, 40)
if cuda:
    model = model.cuda()
    low_model = low_model.cuda()
    # device_ids = [0]
    # model = nn.DataParallel(model, device_ids=device_ids).cuda()
    criterion = criterion.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Decay LR by 0.2 at epochs 30/60/90.
scheduler = MultiStepLR(optimizer, milestones=[30, 60, 90], gamma=0.2)  # learning rates
for epoch in range(initial_epoch, n_epoch):
    # Pre-1.1 PyTorch scheduler idiom (explicit epoch index at epoch start);
    # note this form does keep the LR consistent when resuming mid-schedule.
    scheduler.step(epoch)  # step to the learning rate in this epoch
    # Regenerate clean training patches each epoch, scaled to [0, 1].
    xs = dg.datagenerator(data_dir=args.train_data)
    xs = xs.astype('float32') / 255.0
    xs = torch.from_numpy(xs.transpose((0, 3, 1, 2)))  # tensor of the clean patches, NXCXHXW
    DDataset = DenoisingDataset(xs, sigma)
# params = model.state_dict() # print(params.values()) # print(params.keys()) # # for key, value in params.items(): # print(key) # parameter name # print(params['dncnn.12.running_mean']) # print(model.state_dict()) model_dncnn.eval() # evaluation mode model_lowfreq.eval() # model.train() if torch.cuda.is_available(): model_dncnn = model_dncnn.cuda() model_lowfreq = model_lowfreq.cuda() if not os.path.exists(args.result_dir): os.mkdir(args.result_dir) for set_cur in args.set_names: if not os.path.exists(os.path.join(args.result_dir, set_cur)): os.mkdir(os.path.join(args.result_dir, set_cur)) psnrs = [] ssims = [] for im in os.listdir(os.path.join(args.set_dir, set_cur)): if im.endswith(".jpg") or im.endswith(".bmp") or im.endswith(".png"):
# params = model.state_dict() # print(params.values()) # print(params.keys()) # # for key, value in params.items(): # print(key) # parameter name # print(params['dncnn.12.running_mean']) # print(model.state_dict()) high_model.eval() # evaluation mode low_model.eval() # model.train() if torch.cuda.is_available(): high_model = high_model.cuda() low_model = low_model.cuda() if not os.path.exists(args.result_dir): os.mkdir(args.result_dir) for set_cur in args.set_names: if not os.path.exists(os.path.join(args.result_dir, set_cur)): os.mkdir(os.path.join(args.result_dir, set_cur)) psnrs = [] ssims = [] for im in os.listdir(os.path.join(args.set_dir, set_cur)): if im.endswith(".jpg") or im.endswith(".bmp") or im.endswith( ".png"):
# params = model.state_dict() # print(params.values()) # print(params.keys()) # # for key, value in params.items(): # print(key) # parameter name # print(params['dncnn.12.running_mean']) # print(model.state_dict()) low_model.eval() # evaluation mode res_model.eval() model.eval() if torch.cuda.is_available(): low_model = low_model.cuda() res_model = res_model.cuda() model = model.cuda() # evaluation mode if not os.path.exists(args.result_dir): os.mkdir(args.result_dir) for set_cur in args.set_names: if not os.path.exists(os.path.join(args.result_dir, set_cur)): os.mkdir(os.path.join(args.result_dir, set_cur)) psnrs = [] ssims = [] for im in os.listdir(os.path.join(args.set_dir, set_cur)):