import os
import random
import time

import numpy as np
import rawpy
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms

import config
import utils

debug = False  # assumed default; set True for verbose sizes and timings


def train(epoch, train_loader, model):
    loss_accumulator = utils.DictAccumulator(config.loss_average_win_size)
    grad_accumulator = utils.DictAccumulator(config.loss_average_win_size)
    score_accumulator = utils.DictAccumulator()
    iters = len(train_loader)

    for i, (inputs, targets) in enumerate(train_loader):
        inputs = Variable(inputs.cuda())
        targets = Variable(targets.cuda())

        net_outputs, loss, grad, lr_dict, score = model.fit(
            inputs, targets, update=True, epoch=epoch)
        loss_accumulator.update(loss)
        grad_accumulator.update(grad)
        score_accumulator.update(score)

        if (i + 1) % config.loss_average_win_size == 0:
            need_plot = True
            if hasattr(config, 'plot_loss_start_iter'):
                need_plot = (i + 1 + (epoch - 1) * iters
                             >= config.plot_loss_start_iter)
            elif hasattr(config, 'plot_loss_start_epoch'):
                need_plot = (epoch >= config.plot_loss_start_epoch)
            utils.print_loss(config, "train_loss",
                             loss_accumulator.get_average(), epoch=epoch,
                             iters=iters, current_iter=i + 1,
                             need_plot=need_plot)
            utils.print_loss(config, "grad", grad_accumulator.get_average(),
                             epoch=epoch, iters=iters, current_iter=i + 1,
                             need_plot=need_plot)
            utils.print_loss(config, "learning rate", lr_dict, epoch=epoch,
                             iters=iters, current_iter=i + 1,
                             need_plot=need_plot)
            utils.print_loss(config, "train_score",
                             score_accumulator.get_average(), epoch=epoch,
                             iters=iters, current_iter=i + 1,
                             need_plot=need_plot)

    # periodically dump one random sample from the last batch of the epoch
    if epoch % config.save_train_hr_interval_epoch == 0:
        k = random.randint(0, net_outputs['output'].size(0) - 1)
        for name, out in net_outputs.items():
            utils.save_tensor(
                out.data[k],
                os.path.join(config.TRAIN_OUT_FOLDER,
                             'epoch_%d_k_%d_%s.png' % (epoch, k, name)))
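
# utils.DictAccumulator is defined elsewhere in this repo. The loop above
# only relies on update() and get_average(); a minimal sketch of that
# behavior (dict-valued metrics averaged over a sliding window) is given
# below for reference. The real utils.py implementation may differ.
from collections import defaultdict, deque


class DictAccumulatorSketch(object):
    """Accumulate dict-valued metrics; average over the last win_size updates."""

    def __init__(self, win_size=None):
        # win_size=None keeps an unbounded history (plain running average)
        self.history = defaultdict(lambda: deque(maxlen=win_size))

    def update(self, d):
        for key, value in d.items():
            self.history[key].append(float(value))

    def get_average(self):
        return {k: sum(v) / len(v) for k, v in self.history.items()}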

def peek(target_net, img_path, epoch):
    # open image
    img = Image.open(img_path)

    # save the raw peek image the first time around
    if epoch == config.peek_interval_epoch:
        img.save(os.path.join(config.PEEK_OUT_FOLDER,
                              os.path.basename(img_path) + '_0.png'))

    # do inference
    img = img.convert('RGB')
    trans = transforms.Compose([transforms.ToTensor()])
    input_tensor = trans(img)
    inputs = input_tensor.view(1, input_tensor.size(0), input_tensor.size(1),
                               input_tensor.size(2))
    print("inference...")
    inputs = Variable(inputs, volatile=True)  # legacy PyTorch no-grad inference
    target_net.eval()
    net_outputs = target_net(inputs.cuda())

    # save net_outputs
    for name, out in net_outputs.items():
        utils.save_tensor(
            out.data[0],
            os.path.join(config.PEEK_OUT_FOLDER,
                         os.path.basename(img_path) + '_%s_%d.png'
                         % (name, epoch)))
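
# A minimal sketch of how train() and peek() are typically driven from the
# outer loop. config.epochs, config.peek_images, and the model.target_net
# accessor are illustrative assumptions, not names taken from this repo;
# config.peek_interval_epoch is the field peek() itself checks.
def main_loop_sketch(model, train_loader):
    for epoch in range(1, config.epochs + 1):
        train(epoch, train_loader, model)
        if epoch % config.peek_interval_epoch == 0:
            for img_path in config.peek_images:
                peek(model.target_net, img_path, epoch)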

def run(input_dirs, save_dir, target_net, gpu_id=None):
    if gpu_id is not None:
        print("use cuda")
        # cudnn.benchmark = True
        torch.cuda.set_device(gpu_id)
        target_net.cuda(gpu_id)
    print("input_dirs:", input_dirs)
    print("save_dir:", save_dir)

    if type(input_dirs) is not list:  # backward compatibility with old configs
        input_dirs = [input_dirs]

    for input_dir in input_dirs:
        # a directory of sample images
        if os.path.isdir(input_dir):
            files = utils.get_files_from_dir(input_dir)
        # a desc.txt listing sample paths
        elif os.path.isfile(input_dir):
            files = utils.get_files_from_desc(input_dir)

        total = 0
        for img_path in files:
            total += 1
            # read image
            img = Image.open(img_path)
            img = img.convert('RGB')
            # img = img.resize((384, 384))
            width, height = img.size
            if debug:
                print(width, height)
            trans = transforms.Compose([transforms.ToTensor()])
            input_tensor = trans(img)

            net_outputs = process(input_tensor, target_net, gpu_id)
            for name, out in net_outputs.items():
                save_path = os.path.join(
                    save_dir, os.path.basename(img_path) + '_%s.png' % (name,))
                print("save to:", save_path)
                utils.save_tensor(out.data[0], save_path, width, height)
        print("total:", total)
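
# process() is defined elsewhere in the repo. A sketch of what these scripts
# assume it does: wrap a single CHW tensor into a batch of one, run the
# network without gradients, and return the dict of named output tensors.
# The exact signature and device handling here are assumptions.
def process_sketch(input_tensor, target_net, gpu_id):
    inputs = input_tensor.unsqueeze(0)  # CHW -> 1xCHW
    if gpu_id is not None and gpu_id != -1:
        inputs = inputs.cuda(gpu_id)
    inputs = Variable(inputs, volatile=True)  # legacy PyTorch no-grad inference
    target_net.eval()
    return target_net(inputs)  # e.g. {'output': tensor, ...}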

def run(dic, input_dir, save_dir, target_net, snapshot_paths, gpu_id=None,
        divided=True, psnr=False):
    if gpu_id is not None:
        print("use cuda")
        # cudnn.benchmark = True
        if gpu_id != -1:
            torch.cuda.set_device(gpu_id)
            target_net.cuda(gpu_id)
    print("input_dir:", input_dir)
    print("save_dir:", save_dir)
    utils.touch_dir(save_dir)

    total = 0
    cluster_num = len(snapshot_paths)
    for ii in range(cluster_num):
        llist = dic[ii]

        # load the snapshot for this cluster; use the arguments rather than
        # reaching back into config
        checkpoint = torch.load(snapshot_paths[ii],
                                map_location=lambda storage, loc: storage)
        print(checkpoint.keys())
        target_net.load_state_dict(checkpoint)
        target_net.eval()

        for file_name in llist:
            total += 1
            img_path = input_dir + '/' + file_name
            # read image
            img = Image.open(img_path)
            img = img.convert('RGB')
            # img = img.resize((384, 384))
            width, height = img.size
            if debug:
                print(width, height)
            trans = transforms.Compose([transforms.ToTensor()])
            start_time = time.time()

            if (not divided) or (width < 1000) or (height < 1000):
                # small image: single forward pass
                input_tensor = trans(img)
                net_outputs = process(input_tensor, target_net, gpu_id)
                end_time = time.time()
                if debug:
                    print("single image cost:%s ms"
                          % int((end_time - start_time) * 1000))
                for name, out in net_outputs.items():
                    if name == 'input':
                        continue
                    save_path = os.path.join(save_dir,
                                             os.path.basename(img_path))
                    print("save to:", save_path)
                    utils.save_tensor(out.data[0], save_path, width, height)
            else:
                # large image: slide a patch_size window in crop_size steps
                # and keep only the central crop of each output, so patch
                # borders never land in the stitched result
                patch_size = 500
                crop_size = 300
                interval_size = (patch_size - crop_size) // 2  # integer: used as an index
                input_tensor_0 = trans(img)
                output_1 = np.zeros((input_tensor_0.shape[0],
                                     input_tensor_0.shape[1],
                                     input_tensor_0.shape[2]))
                dict_1 = {'input': input_tensor_0, 'output': output_1}

                xx = 0
                while xx + patch_size < height:
                    yy = 0
                    while yy + patch_size < width:
                        input_tensor = trans(img.crop(
                            (yy, xx, yy + patch_size, xx + patch_size)))
                        net_outputs = process(input_tensor, target_net, gpu_id)
                        # move to host memory before writing into the numpy canvas
                        out_data = net_outputs['output'].data[0].cpu().numpy()
                        if xx == 0:
                            if yy == 0:
                                dict_1['output'][:, xx:xx + interval_size + crop_size,
                                                 yy:yy + interval_size + crop_size] = \
                                    out_data[:, 0:interval_size + crop_size,
                                             0:interval_size + crop_size]
                            else:
                                dict_1['output'][:, xx:xx + interval_size + crop_size,
                                                 yy + interval_size:yy + interval_size + crop_size] = \
                                    out_data[:, 0:interval_size + crop_size,
                                             interval_size:interval_size + crop_size]
                        else:
                            if yy == 0:
                                dict_1['output'][:, xx + interval_size:xx + interval_size + crop_size,
                                                 yy:yy + interval_size + crop_size] = \
                                    out_data[:, interval_size:interval_size + crop_size,
                                             0:interval_size + crop_size]
                            else:
                                dict_1['output'][:, xx + interval_size:xx + interval_size + crop_size,
                                                 yy + interval_size:yy + interval_size + crop_size] = \
                                    out_data[:, interval_size:interval_size + crop_size,
                                             interval_size:interval_size + crop_size]
                        yy += crop_size

                    # last column of this row: align the patch to the right edge
                    yy = width - patch_size
                    input_tensor = trans(img.crop(
                        (yy, xx, yy + patch_size, xx + patch_size)))
                    net_outputs = process(input_tensor, target_net, gpu_id)
                    out_data = net_outputs['output'].data[0].cpu().numpy()
                    if xx == 0:
                        dict_1['output'][:, xx:xx + interval_size + crop_size,
                                         yy + interval_size:] = \
                            out_data[:, 0:interval_size + crop_size,
                                     interval_size:]
                    else:
                        dict_1['output'][:, xx + interval_size:xx + interval_size + crop_size,
                                         yy + interval_size:] = \
                            out_data[:, interval_size:interval_size + crop_size,
                                     interval_size:]
                    xx += crop_size

                # last row: align the patch to the bottom edge
                xx = height - patch_size
                yy = 0
                while yy + patch_size < width:
                    input_tensor = trans(img.crop(
                        (yy, xx, yy + patch_size, xx + patch_size)))
                    net_outputs = process(input_tensor, target_net, gpu_id)
                    out_data = net_outputs['output'].data[0].cpu().numpy()
                    if yy == 0:
                        dict_1['output'][:, xx + interval_size:,
                                         yy:yy + interval_size + crop_size] = \
                            out_data[:, interval_size:,
                                     0:interval_size + crop_size]
                    else:
                        dict_1['output'][:, xx + interval_size:,
                                         yy + interval_size:yy + interval_size + crop_size] = \
                            out_data[:, interval_size:,
                                     interval_size:interval_size + crop_size]
                    yy += crop_size

                # bottom-right corner patch
                yy = width - patch_size
                input_tensor = trans(img.crop(
                    (yy, xx, yy + patch_size, xx + patch_size)))
                net_outputs = process(input_tensor, target_net, gpu_id)
                out_data = net_outputs['output'].data[0].cpu().numpy()
                dict_1['output'][:, xx + interval_size:, yy + interval_size:] = \
                    out_data[:, interval_size:, interval_size:]

                end_time = time.time()
                if debug:
                    print("single image cost:%s ms"
                          % int((end_time - start_time) * 1000))
                for name, out in dict_1.items():
                    if name == 'input':
                        continue
                    save_path = os.path.join(save_dir,
                                             os.path.basename(img_path))
                    print("save to:", save_path)
                    out = torch.Tensor(out).cuda()
                    utils.save_tensor(out, save_path, width, height)

    print("total:", total)
    if psnr:
        run_psnr(test_in_dir=input_dir, test_out_dir=save_dir)
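
# The stitching above keeps only the central crop_size x crop_size core of
# each patch, plus the interval_size margin at image borders. The per-axis
# index arithmetic is restated compactly below, purely for reference; it is
# derived from the loop above, not a separate implementation in this repo.
def tile_spans_sketch(length, patch_size=500, crop_size=300):
    """Yield (patch_start, dst_span, src_span) pairs along one axis."""
    interval = (patch_size - crop_size) // 2
    pos = 0
    while pos + patch_size < length:
        dst_lo = pos if pos == 0 else pos + interval  # keep the margin at the first patch
        src_lo = 0 if pos == 0 else interval
        yield pos, (dst_lo, pos + interval + crop_size), (src_lo, interval + crop_size)
        pos += crop_size
    pos = length - patch_size  # final patch, flush with the far edge
    dst_lo = pos if pos == 0 else pos + interval
    src_lo = 0 if pos == 0 else interval
    yield pos, (dst_lo, length), (src_lo, patch_size)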

def train(epoch, train_loader, model, input_images, gt_images):
    loss_accumulator = utils.DictAccumulator(config.loss_average_win_size)
    grad_accumulator = utils.DictAccumulator(config.loss_average_win_size)
    score_accumulator = utils.DictAccumulator(config.loss_average_win_size)
    iters = len(train_loader)

    for i, (ratio, index, in_path, gt_path) in enumerate(train_loader):
        ratio = int(ratio)
        index = int(index)
        in_path = "".join(in_path)
        gt_path = "".join(gt_path)

        # first touch: load and cache the raw input / ground-truth pair,
        # keyed by the first three digits of the amplification ratio
        if input_images[str(ratio)[0:3]][index] is None:
            print('caching %s' % in_path)
            raw = rawpy.imread(in_path)
            input_images[str(ratio)[0:3]][index] = np.expand_dims(
                pack_raw(raw), axis=0) * ratio
            gt_raw = rawpy.imread(gt_path)
            im = gt_raw.postprocess(use_camera_wb=True, half_size=False,
                                    no_auto_bright=True, output_bps=16)
            gt_images[index] = np.expand_dims(np.float32(im / 65535.0), axis=0)

        # random 512x512 crop; the ground truth is at 2x the input resolution
        H = input_images[str(ratio)[0:3]][index].shape[1]
        W = input_images[str(ratio)[0:3]][index].shape[2]
        ps = 512
        xx = np.random.randint(0, W - ps)
        yy = np.random.randint(0, H - ps)
        input_patch = input_images[str(ratio)[0:3]][index][:, yy:yy + ps,
                                                           xx:xx + ps, :]
        gt_patch = gt_images[index][:, yy * 2:yy * 2 + ps * 2,
                                    xx * 2:xx * 2 + ps * 2, :]

        if np.random.randint(2, size=1)[0] == 1:  # random vertical flip
            input_patch = np.flip(input_patch, axis=1)
            gt_patch = np.flip(gt_patch, axis=1)
        if np.random.randint(2, size=1)[0] == 1:
            # flip along width; axis=0 would be the singleton batch dim (a no-op)
            input_patch = np.flip(input_patch, axis=2)
            gt_patch = np.flip(gt_patch, axis=2)
        if np.random.randint(2, size=1)[0] == 1:  # random transpose
            input_patch = np.transpose(input_patch, (0, 2, 1, 3))
            gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))
        input_patch = np.minimum(input_patch, 1.0)

        # NHWC -> NCWH, as model.fit() expects
        inputs = np.transpose(input_patch, (0, 3, 2, 1))
        targets = np.transpose(gt_patch, (0, 3, 2, 1)).copy()
        inputs = torch.from_numpy(inputs).float()
        targets = torch.from_numpy(targets).float()
        inputs = Variable(inputs.cuda())
        targets = Variable(targets.cuda())

        net_outputs, loss, grad, lr_dict, score = model.fit(
            inputs, targets, update=True, epoch=epoch, cur_iter=i + 1,
            iter_one_epoch=iters)
        loss_accumulator.update(loss)
        grad_accumulator.update(grad)
        score_accumulator.update(score)

        if (i + 1) % config.loss_average_win_size == 0:
            need_plot = True
            if hasattr(config, 'plot_loss_start_iter'):
                need_plot = (i + 1 + (epoch - 1) * iters
                             >= config.plot_loss_start_iter)
            elif hasattr(config, 'plot_loss_start_epoch'):
                need_plot = (epoch >= config.plot_loss_start_epoch)
            utils.print_loss(config, "train_loss",
                             loss_accumulator.get_average(), epoch=epoch,
                             iters=iters, current_iter=i + 1,
                             need_plot=need_plot)
            utils.print_loss(config, "grad", grad_accumulator.get_average(),
                             epoch=epoch, iters=iters, current_iter=i + 1,
                             need_plot=need_plot)
            utils.print_loss(config, "learning rate", lr_dict, epoch=epoch,
                             iters=iters, current_iter=i + 1,
                             need_plot=need_plot)
            utils.print_loss(config, "train_score",
                             score_accumulator.get_average(), epoch=epoch,
                             iters=iters, current_iter=i + 1,
                             need_plot=need_plot)

    # periodically dump one random sample from the last batch of the epoch
    if epoch % config.save_train_hr_interval_epoch == 0:
        k = random.randint(0, net_outputs['output'].size(0) - 1)
        for name, out in net_outputs.items():
            utils.save_tensor(
                out.data[k],
                os.path.join(config.TRAIN_OUT_FOLDER,
                             'epoch_%d_k_%d_%s.png' % (epoch, k, name)))
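
# pack_raw() is imported from elsewhere. The sketch below follows the
# Learning-to-See-in-the-Dark reference code (Sony Bayer data, black level
# 512, 14-bit white level); this repo's version may use different constants.
def pack_raw_sketch(raw):
    """Pack a Bayer raw image into 4 half-resolution channels in [0, 1]."""
    im = raw.raw_image_visible.astype(np.float32)
    im = np.maximum(im - 512, 0) / (16383 - 512)  # subtract black level
    im = np.expand_dims(im, axis=2)
    H, W = im.shape[0], im.shape[1]
    return np.concatenate((im[0:H:2, 0:W:2, :],   # R
                           im[0:H:2, 1:W:2, :],   # G
                           im[1:H:2, 1:W:2, :],   # B
                           im[1:H:2, 0:W:2, :]),  # G
                          axis=2)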

def run(input_dirs, save_dir, target_net, gpu_id=None, divided=True,
        psnr=False):
    if gpu_id is not None:
        print("use cuda")
        # cudnn.benchmark = True
        if gpu_id != -1:
            torch.cuda.set_device(gpu_id)
            target_net.cuda(gpu_id)
    print("input_dirs:", input_dirs)
    print("save_dir:", save_dir)
    utils.touch_dir(save_dir)

    if type(input_dirs) is not list:  # backward compatibility with old configs
        input_dirs = [input_dirs]

    for input_dir in input_dirs:
        # a directory of sample images
        if os.path.isdir(input_dir):
            files = utils.get_files_from_dir(input_dir)
        # a desc.txt listing sample paths
        elif os.path.isfile(input_dir):
            files = utils.get_files_from_desc(input_dir)

        total = 0
        for img_path in files:
            total += 1
            # read image
            img = Image.open(img_path)
            img = img.convert('RGB')
            # img = img.resize((384, 384))
            width, height = img.size
            if debug:
                print(width, height)
            trans = transforms.Compose([transforms.ToTensor()])
            start_time = time.time()

            if (not divided) or (width < 1000) or (height < 1000):
                # small image: single forward pass
                input_tensor = trans(img)
                net_outputs = process(input_tensor, target_net, gpu_id)
                end_time = time.time()
                if debug:
                    print("single image cost:%s ms"
                          % int((end_time - start_time) * 1000))
                for name, out in net_outputs.items():
                    save_path = os.path.join(
                        save_dir,
                        os.path.basename(img_path) + '_%s.png' % (name,))
                    print("save to:", save_path)
                    utils.save_tensor(out.data[0], save_path, width, height)
            else:
                # large image: same overlap-and-crop tiling as in the
                # cluster-snapshot run() above
                patch_size = 500
                crop_size = 300
                interval_size = (patch_size - crop_size) // 2  # integer: used as an index
                input_tensor_0 = trans(img)
                output_1 = np.zeros((input_tensor_0.shape[0],
                                     input_tensor_0.shape[1],
                                     input_tensor_0.shape[2]))
                dict_1 = {'input': input_tensor_0, 'output': output_1}

                xx = 0
                while xx + patch_size < height:
                    yy = 0
                    while yy + patch_size < width:
                        input_tensor = trans(img.crop(
                            (yy, xx, yy + patch_size, xx + patch_size)))
                        net_outputs = process(input_tensor, target_net, gpu_id)
                        # move to host memory before writing into the numpy canvas
                        out_data = net_outputs['output'].data[0].cpu().numpy()
                        if xx == 0:
                            if yy == 0:
                                dict_1['output'][:, xx:xx + interval_size + crop_size,
                                                 yy:yy + interval_size + crop_size] = \
                                    out_data[:, 0:interval_size + crop_size,
                                             0:interval_size + crop_size]
                            else:
                                dict_1['output'][:, xx:xx + interval_size + crop_size,
                                                 yy + interval_size:yy + interval_size + crop_size] = \
                                    out_data[:, 0:interval_size + crop_size,
                                             interval_size:interval_size + crop_size]
                        else:
                            if yy == 0:
                                dict_1['output'][:, xx + interval_size:xx + interval_size + crop_size,
                                                 yy:yy + interval_size + crop_size] = \
                                    out_data[:, interval_size:interval_size + crop_size,
                                             0:interval_size + crop_size]
                            else:
                                dict_1['output'][:, xx + interval_size:xx + interval_size + crop_size,
                                                 yy + interval_size:yy + interval_size + crop_size] = \
                                    out_data[:, interval_size:interval_size + crop_size,
                                             interval_size:interval_size + crop_size]
                        yy += crop_size

                    # last column of this row: align the patch to the right edge
                    yy = width - patch_size
                    input_tensor = trans(img.crop(
                        (yy, xx, yy + patch_size, xx + patch_size)))
                    net_outputs = process(input_tensor, target_net, gpu_id)
                    out_data = net_outputs['output'].data[0].cpu().numpy()
                    if xx == 0:
                        dict_1['output'][:, xx:xx + interval_size + crop_size,
                                         yy + interval_size:] = \
                            out_data[:, 0:interval_size + crop_size,
                                     interval_size:]
                    else:
                        dict_1['output'][:, xx + interval_size:xx + interval_size + crop_size,
                                         yy + interval_size:] = \
                            out_data[:, interval_size:interval_size + crop_size,
                                     interval_size:]
                    xx += crop_size

                # last row: align the patch to the bottom edge
                xx = height - patch_size
                yy = 0
                while yy + patch_size < width:
                    input_tensor = trans(img.crop(
                        (yy, xx, yy + patch_size, xx + patch_size)))
                    net_outputs = process(input_tensor, target_net, gpu_id)
                    out_data = net_outputs['output'].data[0].cpu().numpy()
                    if yy == 0:
                        dict_1['output'][:, xx + interval_size:,
                                         yy:yy + interval_size + crop_size] = \
                            out_data[:, interval_size:,
                                     0:interval_size + crop_size]
                    else:
                        dict_1['output'][:, xx + interval_size:,
                                         yy + interval_size:yy + interval_size + crop_size] = \
                            out_data[:, interval_size:,
                                     interval_size:interval_size + crop_size]
                    yy += crop_size

                # bottom-right corner patch
                yy = width - patch_size
                input_tensor = trans(img.crop(
                    (yy, xx, yy + patch_size, xx + patch_size)))
                net_outputs = process(input_tensor, target_net, gpu_id)
                out_data = net_outputs['output'].data[0].cpu().numpy()
                dict_1['output'][:, xx + interval_size:, yy + interval_size:] = \
                    out_data[:, interval_size:, interval_size:]

                end_time = time.time()
                if debug:
                    print("single image cost:%s ms"
                          % int((end_time - start_time) * 1000))
                for name, out in dict_1.items():
                    save_path = os.path.join(
                        save_dir,
                        os.path.basename(img_path) + '_%s.png' % (name,))
                    print("save to:", save_path)
                    out = torch.Tensor(out).cuda()
                    utils.save_tensor(out, save_path, width, height)
        print("total:", total)

    if psnr:
        run_psnr(test_in_dir=input_dirs[0], test_out_dir=save_dir)
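
# A hypothetical invocation of this run(); the config field names below are
# illustrative assumptions (only target_net appears in this repo's config),
# and run_psnr / process are assumed to be importable alongside it.
if __name__ == '__main__':
    run(config.test_in_dir, config.test_out_dir, config.target_net,
        gpu_id=0, divided=True, psnr=True)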