# -- assumed imports; the original listing omitted its header. Project-local --
# -- helpers (load_image_dataset, set_seed, sample_to_cuda, nnf, anscombe,   --
# -- EvalBlockScores, AlignOptimizer, ...) are imported from this repo's own --
# -- modules elsewhere and are not reproduced here.                          --
import os
import sys
import time
from pathlib import Path
from multiprocessing import current_process

import cv2
import numpy as np
import torch
import torchvision.transforms.functional as tvF
from easydict import EasyDict as edict
from einops import rearrange, repeat


def execute_experiment(cfg):

    # -- set seed --
    # (the original fragment referenced an undefined `seed`; cfg.random_seed
    #  matches how the other entry points in this file seed their runs)
    np.random.seed(cfg.random_seed)
    torch.manual_seed(cfg.random_seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)

    # -- run over images --
    NUM_BATCHES = 2  # undefined in the original fragment; small default assumed
    for i_index in range(NUM_BATCHES):

        # -- sample & unpack batch --
        sample = next(image_iter)
        sample_to_cuda(sample)

        dyn_noisy = sample['noisy']      # dynamics and noise
        dyn_clean = sample['burst']      # dynamics and no noise
        static_noisy = sample['snoisy']  # no dynamics and noise
        static_clean = sample['sburst']  # no dynamics and no noise
        flow_gt = sample['flow']
        image_index = sample['index']
        tl_index = sample['tl_index']
        rng_state = sample['rng_state']

        # -- variance-stabilize Poisson noise --
        if cfg.noise_params.ntype == "pn":
            dyn_noisy = anscombe.forward(dyn_noisy)
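# The "pn" branch above relies on the Anscombe variance-stabilizing transform
# for Poisson noise. A minimal sketch of the forward/backward pair; this is a
# hypothetical stand-in for the repo's `anscombe` module, not its actual code:
def anscombe_forward_sketch(x):
    """Map Poisson-distributed x to approximately unit-variance Gaussian."""
    return 2.0 * torch.sqrt(x + 3.0 / 8.0)


def anscombe_backward_sketch(y):
    """Algebraic (biased) inverse of the forward transform."""
    return (y / 2.0) ** 2 - 3.0 / 8.0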
def sample_new_bursts(cfg, nbatches=5):

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    print("load image dataset.")
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)

    # -- init stores --
    records = edict({
        'rng_state': [],
        'tl_index': [],
        'image_index': [],
        'dyn_noisy': [],
        'dyn_clean': [],
        'flow_gt': [],
        'static_noisy': [],
        'static_clean': []
    })

    # -- sample data --
    for image_bindex in range(nbatches):

        # -- sample & unpack batch --
        sample = next(image_iter)
        # sample_to_cuda(sample)

        dyn_noisy = sample['noisy']      # dynamics and noise
        dyn_clean = sample['burst']      # dynamics and no noise
        static_noisy = sample['snoisy']  # no dynamics and noise
        static_clean = sample['sburst']  # no dynamics and no noise
        flow_gt = sample['flow']
        image_index = sample['index']
        tl_index = sample['tl_index']
        rng_state = sample['rng_state']

        #
        # -- append to records --
        #

        # -- unpack according to batchsize --
        for b in range(cfg.batch_size):
            records['rng_state'].append(rng_state[b])
            records['tl_index'].append(tl_index[b])
            records['image_index'].append(image_index[b])
            records['dyn_noisy'].append(dyn_noisy[:, b])
            records['dyn_clean'].append(dyn_clean[:, b])
            records['flow_gt'].append(flow_gt[b])
            records['static_noisy'].append(static_noisy[:, b])
            records['static_clean'].append(static_clean[:, b])

        print(image_bindex, records['tl_index'])

    print("-" * 10)
    print(len(records['image_index']))

    return records
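# A minimal usage sketch for `sample_new_bursts`. This assumes
# `get_cfg_defaults` supplies a config with a valid dataset path and a
# `batch_size` field, as the other entry points in this file do:
#
#   cfg = get_cfg_defaults()
#   records = sample_new_bursts(cfg, nbatches=2)
#   assert len(records['dyn_noisy']) == 2 * cfg.batch_size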
def main():

    seed = 234
    np.random.seed(seed)
    torch.manual_seed(seed)

    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = True
    cfg.noise_params.ntype = 'g'
    cfg.noise_params.g.std = 25.
    cfg.nframes = 3
    cfg.num_workers = 0
    cfg.dynamic_info.nframes = cfg.nframes
    cfg.dynamic_info.ppf = 10
    cfg.nblocks = 3
    cfg.patchsize = 10
    cfg.gpuid = 1
    cfg.device = f"cuda:{cfg.gpuid}"

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    train_iter = iter(loaders.tr)

    # -- fetch sample --
    sample = next(train_iter)
    sample_to_cuda(sample)

    # -- unpack data --
    noisy, clean = sample['noisy'], sample['burst']

    # -- save ave image --
    save_image(torch.mean(noisy[:, 0], dim=0), SAVE_DIR / "bootstrap_noisy_ave.png")

    # -- format for plots --
    print("noisy.shape", noisy.shape)
    noisy = rearrange(noisy[:, 0], 't c h w -> t h w c')
    clean = rearrange(clean[:, 0], 't c h w -> t h w c')
    plot_bootstrapping(noisy, clean)
def test_nnf():

    # -- get config --
    cfg = config()

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)

    # -- get score function --
    score_fxn = get_score_function(cfg.score_fxn_name)

    # -- some constants --
    NUM_BATCHES = 2
    nframes, nblocks = cfg.nframes, cfg.nblocks
    patchsize = cfg.patchsize
    check_parameters(nblocks, patchsize)

    # -- create evaluator --
    iterations, K = 10, 2
    subsizes = [2, 2, 2, 2, 2]
    evaluator = combo.eval_scores.EvalBlockScores(score_fxn, patchsize, 100, None)

    # -- iterate over images --
    for image_bindex in range(NUM_BATCHES):

        # -- sample & unpack batch --
        sample = next(image_iter)
        sample_to_cuda(sample)

        dyn_noisy = sample['noisy']      # dynamics and noise
        dyn_clean = sample['burst']      # dynamics and no noise
        static_noisy = sample['snoisy']  # no dynamics and noise
        static_clean = sample['sburst']  # no dynamics and no noise
        flow_gt = sample['flow']

        # -- shape info --
        T, B, C, H, W = dyn_noisy.shape
        isize = edict({'h': H, 'w': W})
        ref_t = nframes // 2
        npix = H * W

        # -- groundtruth flow --
        flow_gt = repeat(flow_gt, 'i tm1 two -> i p tm1 two', p=npix)
        print("sample['flow']: ", flow_gt.shape)
        aligned_of = align_from_flow(dyn_clean, flow_gt, patchsize, isize=isize)

        # -- compute nearest neighbor fields --
        shape_str = 't b h w two -> b (h w) t two'
        nnf_vals, nnf_pix = nnf.compute_burst_nnf(dyn_clean, ref_t, patchsize)
        nnf_pix_best = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_nnf = pix_to_flow(nnf_pix_best)
        aligned_nnf = align_from_pix(dyn_clean, nnf_pix_best, patchsize)

        # -- compute proposed search of nnf [v1 optimizer] --
        flow_split = optim.v1.run_image_burst(dyn_clean, patchsize, evaluator,
                                              nblocks, iterations, subsizes, K)
        aligned_split = align_from_flow(dyn_clean, flow_split, patchsize,
                                        isize=isize)

        # -- compute proposed search of nnf [v3 optimizer] --
        flow_est = optim.v3.run_image_burst(dyn_clean, patchsize, evaluator,
                                            nblocks, iterations, subsizes, K)
        aligned_est = align_from_flow(dyn_clean, flow_est, patchsize, isize=isize)

        # -- banner --
        print("-" * 25 + " Results " + "-" * 25)

        # -- compare gt v.s. nnf computations --
        nnf_of = compute_epe(flow_nnf, flow_gt)
        split_of = compute_epe(flow_split, flow_gt)
        est_of = compute_epe(flow_est, flow_gt)
        split_nnf = compute_epe(flow_split, flow_nnf)
        est_nnf = compute_epe(flow_est, flow_nnf)

        print("-" * 50)
        print("EPE Errors")
        print("-" * 50)
        print("NNF v.s. Optical Flow.")
        print(nnf_of)
        print("Split v.s. Optical Flow.")
        print(split_of)
        print("Proposed v.s. Optical Flow.")
        print(est_of)
        print("Split v.s. NNF")
        print(split_nnf)
        print("Proposed v.s. NNF")
        print(est_nnf)

        # -- psnr eval --
        pad = 2 * patchsize
        isize = edict({'h': H - pad, 'w': W - pad})
        psnr_of = compute_aligned_psnr(aligned_of, static_clean, isize)
        psnr_nnf = compute_aligned_psnr(aligned_nnf, static_clean, isize)
        psnr_split = compute_aligned_psnr(aligned_split, static_clean, isize)
        psnr_est = compute_aligned_psnr(aligned_est, static_clean, isize)

        print("-" * 50)
        print("PSNR Values")
        print("-" * 50)
        print("Optical Flow [groundtruth v1]")
        print(psnr_of)
        print("NNF [groundtruth v2]")
        print(psnr_nnf)
        print("Split [old method]")
        print(psnr_split)
        print("Proposed [new method]")
        print(psnr_est)
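# `compute_epe` above is assumed to be the standard end-point error: the mean
# L2 distance between predicted and reference flow vectors. A minimal sketch
# (hypothetical helper, not the repo's implementation):
def epe_sketch(flow_a, flow_b):
    """Mean end-point error; flows have shape (..., 2)."""
    diff = flow_a.float() - flow_b.float()
    return torch.sqrt(torch.sum(diff ** 2, dim=-1)).mean()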
def test_global_dynamics():

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    nbatches = 20

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    print("load image dataset.")
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)

    # -- save path for viz --
    save_dir = Path(
        f"{settings.ROOT_PATH}/output/tests/datasets/test_global_dynamics/")
    if not save_dir.exists():
        save_dir.mkdir(parents=True)

    # -- sample data --
    for image_index in range(nbatches):

        # -- sample image --
        index = -1
        # while index != 3233:
        #     sample = next(image_iter)
        #     convert_keys(sample)
        #     index = sample['image_index'][0][0].item()
        sample = next(image_iter)
        # batch_dim0(sample)
        convert_keys(sample)

        # -- extract info --
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow = sample['flow_gt']
        index = sample['image_index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks
        print(f"Image Index {index}")

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists():
            image_dir.mkdir()

        #
        # -- Compute NNF to Ensure things are OKAY --
        #

        isize = edict({'h': h, 'w': w})
        # pad = cfg.patchsize//2 if cfg.patchsize > 1 else 1
        pad = cfg.nblocks // 2 + 1
        psize = edict({'h': h - 2 * pad, 'w': w - 2 * pad})
        flow_gt = repeat(flow, 'i fm1 two -> i s fm1 two', s=h * w)
        pix_gt = flow_to_pix(flow_gt.clone(), nframes, isize=isize)

        def cc(image):
            return tvF.center_crop(image, (psize.h, psize.w))

        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2, cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
        # aligned_gt = warp_burst_flow(clean, flow_global)
        aligned_gt = align_from_pix(clean, pix_gt, cfg.nblocks)
        # aligned_gt = align_from_flow(clean, flow_global, cfg.nblocks, isize=isize)
        # psnr = compute_aligned_psnr(sclean[[nframes//2]], clean[[nframes//2]], psize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize)
        print(f"[GT Alignment] PSNR: {psnr}")

        # -- compute with OpenCV's Farneback optical flow --
        nd_clean = rearrange(clean.numpy(), 't 1 c h w -> t h w c')
        ref_t = nframes // 2
        frames, flows = [], []
        for t in range(nframes):
            if t == ref_t:
                frames.append(nd_clean[t][None, :])
                # note: assumes ref_t > 0 so flows[-1] already exists
                flows.append(torch.zeros(flows[-1].shape))
                continue
            from_frame = 255. * cv2.cvtColor(nd_clean[ref_t], cv2.COLOR_RGB2GRAY)
            to_frame = 255. * cv2.cvtColor(nd_clean[t], cv2.COLOR_RGB2GRAY)
            _flow = cv2.calcOpticalFlowFarneback(to_frame, from_frame, None,
                                                 0.5, 3, 3, 10, 5, 1.2, 0)
            _flow = np.round(_flow).astype(np.float32)  # rounding drops subpixel info
            w_frame = warp_flow(nd_clean[t], -_flow)
            _flow[..., 0] = -_flow[..., 0]  # flip sign to match this repo's flow convention
            # print("w_frame.shape ", w_frame.shape)
            flows.append(torch.FloatTensor(_flow))
            frames.append(torch.FloatTensor(w_frame[None, :]))
        flows = torch.stack(flows)
        flows = rearrange(flows, 't h w two -> 1 (h w) t two')
        frames = torch.FloatTensor(np.stack(frames))
        frames = rearrange(frames, 't i h w c -> t i c h w')
        # print("flows.shape ", flows.shape)
        # print("frames.shape ", frames.shape)
        # print("sclean.shape ", sclean.shape)

        psnr = compute_aligned_psnr(sclean, frames, psize)
        print(f"[NVOF Alignment] PSNR: {psnr}")

        pix_nvof = flow_to_pix(flows.clone(), nframes, isize=isize)
        aligned_nvof = align_from_pix(clean, pix_nvof, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_nvof, psize)
        print(f"[NVOF Alignment v2] PSNR: {psnr}")

        psnr = compute_aligned_psnr(frames, aligned_nvof, psize)
        print(f"[NVOF Alignment Methods] PSNR: {psnr}")

        print(pix_global[0, mid_pix])
        print(pix_gt[0, mid_pix])
        print(flow_global[0, mid_pix])
        print(flow_gt[0, mid_pix])
        print(flows[0, mid_pix])

        #
        # -- Save Images to Qualitatively Inspect --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        print(cc(sclean).shape)
        fn = image_dir / "diff.png"
        save_image(cc(sclean) - cc(aligned_gt), fn, normalize=True, vrange=None)

        fn = image_dir / "aligned_gt.png"
        save_image(cc(aligned_gt), fn, normalize=True, vrange=None)

        # return

        # -- NNF Global --
        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2, cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
        # aligned_global = align_from_flow(clean, flow_gt, cfg.nblocks)
        aligned_global = align_from_pix(clean, pix_gt, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_global, psize)
        print(f"[NNF Global] PSNR: {psnr}")

        # -- NNF Local (old) --
        iterations, K, subsizes = 0, 1, []
        optim = AlignOptimizer("v3")
        score_fxn_ave = get_score_function("ave")
        eval_ave = EvalBlockScores(score_fxn_ave, "ave", cfg.patchsize, 256, None)
        flow_local = optim.run(clean, cfg.patchsize, eval_ave, cfg.nblocks,
                               iterations, subsizes, K)
        pix_local = flow_to_pix(flow_local.clone(), nframes, isize=isize)
        # aligned_local = align_from_flow(clean, flow_gt, cfg.nblocks)
        aligned_local = align_from_pix(clean, pix_local, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_local, psize)
        print(f"[NNF Local (Old)] PSNR: {psnr}")

        # -- NNF Local (new) --
        _, pix_local = nnf_utils.runNnfBurst(clean, cfg.patchsize, cfg.nblocks,
                                             1, valMean=0., blockLabels=None)
        pix_local = rearrange(pix_local, 't i h w 1 two -> i (h w) t two')
        flow_local = pix_to_flow(pix_local.clone())
        # aligned_local = align_from_flow(clean, flow_gt, cfg.nblocks)
        aligned_local = align_from_pix(clean, pix_local, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_local, psize)
        print(f"[NNF Local (New)] PSNR: {psnr}")

        # -- remove boundary from pix --
        pixes = {'gt': pix_gt, 'global': pix_global, 'local': pix_local}
        for field, pix in pixes.items():
            pix_img = rearrange(pix, 'i (h w) t two -> (i t) two h w', h=h)
            pix_cc = cc(pix_img)
            pixes[field] = pix_cc

        # -- pairwise diffs --
        field2 = "gt"
        for field1 in pixes.keys():
            if field1 == field2:
                continue
            delta = pixes[field1] - pixes[field2]
            delta = delta.type(torch.float)
            delta_fn = image_dir / f"delta_{field1}_{field2}.png"
            save_image(delta, delta_fn, normalize=True, vrange=None)

        print(pix_gt[0, mid_pix])
        print(pix_global[0, mid_pix])
        print(pix_local[0, mid_pix])
        print(flow_gt[0, mid_pix])
        print(flow_global[0, mid_pix])
        print(flow_local[0, mid_pix])

        #
        # -- Save Images to Qualitatively Inspect --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        fn = image_dir / "aligned_global.png"
        save_image(cc(aligned_global), fn, normalize=True, vrange=None)

        fn = image_dir / "aligned_local.png"
        save_image(cc(aligned_local), fn, normalize=True, vrange=None)
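# `warp_flow` above is assumed to resemble the helper from OpenCV's optical
# flow samples: backward-warp an image by a dense flow field via remapping.
# A minimal sketch (hypothetical stand-in, not necessarily the repo's version):
def warp_flow_sketch(img, flow):
    """Warp `img` (H,W,C) by dense `flow` (H,W,2) using bilinear remapping."""
    h, w = flow.shape[:2]
    grid = -flow.astype(np.float32)
    grid[:, :, 0] += np.arange(w)
    grid[:, :, 1] += np.arange(h)[:, np.newaxis]
    return cv2.remap(img, grid, None, cv2.INTER_LINEAR)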
def execute_experiment(cfg):

    # -- init exp! --
    print("RUNNING EXP.")
    print(cfg)

    # -- create results record to save --
    dims = {
        'batch_results': None,
        'batch_to_record': None,
        'record_results': {'default': 0},
        'stack': {'default': 0},
        'cat': {'default': 0}
    }
    record = cache_io.ExpRecord(dims)

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)

    # -- get score functions --
    score_fxn_ave = get_score_function("ave")
    score_fxn_mse = get_score_function("mse")
    score_fxn_bs = get_score_function("bootstrapping")
    score_fxn_bs_cf = get_score_function("bootstrapping_cf")
    # score_fxn_bs = get_score_function("bootstrapping_mod2")
    # score_fxn_bs = get_score_function(cfg.score_fxn_name)
    score_fxn_bsl = get_score_function("bootstrapping_limitB")

    # -- some constants --
    NUM_BATCHES = 3
    nframes, nblocks = cfg.nframes, cfg.nblocks
    patchsize = cfg.patchsize
    ppf = cfg.dynamic_info.ppf
    check_parameters(nblocks, patchsize)

    # -- create evaluator for ave; simple --
    iterations, K = 1, 1
    subsizes = []
    block_batchsize = 256
    eval_ave_simp = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                                    block_batchsize, None)

    # -- create evaluators for ave and mse --
    eval_ave = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                               block_batchsize, None)
    eval_mse = EvalBlockScores(score_fxn_mse, "mse", patchsize,
                               block_batchsize, None)

    # -- create evaluators for bootstrapping --
    block_batchsize = 81
    eval_plimb = EvalBootBlockScores(score_fxn_bsl, score_fxn_bs, "bsl",
                                     patchsize, block_batchsize, None)
    eval_prop = EvalBlockScores(score_fxn_bs, "bs", patchsize,
                                block_batchsize, None)
    eval_prop_cf = EvalBlockScores(score_fxn_bs_cf, "bs_cf", patchsize,
                                   block_batchsize, None)

    # -- iterate over images --
    for image_bindex in range(NUM_BATCHES):

        print("-=" * 30 + "-")
        print(f"Running image batch index: {image_bindex}")
        print("-=" * 30 + "-")
        torch.cuda.empty_cache()

        # -- sample & unpack batch --
        sample = next(image_iter)
        sample_to_cuda(sample)

        dyn_noisy = sample['noisy']      # dynamics and noise
        dyn_clean = sample['burst']      # dynamics and no noise
        static_noisy = sample['snoisy']  # no dynamics and noise
        static_clean = sample['sburst']  # no dynamics and no noise
        flow_gt = sample['flow']
        image_index = sample['index']
        tl_index = sample['tl_index']
        rng_state = sample['rng_state']

        if cfg.noise_params.ntype == "pn":
            dyn_noisy = anscombe.forward(dyn_noisy)
        # dyn_noisy = dyn_clean
        dyn_noisy = static_clean  # debug override: score the static clean burst

        # -- shape info --
        T, B, C, H, W = dyn_noisy.shape
        isize = edict({'h': H, 'w': W})
        ref_t = nframes // 2
        nimages, npix, nframes = B, H * W, T

        # -- create results dict --
        flows = edict()
        aligned = edict()
        runtimes = edict()
        optimal_scores = edict()  # score function at optimal

        # -- groundtruth flow --
        flow_gt_rs = rearrange(flow_gt, 'i tm1 two -> i 1 tm1 two')
        blocks_gt = flow_to_blocks(flow_gt_rs, nblocks)
        flows.of = repeat(flow_gt, 'i tm1 two -> i p tm1 two', p=npix)
        aligned.of = align_from_flow(dyn_clean, flows.of, nblocks, isize=isize)
        runtimes.of = 0.  # given
        optimal_scores.of = np.zeros((nimages, npix, nframes))  # clean target is zero
        aligned.clean = static_clean

        # -- compute nearest neighbor fields [global] --
        start_time = time.perf_counter()
        shape_str = 't b h w two -> b (h w) t two'
        nnf_vals, nnf_pix = nnf.compute_burst_nnf(dyn_clean, ref_t, patchsize)
        nnf_pix_best = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flows.nnf = pix_to_flow(nnf_pix_best)
        aligned.nnf = align_from_pix(dyn_clean, nnf_pix_best, nblocks)
        runtimes.nnf = time.perf_counter() - start_time
        optimal_scores.nnf = np.zeros((nimages, npix, nframes))  # clean target is zero

        # -- compute proposed search of nnf --
        print("[Bootstrap] loss function")
        iterations = 1
        K, subsizes = get_boot_hyperparams(cfg.nframes, cfg.nblocks)
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
        # flows.est = optim.run(dyn_noisy, patchsize, eval_prop,
        #                       nblocks, iterations, subsizes, K)
        flows.est = flows.of.clone()
        aligned.est = align_from_flow(dyn_clean, flows.est, patchsize, isize=isize)
        runtimes.est = time.perf_counter() - start_time

        # -- load adjacent blocks --
        nframes, nimages, ncolor, h, w = dyn_noisy.shape
        nsegs = h * w
        brange = exh_block_range(nimages, nsegs, nframes, nblocks)
        ref_block = get_ref_block(nblocks)
        curr_blocks = init_optim_block(nimages, nsegs, nframes, nblocks)
        curr_blocks = curr_blocks[:, :, :, None]  # nimages, nsegs, nframes, naligns
        frames = np.r_[np.arange(nframes // 2),
                       np.arange(nframes // 2 + 1, nframes)]
        frames = repeat(frames, 't -> i s t', i=nimages, s=nsegs)
        search_blocks = get_search_blocks(frames, brange, curr_blocks,
                                          f'cuda:{cfg.gpuid}')
        print("search_blocks.shape ", search_blocks.shape)
        init_blocks = rearrange(curr_blocks, 'i s t a -> i s a t').to(dyn_noisy.device)
        print("init_blocks.shape ", init_blocks.shape)
        search_blocks = search_blocks[0, 0]
        init_blocks_ = init_blocks[0, 0]
        search_blocks = eval_plimb.filter_blocks_to_1skip_neighbors(
            search_blocks, init_blocks_)
        search_blocks = repeat(search_blocks, 'a t -> i s a t', i=nimages, s=nsegs)
        print("search_blocks.shape ", search_blocks.shape)

        # -- compute MSE for the batch --
        est = edict()
        bscf = edict()
        plimb = edict()
        print("curr_blocks.shape ", curr_blocks.shape)
        eval_prop.score_fxn_name = ""
        scores, scores_t, blocks = eval_mse.score_burst_from_blocks(
            dyn_noisy, search_blocks, patchsize, nblocks)
        est.scores = scores
        est.scores_t = scores_t
        est.blocks = blocks
        print("Done with est.")

        # -- compute bootstrapping in closed form --
        scores, scores_t, blocks = eval_prop_cf.score_burst_from_blocks(
            dyn_noisy, search_blocks, patchsize, nblocks)
        bscf.scores = scores
        bscf.scores_t = scores_t
        bscf.blocks = blocks

        # -- compute bootstrapping for the batch --
        print("Get init block from original bootstrap.")
        print(init_blocks[0, 0])
        scores, scores_t, blocks = eval_prop.score_burst_from_blocks(
            dyn_noisy, init_blocks, patchsize, nblocks)
        print("Starting prop.")
        eval_prop.score_fxn_name = "bs"
        eval_prop.score_cfg.bs_type = ""
        state = edict({'scores': scores, 'blocks': blocks})
        scores, scores_t, blocks = eval_plimb.score_burst_from_blocks(
            dyn_noisy, search_blocks, state, patchsize, nblocks)
        plimb.scores = scores
        plimb.scores_t = scores_t
        plimb.blocks = blocks

        print(est.scores.shape)
        print(bscf.scores.shape)
        print(state.scores.shape)
        print(plimb.scores.shape)

        diff_plimb = plimb.scores[0] - est.scores[0]
        perc_delta = torch.abs(diff_plimb) / est.scores[0]
        diff_bscf = bscf.scores[0] - est.scores[0]
        perc_delta_cf = torch.abs(diff_bscf) / est.scores[0]

        pix_idx_list = [0, 20, 30]  # np.arange(h*w)
        for p in pix_idx_list:
            print("-" * 10 + f" @ {p}")
            print("est", est.scores[0, p].cpu().numpy())
            print("state", state.scores[0, p].cpu().numpy())
            print("bscf", bscf.scores[0, p].cpu().numpy())
            print("plimb", plimb.scores[0, p].cpu().numpy())
            print("plimb/est", plimb.scores[0, p] / est.scores[0, p])
            print("plimb - est", plimb.scores[0, p] - est.scores[0, p])
            print("plimb - bscf", plimb.scores[0, p] - bscf.scores[0, p])
            print("%Delta [plimb]", perc_delta[p])
            print("L2-Norm [plimb]", torch.sum(diff_plimb[p] ** 2))
            print("Nmlz L2-Norm [plimb]", torch.mean(diff_plimb[p] ** 2))
            print("%Delta [bscf]", perc_delta_cf[p])
            print("L2-Norm [bscf]", torch.sum(diff_bscf[p] ** 2))
            print("Nmlz L2-Norm [bscf]", torch.mean(diff_bscf[p] ** 2))

        print("[Overall: plimb] %Delta: ", torch.mean(perc_delta).item())
        print("[Overall: plimb] L2-Norm: ", torch.sum(diff_plimb ** 2).item())
        print("[Overall: plimb] Nmlz L2-Norm: ", torch.mean(diff_plimb ** 2).item())
        print("[Overall: bscf] %Delta: ", torch.mean(perc_delta_cf).item())
        print("[Overall: bscf] L2-Norm: ", torch.sum(diff_bscf ** 2).item())
        print("[Overall: bscf] Nmlz L2-Norm: ", torch.mean(diff_bscf ** 2).item())

        # -- format results --
        pad = 3  # 2*(nframes-1)*ppf+4
        isize = edict({'h': H - pad, 'w': W - pad})

        # -- flows to numpy --
        is_even = cfg.frame_size % 2 == 0
        mid_pix = cfg.frame_size * cfg.frame_size // 2 \
            + (cfg.frame_size // 2) * is_even
        mid_pix = 32 * 10 + 23  # override: fixed pixel used for inspection
        flows_np = edict_torch_to_numpy(flows)

        # -- End-Point-Errors --
        epes_of = compute_flows_epe_wrt_ref(flows, "of")
        epes_nnf = compute_flows_epe_wrt_ref(flows, "nnf")
        epes_nnf_local = compute_flows_epe_wrt_ref(flows, "nnf_local")
        nnf_acc = compute_acc_wrt_ref(flows, "nnf")
        nnf_local_acc = compute_acc_wrt_ref(flows, "nnf_local")

        # -- PSNRs --
        aligned = remove_frame_centers(aligned)
        psnrs = compute_frames_psnr(aligned, isize)

        # -- print report --
        print("\n" * 3)  # banner
        print("-" * 25 + " Results " + "-" * 25)
        print_dict_ndarray_0_midpix(flows_np, mid_pix)
        print_runtimes(runtimes)
        print_verbose_psnrs(psnrs)
        print_delta_summary_psnrs(psnrs)
        print_verbose_epes(epes_of, epes_nnf)
        print_nnf_acc(nnf_acc)
        print_nnf_local_acc(nnf_local_acc)
        print_summary_epes(epes_of, epes_nnf)
        print_summary_psnrs(psnrs)

        # -- prepare results to be appended --
        psnrs = edict_torch_to_numpy(psnrs)
        epes_of = edict_torch_to_numpy(epes_of)
        epes_nnf = edict_torch_to_numpy(epes_nnf)
        epes_nnf_local = edict_torch_to_numpy(epes_nnf_local)
        nnf_acc = edict_torch_to_numpy(nnf_acc)
        nnf_local_acc = edict_torch_to_numpy(nnf_local_acc)
        image_index = torch_to_numpy(image_index)
        batch_results = {
            'runtimes': runtimes,
            'optimal_scores': optimal_scores,
            'psnrs': psnrs,
            'epes_of': epes_of,
            'epes_nnf': epes_nnf,
            'epes_nnf_local': epes_nnf_local,
            'nnf_acc': nnf_acc,
            'nnf_local_acc': nnf_local_acc
        }

        # -- format results --
        batch_results = flatten_internal_dict(batch_results)
        format_fields(batch_results, image_index, rng_state)

        print("shape check.")
        for key, value in batch_results.items():
            print(key, value.shape)

        record.append(batch_results)
        # record.stack_record()
        record.cat_record()

    print("\n" * 3)
    print("-" * 20)
    # df = pd.DataFrame().append(record.record, ignore_index=True)
    for key, val in record.record.items():
        print(key, val.shape)
    # print(df)
    print("-" * 20)
    print("\n" * 3)

    return record.record
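# `compute_aligned_psnr` / `compute_frames_psnr` above are assumed to follow
# the usual definition, PSNR = 10 * log10(MAX^2 / MSE), evaluated on
# center-cropped frames. A minimal sketch for tensors scaled to [0, 1]
# (hypothetical helper, not the repo's implementation):
def psnr_sketch(pred, target, eps=1e-12):
    """PSNR in dB between two tensors with values in [0, 1]."""
    mse = torch.mean((pred - target) ** 2)
    return 10.0 * torch.log10(1.0 / (mse + eps))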
def test_nnf():

    # -- get config --
    cfg = config()
    print("Config for Testing.")
    print(cfg)

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)
    nskips = 2 + 4 + 2 + 4 + 1
    for skip in range(nskips):
        next(image_iter)

    # -- get score functions --
    score_fxn_ave = get_score_function("ave")
    score_fxn_bs = get_score_function(cfg.score_fxn_name)

    # -- some constants --
    NUM_BATCHES = 10
    nframes, nblocks = cfg.nframes, cfg.nblocks
    patchsize = cfg.patchsize
    ppf = cfg.dynamic_info.ppf
    check_parameters(nblocks, patchsize)

    # -- create evaluator for ave; simple --
    iterations, K = 1, 1
    subsizes = []
    block_batchsize = 256
    eval_ave_simp = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                                    block_batchsize, None)

    # -- create evaluator for ave --
    eval_ave = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                               block_batchsize, None)

    # -- create evaluator for bootstrapping --
    block_batchsize = 64
    eval_prop = EvalBlockScores(score_fxn_bs, "bs", patchsize,
                                block_batchsize, None)

    # -- iterate over images --
    for image_bindex in range(NUM_BATCHES):

        print("-=" * 30 + "-")
        print(f"Running image batch index: {image_bindex}")
        print("-=" * 30 + "-")

        # -- sample & unpack batch --
        sample = next(image_iter)
        sample_to_cuda(sample)

        dyn_noisy = sample['noisy']      # dynamics and noise
        dyn_clean = sample['burst']      # dynamics and no noise
        static_noisy = sample['snoisy']  # no dynamics and noise
        static_clean = sample['sburst']  # no dynamics and no noise
        flow_gt = sample['ref_flow']
        # flow_gt = sample['seq_flow']
        if cfg.noise_params.ntype == "pn":
            dyn_noisy = anscombe.forward(dyn_noisy)

        # -- shape info --
        T, B, C, H, W = dyn_noisy.shape
        isize = edict({'h': H, 'w': W})
        ref_t = nframes // 2
        npix = H * W

        # -- groundtruth flow --
        # print("flow_gt", flow_gt)
        flow_gt_rs = rearrange(flow_gt, 'i tm1 two -> i 1 tm1 two')
        blocks_gt = flow_to_blocks(flow_gt_rs, nblocks)
        # print("flow_gt[0,0] ", flow_gt)
        # print("blocks_gt[0,0] ", blocks_gt[0,0])
        flow_gt = repeat(flow_gt, 'i tm1 two -> i p tm1 two', p=npix)
        aligned_of = align_from_flow(dyn_clean, flow_gt, nblocks, isize=isize)
        pix_gt = flow_to_pix(flow_gt.clone(), isize=isize)

        # -- compute nearest neighbor fields --
        start_time = time.perf_counter()
        shape_str = 't b h w two -> b (h w) t two'
        nnf_vals, nnf_pix = nnf.compute_burst_nnf(dyn_clean, ref_t, patchsize)
        nnf_pix_best = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        pix_nnf = nnf_pix_best.clone()
        flow_nnf = pix_to_flow(nnf_pix_best)
        aligned_nnf = align_from_pix(dyn_clean, nnf_pix_best, nblocks)
        time_nnf = time.perf_counter() - start_time

        # -- compute split search of nnf --
        start_time = time.perf_counter()
        print(dyn_noisy.shape)
        # split_vals, split_pix = nnf.compute_burst_nnf(dyn_noisy, ref_t, patchsize)
        split_pix = np.copy(nnf_pix)
        split_pix_best = torch.LongTensor(rearrange(split_pix[..., 0, :], shape_str))
        pix_split = split_pix_best.clone()
        flow_split = pix_to_flow(split_pix_best)
        aligned_split = align_from_pix(dyn_clean, split_pix_best, nblocks)
        time_split = time.perf_counter() - start_time

        # -- compute simple ave --
        iterations, K = 0, 1
        subsizes = []
        print("[simple] Ave loss function")
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
        # flow_ave_simp = optim.run(dyn_noisy, patchsize, eval_ave_simp,
        #                           nblocks, iterations, subsizes, K)
        flow_ave_simp = flow_gt.clone().cpu()
        aligned_ave_simp = align_from_flow(dyn_clean, flow_ave_simp, nblocks,
                                           isize=isize)
        time_ave_simp = time.perf_counter() - start_time
        print(flow_ave_simp.shape)

        # -- compute complex ave --
        iterations, K = 0, 1
        subsizes = []
        print("[complex] Ave loss function")
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
        flow_ave = optim.run(dyn_noisy, patchsize, eval_ave, nblocks,
                             iterations, subsizes, K)
        # flow_ave = flow_gt.clone()
        pix_ave = flow_to_pix(flow_ave.clone(), isize=isize)
        aligned_ave = align_from_flow(dyn_clean, flow_ave, nblocks, isize=isize)
        time_ave = time.perf_counter() - start_time

        # -- compute proposed search of nnf --
        # iterations, K = 50, 3; subsizes = [2]*18
        # iterations, K = 1, nblocks**2  # K is a function of noise level.
        iterations, K = 1, 2 * nblocks
        subsizes = [3, 3, 3, 3, 3, 3, 3, 3]
        # subsizes = [nframes]
        print("[Bootstrap] loss function")
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
        flow_est = optim.run(dyn_noisy, patchsize, eval_prop, nblocks,
                             iterations, subsizes, K)
        pix_est = flow_to_pix(flow_est.clone(), isize=isize)
        aligned_est = align_from_flow(dyn_clean, flow_est, patchsize, isize=isize)
        time_est = time.perf_counter() - start_time
        # flow_est = flow_gt.clone()
        # aligned_est = aligned_of.clone()
        # time_est = 0.

        # -- banner --
        print("\n" * 3)
        print("-" * 25 + " Results " + "-" * 25)

        # -- examples of flow --
        print("-" * 50)
        is_even = cfg.frame_size % 2 == 0
        mid_pix = cfg.frame_size * cfg.frame_size // 2 \
            + (cfg.frame_size // 2) * is_even
        mid_pix = 32 * 10 + 23  # override: fixed pixel used for inspection
        # mid_pix = 32*23+10
        flow_gt_np = torch_to_numpy(flow_gt)
        flow_nnf_np = torch_to_numpy(flow_nnf)
        flow_split_np = torch_to_numpy(flow_split)
        flow_ave_simp_np = torch_to_numpy(flow_ave_simp)
        flow_ave_np = torch_to_numpy(flow_ave)
        flow_est_np = torch_to_numpy(flow_est)
        print(flow_gt_np[0, mid_pix])
        print(flow_nnf_np[0, mid_pix])
        print(flow_split_np[0, mid_pix])
        print(flow_ave_simp_np[0, mid_pix])
        print(flow_ave_np[0, mid_pix])
        print(flow_est_np[0, mid_pix])

        print("-" * 50)
        pix_gt_np = torch_to_numpy(pix_gt)
        pix_nnf_np = torch_to_numpy(pix_nnf)
        pix_ave_np = torch_to_numpy(pix_ave)
        pix_est_np = torch_to_numpy(pix_est)
        print(pix_gt_np[0, mid_pix])
        print(pix_nnf_np[0, mid_pix])
        print(pix_ave_np[0, mid_pix])
        print(pix_est_np[0, mid_pix])
        # print(aligned_of[0,0,:,10,23].cpu() - static_clean[0,0,:,10,23].cpu())
        # print(aligned_ave[0,0,:,10,23].cpu() - static_clean[0,0,:,10,23].cpu())
        # print(aligned_of[0,0,:,23,10].cpu() - static_clean[0,0,:,23,10].cpu())
        # print(aligned_ave[0,0,:,23,10].cpu() - static_clean[0,0,:,23,10].cpu())
        print("-" * 50)

        # -- compare compute time --
        print("-" * 50)
        print("Compute Time [smaller is better]")
        print("-" * 50)
        print("[NNF]: %2.3e" % time_nnf)
        print("[Split]: %2.3e" % time_split)
        print("[Ave [Simple]]: %2.3e" % time_ave_simp)
        print("[Ave]: %2.3e" % time_ave)
        print("[Proposed]: %2.3e" % time_est)

        # -- compare gt v.s. nnf computations --
        nnf_of = compute_epe(flow_nnf, flow_gt)
        split_of = compute_epe(flow_split, flow_gt)
        ave_simp_of = compute_epe(flow_ave_simp, flow_gt)
        ave_of = compute_epe(flow_ave, flow_gt)
        est_of = compute_epe(flow_est, flow_gt)
        split_nnf = compute_epe(flow_split, flow_nnf)
        ave_simp_nnf = compute_epe(flow_ave_simp, flow_nnf)
        ave_nnf = compute_epe(flow_ave, flow_nnf)
        est_nnf = compute_epe(flow_est, flow_nnf)

        # -- End-Point-Errors --
        print("-" * 50)
        print("EPE Errors [smaller is better]")
        print("-" * 50)
        print("NNF v.s. Optical Flow.")
        print(nnf_of)
        print("Split v.s. Optical Flow.")
        print(split_of)
        print("Ave [Simple] v.s. Optical Flow.")
        print(ave_simp_of)
        print("Ave v.s. Optical Flow.")
        print(ave_of)
        print("Proposed v.s. Optical Flow.")
        print(est_of)
        print("Split v.s. NNF")
        print(split_nnf)
        print("Ave [Simple] v.s. NNF")
        print(ave_simp_nnf)
        print("Ave v.s. NNF")
        print(ave_nnf)
        print("Proposed v.s. NNF")
        print(est_nnf)

        # -- compare accuracy of method nnf v.s. actual nnf --
        def compute_flow_acc(guess, gt):
            both = torch.all(guess.type(torch.long) == gt.type(torch.long), dim=-1)
            ncorrect = torch.sum(both)
            acc = 100 * float(ncorrect) / both.numel()
            return acc

        split_nnf_acc = compute_flow_acc(flow_split, flow_nnf)
        ave_simp_nnf_acc = compute_flow_acc(flow_ave_simp, flow_nnf)
        ave_nnf_acc = compute_flow_acc(flow_ave, flow_nnf)
        est_nnf_acc = compute_flow_acc(flow_est, flow_nnf)

        # -- PSNR to Reference Image --
        pad = 2 * (nframes - 1) * ppf + 4
        isize = edict({'h': H - pad, 'w': W - pad})
        # print("isize: ", isize)
        aligned_of = remove_center_frame(aligned_of)
        aligned_nnf = remove_center_frame(aligned_nnf)
        aligned_split = remove_center_frame(aligned_split)
        aligned_ave_simp = remove_center_frame(aligned_ave_simp)
        aligned_ave = remove_center_frame(aligned_ave)
        aligned_est = remove_center_frame(aligned_est)
        static_clean = remove_center_frame(static_clean)

        psnr_of = compute_aligned_psnr(aligned_of, static_clean, isize)
        psnr_nnf = compute_aligned_psnr(aligned_nnf, static_clean, isize)
        psnr_split = compute_aligned_psnr(aligned_split, static_clean, isize)
        psnr_ave_simp = compute_aligned_psnr(aligned_ave_simp, static_clean, isize)
        psnr_ave = compute_aligned_psnr(aligned_ave, static_clean, isize)
        psnr_est = compute_aligned_psnr(aligned_est, static_clean, isize)

        print("-" * 50)
        print("PSNR Values [bigger is better]")
        print("-" * 50)
        print("Optical Flow [groundtruth v1]")
        print(psnr_of)
        print("NNF [groundtruth v2]")
        print(psnr_nnf)
        print("Split [old method]")
        print(psnr_split)
        print("Ave [simple; old method]")
        print(psnr_ave_simp)
        print("Ave [old method]")
        print(psnr_ave)
        print("Proposed [new method]")
        print(psnr_est)

        # -- print nnf accuracy here --
        print("-" * 50)
        print("NNF Accuracy [bigger is better]")
        print("-" * 50)
        print("Split v.s. NNF")
        print(split_nnf_acc)
        print("Ave [Simple] v.s. NNF")
        print(ave_simp_nnf_acc)
        print("Ave v.s. NNF")
        print(ave_nnf_acc)
        print("Proposed v.s. NNF")
        print(est_nnf_acc)

        # -- location of PSNR errors --
        csize = 30
        # aligned_of = torch_to_numpy(tvF.center_crop(aligned_of, (csize, csize)))
        # aligned_ave = torch_to_numpy(tvF.center_crop(aligned_ave, (csize, csize)))
        # static_clean = torch_to_numpy(tvF.center_crop(static_clean, (csize, csize)))
        flow_gt = torch_to_numpy(flow_gt)
        flow_ave = torch_to_numpy(flow_ave)
        aligned_of = torch_to_numpy(aligned_of)
        aligned_ave = torch_to_numpy(aligned_ave)
        static_clean = torch_to_numpy(static_clean)
        # (debug) locate PSNR mismatches by comparing aligned frames pixelwise:
        # for row in range(30):
        #     print(np.abs(aligned_of[0,0,0,row] - static_clean[0,0,0,row]))
        # print(np.where(~np.isclose(flow_gt, flow_ave)))
        # print(np.where(~np.isclose(aligned_of, static_clean)))
        # indices = np.where(~np.isclose(aligned_ave, static_clean))
        # row, col = indices[-2:]
        # for elem in range(len(row)):
        #     print(np.c_[row, col][elem])

        # -- Summary of End-Point-Errors --
        print("-" * 50)
        print("Summary of EPE Errors [smaller is better]")
        print("-" * 50)
        print("[NNF v.s. Optical Flow]: %2.3f" % nnf_of.mean().item())
        print("[Split v.s. Optical Flow]: %2.3f" % split_of.mean().item())
        print("[Ave [Simple] v.s. Optical Flow]: %2.3f" % ave_simp_of.mean().item())
        print("[Ave v.s. Optical Flow]: %2.3f" % ave_of.mean().item())
        print("[Proposed v.s. Optical Flow]: %2.3f" % est_of.mean().item())
        print("[Split v.s. NNF]: %2.3f" % split_nnf.mean().item())
        print("[Ave [Simple] v.s. NNF]: %2.3f" % ave_simp_nnf.mean().item())
        print("[Ave v.s. NNF]: %2.3f" % ave_nnf.mean().item())
        print("[Proposed v.s. NNF]: %2.3f" % est_nnf.mean().item())

        # -- Summary of PSNR to Reference Image --
        print("-" * 50)
        print("Summary PSNR Values [bigger is better]")
        print("-" * 50)
        print("[Optical Flow]: %2.3f" % psnr_of.mean().item())
        print("[NNF]: %2.3f" % psnr_nnf.mean().item())
        print("[Split]: %2.3f" % psnr_split.mean().item())
        print("[Ave [Simple]]: %2.3f" % psnr_ave_simp.mean().item())
        print("[Ave]: %2.3f" % psnr_ave.mean().item())
        print("[Proposed]: %2.3f" % psnr_est.mean().item())

        print("-" * 50)
        print("PSNR Comparisons [smaller is better]")
        print("-" * 50)
        delta_split = psnr_nnf - psnr_split
        delta_ave_simp = psnr_nnf - psnr_ave_simp
        delta_ave = psnr_nnf - psnr_ave
        delta_est = psnr_nnf - psnr_est
        print("ave([NNF] - [Split]): %2.3f" % delta_split.mean().item())
        print("ave([NNF] - [Ave [Simple]]): %2.3f" % delta_ave_simp.mean().item())
        print("ave([NNF] - [Ave]): %2.3f" % delta_ave.mean().item())
        print("ave([NNF] - [Proposed]): %2.3f" % delta_est.mean().item())
def run_with_seed(seed):

    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = False
    cfg.noise_params.ntype = 'g'
    cfg.noise_params.g.std = 10.
    cfg.nframes = 5
    cfg.patchsize = 11

    # -- seeds --
    cfg.seed = seed
    # cfg.seed = 123  # sky of a forest
    # cfg.seed = 345  # handrail and stairs
    # cfg.seed = 567  # cloudy blue sky

    # -- set seed --
    set_seed(cfg.seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    train_iter = iter(loaders.tr)

    # -- fetch sample --
    sample = next(train_iter)
    sample_to_cuda(sample)

    # -- unpack data --
    noisy, clean = sample['noisy'], sample['burst']
    nframes, nimages, ncolors, H, W = noisy.shape
    isize = edict({'h': H, 'w': W})

    # -- boxes for plotting --
    boxes = edict()
    aligned = edict()

    # -- compute clean nnf --
    vprint("[start] clean nnf.")
    align_fxn = get_align_method(cfg, "l2_global")
    aligned.gt, flow = align_fxn(clean, None, None)
    boxes.gt = boxes_from_flow(flow, H, W)
    vprint("[done] clean nnf.")

    # -- compute nnf --
    vprint("[start] global nnf.")
    align_fxn = get_align_method(cfg, "l2_global")
    _, flow = align_fxn(noisy, None, None)
    aligned.global_l2 = align_from_flow(clean, flow, cfg.nblocks, isize=isize)
    boxes.global_l2 = boxes_from_flow(flow, H, W)
    vprint("[done] global nnf.")

    # -- compute local nnf --
    vprint("[start] local nnf.")
    align_fxn = get_align_method(cfg, "l2_local")
    _, flow = align_fxn(noisy, None, None)
    aligned.local_l2 = align_from_flow(clean, flow, cfg.nblocks, isize=isize)
    boxes.local_l2 = boxes_from_flow(flow, H, W)
    vprint("[done] local nnf.")

    # -- compute proposed score --
    vprint("[start] bootstrapping.")
    align_fxn = get_align_method(cfg, "bs_local_v2")
    _, flow = align_fxn(noisy, None, None)
    aligned.local_bs = align_from_flow(clean, flow, cfg.nblocks, isize=isize)
    boxes.local_bs = boxes_from_flow(flow, H, W)
    vprint("[done] bootstrapping.")

    # -- reshape to image --
    noisy = rearrange(noisy, 't b c h w -> t b h w c')
    clean = rearrange(clean, 't b c h w -> t b h w c')

    # -- normalize to [0,1] --
    noisy -= noisy.min()
    clean -= clean.min()
    noisy /= noisy.max()
    clean /= clean.max()

    # -- clamp to [0,1] --
    # noisy = noisy.clamp(0, 1)
    # clean = clean.clamp(0, 1)
    # print_tensor_stats("noisy", noisy)
    # print_tensor_stats("clean", clean)

    # -- cuda to cpu --
    noisy = noisy.cpu()
    clean = clean.cpu()
    for field in boxes.keys():
        boxes[field] = boxes[field].cpu().numpy()

    # -- plot boxes for middle pix --
    ref_pix = edict({'x': H // 2, 'y': W // 2})
    field = 'global_l2'
    plot_boxes(noisy, clean, aligned, field, boxes, ref_pix, cfg.patchsize, seed)
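# Usage sketch: the commented seeds above mark scenes the author inspected
# (forest sky, handrail and stairs, cloudy blue sky). Sweeping them reproduces
# those qualitative figures, assuming `plot_boxes` writes images to disk:
#
#   for seed in [123, 345, 567]:
#       run_with_seed(seed)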
def run_with_seed(seed):

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #            Settings
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = False
    cfg.noise_params.ntype = 'g'
    cfg.noise_params.g.std = 25.
    cfg.nframes = 3
    cfg.dynamic_info.nframes = cfg.nframes
    cfg.nblocks = 3
    cfg.patchsize = 11
    cfg.gpuid = 1
    cfg.device = f"cuda:{cfg.gpuid}"

    # -- seeds --
    cfg.seed = seed
    # cfg.seed = 123  # sky of a forest
    # cfg.seed = 345  # handrail and stairs
    # cfg.seed = 567  # cloudy blue sky

    # -- set seed --
    set_seed(cfg.seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    train_iter = iter(loaders.tr)

    # -- fetch sample --
    sample = next(train_iter)
    sample_to_cuda(sample)

    # -- unpack data --
    noisy, clean = sample['noisy'], sample['burst']
    nframes, nimages, ncolors, H, W = noisy.shape
    isize = edict({'h': H, 'w': W})

    # -- setup results --
    scores = edict()
    scores.noisy = edict()
    scores.clean = edict()
    blocks = edict()
    blocks.noisy = edict()
    blocks.clean = edict()

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #      Setup For Searches
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    # -- tile image to patches --
    pad = 2 * (cfg.nblocks // 2)
    h, w = cfg.patchsize + pad, cfg.patchsize + pad
    noisy_patches = tile_patches(noisy, cfg.patchsize + pad).pix
    noisy_patches = rearrange(noisy_patches, 'b t s (h w c) -> b s t c h w',
                              h=h, w=w)
    nimages, npix, nframes, c, psH, psW = noisy_patches.shape
    clean_patches = tile_patches(clean, cfg.patchsize + pad).pix
    clean_patches = rearrange(clean_patches, 'b t s (h w c) -> b s t c h w',
                              h=h, w=w)
    nimages, npix, nframes, c, psH, psW = clean_patches.shape
    masks = torch.ones(nimages, npix, nframes, c, psH, psW).to(cfg.device)

    # -- create constants [full search] --
    frames = np.r_[np.arange(cfg.nframes // 2),
                   np.arange(cfg.nframes // 2 + 1, cfg.nframes)]
    frames = repeat(frames, 'z -> i s z', i=nimages, s=npix)
    brange = exh_block_range(nimages, npix, cfg.nframes, cfg.nblocks)
    curr_blocks = init_optim_block(nimages, npix, cfg.nframes, cfg.nblocks)
    srch_blocks = get_search_blocks(frames, brange, curr_blocks, cfg.device)
    np_srch_blocks = torch_to_numpy(srch_blocks[0])
    S = len(srch_blocks[0, 0])

    # -- create constants [paired search] --
    frames_pair = np.array([0])
    frames = repeat(frames_pair, 'z -> i s z', i=nimages, s=npix)
    brange = exh_block_range(nimages, npix, cfg.nframes, cfg.nblocks)
    curr_blocks_pair = init_optim_block(nimages, npix, cfg.nframes, cfg.nblocks)
    srch_blocks_pair = get_search_blocks(frames, brange, curr_blocks_pair,
                                         cfg.device)
    S_pair = len(srch_blocks[0, 0])

    # -- encode blocks --
    single_search_block = srch_blocks[0, 0].cpu().numpy()
    block_strings = search_blocks_to_str(single_search_block)
    labels = search_blocks_to_labels(single_search_block, block_strings)

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #       Execute Searches
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    #
    # --- run PAIRED split search ---
    #

    ave_fxn = get_score_function("ave")
    block_batchsize = 128
    evaluator = EvalBlockScores(ave_fxn, "ave", cfg.patchsize,
                                block_batchsize, None)
    get_topK = evaluator.compute_topK_scores

    # -- a) run clean --
    clean_scores, clean_blocks = get_topK(clean_patches, masks,
                                          srch_blocks_pair, cfg.nblocks, S_pair)
    scores_full = torch_to_numpy(clean_scores[0])
    blocks_full = torch_to_numpy(clean_blocks[0])

    # -- b) tile results to full blocks --
    scores_full, blocks_full = tile_pair_to_full(scores_full, blocks_full,
                                                 np_srch_blocks, frames_pair,
                                                 cfg.nframes, cfg.nblocks)
    scores.clean.ave = scores_full
    blocks.clean.ave = batch_search_blocks_to_labels(blocks_full, block_strings)

    # -- a) run noisy --
    noisy_scores, noisy_blocks = get_topK(noisy_patches, masks,
                                          srch_blocks_pair, cfg.nblocks, S_pair)
    scores_full = torch_to_numpy(noisy_scores[0])
    blocks_full = torch_to_numpy(noisy_blocks[0])

    # -- b) tile results to full blocks --
    scores_full, blocks_full = tile_pair_to_full(scores_full, blocks_full,
                                                 np_srch_blocks, frames_pair,
                                                 cfg.nframes, cfg.nblocks)
    scores.noisy.ave = scores_full
    blocks.noisy.ave = batch_search_blocks_to_labels(blocks_full, block_strings)

    #
    # --- run FULL split search ---
    #

    ave_fxn = get_score_function("ave")
    block_batchsize = 128
    evaluator = EvalBlockScores(ave_fxn, "ave", cfg.patchsize,
                                block_batchsize, None)
    get_topK = evaluator.compute_topK_scores

    # -- run clean --
    clean_scores, clean_blocks = get_topK(clean_patches, masks, srch_blocks,
                                          cfg.nblocks, S)
    clean_scores = torch_to_numpy(clean_scores)
    scores.clean.full_ave = clean_scores[0]
    clean_blocks = torch_to_numpy(clean_blocks)
    batch_blocks = clean_blocks[0, :, :, :]
    blocks.clean.full_ave = batch_search_blocks_to_labels(batch_blocks,
                                                          block_strings)

    # -- run noisy --
    noisy_scores, noisy_blocks = get_topK(noisy_patches, masks, srch_blocks,
                                          cfg.nblocks, S)
    noisy_scores = torch_to_numpy(noisy_scores)
    scores.noisy.full_ave = noisy_scores[0]
    noisy_blocks = torch_to_numpy(noisy_blocks)
    batch_blocks = noisy_blocks[0, :, :, :]
    blocks.noisy.full_ave = batch_search_blocks_to_labels(batch_blocks,
                                                          block_strings)

    #
    # --- run bootstrapping ---
    #

    bs_fxn = get_score_function("bootstrapping_mod2")
    block_batchsize = 32
    evaluator = EvalBlockScores(bs_fxn, "bs_mod2", cfg.patchsize,
                                block_batchsize, None)
    get_topK = evaluator.compute_topK_scores

    # -- run noisy --
    noisy_scores, noisy_blocks = get_topK(noisy_patches, masks, srch_blocks,
                                          cfg.nblocks, S)
    noisy_scores = torch_to_numpy(noisy_scores)
    scores.noisy.bs = noisy_scores[0]
    noisy_blocks = torch_to_numpy(noisy_blocks)
    batch_blocks = noisy_blocks[0, :, :, :]
    blocks.noisy.bs = batch_search_blocks_to_labels(batch_blocks, block_strings)

    # -- run clean --
    clean_scores, clean_blocks = get_topK(clean_patches, masks, srch_blocks,
                                          cfg.nblocks, S)
    clean_scores = torch_to_numpy(clean_scores)
    scores.clean.bs = clean_scores[0]
    clean_blocks = torch_to_numpy(clean_blocks)
    batch_blocks = clean_blocks[0, :, :, :]  # was noisy_blocks; copy-paste bug fixed
    blocks.clean.bs = batch_search_blocks_to_labels(batch_blocks, block_strings)

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #         Plot Results
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    print("Plotting Results.")
    plot_landscape(scores, blocks, seed)
def execute_experiment(cfg):

    # -- reset sys.out if subprocess --
    cproc = current_process()
    if not (cfg.pid == cproc.pid):
        printfn = Path("./running") / f"{os.getpid()}.txt"
        orig_stdout = sys.stdout
        f = open(printfn, 'w')
        sys.stdout = f

    # -- init exp! --
    print("RUNNING Exp: [UNSUP DENOISING] Compare to Competitors")
    print(cfg)

    # -- set default device --
    torch.cuda.set_device(cfg.gpuid)

    # -- create results record to save --
    dims = {
        'batch_results': None,
        'batch_to_record': None,
        'record_results': {'default': 0},
        'stack': {'default': 0},
        'cat': {'default': 0}
    }
    record = cache_io.ExpRecord(dims)

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)

    # -- get neural network --
    model, loss_fxn, optim, sched_fxn = get_nn_model(cfg, cfg.nn_arch)

    # -- get align + sim method --
    aligned_fxn = get_align_method(cfg, cfg.align_method)
    sim_fxn = get_sim_method(cfg, aligned_fxn)

    # -- some constants --
    nframes, nblocks = cfg.nframes, cfg.nblocks
    patchsize = cfg.patchsize
    ppf = cfg.dynamic_info.ppf
    check_parameters(nblocks, patchsize)

    # -- iterate over epochs --
    start_time = time.perf_counter()
    results = {}
    for epoch in range(cfg.nepochs):

        print("-" * 25)
        print(f"Epoch [{epoch}]")
        print("-" * 25)

        sched_fxn(epoch)
        result_tr = train_model(cfg, model, loss_fxn, optim, loaders.tr, sim_fxn)
        append_result_to_dict(results, result_tr)

        if epoch % cfg.test_interval == 0:
            result_te = test_model(cfg, model, loaders.te, loss_fxn, epoch)
            append_result_to_dict(results, result_te)

        if epoch % cfg.save_interval == 0:
            pass  # checkpointing not implemented in this version

        # note: a test pass also runs every epoch, in addition to the interval above
        result_te = test_model(cfg, model, loaders.te, loss_fxn, epoch)
        append_result_to_dict(results, result_te)

    runtime = time.perf_counter() - start_time

    # -- format results --
    # listdict_to_numpy(results)
    results['runtime'] = np.array([runtime])

    return results
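# A minimal driver sketch for this entry point (hypothetical; the config must
# also carry `pid`, `nepochs`, `test_interval`, `save_interval`, `nn_arch`,
# and `align_method` beyond what `get_cfg_defaults` is shown to set above):
#
#   cfg = get_cfg_defaults()
#   cfg.pid = os.getpid()
#   results = execute_experiment(cfg)
#   print("total runtime (s):", results['runtime'][0])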