Example #1
def run_fnet_score(cfg, score_fxn_name, clean, noisy, directions, results):
    score_fxn = get_score_function(score_fxn_name)
    score, scores_t = run_fnet_ps(cfg, noisy, score_fxn)
    T = len(scores_t)
    results[f'fnet_{score_fxn_name}_score'] = score
    for t in range(T):
        results[f'fnet_{score_fxn_name}_score_{t}'] = scores_t[t]
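Every example below fetches its scoring callable by name. As orientation, here is a minimal sketch of a name-to-callable registry like `get_score_function`; the real lookup lives in the cl_gen repo, and the "ave" body is an assumption about its semantics, shown only to pin down the calling convention `score_fxn(cfg, burst) -> (score, scores_t)`.

import torch

# Hypothetical sketch of the registry behind get_score_function; names and
# the "ave" semantics are assumptions, not the repo's implementation.
def get_score_function_sketch(name):
    def ave(cfg, burst):
        # burst: (..., T, C, H, W); per-frame residual against the mean frame
        mean_frame = burst.mean(dim=-4, keepdim=True)
        scores_t = ((burst - mean_frame)**2).mean(dim=(-3, -2, -1))  # (..., T)
        return scores_t.mean(dim=-1), scores_t
    registry = {"ave": ave}
    if name not in registry:
        raise KeyError(f"unknown score function: {name}")
    return registry[name]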
Example #2
def init_exp(cfg,exp):

    # -- set patchsize -- 
    cfg.patchsize = int(exp.patchsize)
    
    # -- set nframes -- 
    cfg.nframes = int(exp.nframes)
    cfg.N = cfg.nframes

    # -- set number of blocks (old: neighborhood size) -- 
    cfg.nblocks = int(exp.nblocks)
    cfg.nh_size = cfg.nblocks # old name

    # -- get noise function --
    nconfig = get_noise_config(cfg,exp.noise_type)
    noise_xform = get_noise_transform(nconfig,use_to_tensor=False)
    
    # -- get dynamics function --
    cfg.dynamic.ppf = exp.ppf
    cfg.dynamic.bool = True
    cfg.dynamic.random_eraser = False
    cfg.dynamic.frame_size = cfg.frame_size
    cfg.dynamic.total_pixels = cfg.dynamic.ppf*(cfg.nframes-1)
    cfg.dynamic.frames = exp.nframes

    def nonoise(image): return image
    dynamic_info = cfg.dynamic
    dynamic_raw_xform = get_dynamic_transform(dynamic_info,nonoise)
    dynamic_xform = dynamic_wrapper(dynamic_raw_xform)

    # -- get score function --
    score_function = get_score_function(exp.score_function)

    return noise_xform,dynamic_xform,score_function
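A sketch of how the three returned callables might compose downstream; `image`, `cfg`, and `exp` are assumed to be set up by the surrounding experiment code, and the return signature of the dynamics transform (it comes from dynamic_wrapper in the repo) is an assumption here.

# Hypothetical composition of init_exp's returns.
noise_xform, dynamic_xform, score_function = init_exp(cfg, exp)
burst = dynamic_xform(image)          # clean burst with synthetic motion (assumed signature)
noisy = noise_xform(burst)            # noise applied per the exp config
score, scores_t = score_function(cfg, noisy)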
Example #3
def compute_raw_frame_scores(self, block):
    # B,R,E,T,C,H,W = block.shape
    block = rearrange(block, 'b r e t c h w -> r b e t c h w')
    score_fxn = get_score_function(self.score_params.name)
    score, scores_t = score_fxn(self.noise_info, block)
    score = torch.mean(score, dim=0)
    scores_t = torch.mean(scores_t, dim=0)
    scores = self.aggregate_scores(score, scores_t)
    return scores
Example #4
def test_ave(benchmark):
    
    npatches = 1
    nimages = 3
    naligns = 100
    nframes = 3
    ncolors = 3
    H,W = 15,15

    data = torch.rand(npatches,nimages,
                      naligns,nframes,
                      ncolors,H,W)
    data = data.to('cuda:0')
    
    ave = get_score_function("ave")
    benchmark(ave,None,data)
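These timing tests use the pytest-benchmark fixture: `benchmark(fn, *args)` repeatedly invokes `fn(*args)` and records timing statistics. A crude stand-in for running the same measurement without the fixture, averaging wall-clock time over a few synchronized CUDA runs:

import time
import torch

# Sketch of a manual timer; assumes a CUDA device is available, as in the test.
def time_score_fxn(score_fxn, data, nreps=10):
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(nreps):
        score_fxn(None, data)
    torch.cuda.synchronize()
    return (time.perf_counter() - start) / nreps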
Example #5
def test_bootstrap_mod2(benchmark):
    
    npatches = 1
    nimages = 3
    naligns = 100
    nframes = 3
    ncolors = 3
    H,W = 15,15

    data = torch.rand(npatches,nimages,
                      naligns,nframes,
                      ncolors,H,W)
    data = data.to('cuda:0')
    
    bootstrapping = get_score_function("bootstrapping_mod2")
    benchmark(bootstrapping,None,data)
Example #6
def propose_frames_ransac(patches, curr_blocks, ps, size, nblocks):
    score_fxn = get_score_function("bootstrapping_mod2")

    nimages, nsegs, nframes = patches.shape[:3]
    pcolor, ps_pad, ps_pad = patches.shape[3:]
    device = patches.device
    block_patches = np.zeros((nimages, nsegs, nframes, 1, pcolor, ps, ps))
    block_patches = torch.FloatTensor(block_patches).to(device,
                                                        non_blocking=True)
    curr_blocks = repeat(
        torch.LongTensor(curr_blocks).to(device), 'i s t -> i s 1 t')
    tokeep = torch.IntTensor(np.arange(1)).to(device, non_blocking=True)

    # block_patches_nba = numba.cuda.as_cuda_array(block_patches)
    # patches_nba = numba.cuda.as_cuda_array(patches)
    # batch_nba = numba.cuda.as_cuda_array(curr_blocks)
    # block_utils.index_block_batches(block_patches_nba,
    #                                 patches_nba,batch_nba,
    #                                 ps,nblocks)
    block_patches_i = block_utils.index_block_batches(block_patches, patches,
                                                      curr_blocks, tokeep, ps,
                                                      nblocks)

    def compute_score_fxn(patches):
        nimages = patches.shape[0]
        patches = rearrange(patches, 'b p t a c h w -> 1 (b p) a t c h w')
        cfg = edict({'gpuid': 0})
        scores, scores_t = score_fxn(cfg, patches)
        scores = rearrange(scores, '1 (b p) a -> b p a', b=nimages)
        scores_t = rearrange(scores_t, '1 (b p) a t -> b p a t', b=nimages)
        scores_t = scores_t[:, :, 0, :]
        return scores_t

    scores_t = compute_score_fxn(block_patches_i)
    scores_t = tnnF.normalize(scores_t, dim=2, p=1.).cpu().numpy()
    picked = np.zeros((nimages, nsegs, size))
    numba_choice_sample_mat(picked, scores_t, size)
    picked = picked.astype(np.int64)  # np.int was removed in NumPy 1.24
    return picked
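`numba_choice_sample_mat` fills `picked` in place; presumably it draws `size` frame indices per (image, segment) pair according to the normalized per-frame scores. A plain-NumPy stand-in with that assumed behavior (a sketch, not the numba kernel):

import numpy as np

# Hypothetical equivalent of numba_choice_sample_mat, for orientation only.
def choice_sample_mat_sketch(picked, scores_t, size, rng=None):
    # picked: (nimages, nsegs, size); scores_t: (nimages, nsegs, nframes)
    rng = np.random.default_rng() if rng is None else rng
    nimages, nsegs, nframes = scores_t.shape
    for i in range(nimages):
        for s in range(nsegs):
            p = scores_t[i, s] / scores_t[i, s].sum()
            picked[i, s] = rng.choice(nframes, size=size, p=p)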
Example #7
def run_pixel_scores(cfg,clean,noisy,directions,results):
    P,B,E,T,C,PS,PS = clean.shape # input shape
    # clean = rearrange(clean,'t e b p c h w -> p b e t c h w')
    # noisy = rearrange(noisy,'t e b p c h w -> p b e t c h w')
    """
    P = different patches from same image ("R" in patch_match/pixel/score.py)
    B = different images
    E = different block regions around the centered patch (batch of grid)
    T = burst of frames along a batch dimension
    """
    # score_fxn_names = ["ave","lgsubset","lgsubset_v_indices","lgsubset_v_ref"]
    # score_fxn_names = ["ave","lgsubset_v_ref","jackknife"]
    # score_fxn_names = ["ave","jackknife"]
    # score_fxn_names = ["ave","sim_trm"]
    # score_fxn_names = ["ransac"]
    score_fxn_names = ["ave","ransac","bootstrapping"]
    for score_fxn_name in score_fxn_names:
        score_fxn = get_score_function(score_fxn_name)
        scores,scores_t = score_fxn(cfg,noisy)
        scores,scores_t = scores.cpu(),scores_t.cpu()
        fieldname = f'pixel_{score_fxn_name}'
        results[fieldname] = {'scores':scores,'scores_t':scores_t}
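The expected layout of `clean`/`noisy` is spelled out in the docstring above; a minimal invocation with random data might look like this (a sketch; `cfg` and its contents are assumed to come from the caller):

# Hypothetical call with random data in the documented (P,B,E,T,C,PS,PS) layout.
P, B, E, T, C, PS = 4, 2, 9, 3, 3, 11
clean = torch.rand(P, B, E, T, C, PS, PS, device='cuda:0')
noisy = clean + (25. / 255.) * torch.randn_like(clean)
results = {}
run_pixel_scores(cfg, clean, noisy, None, results)
print(results['pixel_ave']['scores'].shape)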
Example #8
File: exp.py  Project: gauenk/cl_gen
def execute_experiment(cfg):

    # -- init exp! --
    print("RUNNING EXP.")
    print(cfg)

    # -- create results record to save --
    dims = {
        'batch_results': None,
        'batch_to_record': None,
        'record_results': {
            'default': 0
        },
        'stack': {
            'default': 0
        },
        'cat': {
            'default': 0
        }
    }
    record = cache_io.ExpRecord(dims)

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)

    # -- get score function --
    score_fxn_ave = get_score_function("ave")
    score_fxn_mse = get_score_function("mse")
    score_fxn_bs = get_score_function("bootstrapping")
    score_fxn_bs_cf = get_score_function("bootstrapping_cf")
    # score_fxn_bs = get_score_function("bootstrapping_mod2")
    # score_fxn_bs = get_score_function(cfg.score_fxn_name)
    score_fxn_bsl = get_score_function("bootstrapping_limitB")

    # -- some constants --
    NUM_BATCHES = 3
    nframes, nblocks = cfg.nframes, cfg.nblocks
    patchsize = cfg.patchsize
    ppf = cfg.dynamic_info.ppf
    check_parameters(nblocks, patchsize)

    # -- create evaluator for ave; simple --
    iterations, K = 1, 1
    subsizes = []
    block_batchsize = 256
    eval_ave_simp = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                                    block_batchsize, None)

    # -- create evaluator for ave --
    iterations, K = 1, 1
    subsizes = []
    eval_ave = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                               block_batchsize, None)
    eval_mse = EvalBlockScores(score_fxn_mse, "mse", patchsize,
                               block_batchsize, None)

    # -- create evaluator for bootstrapping --
    block_batchsize = 81
    eval_plimb = EvalBootBlockScores(score_fxn_bsl, score_fxn_bs, "bsl",
                                     patchsize, block_batchsize, None)
    eval_prop = EvalBlockScores(score_fxn_bs, "bs", patchsize, block_batchsize,
                                None)
    eval_prop_cf = EvalBlockScores(score_fxn_bs_cf, "bs_cf", patchsize,
                                   block_batchsize, None)

    # -- iterate over images --
    for image_bindex in range(NUM_BATCHES):

        print("-=" * 30 + "-")
        print(f"Running image batch index: {image_bindex}")
        print("-=" * 30 + "-")
        torch.cuda.empty_cache()

        # -- sample & unpack batch --
        sample = next(image_iter)
        sample_to_cuda(sample)

        dyn_noisy = sample['noisy']  # dynamics and noise
        dyn_clean = sample['burst']  # dynamics and no noise
        static_noisy = sample['snoisy']  # no dynamics and noise
        static_clean = sample['sburst']  # no dynamics and no noise
        flow_gt = sample['flow']
        image_index = sample['index']
        tl_index = sample['tl_index']
        rng_state = sample['rng_state']
        if cfg.noise_params.ntype == "pn":
            dyn_noisy = anscombe.forward(dyn_noisy)
        # dyn_noisy = dyn_clean
        dyn_noisy = static_clean  # NOTE: overrides the noisy burst with the static clean burst

        # -- shape info --
        T, B, C, H, W = dyn_noisy.shape
        isize = edict({'h': H, 'w': W})
        ref_t = nframes // 2
        nimages, npix, nframes = B, H * W, T

        # -- create results dict --
        flows = edict()
        aligned = edict()
        runtimes = edict()
        optimal_scores = edict()  # score function at optimal

        # -- groundtruth flow --
        flow_gt_rs = rearrange(flow_gt, 'i tm1 two -> i 1 tm1 two')
        blocks_gt = flow_to_blocks(flow_gt_rs, nblocks)
        flows.of = repeat(flow_gt, 'i tm1 two -> i p tm1 two', p=npix)
        aligned.of = align_from_flow(dyn_clean, flows.of, nblocks, isize=isize)
        runtimes.of = 0.  # given
        optimal_scores.of = np.zeros(
            (nimages, npix, nframes))  # clean target is zero
        aligned.clean = static_clean

        # -- compute nearest neighbor fields [global] --
        start_time = time.perf_counter()
        shape_str = 't b h w two -> b (h w) t two'
        nnf_vals, nnf_pix = nnf.compute_burst_nnf(dyn_clean, ref_t, patchsize)
        nnf_pix_best = torch.LongTensor(
            rearrange(nnf_pix[..., 0, :], shape_str))
        nnf_pix_best = torch.LongTensor(nnf_pix_best)
        flows.nnf = pix_to_flow(nnf_pix_best)
        aligned.nnf = align_from_pix(dyn_clean, nnf_pix_best, nblocks)
        runtimes.nnf = time.perf_counter() - start_time
        optimal_scores.nnf = np.zeros(
            (nimages, npix, nframes))  # clean target is zero

        # -- compute proposed search of nnf --
        print("[Bootstrap] loss function")
        iterations = 1
        K, subsizes = get_boot_hyperparams(cfg.nframes, cfg.nblocks)
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
        # flows.est = optim.run(dyn_noisy,patchsize,eval_prop,
        #                      nblocks,iterations,subsizes,K)
        flows.est = flows.of.clone()
        aligned.est = align_from_flow(dyn_clean,
                                      flows.est,
                                      patchsize,
                                      isize=isize)
        runtimes.est = time.perf_counter() - start_time

        # -- load adjacent blocks --
        nframes, nimages, ncolor, h, w = dyn_noisy.shape
        nsegs = h * w
        brange = exh_block_range(nimages, nsegs, nframes, nblocks)
        ref_block = get_ref_block(nblocks)
        curr_blocks = init_optim_block(nimages, nsegs, nframes, nblocks)
        curr_blocks = curr_blocks[:, :, :,
                                  None]  # nimages, nsegs, nframes, naligns
        frames = np.r_[np.arange(nframes // 2),
                       np.arange(nframes // 2 + 1, nframes)]
        frames = repeat(frames, 't -> i s t', i=nimages, s=nsegs)
        search_blocks = get_search_blocks(frames, brange, curr_blocks,
                                          f'cuda:{cfg.gpuid}')
        print("search_blocks.shape ", search_blocks.shape)
        init_blocks = rearrange(curr_blocks,
                                'i s t a -> i s a t').to(dyn_noisy.device)
        print("init_blocks.shape ", init_blocks.shape)
        search_blocks = search_blocks[0, 0]
        init_blocks_ = init_blocks[0, 0]
        search_blocks = eval_plimb.filter_blocks_to_1skip_neighbors(
            search_blocks, init_blocks_)
        search_blocks = repeat(search_blocks,
                               'a t -> i s a t',
                               i=nimages,
                               s=nsegs)
        print("search_blocks.shape ", search_blocks.shape)

        # -- compute MSE for the batch --
        est = edict()
        bscf = edict()
        plimb = edict()

        print("curr_blocks.shape ", curr_blocks.shape)
        eval_prop.score_fxn_name = ""
        scores, scores_t, blocks = eval_mse.score_burst_from_blocks(
            dyn_noisy, search_blocks, patchsize, nblocks)
        est.scores = scores
        est.scores_t = scores_t
        est.blocks = blocks
        print("Done with est.")

        # -- compute bootstrapping in closed form --
        scores, scores_t, blocks = eval_prop_cf.score_burst_from_blocks(
            dyn_noisy, search_blocks, patchsize, nblocks)
        bscf.scores = scores
        bscf.scores_t = scores_t
        bscf.blocks = blocks

        # -- compute bootstrapping for the batch --
        print("Get init block from original bootstrap.")
        print(init_blocks[0, 0])
        scores, scores_t, blocks = eval_prop.score_burst_from_blocks(
            dyn_noisy, init_blocks, patchsize, nblocks)

        print("Starting prop.")
        eval_prop.score_fxn_name = "bs"
        eval_prop.score_cfg.bs_type = ""
        state = edict({'scores': scores, 'blocks': blocks})
        scores, scores_t, blocks = eval_plimb.score_burst_from_blocks(
            dyn_noisy, search_blocks, state, patchsize, nblocks)
        plimb.scores = scores
        plimb.scores_t = scores_t
        plimb.blocks = blocks

        print(est.scores.shape)
        print(bscf.scores.shape)
        print(state.scores.shape)
        print(plimb.scores.shape)

        diff_plimb = plimb.scores[0] - est.scores[0]
        perc_delta = torch.abs(diff_plimb) / est.scores[0]
        diff_bscf = bscf.scores[0] - est.scores[0]
        perc_delta_cf = torch.abs(diff_bscf) / est.scores[0]

        pix_idx_list = [0, 20, 30]  #np.arange(h*w)
        for p in pix_idx_list:
            print("-" * 10 + f" @ {p}")
            print("est", est.scores[0, p].cpu().numpy())
            print("state", state.scores[0, p].cpu().numpy())
            print("bscf", bscf.scores[0, p].cpu().numpy())
            print("plimb", plimb.scores[0, p].cpu().numpy())
            print("plimb/est", plimb.scores[0, p] / est.scores[0, p])
            print("plimb - est", plimb.scores[0, p] - est.scores[0, p])
            print("plimb - bscf", plimb.scores[0, p] - bscf.scores[0, p])

            print("%Delta [plimb]", perc_delta[p])
            print("L2-Norm [plimb]", torch.sum(diff_plimb[p]**2))
            print("Nmlz L2-Norm [plimb]", torch.mean(diff_plimb[p]**2))

            print("%Delta [bscf]", perc_delta_cf[p])
            print("L2-Norm [bscf]", torch.sum(diff_bscf[p]**2))
            print("Nmlz L2-Norm [bscf]", torch.mean(diff_bscf[p]**2))

        print("[Overall: plimb] %Delta: ", torch.mean(perc_delta).item())
        print("[Overall: plimb] L2-Norm: ", torch.sum(diff_plimb**2).item())
        print("[Overall: plimb] Nmlz L2-Norm: ",
              torch.mean(diff_plimb**2).item())
        print("[Overall: bscf] %Delta: ", torch.mean(perc_delta_cf).item())
        print("[Overall: bscf] L2-Norm: ", torch.sum(diff_bscf**2).item())
        print("[Overall: bscf] Nmlz L2-Norm: ",
              torch.mean(diff_bscf**2).item())

        # -- format results --
        pad = 3  #2*(nframes-1)*ppf+4
        isize = edict({'h': H - pad, 'w': W - pad})

        # -- flows to numpy --
        is_even = cfg.frame_size % 2 == 0
        mid_pix = cfg.frame_size * cfg.frame_size // 2 + (cfg.frame_size //
                                                          2) * is_even
        mid_pix = 32 * 10 + 23
        flows_np = edict_torch_to_numpy(flows)

        # -- End-Point-Errors --
        epes_of = compute_flows_epe_wrt_ref(flows, "of")
        epes_nnf = compute_flows_epe_wrt_ref(flows, "nnf")
        epes_nnf_local = compute_flows_epe_wrt_ref(flows, "nnf_local")
        nnf_acc = compute_acc_wrt_ref(flows, "nnf")
        nnf_local_acc = compute_acc_wrt_ref(flows, "nnf_local")

        # -- PSNRs --
        aligned = remove_frame_centers(aligned)
        psnrs = compute_frames_psnr(aligned, isize)

        # -- print report ---
        print("\n" * 3)  # banner
        print("-" * 25 + " Results " + "-" * 25)
        print_dict_ndarray_0_midpix(flows_np, mid_pix)
        print_runtimes(runtimes)
        print_verbose_psnrs(psnrs)
        print_delta_summary_psnrs(psnrs)
        print_verbose_epes(epes_of, epes_nnf)
        print_nnf_acc(nnf_acc)
        print_nnf_local_acc(nnf_local_acc)
        print_summary_epes(epes_of, epes_nnf)
        print_summary_psnrs(psnrs)

        # -- prepare results to be appended --
        psnrs = edict_torch_to_numpy(psnrs)
        epes_of = edict_torch_to_numpy(epes_of)
        epes_nnf = edict_torch_to_numpy(epes_nnf)
        epes_nnf_local = edict_torch_to_numpy(epes_nnf_local)
        nnf_acc = edict_torch_to_numpy(nnf_acc)
        nnf_local_acc = edict_torch_to_numpy(nnf_local_acc)
        image_index = torch_to_numpy(image_index)
        batch_results = {
            'runtimes': runtimes,
            'optimal_scores': optimal_scores,
            'psnrs': psnrs,
            'epes_of': epes_of,
            'epes_nnf': epes_nnf,
            'epes_nnf_local': epes_nnf_local,
            'nnf_acc': nnf_acc,
            'nnf_local_acc': nnf_local_acc
        }

        # -- format results --
        batch_results = flatten_internal_dict(batch_results)
        format_fields(batch_results, image_index, rng_state)

        print("shape check.")
        for key, value in batch_results.items():
            print(key, value.shape)

        record.append(batch_results)
    # print("\n"*3)
    # print("-"*20)
    # print(record.record)
    # print("-"*20)
    # print("\n"*3)
    # record.stack_record()
    record.cat_record()
    # print("\n"*3)
    # print("-"*20)
    # print(record.record)
    # print("-"*20)
    print("\n" * 3)

    print("\n" * 3)
    print("-" * 20)
    # df = pd.DataFrame().append(record.record,ignore_index=True)
    for key, val in record.record.items():
        print(key, val.shape)
    # print(df)
    print("-" * 20)
    print("\n" * 3)

    return record.record
Example #9
def test_nnf():

    # -- get config --
    cfg = config()
    print("Config for Testing.")
    print(cfg)

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)
    nskips = 2 + 4 + 2 + 4 + 1
    for skip in range(nskips):
        next(image_iter)

    # -- get score function --
    score_fxn_ave = get_score_function("ave")
    score_fxn_bs = get_score_function(cfg.score_fxn_name)

    # -- some constants --
    NUM_BATCHES = 10
    nframes, nblocks = cfg.nframes, cfg.nblocks
    patchsize = cfg.patchsize
    ppf = cfg.dynamic_info.ppf
    check_parameters(nblocks, patchsize)

    # -- create evaluator for ave; simple --
    iterations, K = 1, 1
    subsizes = []
    block_batchsize = 256
    eval_ave_simp = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                                    block_batchsize, None)

    # -- create evaluator for ave --
    iterations, K = 1, 1
    subsizes = []
    eval_ave = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                               block_batchsize, None)

    # -- create evaluator for bootstrapping --
    block_batchsize = 64
    eval_prop = EvalBlockScores(score_fxn_bs, "bs", patchsize, block_batchsize,
                                None)

    # -- iterate over images --
    for image_bindex in range(NUM_BATCHES):

        print("-=" * 30 + "-")
        print(f"Running image batch index: {image_bindex}")
        print("-=" * 30 + "-")

        # -- sample & unpack batch --
        sample = next(image_iter)
        sample_to_cuda(sample)

        dyn_noisy = sample['noisy']  # dynamics and noise
        dyn_clean = sample['burst']  # dynamics and no noise
        static_noisy = sample['snoisy']  # no dynamics and noise
        static_clean = sample['sburst']  # no dynamics and no noise
        flow_gt = sample['ref_flow']
        # flow_gt = sample['seq_flow']
        if cfg.noise_params.ntype == "pn":
            dyn_noisy = anscombe.forward(dyn_noisy)

        # -- shape info --
        T, B, C, H, W = dyn_noisy.shape
        isize = edict({'h': H, 'w': W})
        ref_t = nframes // 2
        npix = H * W

        # -- groundtruth flow --
        # print("flow_gt",flow_gt)
        flow_gt_rs = rearrange(flow_gt, 'i tm1 two -> i 1 tm1 two')
        blocks_gt = flow_to_blocks(flow_gt_rs, nblocks)
        # print("\n\n")
        # print("flow_gt[0,0] ",flow_gt)
        # print("blocks_gt[0,0] ",blocks_gt[0,0])
        flow_gt = repeat(flow_gt, 'i tm1 two -> i p tm1 two', p=npix)
        aligned_of = align_from_flow(dyn_clean, flow_gt, nblocks, isize=isize)
        pix_gt = flow_to_pix(flow_gt.clone(), isize=isize)

        # -- compute nearest neighbor fields --
        start_time = time.perf_counter()
        shape_str = 't b h w two -> b (h w) t two'
        nnf_vals, nnf_pix = nnf.compute_burst_nnf(dyn_clean, ref_t, patchsize)
        nnf_pix_best = torch.LongTensor(
            rearrange(nnf_pix[..., 0, :], shape_str))
        nnf_pix_best = torch.LongTensor(nnf_pix_best)
        pix_nnf = nnf_pix_best.clone()
        flow_nnf = pix_to_flow(nnf_pix_best)
        aligned_nnf = align_from_pix(dyn_clean, nnf_pix_best, nblocks)
        time_nnf = time.perf_counter() - start_time

        # -- compute proposed search of nnf --
        start_time = time.perf_counter()
        print(dyn_noisy.shape)
        # split_vals,split_pix = nnf.compute_burst_nnf(dyn_noisy,ref_t,patchsize)
        split_pix = np.copy(nnf_pix)
        split_pix_best = torch.LongTensor(
            rearrange(split_pix[..., 0, :], shape_str))
        split_pix_best = torch.LongTensor(split_pix_best)
        pix_split = split_pix_best.clone()
        flow_split = pix_to_flow(split_pix_best)
        aligned_split = align_from_pix(dyn_clean, split_pix_best, nblocks)
        time_split = time.perf_counter() - start_time

        # -- compute simple ave --
        iterations, K = 0, 1
        subsizes = []
        print("[simple] Ave loss function")
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
        # flow_ave_simp = optim.run(dyn_noisy,patchsize,eval_ave_simp,
        #                      nblocks,iterations,subsizes,K)
        flow_ave_simp = flow_gt.clone().cpu()
        aligned_ave_simp = align_from_flow(dyn_clean,
                                           flow_ave_simp,
                                           nblocks,
                                           isize=isize)
        time_ave_simp = time.perf_counter() - start_time
        print(flow_ave_simp.shape)

        # -- compute complex ave --
        iterations, K = 0, 1
        subsizes = []
        print("[complex] Ave loss function")
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
        flow_ave = optim.run(dyn_noisy, patchsize, eval_ave, nblocks,
                             iterations, subsizes, K)
        # flow_ave = flow_gt.clone()
        pix_ave = flow_to_pix(flow_ave.clone(), isize=isize)
        aligned_ave = align_from_flow(dyn_clean,
                                      flow_ave,
                                      nblocks,
                                      isize=isize)
        time_ave = time.perf_counter() - start_time

        # -- compute proposed search of nnf --
        # iterations,K = 50,3
        # subsizes = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
        #iterations,K = 1,nblocks**2
        # K is a function of noise level.
        # iterations,K = 1,nblocks**2
        iterations, K = 1, 2 * nblocks  #**2
        # subsizes = [3]#,3,3,3,3,3,3,3,3,3]
        # subsizes = [3,3,3,3,3,3,3,]
        subsizes = [3, 3, 3, 3, 3, 3, 3, 3]
        # subsizes = [nframes]
        # subsizes = [nframes]
        print("[Bootstrap] loss function")
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
        flow_est = optim.run(dyn_noisy, patchsize, eval_prop, nblocks,
                             iterations, subsizes, K)
        pix_est = flow_to_pix(flow_est.clone(), isize=isize)
        aligned_est = align_from_flow(dyn_clean,
                                      flow_est,
                                      patchsize,
                                      isize=isize)
        time_est = time.perf_counter() - start_time
        # flow_est = flow_gt.clone()
        # aligned_est = aligned_of.clone()
        # time_est = 0.

        # -- banner --
        print("\n" * 3)
        print("-" * 25 + " Results " + "-" * 25)

        # -- examples of flow --
        print("-" * 50)
        is_even = cfg.frame_size % 2 == 0
        mid_pix = cfg.frame_size * cfg.frame_size // 2 + (cfg.frame_size //
                                                          2) * is_even
        mid_pix = 32 * 10 + 23
        # mid_pix = 32*23+10
        flow_gt_np = torch_to_numpy(flow_gt)
        flow_nnf_np = torch_to_numpy(flow_nnf)
        flow_split_np = torch_to_numpy(flow_split)
        flow_ave_simp_np = torch_to_numpy(flow_ave_simp)
        flow_ave_np = torch_to_numpy(flow_ave)
        flow_est_np = torch_to_numpy(flow_est)
        print(flow_gt_np[0, mid_pix])
        print(flow_nnf_np[0, mid_pix])
        print(flow_split_np[0, mid_pix])
        print(flow_ave_simp_np[0, mid_pix])
        print(flow_ave_np[0, mid_pix])
        print(flow_est_np[0, mid_pix])
        print("-" * 50)
        pix_gt_np = torch_to_numpy(pix_gt)
        pix_nnf_np = torch_to_numpy(pix_nnf)
        pix_ave_np = torch_to_numpy(pix_ave)
        pix_est_np = torch_to_numpy(pix_est)
        print(pix_gt_np[0, mid_pix])
        print(pix_nnf_np[0, mid_pix])
        print(pix_ave_np[0, mid_pix])
        print(pix_est_np[0, mid_pix])

        # print(aligned_of[0,0,:,10,23].cpu() - static_clean[0,0,:,10,23].cpu())
        # print(aligned_ave[0,0,:,10,23].cpu() - static_clean[0,0,:,10,23].cpu())

        # print(aligned_of[0,0,:,23,10].cpu() - static_clean[0,0,:,23,10].cpu())
        # print(aligned_ave[0,0,:,23,10].cpu() - static_clean[0,0,:,23,10].cpu())

        print("-" * 50)

        # -- compare compute time --
        print("-" * 50)
        print("Compute Time [smaller is better]")
        print("-" * 50)
        print("[NNF]: %2.3e" % time_nnf)
        print("[Split]: %2.3e" % time_split)
        print("[Ave [Simple]]: %2.3e" % time_ave_simp)
        print("[Ave]: %2.3e" % time_ave)
        print("[Proposed]: %2.3e" % time_est)

        # -- compare gt v.s. nnf computations --
        nnf_of = compute_epe(flow_nnf, flow_gt)
        split_of = compute_epe(flow_split, flow_gt)
        ave_simp_of = compute_epe(flow_ave_simp, flow_gt)
        ave_of = compute_epe(flow_ave, flow_gt)
        est_of = compute_epe(flow_est, flow_gt)

        split_nnf = compute_epe(flow_split, flow_nnf)
        ave_simp_nnf = compute_epe(flow_ave_simp, flow_nnf)
        ave_nnf = compute_epe(flow_ave, flow_nnf)
        est_nnf = compute_epe(flow_est, flow_nnf)

        # -- End-Point-Errors --
        print("-" * 50)
        print("EPE Errors [smaller is better]")
        print("-" * 50)

        print("NNF v.s. Optical Flow.")
        print(nnf_of)
        print("Split v.s. Optical Flow.")
        print(split_of)
        print("Ave [Simple] v.s. Optical Flow.")
        print(ave_simp_of)
        print("Ave v.s. Optical Flow.")
        print(ave_of)
        print("Proposed v.s. Optical Flow.")
        print(est_of)
        print("Split v.s. NNF")
        print(split_nnf)
        print("Ave [Simple] v.s. NNF")
        print(ave_simp_nnf)
        print("Ave v.s. NNF")
        print(ave_nnf)
        print("Proposed v.s. NNF")
        print(est_nnf)

        # -- compare accuracy of method nnf v.s. actual nnf --
        def compute_flow_acc(guess, gt):
            both = torch.all(guess.type(torch.long) == gt.type(torch.long),
                             dim=-1)
            ncorrect = torch.sum(both)
            acc = 100 * float(ncorrect) / both.numel()
            return acc

        split_nnf_acc = compute_flow_acc(flow_split, flow_nnf)
        ave_simp_nnf_acc = compute_flow_acc(flow_ave_simp, flow_nnf)
        ave_nnf_acc = compute_flow_acc(flow_ave, flow_nnf)
        est_nnf_acc = compute_flow_acc(flow_est, flow_nnf)

        # -- PSNR to Reference Image --
        pad = 2 * (nframes - 1) * ppf + 4
        isize = edict({'h': H - pad, 'w': W - pad})
        # print("isize: ",isize)
        aligned_of = remove_center_frame(aligned_of)
        aligned_nnf = remove_center_frame(aligned_nnf)
        aligned_split = remove_center_frame(aligned_split)
        aligned_ave_simp = remove_center_frame(aligned_ave_simp)
        aligned_ave = remove_center_frame(aligned_ave)
        aligned_est = remove_center_frame(aligned_est)
        static_clean = remove_center_frame(static_clean)

        psnr_of = compute_aligned_psnr(aligned_of, static_clean, isize)
        psnr_nnf = compute_aligned_psnr(aligned_nnf, static_clean, isize)
        psnr_split = compute_aligned_psnr(aligned_split, static_clean, isize)
        psnr_ave_simp = compute_aligned_psnr(aligned_ave_simp, static_clean,
                                             isize)
        psnr_ave = compute_aligned_psnr(aligned_ave, static_clean, isize)
        psnr_est = compute_aligned_psnr(aligned_est, static_clean, isize)

        print("-" * 50)
        print("PSNR Values [bigger is better]")
        print("-" * 50)

        print("Optical Flow [groundtruth v1]")
        print(psnr_of)
        print("NNF [groundtruth v2]")
        print(psnr_nnf)
        print("Split [old method]")
        print(psnr_split)
        print("Ave [simple; old method]")
        print(psnr_ave_simp)
        print("Ave [old method]")
        print(psnr_ave)
        print("Proposed [new method]")
        print(psnr_est)

        # -- print nnf accuracy here --

        print("-" * 50)
        print("NNF Accuracy [bigger is better]")
        print("-" * 50)

        print("Split v.s. NNF")
        print(split_nnf_acc)
        print("Ave [Simple] v.s. NNF")
        print(ave_simp_nnf_acc)
        print("Ave v.s. NNF")
        print(ave_nnf_acc)
        print("Proposed v.s. NNF")
        print(est_nnf_acc)

        # -- location of PSNR errors --
        csize = 30
        # aligned_of = torch_to_numpy(tvF.center_crop(aligned_of,(csize,csize)))
        # aligned_ave = torch_to_numpy(tvF.center_crop(aligned_ave,(csize,csize)))
        # static_clean = torch_to_numpy(tvF.center_crop(static_clean,(csize,csize)))
        flow_gt = torch_to_numpy(flow_gt)
        flow_ave = torch_to_numpy(flow_ave)
        aligned_of = torch_to_numpy(aligned_of)
        aligned_ave = torch_to_numpy(aligned_ave)
        static_clean = torch_to_numpy(static_clean)

        # print("WHERE?")
        # print("OF")
        # print(aligned_of.shape)
        # for row in range(30):
        #     print(np.abs(aligned_of[0,0,0,row]- static_clean[0,0,0,row]))
        # print(np.where(~np.isclose(aligned_of,aligned_of)))
        # print(np.where(~np.isclose(flow_gt,flow_ave)))
        # print(np.where(~np.isclose(aligned_of,aligned_of)))
        # print(np.where(~np.isclose(aligned_of,static_clean)))
        # print("Ave")
        # indices = np.where(~np.isclose(aligned_ave,static_clean))
        # row,col = indices[-2:]
        # for elem in range(len(row)):
        #     print(np.c_[row,col][elem])
        # print(np.where(~np.isclose(aligned_ave,static_clean)))

        # -- Summary of End-Point-Errors --
        print("-" * 50)
        print("Summary of EPE Errors [smaller is better]")
        print("-" * 50)

        print("[NNF v.s. Optical Flow]: %2.3f" % nnf_of.mean().item())
        print("[Split v.s. Optical Flow]: %2.3f" % split_of.mean().item())
        print("[Ave [Simple] v.s. Optical Flow]: %2.3f" %
              ave_simp_of.mean().item())
        print("[Ave v.s. Optical Flow]: %2.3f" % ave_of.mean().item())
        print("[Proposed v.s. Optical Flow]: %2.3f" % est_of.mean().item())
        print("[Split v.s. NNF]: %2.3f" % split_nnf.mean().item())
        print("[Ave [Simple] v.s. NNF]: %2.3f" % ave_simp_nnf.mean().item())
        print("[Ave v.s. NNF]: %2.3f" % ave_nnf.mean().item())
        print("[Proposed v.s. NNF]: %2.3f" % est_nnf.mean().item())

        # -- Summary of PSNR to Reference Image --

        print("-" * 50)
        print("Summary PSNR Values [bigger is better]")
        print("-" * 50)

        print("[Optical Flow]: %2.3f" % psnr_of.mean().item())
        print("[NNF]: %2.3f" % psnr_nnf.mean().item())
        print("[Split]: %2.3f" % psnr_split.mean().item())
        print("[Ave [Simple]]: %2.3f" % psnr_ave_simp.mean().item())
        print("[Ave]: %2.3f" % psnr_ave.mean().item())
        print("[Proposed]: %2.3f" % psnr_est.mean().item())

        print("-" * 50)
        print("PSNR Comparisons [smaller is better]")
        print("-" * 50)
        delta_split = psnr_nnf - psnr_split
        delta_ave_simp = psnr_nnf - psnr_ave_simp
        delta_ave = psnr_nnf - psnr_ave
        delta_est = psnr_nnf - psnr_est
        print("ave([NNF] - [Split]): %2.3f" % delta_split.mean().item())
        print("ave([NNF] - [Ave [Simple]]): %2.3f" %
              delta_ave_simp.mean().item())
        print("ave([NNF] - [Ave]): %2.3f" % delta_ave.mean().item())
        print("ave([NNF] - [Proposed]): %2.3f" % delta_est.mean().item())
Example #10
File: exp.py  Project: gauenk/cl_gen
def execute_experiment(cfg):

    # -- init exp! --
    print("RUNNING EXP.")
    print(cfg)

    # -- create results record to save --
    dims = {
        'batch_results': None,
        'batch_to_record': None,
        'record_results': {
            'default': 0
        },
        'stack': {
            'default': 0
        },
        'cat': {
            'default': 0
        }
    }
    record = cache_io.ExpRecord(dims)

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    # data,loaders = load_image_dataset(cfg)
    data, loaders = load_dataset(cfg, cfg.dataset.mode)
    image_iter = iter(loaders.tr)

    # -- get score function --
    score_fxn_ave = get_score_function("ave")
    score_fxn_bs = get_score_function(cfg.score_fxn_name)

    # -- some constants --
    NUM_BATCHES = 10
    nframes, nblocks = cfg.nframes, cfg.nblocks
    patchsize = cfg.patchsize
    ps = patchsize
    ppf = cfg.dynamic_info.ppf
    check_parameters(nblocks, patchsize)

    # -- theory constants --
    std = cfg.noise_params.g.std / 255.
    p = cfg.patchsize**2 * 3
    t = cfg.nframes
    theory = edict()
    theory.c2 = ((t - 1) / t)**2 * std**2 + (t - 1) / t**2 * std**2
    theory.mean = theory.c2
    theory.mode = (1 - 2 / p) * theory.c2
    theory.var = 2 / p * theory.c2**2
    theory.std = np.sqrt(theory.var)
    pp.pprint(theory)

    # npn = no patch normalization
    theory_npn = edict()
    theory_npn.c2 = ((t - 1) / t)**2 * std**2 + (t - 1) / t**2 * std**2
    theory_npn.mean = theory_npn.c2 * p
    theory_npn.mode = (1 - 2 / p) * theory_npn.c2 * p
    theory_npn.var = 2 * theory_npn.c2**2 * p
    theory_npn.std = np.sqrt(theory_npn.var)
    pp.pprint(theory_npn)

    # oracle = clean reference frame
    theory_oracle = edict()
    theory_oracle.c2 = std**2
    theory_oracle.mean = theory_oracle.c2 * p
    theory_oracle.mode = (1 - 2 / p) * theory_oracle.c2 * p
    theory_oracle.var = 2 * theory_oracle.c2**2 * p
    theory_oracle.std = np.sqrt(theory_oracle.var)
    pp.pprint(theory_oracle)
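    # Where these constants come from (added note): with i.i.d. Gaussian noise
    # of std `std`, each pixel of (frame_i - mean_frame) has variance
    # c2 = ((t-1)/t)^2*std^2 + (t-1)/t^2*std^2 = (t-1)/t * std^2, so the sum of
    # p = patchsize^2 * 3 squared residuals is ~ c2 * ChiSq(p): mean c2*p,
    # mode (1 - 2/p)*c2*p, var 2*c2^2*p. Dividing by p (patch normalization)
    # gives the `theory` table; `theory_npn` keeps the unnormalized sum, and
    # `theory_oracle` replaces the reference frame's residual variance with std^2.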

    # -- create evaluator for ave; simple --
    iterations, K = 1, 1
    subsizes = []
    block_batchsize = 32
    eval_ave_simp = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                                    block_batchsize, None)

    # -- create evaluator for ave --
    iterations, K = 1, 1
    subsizes = []
    eval_ave = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                               block_batchsize, None)

    # -- create evaluator for bootstrapping --
    block_batchsize = 32
    eval_prop = EvalBlockScores(score_fxn_bs, "bs", patchsize, block_batchsize,
                                None)

    # -- init flownet model --
    cfg.gpuid = 1 - cfg.gpuid  # flip. flop.
    flownet_align = get_align_method(cfg, "flownet_v2", comp_align=False)
    cfg.gpuid = 1 - cfg.gpuid  # flippity flop.

    # -- get an image transform --
    image_xform = get_image_xform(cfg.image_xform, cfg.gpuid, cfg.frame_size)
    blockLabels, _ = nnf_utils.getBlockLabels(None, nblocks, np.int32,
                                              cfg.device, True)

    # -- iterate over images --
    NUM_BATCHES = min(NUM_BATCHES, len(image_iter))
    for image_bindex in range(NUM_BATCHES):

        print("-=" * 30 + "-")
        print(f"Running image batch index: {image_bindex}")
        print("-=" * 30 + "-")
        torch.cuda.empty_cache()

        # -- sample & unpack batch --
        nwaste = 0
        for w in range(nwaste):
            sample = next(image_iter)  # waste one
        sample = next(image_iter)
        sample_to_cuda(sample)
        convert_keys(sample)
        torch.cuda.synchronize()
        # for key,val in sample.items():
        #     print(key,type(val))
        #     if torch.is_tensor(val):
        #         print(key,val.device)

        dyn_noisy = sample['dyn_noisy']  # dynamics and noise
        dyn_clean = sample['dyn_clean']  # dynamics and no noise
        static_noisy = sample['static_noisy']  # no dynamics and noise
        static_clean = sample['static_clean']  # no dynamics and no noise
        nnf_gt = sample['nnf']
        flow_gt = sample['flow']
        if nnf_gt.ndim == 6:
            nnf_gt = nnf_gt[:, :, 0]  # pick top 1 out of K
        image_index = sample['image_index']
        rng_state = sample['rng_state']

        # TODO: anscombe is a type of image transform
        if not (image_xform is None):
            dyn_clean_ftrs = image_xform(dyn_clean)
            dyn_noisy_ftrs = image_xform(dyn_noisy)
        else:
            dyn_clean_ftrs = dyn_clean
            dyn_noisy_ftrs = dyn_noisy

        if "resize" in cfg.image_xform:
            vprint("Images, Flows, and NNF Modified.")
            dyn_clean = image_xform(dyn_clean)
            dyn_noisy = image_xform(dyn_noisy)
            T, B, C, H, W = dyn_noisy.shape
            flow_gt = torch.zeros((B, 1, T, H, W, 2))
            nnf_gt = torch.zeros((1, T, B, H, W, 2))

        save_image(dyn_clean, "dyn_clean.png")
        # print("SHAPES")
        # print(dyn_noisy.shape)
        # print(dyn_clean.shape)
        # print(nnf_gt.shape)

        # -- shape info --
        pad = cfg.nblocks // 2 + cfg.patchsize // 2
        T, B, C, H, W = dyn_noisy.shape
        isize = edict({'h': H, 'w': W})
        psize = edict({'h': H - 2 * pad, 'w': W - 2 * pad})
        ref_t = nframes // 2
        nimages, npix, nframes = B, H * W, T
        frame_size = [H, W]
        ifsize = [H - 2 * pad, W - 2 * pad]
        print("flow_gt.shape: ", flow_gt.shape)
        print("flow_gt: ", flow_gt[0, 0, :, H // 2, W // 2, :])

        # -- create results dict --
        pixs = edict()
        flows = edict()
        anoisy = edict()
        aligned = edict()
        runtimes = edict()
        optimal_scores = edict()  # score function at optimal

        # -- compute proposed search of nnf --
        # ave = torch.mean(dyn_noisy_ftrs[:,0,:,4:4+ps,4:4+ps],dim=0)
        # frames = dyn_noisy_ftrs[:,0,:,4:4+ps,4:4+ps]
        # gt_offset = torch.sum((frames - ave)**2/nframes).item()
        # print("Optimal: ",gt_offset)
        # gt_offset = -1.

        # -- FIND MODE of BURST --
        vprint("Our Method")
        flow_fmt = rearrange(flow_gt, 'i 1 t h w two -> t i h w 1 two')
        locs_fmt = flow2locs(flow_fmt)
        print("locs_fmt.shape: ", locs_fmt.shape)
        print(dyn_noisy_ftrs.min(), dyn_noisy_ftrs.max())
        vals, _ = evalAtLocs(dyn_noisy_ftrs,
                             locs_fmt,
                             patchsize,
                             nblocks,
                             return_mode=False)
        vals = torch.zeros_like(vals)
        # flow_fmt = rearrange(flow_gt,'i t h w two -> i (h w) t two')
        # vals,_ = bnnf_utils.evalAtFlow(dyn_noisy_ftrs, flow_fmt, patchsize,
        #                                nblocks, return_mode=False)
        mode = mode_vals(vals, ifsize)
        cc_vals = vals[0, 5:29, 5:29, 0].ravel()
        vstd = torch.std(cc_vals).item()
        print("[SubBurst] Computed Mode: ", mode)
        print("[SubBurst] Computed Std: ", vstd)

        # -- compute proposed search of nnf --
        vprint("dyn_noisy_ftrs.shape ", dyn_noisy_ftrs.shape)
        valMean = theory_npn.mode
        vprint("valMean: ", valMean)
        start_time = time.perf_counter()
        if cfg.nframes < 5:
            _, flows.est = bnnf_utils.runBurstNnf(dyn_noisy_ftrs,
                                                  patchsize,
                                                  nblocks,
                                                  k=1,
                                                  valMean=valMean,
                                                  blockLabels=None,
                                                  fmt=True,
                                                  to_flow=True)
        else:
            flows.est = rearrange(flow_gt,
                                  'i 1 t h w two -> 1 i (h w) t two').clone()
        flows.est = flows.est[0]
        runtimes.est = time.perf_counter() - start_time
        pixs.est = flow_to_pix(flows.est.clone(), nframes, isize=isize)
        aligned.est = align_from_flow(dyn_clean,
                                      flows.est,
                                      patchsize,
                                      isize=isize)
        if cfg.nframes > 7: aligned.est = torch.zeros_like(aligned.est)
        anoisy.est = align_from_flow(dyn_noisy,
                                     flows.est,
                                     patchsize,
                                     isize=isize)
        optimal_scores.est = np.zeros((nimages, npix, 1, nframes))

        # -- the proposed method --
        std = cfg.noise_params.g.std
        start_time = time.perf_counter()
        _flow = flow_gt.clone()
        # _,_flow = runKmSearch(dyn_noisy_ftrs, patchsize, nblocks, k = 1,
        #                       std = std/255.,mode="cuda")
        runtimes.kmb = time.perf_counter() - start_time
        flows.kmb = rearrange(_flow, 'i 1 t h w two -> i (h w) t two')
        pixs.kmb = flow_to_pix(flows.kmb.clone(), nframes, isize=isize)
        aligned.kmb = align_from_flow(dyn_clean, flows.kmb, 0, isize=isize)
        optimal_scores.kmb = torch_to_numpy(optimal_scores.est)

        # -- compute proposed search of nnf --
        vprint("Our BpSearch Method")
        # print(flow_gt)
        # std = cfg.noise_params.g.std/255.
        valMean = theory_npn.mode
        start_time = time.perf_counter()
        bp_nblocks = 3
        # _,bp_est,a_noisy = runBpSearch(dyn_noisy_ftrs, dyn_noisy_ftrs,
        #                                patchsize, bp_nblocks, k = 1,
        #                                valMean = valMean, std=std,
        #                                blockLabels=None,
        #                                l2_nblocks=nblocks,
        #                                fmt = True, to_flow=True,
        #                                search_type=cfg.bp_type,
        #                                gt_info={'flow':flow_gt})
        bp_est = flows.est[None, :].clone()
        flows.bp_est = bp_est[0]
        # flows.bp_est = rearrange(flow_gt,'i t h w two -> i (h w) t two')
        runtimes.bp_est = time.perf_counter() - start_time
        pixs.bp_est = flow_to_pix(flows.bp_est.clone(), nframes, isize=isize)
        # aligned.bp_est = a_clean
        aligned.bp_est = align_from_flow(dyn_clean,
                                         flows.bp_est,
                                         patchsize,
                                         isize=isize)
        anoisy.bp_est = align_from_flow(dyn_noisy,
                                        flows.bp_est,
                                        patchsize,
                                        isize=isize)
        optimal_scores.bp_est = np.zeros((nimages, npix, 1, nframes))

        # -- compute proposed search of nnf [with tiling ]--
        vprint("Our Burst Method (Tiled)")
        valMean = 0.
        start_time = time.perf_counter()
        if cfg.nframes < 5:
            _, flows.est_tile = bnnf_utils.runBurstNnf(dyn_noisy_ftrs,
                                                       patchsize,
                                                       nblocks,
                                                       k=1,
                                                       valMean=valMean,
                                                       blockLabels=None,
                                                       fmt=True,
                                                       to_flow=True,
                                                       tile_burst=True)
        else:
            flows.est_tile = rearrange(
                flow_gt, 'i 1 t h w two -> 1 i (h w) t two').clone()
        flows.est_tile = flows.est_tile[0]
        # flows.est_tile = rearrange(flow_gt,'i t h w two -> i (h w) t two')
        runtimes.est_tile = time.perf_counter() - start_time
        pixs.est_tile = flow_to_pix(flows.est_tile.clone(),
                                    nframes,
                                    isize=isize)
        aligned.est_tile = align_from_flow(dyn_clean,
                                           flows.est_tile,
                                           patchsize,
                                           isize=isize)
        if cfg.nframes > 7:
            aligned.est_tile = torch.zeros_like(aligned.est_tile)
        anoisy.est_tile = align_from_flow(dyn_noisy,
                                          flows.est_tile,
                                          patchsize,
                                          isize=isize)
        optimal_scores.est_tile = np.zeros((nimages, npix, 1, nframes))

        # -- compute new est method --
        vprint("[Burst-LK] loss function")
        vprint(flow_gt.shape)
        # print(flow_gt[0,:3,32,32,:])
        vprint(flow_gt.shape)
        start_time = time.perf_counter()
        if frame_size[0] <= 64 and cfg.nblocks < 10 and True:
            flows.blk = burstNnf.run(dyn_noisy_ftrs, patchsize, nblocks)
        else:
            flows.blk = rearrange(flow_gt, 'i 1 t h w two -> i (h w) t two')
        runtimes.blk = time.perf_counter() - start_time
        pixs.blk = flow_to_pix(flows.blk.clone(), nframes, isize=isize)
        aligned.blk = align_from_flow(dyn_clean,
                                      flows.blk,
                                      patchsize,
                                      isize=isize)
        optimal_scores.blk = np.zeros((nimages, npix, 1, nframes))
        # optimal_scores.blk = eval_prop.score_burst_from_flow(dyn_noisy,flows.nnf_local,
        #                                                      patchsize,nblocks)[1]
        optimal_scores.blk = torch_to_numpy(optimal_scores.blk)

        # -- compute new est method --
        vprint("Oracle")
        vprint(flow_gt.shape)
        # print(flow_gt[0,:3,32,32,:])
        vprint(flow_gt.shape)
        valMean = theory_oracle.mode
        oracle_burst = dyn_noisy_ftrs.clone()
        oracle_burst[nframes // 2] = dyn_clean_ftrs[nframes // 2]
        start_time = time.perf_counter()
        vals_oracle, pix_oracle = nnf_utils.runNnfBurst(
            oracle_burst,
            patchsize,
            nblocks,
            1,
            valMean=valMean,
            blockLabels=blockLabels)
        runtimes.oracle = time.perf_counter() - start_time
        pixs.oracle = rearrange(pix_oracle, 't i h w 1 two -> i (h w) t two')
        flows.oracle = pix_to_flow(pixs.oracle.clone())
        aligned.oracle = align_from_flow(dyn_clean,
                                         flows.oracle,
                                         patchsize,
                                         isize=isize)
        optimal_scores.oracle = np.zeros((nimages, npix, 1, nframes))
        optimal_scores.oracle = torch_to_numpy(optimal_scores.blk)

        # -- compute optical flow --
        vprint("[C Flow]")
        vprint(dyn_noisy_ftrs.shape)
        start_time = time.perf_counter()
        # flows.cflow = cflow.runBurst(dyn_clean_ftrs)
        # flows.cflow[...,1] = -flows.cflow[...,1]
        flows.cflow = torch.LongTensor(flows.blk.clone().cpu().numpy())
        # flows.cflow = flows.blk.clone()
        # flows.cflow = torch.round(flows.cflow)
        runtimes.cflow = time.perf_counter() - start_time
        pixs.cflow = flow_to_pix(flows.cflow.clone(), nframes, isize=isize)
        aligned.cflow = align_from_flow(dyn_clean,
                                        flows.cflow,
                                        patchsize,
                                        isize=isize)
        optimal_scores.cflow = np.zeros((nimages, npix, 1, nframes))
        # optimal_scores.blk = eval_prop.score_burst_from_flow(dyn_noisy,flows.nnf_local,
        #                                                      patchsize,nblocks)[1]
        optimal_scores.blk = torch_to_numpy(optimal_scores.blk)

        # -- compute groundtruth flow --
        dsname = cfg.dataset.name
        if "kitti" in dsname or 'bsd_burst' == dsname:
            pix_gt = nnf_gt.type(torch.float)
            if pix_gt.ndim == 3:
                pix_gt_rs = rearrange(pix_gt, 'i tm1 two -> i 1 tm1 two')
                pix_gt = repeat(pix_gt, 'i tm1 two -> i p tm1 two', p=npix)
            if pix_gt.ndim == 5:
                pix_gt = rearrange(pix_gt, 't i h w two -> i (h w) t two')
            pix_gt = torch.LongTensor(pix_gt.cpu().numpy().copy())
            # flows.of = torch.zeros_like(pix_gt)#pix_to_flow(pix_gt.clone())
            flows.of = pix_to_flow(pix_gt.clone())
        else:
            flows.of = flow_gt
            flows.of = rearrange(flow_gt, 'i 1 t h w two -> i (h w) t two')
        # -- align groundtruth flow --
        aligned.of = align_from_flow(dyn_clean, flows.of, nblocks, isize=isize)
        pixs.of = flow_to_pix(flows.of.clone(), nframes, isize=isize)
        runtimes.of = 0.  # given
        optimal_scores.of = np.zeros(
            (nimages, npix, 1, nframes))  # clean target is zero
        aligned.clean = static_clean
        anoisy.clean = static_clean
        # optimal_scores.of = eval_ave.score_burst_from_flow(dyn_noisy,
        #                                                    flows.of,
        #                                                    patchsize,nblocks)[0]

        # -- compute nearest neighbor fields [global] --
        vprint("NNF Global.")
        start_time = time.perf_counter()
        shape_str = 't b h w two -> b (h w) t two'
        nnf_vals, nnf_pix = nnf.compute_burst_nnf(dyn_clean_ftrs, ref_t,
                                                  patchsize)
        runtimes.nnf = time.perf_counter() - start_time
        pixs.nnf = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flows.nnf = pix_to_flow(pixs.nnf.clone())
        vprint(dyn_clean.shape, pixs.nnf.shape, nblocks)
        aligned.nnf = align_from_pix(dyn_clean, pixs.nnf, nblocks)
        anoisy.nnf = align_from_pix(dyn_noisy, pixs.nnf, nblocks)
        # aligned.nnf = align_from_flow(dyn_clean,flows.nnf,nblocks,isize=isize)
        optimal_scores.nnf = np.zeros(
            (nimages, npix, 1, nframes))  # clean target is zero

        # -- compute nearest neighbor fields [local] --
        vprint("NNF Local.")
        start_time = time.perf_counter()
        valMean = 0.
        vals_local, pix_local = nnf_utils.runNnfBurst(dyn_clean_ftrs,
                                                      patchsize,
                                                      nblocks,
                                                      1,
                                                      valMean=valMean,
                                                      blockLabels=blockLabels)
        runtimes.nnf_local = time.perf_counter() - start_time
        torch.cuda.synchronize()
        vprint("pix_local.shape ", pix_local.shape)
        pixs.nnf_local = rearrange(pix_local, 't i h w 1 two -> i (h w) t two')
        flows.nnf_local = pix_to_flow(pixs.nnf_local.clone())
        # aligned_local = align_from_flow(clean,flow_gt,cfg.nblocks)
        # aligned_local = align_from_pix(dyn_clean,pix_local,cfg.nblocks)
        vprint(flows.nnf_local.min(), flows.nnf_local.max())
        aligned.nnf_local = align_from_pix(dyn_clean, pixs.nnf_local, nblocks)
        anoisy.nnf_local = align_from_pix(dyn_noisy, pixs.nnf_local, nblocks)
        optimal_scores.nnf_local = optimal_scores.nnf
        # optimal_scores.nnf_local = eval_ave.score_burst_from_flow(dyn_noisy,
        #                                                           flows.nnf,
        #                                                           patchsize,nblocks)[1]
        optimal_scores.nnf_local = torch_to_numpy(optimal_scores.nnf_local)

        # -----------------------------------
        #
        # -- old way to compute NNF local --
        #
        # -----------------------------------

        # pixs.nnf = torch.LongTensor(rearrange(nnf_pix[...,0,:],shape_str))
        # flows.nnf = pix_to_flow(pixs.nnf.clone())
        # aligned.nnf = align_from_pix(dyn_clean,pixs.nnf,nblocks)
        # aligned.nnf = align_from_flow(dyn_clean,flows.nnf,nblocks,isize=isize)

        # flows.nnf_local = optim.run(dyn_clean_ftrs,patchsize,eval_ave,
        #                             nblocks,iterations,subsizes,K)

        # -----------------------------------
        # -----------------------------------

        # -- compute proposed search of nnf --
        vprint("Global NNF Noisy")
        start_time = time.perf_counter()
        split_vals, split_pix = nnf.compute_burst_nnf(dyn_noisy_ftrs, ref_t,
                                                      patchsize)
        runtimes.split = time.perf_counter() - start_time
        # split_pix = np.copy(nnf_pix)
        split_pix_best = torch.LongTensor(
            rearrange(split_pix[..., 0, :], shape_str))
        split_pix_best = torch.LongTensor(split_pix_best)
        pixs.split = split_pix_best.clone()
        flows.split = pix_to_flow(split_pix_best)
        aligned.split = align_from_pix(dyn_clean, split_pix_best, nblocks)
        anoisy.split = align_from_pix(dyn_noisy, split_pix_best, nblocks)
        optimal_scores.split = optimal_scores.nnf_local
        # optimal_scores.split = eval_ave.score_burst_from_flow(dyn_noisy,flows.nnf_local,
        #                                                       patchsize,nblocks)[1]
        optimal_scores.split = torch_to_numpy(optimal_scores.split)

        # -- compute complex ave --
        iterations, K = 0, 1
        subsizes = []
        vprint("[Ours] Ave loss function")
        start_time = time.perf_counter()
        estVar = torch.std(dyn_noisy_ftrs.reshape(-1)).item()**2
        valMean = 0.  #2 * estVar# * patchsize**2# / patchsize**2
        vals_local, pix_local = nnf_utils.runNnfBurst(dyn_noisy_ftrs,
                                                      patchsize,
                                                      nblocks,
                                                      1,
                                                      valMean=valMean,
                                                      blockLabels=blockLabels)
        runtimes.ave = time.perf_counter() - start_time
        pixs.ave = rearrange(pix_local, 't i h w 1 two -> i (h w) t two')
        flows.ave = pix_to_flow(pixs.ave.clone())
        optimal_scores.ave = optimal_scores.split  # same "ave" function
        aligned.ave = align_from_flow(dyn_clean,
                                      flows.ave,
                                      nblocks,
                                      isize=isize)
        anoisy.ave = align_from_flow(dyn_noisy,
                                     flows.ave,
                                     nblocks,
                                     isize=isize)
        optimal_scores.ave = optimal_scores.split  # same "ave" function

        # -- compute ave with smoothing --
        iterations, K = 0, 1
        subsizes = []
        vprint("[Ours] Ave loss function")
        start_time = time.perf_counter()
        pix_local = smooth_locs(pix_local, nclusters=1)
        runtimes.ave_smooth = time.perf_counter() - start_time + runtimes.ave
        pixs.ave_smooth = rearrange(pix_local,
                                    't i h w 1 two -> i (h w) t two')
        flows.ave_smooth = pix_to_flow(pixs.ave_smooth.clone())
        optimal_scores.ave_smooth = optimal_scores.split  # same "ave" function
        aligned.ave_smooth = align_from_flow(dyn_clean,
                                             flows.ave_smooth,
                                             nblocks,
                                             isize=isize)
        anoisy.ave_smooth = align_from_flow(dyn_noisy,
                                            flows.ave_smooth,
                                            nblocks,
                                            isize=isize)

        # -- compute  flow --
        vprint("L2-Local Recursive")
        start_time = time.perf_counter()
        vals_local, pix_local, wburst = nnf_utils.runNnfBurstRecursive(
            dyn_noisy_ftrs,
            dyn_clean,
            patchsize,
            nblocks,
            isize,
            1,
            valMean=valMean,
            blockLabels=blockLabels)
        runtimes.l2r = time.perf_counter() - start_time
        pixs.l2r = rearrange(pix_local, 't i h w 1 two -> i (h w) t two')
        flows.l2r = pix_to_flow(pixs.l2r.clone())
        aligned.l2r = wburst  #align_from_flow(dyn_clean,flows.l2r,nblocks,isize=isize)
        optimal_scores.l2r = optimal_scores.split  # same "ave" function

        # -- compute nvof flow --
        vprint("NVOF")
        start_time = time.perf_counter()
        # flows.nvof = nvof.nvof_burst(dyn_noisy_ftrs)
        flows.nvof = flows.ave.clone()  # placeholder: the real NVOF call is commented out above
        runtimes.nvof = time.perf_counter() - start_time
        pixs.nvof = flow_to_pix(flows.nvof.clone(), nframes, isize=isize)
        aligned.nvof = align_from_flow(dyn_clean,
                                       flows.nvof,
                                       nblocks,
                                       isize=isize)
        anoisy.nvof = align_from_flow(dyn_noisy,
                                      flows.nvof,
                                      nblocks,
                                      isize=isize)
        optimal_scores.nvof = optimal_scores.split  # same "ave" function

        # -- compute flownet --
        vprint("FlowNetv2")
        start_time = time.perf_counter()
        _, flows.flownet = flownet_align(dyn_noisy_ftrs)
        # flows.flownet = flows.ave.clone().cpu()
        runtimes.flownet = time.perf_counter() - start_time
        pixs.flownet = flow_to_pix(flows.flownet.clone(), nframes, isize=isize)
        aligned.flownet = align_from_flow(dyn_clean,
                                          flows.flownet,
                                          nblocks,
                                          isize=isize)
        anoisy.flownet = align_from_flow(dyn_noisy,
                                         flows.flownet,
                                         nblocks,
                                         isize=isize)
        optimal_scores.flownet = optimal_scores.split

        # -- compute simple ave --
        iterations, K = 0, 1
        subsizes = []
        vprint("[simple] Ave loss function")
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
        if cfg.patchsize < 11 and cfg.frame_size[0] <= 64 and False:  # "and False" keeps this branch disabled
            flows.ave_simp = optim.run(dyn_noisy, patchsize, eval_ave_simp,
                                       nblocks, iterations, subsizes, K)
        else:
            flows.ave_simp = flows.ave.clone().cpu()
        runtimes.ave_simp = time.perf_counter() - start_time
        pixs.ave_simp = flow_to_pix(flows.ave_simp.clone(),
                                    nframes,
                                    isize=isize)
        aligned.ave_simp = align_from_flow(dyn_clean,
                                           flows.ave_simp,
                                           nblocks,
                                           isize=isize)
        anoisy.ave_simp = align_from_flow(dyn_noisy,
                                          flows.ave_simp,
                                          nblocks,
                                          isize=isize)
        optimal_scores.ave_simp = optimal_scores.split  # same "ave" function

        # -- format results --
        #pad = 2*(nframes-1)*ppf+4
        # pad = 2*(cfg.nblocks//2)#2*(nframes-1)*ppf+4
        # isize = edict({'h':H-pad,'w':W-pad})

        # -- flows to numpy --
        frame_size = cfg.frame_size[0]
        is_even = frame_size % 2 == 0
        mid_pix = frame_size * frame_size // 2 + (frame_size // 2) * is_even
        mid_pix = 32 * 10 + 23  # debug override; shadows the computed value above
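        # worked check (hedged; assumes row-major flattening, (row, col) -> row*W + col):
        #   frame_size = 32 gives 32*32//2 + 16 = 528, i.e. (row 16, col 16);
        #   the override 32*10 + 23 = 343 instead selects (row 10, col 23).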
        flows_np = edict_torch_to_numpy(flows)
        pixs_np = edict_torch_to_numpy(pixs)

        # -- End-Point-Errors --
        epes_of = compute_flows_epe_wrt_ref(flows, "of")
        epes_nnf = compute_flows_epe_wrt_ref(flows, "nnf")
        epes_nnf_local = compute_flows_epe_wrt_ref(flows, "nnf_local")
        nnf_acc = compute_acc_wrt_ref(flows, "nnf")
        nnf_local_acc = compute_acc_wrt_ref(flows, "nnf_local")

        # -- PSNRs --
        aligned = remove_center_frames(aligned)
        psnrs = compute_frames_psnr(aligned, psize)

        # -- denoised PSNRS --
        def burst_mean(in_burst):
            return torch.mean(in_burst, dim=0)[None, :]

        anoisy = remove_center_frames(anoisy)
        anoisy = apply_across_dict(anoisy, burst_mean)
        dn_psnrs = compute_frames_psnr(anoisy, psize)
        vprint(dn_psnrs)

        # -- print report ---
        print("\n" * 3)  # banner
        print("-" * 25 + " Results " + "-" * 25)
        # print_dict_ndarray_0_midpix(flows_np,mid_pix)
        # print_dict_ndarray_0_midpix(pixs_np,mid_pix)
        # print_verbose_psnrs(psnrs)
        # print_delta_summary_psnrs(psnrs)
        # print_verbose_epes(epes_of,epes_nnf)
        # print_nnf_acc(nnf_acc)
        # print_nnf_local_acc(nnf_local_acc)
        # print_summary_epes(epes_of,epes_nnf)
        # print_summary_denoised_psnrs(dn_psnrs)
        print_summary_psnrs(psnrs)
        print_runtimes(runtimes)

        # -- prepare results to be appended --
        psnrs = edict_torch_to_numpy(psnrs)
        epes_of = edict_torch_to_numpy(epes_of)
        epes_nnf = edict_torch_to_numpy(epes_nnf)
        epes_nnf_local = edict_torch_to_numpy(epes_nnf_local)
        nnf_acc = edict_torch_to_numpy(nnf_acc)
        nnf_local_acc = edict_torch_to_numpy(nnf_local_acc)
        image_index = torch_to_numpy(image_index)
        batch_results = {
            'runtimes': runtimes,
            'optimal_scores': optimal_scores,
            'psnrs': psnrs,
            'epes_of': epes_of,
            'epes_nnf': epes_nnf,
            'epes_nnf_local': epes_nnf_local,
            'nnf_acc': nnf_acc,
            'nnf_local_acc': nnf_local_acc
        }

        # -- format results --
        batch_results = flatten_internal_dict(batch_results)
        format_fields(batch_results, image_index, rng_state)

        # print("shape check.")
        # for key,value in batch_results.items():
        #     print(key,value.shape)

        record.append(batch_results)
    # print("\n"*3)
    # print("-"*20)
    # print(record.record)
    # print("-"*20)
    # print("\n"*3)
    # record.stack_record()
    record.cat_record()
    # print("\n"*3)
    # print("-"*20)
    # print(record.record)
    # print("-"*20)
    print("\n" * 3)
    print("-" * 20)
    # df = pd.DataFrame().append(record.record,ignore_index=True)
    for key, val in record.record.items():
        vprint(key, val.shape)
    # vprint(df)
    vprint("-" * 20)
    vprint("\n" * 3)

    return record.record
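A quick aside before the next example: the denoised-PSNR metric above averages the T aligned noisy frames (see burst_mean). For i.i.d. noise this cuts the noise variance by a factor of T, so the averaged burst should gain roughly 10*log10(T) dB over a single frame. A minimal self-contained check, using hypothetical sizes rather than anything from the experiment above:

import torch

T, H, W = 5, 32, 32
clean = torch.rand(H, W)
noisy = clean + 0.1 * torch.randn(T, H, W)  # i.i.d. Gaussian noise per frame
ave = noisy.mean(dim=0)                     # noise variance drops by 1/T
mse_single = torch.mean((noisy[0] - clean) ** 2)
mse_ave = torch.mean((ave - clean) ** 2)
print(10 * torch.log10(mse_single / mse_ave))  # roughly 10*log10(5), about 7 dB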
Example n. 11
def test(cfg, image, clean, burst, model, idx):

    # i3 = len(image.shape) == 3
    # c3 = len(clean.shape) == 3
    # b3 = len(burst.shape) == 3
    #
    # assert (i3 == c3) and (i3 == b3), "All three dims same"
    # if i3 and c3 and b3:

    T = burst.shape[0]
    # -- create results --
    results = {}

    # -- repeat along axis --
    rep = repeat(image, 'c h w -> tile c h w', tile=T)

    # -- reconstruct a clean image --
    rec = model(burst) + 0.5  # shift output back to [0, 1]; the burst appears zero-centered

    # -- parameters --
    params = torch.cat([param.view(-1) for param in model.parameters()])
    params_norm_mean = float(torch.norm(params).item())
    results['params_norm_mean'] = params_norm_mean

    # -- parameters --
    # named_params = dict(model.named_parameters())
    # print(named_params.keys())
    # filters = named_params['conv1.single_conv.0.weight']
    # print(filters.shape)
    # # params = torch.cat([param.view(-1) for param in model.parameters()])
    # # params_norm_mean = float(torch.norm(params).item())
    # results['params_filter_diff'] = params_filter_diff

    # -- size of params for each sample's activations path --
    trace_norm = activation_trace(model, burst, 'norm')
    results['trace_norm'] = trace_norm

    # -- save --
    if idx in (40, 49, 60):
        save_image(rec, f"fast_unet_rec_{idx}.png", normalize=True)

    # -- compute results --
    loss = F.mse_loss(rec, rep)
    psnr = float(np.mean(images_to_psnrs(image, rec[T // 2])))
    results['mse'] = loss.item()
    results['psnr_rec'] = psnr

    psnr = float(np.mean(images_to_psnrs(rec, rep)))
    results['psnr_burst'] = psnr

    # -- intra and input (accumulated MSE, despite the "psnr_" key name) --
    intra_input = 0
    for t in range(T):
        intra_input += F.mse_loss(rec[t], rec[T // 2]).item()
        intra_input += F.mse_loss(rec[t], burst[T // 2]).item()
    results['psnr_intra_input'] = intra_input

    # -- this n2n training creates a barycenter for the center image --
    bc_loss = 0
    for t in range(T):
        bc_loss += F.mse_loss(burst[t], rec[T // 2]).item()
    results['psnr_bc_v1'] = bc_loss

    # -- compute psnr of clean and noisy frames --
    psnr_noisy = float(np.mean(images_to_psnrs(rep, burst + 0.5)))
    results['psnr_noisy'] = psnr_noisy
    psnr_clean = float(np.mean(images_to_psnrs(rep, clean)))
    results['psnr_clean'] = psnr_clean

    # -- compute scores --
    score_fxn_names = [
        'lgsubset_v_ref', 'lgsubset', 'ave', 'lgsubset_v_indices',
        'gaussian_ot'
    ]
    for name in score_fxn_names:
        score_fxn = get_score_function(name)
        wrapped_score = score_function_wrapper(score_fxn)
        if name == "gaussian_ot":
            score, scores_t = wrapped_score(cfg, rec - rep)
        else:
            score, scores_t = wrapped_score(cfg, rec)
        results[f"fnet_{name}"] = score.item()
        for t in range(T):
            results[f"fnet_{name}_{t}f"] = scores_t[t].item()

    # -- on raw pixels too --
    for name in score_fxn_names:
        if name == "gaussian_ot": continue
        score_fxn = get_score_function(name)
        wrapped_score = score_function_wrapper(score_fxn)
        score, scores_t = wrapped_score(cfg, burst)
        results[name] = score.item()
        for t in range(T):
            results[f"{name}_{t}f"] = scores_t[t].item()

    # print("Test Loss",loss.item())
    # print("Test PSNR: %2.3e" % np.mean(images_to_psnrs(rec+0.5,rep)))
    tv_utils.save_image(rec, "fast_unet_rec.png", normalize=True)
    tv_utils.save_image(burst, "fast_unet_burst.png", normalize=True)
    return results
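The results dict above interleaves burst-level scores (keyed f"fnet_{name}") with per-frame scores (keyed f"fnet_{name}_{t}f"). A small helper for pulling the per-frame entries back into an array, assuming only the key scheme produced above:

import numpy as np

def collect_frame_scores(results, name, T):
    # gather the per-frame scores written as f"fnet_{name}_{t}f" in test()
    return np.array([results[f"fnet_{name}_{t}f"] for t in range(T)])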
Example n. 12
def create_ave_eval(evaluator):
    score_fxn = get_score_function("ave")
    ave_eval = EvalBlockScores(score_fxn, "ave", evaluator.patchsize,
                               evaluator.block_batchsize, evaluator.noise_info,
                               evaluator.gpuid)
    return ave_eval
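A hedged usage sketch, assuming `evaluator` carries the attributes read above and that `compute_topK_scores` has the signature used in later examples (patches, masks, search blocks, nblocks, K):

ave_eval = create_ave_eval(evaluator)
scores, blocks = ave_eval.compute_topK_scores(patches, masks, srch_blocks,
                                              nblocks, K)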
Example n. 13
def l2(cfg, patches):
    score_fxn = get_score_function("ave")
    return run_scores_fxn(cfg, patches, score_fxn)
Example n. 14
def bootstrapping(cfg, patches):
    score_fxn = get_score_function("bootstrapping_mod2")
    return run_scores_fxn(cfg, patches, score_fxn)
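These thin wrappers pin a score-function name to the shared runner. A dispatch table (a sketch built from the two wrappers above) makes the pairing explicit:

SCORE_WRAPPERS = {
    "l2": l2,                        # "ave" score under the hood
    "bootstrapping": bootstrapping,  # "bootstrapping_mod2" under the hood
}

def run_named_score(name, cfg, patches):
    return SCORE_WRAPPERS[name](cfg, patches)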
Example n. 15
def run_with_seed(seed):

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #             Settings
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = False
    cfg.noise_params.ntype = 'g'
    cfg.noise_params.g.std = 25.
    cfg.nframes = 3
    cfg.dynamic_info.nframes = cfg.nframes
    cfg.nblocks = 3
    cfg.patchsize = 11
    cfg.gpuid = 1
    cfg.device = f"cuda:{cfg.gpuid}"

    # -- seeds --
    cfg.seed = seed
    # cfg.seed = 123 # sky of a forest
    # cfg.seed = 345 # handrail and stairs
    # cfg.seed = 567 # cloudy blue sky

    # -- set seed --
    set_seed(cfg.seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    train_iter = iter(loaders.tr)

    # -- fetch sample --
    sample = next(train_iter)
    sample_to_cuda(sample)

    # -- unpack data --
    noisy, clean = sample['noisy'], sample['burst']
    nframes, nimages, ncolors, H, W = noisy.shape
    isize = edict({'h': H, 'w': W})

    # -- setup results --
    scores = edict()
    scores.noisy = edict()
    scores.clean = edict()
    blocks = edict()
    blocks.noisy = edict()
    blocks.clean = edict()

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #        Setup For Searches
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    # -- tile image to patches --
    pad = 2 * (cfg.nblocks // 2)
    h, w = cfg.patchsize + pad, cfg.patchsize + pad

    noisy_patches = tile_patches(noisy, cfg.patchsize + pad).pix
    noisy_patches = rearrange(noisy_patches,
                              'b t s (h w c) -> b s t c h w',
                              h=h,
                              w=w)
    nimages, npix, nframes, c, psH, psW = noisy_patches.shape

    clean_patches = tile_patches(clean, cfg.patchsize + pad).pix
    clean_patches = rearrange(clean_patches,
                              'b t s (h w c) -> b s t c h w',
                              h=h,
                              w=w)
    nimages, npix, nframes, c, psH, psW = clean_patches.shape

    masks = torch.ones(nimages, npix, nframes, c, psH, psW).to(cfg.device)
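    # shape check (hedged): with patchsize 11 and nblocks 3, pad = 2 so
    # h = w = 13; each flattened '(h w c)' vector holds 13*13*3 = 507 values
    # and the patches come out as (nimages, npix, nframes, 3, 13, 13).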

    # -- create constants --
    frames = np.r_[np.arange(cfg.nframes // 2),
                   np.arange(cfg.nframes // 2 + 1, cfg.nframes)]
    frames = repeat(frames, 'z -> i s z', i=nimages, s=npix)
    brange = exh_block_range(nimages, npix, cfg.nframes, cfg.nblocks)
    curr_blocks = init_optim_block(nimages, npix, cfg.nframes, cfg.nblocks)
    srch_blocks = get_search_blocks(frames, brange, curr_blocks, cfg.device)
    np_srch_blocks = torch_to_numpy(srch_blocks[0])
    S = len(srch_blocks[0, 0])

    # -- create constants --
    frames_pair = np.array([0])
    frames = repeat(frames_pair, 'z -> i s z', i=nimages, s=npix)
    brange = exh_block_range(nimages, npix, cfg.nframes, cfg.nblocks)
    curr_blocks_pair = init_optim_block(nimages, npix, cfg.nframes,
                                        cfg.nblocks)
    srch_blocks_pair = get_search_blocks(frames, brange, curr_blocks_pair,
                                         cfg.device)
    S_pair = len(srch_blocks_pair[0, 0])

    # -- encode blocks --
    single_search_block = srch_blocks[0, 0].cpu().numpy()
    block_strings = search_blocks_to_str(single_search_block)
    labels = search_blocks_to_labels(single_search_block, block_strings)

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #        Execute Searches
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    #
    # --- run PAIRED split search ---
    #

    ave_fxn = get_score_function("ave")
    block_batchsize = 128
    evaluator = EvalBlockScores(ave_fxn, "ave", cfg.patchsize, block_batchsize,
                                None)
    get_topK = evaluator.compute_topK_scores

    # -- a) run clean --
    clean_scores, clean_blocks = get_topK(clean_patches, masks,
                                          srch_blocks_pair, cfg.nblocks,
                                          S_pair)
    scores_full = torch_to_numpy(clean_scores[0])
    blocks_full = torch_to_numpy(clean_blocks[0])

    # -- b) tile results to full blocks --
    scores_full, blocks_full = tile_pair_to_full(scores_full, blocks_full,
                                                 np_srch_blocks, frames_pair,
                                                 cfg.nframes, cfg.nblocks)
    scores.clean.ave = scores_full
    blocks.clean.ave = batch_search_blocks_to_labels(blocks_full,
                                                     block_strings)

    # -- a) run noisy --
    noisy_scores, noisy_blocks = get_topK(noisy_patches, masks,
                                          srch_blocks_pair, cfg.nblocks,
                                          S_pair)
    scores_full = torch_to_numpy(noisy_scores[0])
    blocks_full = torch_to_numpy(noisy_blocks[0])

    # -- b) tile results to full blocks --
    scores_full, blocks_full = tile_pair_to_full(scores_full, blocks_full,
                                                 np_srch_blocks, frames_pair,
                                                 cfg.nframes, cfg.nblocks)
    scores.noisy.ave = scores_full
    blocks.noisy.ave = batch_search_blocks_to_labels(blocks_full,
                                                     block_strings)

    #
    # --- run FULL split search ---
    #

    ave_fxn = get_score_function("ave")
    block_batchsize = 128
    evaluator = EvalBlockScores(ave_fxn, "ave", cfg.patchsize, block_batchsize,
                                None)
    get_topK = evaluator.compute_topK_scores

    # -- run clean --
    clean_scores, clean_blocks = get_topK(clean_patches, masks, srch_blocks,
                                          cfg.nblocks, S)

    clean_scores = torch_to_numpy(clean_scores)
    scores.clean.full_ave = clean_scores[0]

    clean_blocks = torch_to_numpy(clean_blocks)
    batch_blocks = clean_blocks[0, :, :, :]
    blocks.clean.full_ave = batch_search_blocks_to_labels(
        batch_blocks, block_strings)

    # -- run noisy --
    noisy_scores, noisy_blocks = get_topK(noisy_patches, masks, srch_blocks,
                                          cfg.nblocks, S)

    noisy_scores = torch_to_numpy(noisy_scores)
    scores.noisy.full_ave = noisy_scores[0]

    noisy_blocks = torch_to_numpy(noisy_blocks)
    batch_blocks = noisy_blocks[0, :, :, :]
    blocks.noisy.full_ave = batch_search_blocks_to_labels(
        batch_blocks, block_strings)

    #
    # --- run bootstrapping ---
    #

    bs_fxn = get_score_function("bootstrapping_mod2")
    block_batchsize = 32
    evaluator = EvalBlockScores(bs_fxn, "bs_mod2", cfg.patchsize,
                                block_batchsize, None)
    get_topK = evaluator.compute_topK_scores

    # -- run noisy --
    noisy_scores, noisy_blocks = get_topK(noisy_patches, masks, srch_blocks,
                                          cfg.nblocks, S)

    noisy_scores = torch_to_numpy(noisy_scores)
    scores.noisy.bs = noisy_scores[0]

    noisy_blocks = torch_to_numpy(noisy_blocks)
    batch_blocks = noisy_blocks[0, :, :, :]
    blocks.noisy.bs = batch_search_blocks_to_labels(batch_blocks,
                                                    block_strings)

    # -- run clean --
    clean_scores, clean_blocks = get_topK(clean_patches, masks, srch_blocks,
                                          cfg.nblocks, S)

    clean_scores = torch_to_numpy(clean_scores)
    scores.clean.bs = clean_scores[0]

    clean_blocks = torch_to_numpy(clean_blocks)
    batch_blocks = clean_blocks[0, :, :, :]
    blocks.clean.bs = batch_search_blocks_to_labels(batch_blocks,
                                                    block_strings)

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #        Plot Results
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    print("Plotting Results.")
    plot_landscape(scores, blocks, seed)
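The tiling in run_with_seed relies on einops to round-trip between flattened and spatial patch layouts. A self-contained shape check with hypothetical sizes matching the settings above (patchsize 11, nblocks 3):

import torch
from einops import rearrange

b, t, s = 1, 3, 16           # images, frames, patches
h = w = 11 + 2 * (3 // 2)    # patchsize + pad = 13
pix = torch.rand(b, t, s, h * w * 3)
patches = rearrange(pix, 'b t s (h w c) -> b s t c h w', h=h, w=w)
print(patches.shape)         # torch.Size([1, 16, 3, 3, 13, 13])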
Example n. 16
def test_global_dynamics():

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    nbatches = 20

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    print("load image dataset.")
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)

    # -- save path for viz --
    save_dir = Path(
        f"{settings.ROOT_PATH}/output/tests/datasets/test_global_dynamics/")
    if not save_dir.exists(): save_dir.mkdir(parents=True)

    # -- sample data --
    for image_index in range(nbatches):

        # -- sample image --
        index = -1
        # while index != 3233:
        #     sample = next(image_iter)
        #     convert_keys(sample)
        #     index = sample['image_index'][0][0].item()

        sample = next(image_iter)
        # batch_dim0(sample)
        convert_keys(sample)

        # -- extract info --
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow = sample['flow_gt']
        index = sample['image_index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks
        print(f"Image Index {index}")

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists(): image_dir.mkdir()

        #
        # -- Compute NNF to Ensure things are OKAY --
        #

        isize = edict({'h': h, 'w': w})
        # pad = cfg.patchsize//2 if cfg.patchsize > 1 else 1
        pad = cfg.nblocks // 2 + 1
        psize = edict({'h': h - 2 * pad, 'w': w - 2 * pad})
        flow_gt = repeat(flow, 'i fm1 two -> i s fm1 two', s=h * w)
        pix_gt = flow_to_pix(flow_gt.clone(), nframes, isize=isize)

        def cc(image):
            return tvF.center_crop(image, (psize.h, psize.w))

        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
        # aligned_gt = warp_burst_flow(clean, flow_global)
        aligned_gt = align_from_pix(clean, pix_gt, cfg.nblocks)
        # isize = edict({'h':h,'w':w})
        # aligned_gt = align_from_flow(clean,flow_global,cfg.nblocks,isize=isize)
        # psnr = compute_aligned_psnr(sclean[[nframes//2]],clean[[nframes//2]],psize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize)
        print(f"[GT Alignment] PSNR: {psnr}")

        # -- compute with nvidia's opencv optical flow --
        nd_clean = rearrange(clean.numpy(), 't 1 c h w -> t h w c')
        ref_t = nframes // 2
        frames, flows = [], []
        for t in range(nframes):
            if t == ref_t:
                frames.append(nd_clean[t][None, :])
                flows.append(torch.zeros(h, w, 2))  # zero flow at the reference frame
                continue
            from_frame = 255. * cv2.cvtColor(nd_clean[ref_t],
                                             cv2.COLOR_RGB2GRAY)
            to_frame = 255. * cv2.cvtColor(nd_clean[t], cv2.COLOR_RGB2GRAY)
            _flow = cv2.calcOpticalFlowFarneback(to_frame, from_frame, None,
                                                 0.5, 3, 3, 10, 5, 1.2, 0)
            _flow = np.round(_flow).astype(np.float32)  # rounded to integer pixels; loses sub-pixel precision downstream
            w_frame = warp_flow(nd_clean[t], -_flow)
            _flow[..., 0] = -_flow[..., 0]  # negate x to match this codebase's flow sign convention
            # print("w_frame.shape ",w_frame.shape)
            flows.append(torch.FloatTensor(_flow))
            frames.append(torch.FloatTensor(w_frame[None, :]))
        flows = torch.stack(flows)
        flows = rearrange(flows, 't h w two -> 1 (h w) t two')
        frames = torch.FloatTensor(np.stack(frames))
        frames = rearrange(frames, 't i h w c -> t i c h w')
        # print("flows.shape ",flows.shape)
        # print("frames.shape ",frames.shape)
        # print("sclean.shape ",sclean.shape)
        psnr = compute_aligned_psnr(sclean, frames, psize)
        print(f"[NVOF Alignment] PSNR: {psnr}")

        pix_nvof = flow_to_pix(flows.clone(), nframes, isize=isize)
        aligned_nvof = align_from_pix(clean, pix_nvof, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_nvof, psize)
        print(f"[NVOF Alignment v2] PSNR: {psnr}")

        psnr = compute_aligned_psnr(frames, aligned_nvof, psize)
        print(f"[NVOF Alignment Methods] PSNR: {psnr}")

        print(pix_global[0, mid_pix])
        print(pix_gt[0, mid_pix])
        print(flow_global[0, mid_pix])
        print(flow_gt[0, mid_pix])
        print(flows[0, mid_pix])

        #
        # -- Save Images to Qualitative Inspect --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        print(cc(sclean).shape)
        fn = image_dir / "diff.png"
        save_image(cc(sclean) - cc(aligned_gt),
                   fn,
                   normalize=True,
                   vrange=None)

        fn = image_dir / "aligned_gt.png"
        save_image(cc(aligned_gt), fn, normalize=True, vrange=None)

        # return

        # -- NNF Global --
        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
        # aligned_global = align_from_flow(clean,flow_gt,cfg.nblocks)
        aligned_global = align_from_pix(clean, pix_gt, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_global, psize)
        print(f"[NNF Global] PSNR: {psnr}")

        # -- NNF Local (old) --
        iterations, K, subsizes = 0, 1, []
        optim = AlignOptimizer("v3")
        score_fxn_ave = get_score_function("ave")
        eval_ave = EvalBlockScores(score_fxn_ave, "ave", cfg.patchsize, 256,
                                   None)
        flow_local = optim.run(clean, cfg.patchsize, eval_ave, cfg.nblocks,
                               iterations, subsizes, K)
        pix_local = flow_to_pix(flow_local.clone(), nframes, isize=isize)
        # aligned_local = align_from_flow(clean,flow_gt,cfg.nblocks)
        aligned_local = align_from_pix(clean, pix_local, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_local, psize)
        print(f"[NNF Local (Old)] PSNR: {psnr}")

        # -- NNF Local (new) --
        _, pix_local = nnf_utils.runNnfBurst(clean,
                                             cfg.patchsize,
                                             cfg.nblocks,
                                             1,
                                             valMean=0.,
                                             blockLabels=None)
        pix_local = rearrange(pix_local, 't i h w 1 two -> i (h w) t two')
        flow_local = pix_to_flow(pix_local.clone())
        # aligned_local = align_from_flow(clean,flow_gt,cfg.nblocks)
        aligned_local = align_from_pix(clean, pix_local, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_local, psize)
        print(f"[NNF Local (New)] PSNR: {psnr}")

        # -- remove boundary from pix --
        pixes = {'gt': pix_gt, 'global': pix_global, 'local': pix_local}
        for field, pix in pixes.items():
            pix_img = rearrange(pix, 'i (h w) t two -> (i t) two h w', h=h)
            pix_cc = cc(pix_img)
            pixes[field] = pix_cc

        # -- pairwise diffs --
        field2 = "gt"
        for field1 in pixes.keys():
            if field1 == field2: continue
            delta = pixes[field1] - pixes[field2]
            delta = delta.type(torch.float)
            delta_fn = image_dir / f"delta_{field1}_{field2}.png"
            save_image(delta, delta_fn, normalize=True, vrange=None)
        print(pix_gt[0, mid_pix])
        print(pix_global[0, mid_pix])
        print(pix_local[0, mid_pix])

        print(flow_gt[0, mid_pix])
        print(flow_global[0, mid_pix])
        print(flow_local[0, mid_pix])

        #
        # -- Save Images to Qualitative Inspect --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        fn = image_dir / "aligned_global.png"
        save_image(cc(aligned_global), fn, normalize=True, vrange=None)

        fn = image_dir / "aligned_local.png"
        save_image(cc(aligned_local), fn, normalize=True, vrange=None)
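warp_flow is used above to backward-warp a frame by the dense Farneback flow. A minimal sketch of such a warp via cv2.remap (an assumption about warp_flow's behavior, not its actual source):

import cv2
import numpy as np

def warp_flow_sketch(img, flow):
    # backward-warp img (H, W, C) by a dense flow field (H, W, 2)
    h, w = flow.shape[:2]
    grid = flow.astype(np.float32).copy()
    grid[..., 0] += np.arange(w, dtype=np.float32)            # x coordinates
    grid[..., 1] += np.arange(h, dtype=np.float32)[:, None]   # y coordinates
    return cv2.remap(img, grid, None, cv2.INTER_LINEAR)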
Example n. 17
def test_nnf():

    # -- get config --
    cfg = config()

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)

    # -- get score function --
    score_fxn = get_score_function(cfg.score_fxn_name)

    # -- some constants --
    NUM_BATCHES = 2
    nframes, nblocks = cfg.nframes, cfg.nblocks
    patchsize = cfg.patchsize
    check_parameters(nblocks, patchsize)

    # -- create evaluator
    iterations, K = 10, 2
    subsizes = [2, 2, 2, 2, 2]
    evaluator = combo.eval_scores.EvalBlockScores(score_fxn, patchsize, 100,
                                                  None)

    # -- iterate over images --
    for image_bindex in range(NUM_BATCHES):

        # -- sample & unpack batch --
        sample = next(image_iter)
        sample_to_cuda(sample)
        dyn_noisy = sample['noisy']  # dynamics and noise
        dyn_clean = sample['burst']  # dynamics and no noise
        static_noisy = sample['snoisy']  # no dynamics and noise
        static_clean = sample['sburst']  # no dynamics and no noise
        flow_gt = sample['flow']

        # -- shape info --
        T, B, C, H, W = dyn_noisy.shape
        isize = edict({'h': H, 'w': W})
        ref_t = nframes // 2
        npix = H * W

        # -- groundtruth flow --
        flow_gt = repeat(flow_gt, 'i tm1 two -> i p tm1 two', p=npix)
        print("sample['flow']: ", flow_gt.shape)
        aligned_of = align_from_flow(dyn_clean,
                                     flow_gt,
                                     patchsize,
                                     isize=isize)

        # -- compute nearest neighbor fields --
        shape_str = 't b h w two -> b (h w) t two'
        nnf_vals, nnf_pix = nnf.compute_burst_nnf(dyn_clean, ref_t, patchsize)
        nnf_pix_best = torch.LongTensor(
            rearrange(nnf_pix[..., 0, :], shape_str))
        flow_nnf = pix_to_flow(nnf_pix_best)
        aligned_nnf = align_from_pix(dyn_clean, nnf_pix_best, patchsize)

        # -- compute proposed search of nnf --
        flow_split = optim.v1.run_image_burst(dyn_clean, patchsize, evaluator,
                                              nblocks, iterations, subsizes, K)
        isize = edict({'h': H, 'w': W})
        aligned_split = align_from_flow(dyn_clean,
                                        flow_split,
                                        patchsize,
                                        isize=isize)

        # -- compute proposed search of nnf --
        flow_est = optim.v3.run_image_burst(dyn_clean, patchsize, evaluator,
                                            nblocks, iterations, subsizes, K)
        aligned_est = align_from_flow(dyn_clean,
                                      flow_est,
                                      patchsize,
                                      isize=isize)

        # -- banner --
        print("-" * 25 + " Results " + "-" * 25)

        # -- compare gt v.s. nnf computations --
        nnf_of = compute_epe(flow_nnf, flow_gt)
        split_of = compute_epe(flow_split, flow_gt)
        est_of = compute_epe(flow_est, flow_gt)

        split_nnf = compute_epe(flow_split, flow_nnf)
        est_nnf = compute_epe(flow_est, flow_nnf)

        print("-" * 50)
        print("EPE Errors")
        print("-" * 50)
        print("NNF v.s. Optical Flow.")
        print(nnf_of)
        print("Split v.s. Optical Flow.")
        print(split_of)
        print("Proposed v.s. Optical Flow.")
        print(est_of)
        print("Split v.s. NNF")
        print(split_nnf)
        print("Proposed v.s. NNF")
        print(est_nnf)

        # -- psnr eval --
        pad = 2 * patchsize
        isize = edict({'h': H - pad, 'w': W - pad})
        psnr_of = compute_aligned_psnr(aligned_of, static_clean, isize)
        psnr_nnf = compute_aligned_psnr(aligned_nnf, static_clean, isize)
        psnr_split = compute_aligned_psnr(aligned_split, static_clean, isize)
        psnr_est = compute_aligned_psnr(aligned_est, static_clean, isize)
        print("-" * 50)
        print("PSNR Values")
        print("-" * 50)
        print("Optical Flow [groundtruth v1]")
        print(psnr_of)
        print("NNF [groundtruth v2]")
        print(psnr_nnf)
        print("Split [old method]")
        print(psnr_split)
        print("Proposed [new method]")
        print(psnr_est)
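Hedged sketches of the two metrics compared above, under the usual definitions (end-point error as the mean L2 distance between flow vectors; PSNR for images in [0, vmax]); the real compute_epe / compute_aligned_psnr may differ in masking and reductions:

import torch

def compute_epe_sketch(flow_a, flow_b):
    # mean L2 distance between corresponding flow vectors (last dim is 2)
    return torch.norm(flow_a.float() - flow_b.float(), p=2, dim=-1).mean()

def psnr_sketch(aligned, target, vmax=1.0):
    # 10 * log10(vmax^2 / MSE)
    mse = torch.mean((aligned - target) ** 2)
    return 10.0 * torch.log10(vmax ** 2 / mse)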
Example n. 18
def run_eval_score(cfg,data,eval_fn):

    exp_mesh,exp_fields = create_eval_mesh(cfg)
    record = init_record(exp_fields)
    align_clean_score = get_score_function("refcmp")

    # -- sample images --
    noise_xform,dynamic_xform,score_function = init_exp(cfg,exp_mesh[0])
    for image_id in tqdm(range(3)):

        # -- sample image --
        full_image = data.tr[image_id][2]

        # -- simulate dynamics --
        torch.manual_seed(123)
        burst = dynamic_xform(full_image)
        burst = burst.cuda(non_blocking=True)

        # -- tile clean --
        clean = tile_burst_patches(burst,cfg.patchsize,cfg.nblocks)

        # -- run over each experiment --
        for exp in tqdm(exp_mesh,leave=False):
            noise_xform,dynamic_xform,score_function = init_exp(cfg,exp)
            # block_search_space = get_block_arangements(exp.nblocks,exp.nframes)
            bss = get_small_test_block_arangements(EVAL_DIR, cfg.nblocks,
                                                   cfg.nframes, 2, 3)
            block_search_space = bss
            block_search_space.cuda(non_blocking=True)
    
            # -- sample noise --
            noisy = noise_xform(clean)
            
            # -- sample block collections --
            for block_id in tqdm(range(960,990,10),leave=False):
                # -- create blocks from patches and block index --
                clean_blocks = get_pixel_blocks(clean,block_id)
                noisy_blocks = get_pixel_blocks(noisy,block_id)

                # -- create filename to save loss landscape --
                score_paths = score_path_from_exp(eval_fn,exp,image_id,block_id)

                # -- compute scores for blocks --
                results = {}
                results["clean"] = alignment_optimizer(cfg,score_function,
                                                       clean_blocks,clean_blocks,
                                                       block_search_space,
                                                       score_paths.clean)
                results["noisy"] = alignment_optimizer(cfg,score_function,
                                                       noisy_blocks,clean_blocks,
                                                       block_search_space,
                                                       score_paths.noisy)
                results["align"] = alignment_optimizer(cfg,align_clean_score,
                                                       clean_blocks,clean_blocks,
                                                       block_search_space,
                                                       score_paths.align)
                results['dpixClean'] = compute_pixel_difference(
                    clean_blocks, block_search_space)
                results['dpixNoisy'] = compute_pixel_difference(
                    noisy_blocks, block_search_space)

                # -- append to record --
                record = update_record(record,exp,results,image_id,block_id)

    # -- save record --
    print(f"Saving record of information to [{eval_fn}]")
    record.to_csv(eval_fn)
    return record
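The saved record can be reloaded for analysis; a hedged sketch, assuming eval_fn is a CSV path and that update_record stores image_id / block_id columns (both assumptions, not confirmed by the source):

import pandas as pd

record = pd.read_csv("eval_scores.csv")  # hypothetical path passed as eval_fn
print(record.groupby("image_id").mean(numeric_only=True))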