Code example #1
File: test_noise_dyn_once.py  Project: gauenk/cl_gen
def test_noise_once():

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    nbatches = 20
    ref = cfg.nframes//2

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load the "many" dataset (noise re-simulated on every read) --
    print("Loading the \"many\" dataset.")
    cfg.dynamic_info.sim_once = False
    cfg.noise_params.sim_once = False
    many_data,loaders = load_dataset(cfg,"dynamic")
    
    # -- load the "once" dataset (noise simulated only once) --
    print("Loading the \"once\" dataset.")
    cfg.dynamic_info.sim_once = False
    cfg.noise_params.sim_once = True
    once_data,loaders = load_dataset(cfg,"dynamic")

    # -- the actual test --
    nsamples = 10
    nchecks = 4
    for i in range(nsamples):

        # -- collect samples --
        many_bursts = []
        once_bursts = []
        for j in range(nchecks):
            many_sample = many_data.tr[i]
            once_sample = once_data.tr[i]
            many_burst = many_sample['dyn_noisy'][ref]
            once_burst = once_sample['dyn_noisy'][ref]
            many_bursts.append(many_burst)
            once_bursts.append(once_burst)
        many_bursts = torch.stack(many_bursts)
        once_bursts = torch.stack(once_bursts)

        # -- confirm change for "many" --
        for j1 in range(nchecks):
            for j2 in range(nchecks):
                if j1 == j2: continue
                delta = many_bursts[j1] - many_bursts[j2]
                delta = torch.mean(delta**2).item()
                assert delta > 0, "not equal for many!"
        print("Passed \"many\".")
        
        # -- confirm static for "once" --
        save_image(once_bursts,"once_bursts.png")
        for j1 in range(nchecks):
            for j2 in range(nchecks):
                if j1 == j2: continue
                delta = once_bursts[j1] - once_bursts[j2]
                delta = torch.sum(delta**2).item()
                assert delta == 0, "must be equal for once!"
        print("Passed \"once\".")
Code example #2
def create_patch_plot_images(aves):

    ave_images = []
    nsubsets = aves.shape[0]
    for s in range(nsubsets):

        # -- init matplotlib --
        fig = Figure(figsize=(4, 4), dpi=100)
        canvas = FigureCanvas(fig)
        ax = fig.gca()

        # -- draw patch image --
        # ax.text(0.0,0.0,"Test", fontsize=45)
        frame = aves[s]
        ax.imshow(frame, cmap='Greys')
        ax.grid(which='major',
                axis='both',
                linestyle='-',
                color=gcolor,
                linewidth=3)
        ax.set_xticks(np.arange(-.5, frame.shape[1], 1))
        ax.set_yticks(np.arange(-.5, frame.shape[0], 1))
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        ax.margins(0)
        no_pointy_tics(ax)
        canvas.draw()  # draw the canvas, cache the renderer
        fig.tight_layout(pad=0)

        # -- get image --
        # width, height = fig.get_size_inches() * fig.get_dpi()
        # width, height = int(width), int(height)
        # image = np.frombuffer(canvas.tostring_rgb(), dtype='uint8')
        # image = image.reshape(height,width,3)
        canvas_b, (width, height) = canvas.print_to_buffer()
        image = np.frombuffer(canvas_b, np.uint8)
        image = image.reshape(height, width, 4)
        image = image[:, :, :3]

        image_fl = image.astype(np.float32) / 255.
        # print(image_fl)
        thimage = torch.FloatTensor(image_fl).type(torch.float)
        thimage = thimage.permute(2, 0, 1)  # HWC -> CHW for save_image
        # print(thimage)
        # print(type(thimage))
        # print(thimage.shape)
        save_image(thimage, f"./image_{s}.png")

        ave_images.append(image)

        # -- close figure / reset matplotlib --
        plt.close("all")
        plt.clf()
    ave_images = np.stack(ave_images)
    return ave_images
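
The buffer round-trip above (print_to_buffer, frombuffer, reshape, drop the alpha channel) is the standard way to rasterize an Agg figure into an array. A self-contained sketch of just that conversion, saved with torchvision's save_image rather than the project's wrapper (an assumption that the wrapper behaves similarly):

import numpy as np
import torch
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from torchvision.utils import save_image as tv_save_image

def figure_to_tensor(fig):
    # Rasterize a Matplotlib figure into a (C, H, W) float tensor in [0, 1].
    canvas = FigureCanvas(fig)
    canvas.draw()
    buf, (width, height) = canvas.print_to_buffer()            # RGBA bytes
    image = np.frombuffer(buf, dtype=np.uint8).reshape(height, width, 4)[:, :, :3]
    return torch.from_numpy(image.copy()).permute(2, 0, 1).float() / 255.

fig = Figure(figsize=(4, 4), dpi=100)
fig.gca().imshow(np.random.rand(8, 8), cmap='Greys')
tv_save_image(figure_to_tensor(fig), "figure_raster.png")
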
Code example #3
File: test_bsdBurst.py  Project: gauenk/cl_gen
def test_bsdBurst_dataset():

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    cfg.batch_size = 1
    cfg.frame_size = None
    cfg.dataset.name = "bsd_burst"

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- ensure nnf frames are created --
    create_nnf_for_frame_size_grid(cfg)

    # -- save path for viz --
    save_dir = Path(
        f"{settings.ROOT_PATH}/output/tests/datasets/test_bsdBurst/")
    if not save_dir.exists(): save_dir.mkdir(parents=True)

    # -- load dataset --
    print("load image dataset.")
    data, loaders = load_dataset(cfg, "dynamic")
    print("num of bursts: ", len(loaders.tr))
    nbursts = len(data.tr)

    # -- ensure nnf created --
    for burst_index in tqdm.tqdm(range(nbursts)):
        data.tr[burst_index]

    # -- for image bursts --
    image_iter = iter(loaders.tr)
    for burst_index in range(nbursts):

        # -- sample image --
        sample = next(image_iter)
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow = sample['flow']
        index = sample['image_index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks
        print(f"Image Index {index}")

        print(noisy.shape)
        print(clean.shape)

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists(): image_dir.mkdir()

        fn = str(save_dir / "./bsdBurst_example.png")
        save_image(sample['dyn_noisy'], fn, normalize=True, vrange=(0., 1.))
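
save_image is called here with normalize=True and an explicit value range. If the project wrapper mirrors torchvision's save_image (an assumption), the equivalent call passes value_range; a small sketch with a stand-in burst tensor:

import torch
from torchvision.utils import save_image as tv_save_image

burst = torch.rand(5, 3, 64, 64)   # stand-in for a (T, C, H, W) noisy burst
tv_save_image(burst, "bsdBurst_example.png", normalize=True, value_range=(0., 1.))
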
Code example #4
def visualize_nnf(fn,vals,locs):

    # -- compute index grids --
    H,W = locs.shape[:2]
    hgrid = repeat(np.arange(H),'h -> h w',w=W)
    wgrid = repeat(np.arange(W),'w -> h w',h=H)
    index = np.stack([hgrid,wgrid],axis=-1)
    
    # -- compute delta --
    k,epsilon = 0,1e-8
    delta = locs[:,:,k] - index

    # -- create flow image --
    vis = flow_to_color(delta)
    vis = rearrange(torch.FloatTensor(vis)/255.,'h w c -> c h w')
    save_image(vis,fn)
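
visualize_nnf converts top-1 NNF locations into a displacement field (location minus pixel index) and renders it as a color-wheel image. A standalone sketch of the same steps on a synthetic field, assuming flow_to_color comes from the flow_vis package (the project may ship its own implementation):

import numpy as np
import torch
from einops import rearrange, repeat
from flow_vis import flow_to_color
from torchvision.utils import save_image as tv_save_image

H, W = 32, 32
hgrid = repeat(np.arange(H), 'h -> h w', w=W)
wgrid = repeat(np.arange(W), 'w -> h w', h=H)
index = np.stack([hgrid, wgrid], axis=-1).astype(np.float32)
locs = index + np.array([2., -1.], dtype=np.float32)   # constant shift as a toy NNF
vis = flow_to_color(locs - index)                       # (H, W, 3) uint8
vis = rearrange(torch.from_numpy(vis).float() / 255., 'h w c -> c h w')
tv_save_image(vis, "nnf_flow.png")
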
Code example #5
def main():
    seed = 234
    np.random.seed(seed)
    torch.manual_seed(seed)

    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = True
    cfg.noise_params.ntype = 'g'
    cfg.noise_params.g.std = 25.
    cfg.nframes = 3
    cfg.num_workers = 0
    cfg.dynamic_info.nframes = cfg.nframes
    cfg.dynamic_info.ppf = 10
    cfg.nblocks = 3
    cfg.patchsize = 10
    cfg.gpuid = 1
    cfg.device = f"cuda:{cfg.gpuid}"

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    train_iter = iter(loaders.tr)

    # -- fetch sample --
    sample = next(train_iter)
    sample_to_cuda(sample)

    # -- unpack data --
    noisy, clean = sample['noisy'], sample['burst']

    # -- save ave image --
    save_image(torch.mean(noisy[:, 0], dim=0),
               SAVE_DIR / "./bootstrap_noisy_ave.png")

    # -- format for plots --
    print("noisy.shape", noisy.shape)
    noisy = rearrange(noisy[:, 0], 't c h w -> t h w c')
    clean = rearrange(clean[:, 0], 't c h w -> t h w c')

    plot_bootstrapping(noisy, clean)
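
cfg.use_anscombe toggles a variance-stabilizing transform before processing. For reference, the standard Anscombe transform and its algebraic inverse look like the sketch below (how the project actually applies the flag is an assumption):

import torch

def anscombe(x):
    return 2.0 * torch.sqrt(x + 3.0 / 8.0)

def anscombe_inv(y):
    return (y / 2.0) ** 2 - 3.0 / 8.0

x = torch.poisson(torch.full((4, 3, 16, 16), 20.0))
assert torch.allclose(anscombe_inv(anscombe(x)), x, atol=1e-4)
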
Code example #6
File: test_multiscale.py  Project: gauenk/cl_gen
def run_multiscale_nnf(cfg, noisy, clean, nlevels=3, verbose=False):
    T, C, H, W = noisy.shape
    nframes = T
    noisy = noisy[:, None]
    clean = clean[:, None]

    isize = edict({'h': H, 'w': W})
    isize_l = [H, W]
    pad3 = cfg.nblocks // 2 + 3 // 2
    psize3 = edict({'h': H - pad3, 'w': W - pad3})
    pad = cfg.nblocks // 2 + cfg.patchsize // 2
    psize = edict({'h': H - pad, 'w': W - pad})
    cfg_patchsize = cfg.patchsize

    factor = 2
    noisy = tvF.resize(noisy[:, 0], [H // factor, W // factor],
                       interpolation=InterpMode.BILINEAR)[:, None]
    clean = tvF.resize(clean[:, 0], [H // factor, W // factor],
                       interpolation=InterpMode.BILINEAR)[:, None]
    T, _, C, H, W = noisy.shape
    isize = edict({'h': H, 'w': W})
    isize_l = [H, W]
    pad3 = cfg.nblocks // 2 + 3 // 2
    psize3 = edict({'h': H - pad3, 'w': W - pad3})
    pad = cfg.nblocks // 2 + cfg.patchsize // 2
    psize = edict({'h': H - pad, 'w': W - pad})
    cfg_patchsize = cfg.patchsize

    # -- looks good --
    cfg.patchsize = 3
    align_fxn = get_align_method(cfg, "l2_global")
    _, flow = align_fxn(clean.to(0))
    aclean = align_from_flow(clean, flow, cfg.nblocks, isize=isize)
    save_image("aclean.png", aclean)
    apsnr = align_psnr(aclean, psize3)
    print("[global] clean: ", apsnr)

    # -- looks not good --
    _, flow = align_fxn(noisy.to(0))
    isize = edict({'h': H, 'w': W})
    aclean = align_from_flow(clean, flow, cfg.nblocks, isize=isize)
    save_image("aclean_rs1.png", aclean)
    apsnr = align_psnr(aclean, psize3)
    print("noisy: ", apsnr)

    # -- fix it --
    cfg.nblocks = 5
    align_fxn = get_align_method(cfg, "pair_l2_local")
    _, flow = align_fxn(aclean.to(0))
    isize = edict({'h': H, 'w': W})
    aclean = align_from_flow(aclean, flow, cfg.nblocks, isize=isize)
    save_image("aclean_rs1.png", aclean)
    apsnr = align_psnr(aclean, psize3)
    print("[fixed] noisy: ", apsnr)

    #
    # -- [Tiled] try it again and to fix it --
    #
    img_ps = 3
    cfg.patchsize = img_ps
    cfg.nblocks = 50
    tnoisy = padAndTileBatch(noisy, cfg.patchsize, cfg.nblocks)
    tclean = padAndTileBatch(clean, cfg.patchsize, cfg.nblocks)
    t2i_clean = tvF.center_crop(tiled_to_img(tclean, img_ps), isize_l)
    print(t2i_clean.shape, clean.shape)
    save_image("atiled_to_img.png", t2i_clean)
    delta = torch.sum(torch.abs(clean - t2i_clean)).item()
    assert delta < 1e-8, "tiled to image must work!"

    cfg.patchsize = 3
    align_fxn = get_align_method(cfg, "pair_l2_local")
    _, flow = align_fxn(tnoisy.to(0))
    print(flow.shape, tclean.shape, clean.shape, np.sqrt(flow.shape[1]))

    nbHalf = cfg.nblocks // 2
    pisize = edict({'h': H + 2 * nbHalf, 'w': W + 2 * nbHalf})
    aclean = align_from_flow(tclean, flow, cfg.nblocks, isize=pisize)
    aclean_img = tvF.center_crop(tiled_to_img(aclean, img_ps), isize_l)
    save_image("aclean_rs1_tiled.png", aclean_img)
    apsnr = align_psnr(aclean_img, psize3)
    print("[tiled] noisy: ", apsnr)

    # i want to use a different block size but I need to correct the image padding..?

    # def shrink_search_space(tclean,flow,nblocks_prev,nblocks_curr):
    #     print("tclean.shape: ",tclean.shape)
    #     print("flow.shape: ",flow.shape)
    #     T,_,C,H,W = tclean.shape
    #     flow = rearrange(flow,'i (h w) t two -> t i two h w',h=H)
    #     tclean = tvF.center_crop(tclean,new_size)
    #      = tvF.center_crop(tclean,new_size)

    nblocks_prev = cfg.nblocks
    cfg.nblocks = 5
    # tclean,flow = shrink_search_space(tclean,flow,nblocks_prev,cfg.nblocks)
    align_fxn = get_align_method(cfg, "pair_l2_local")
    at_clean = align_from_flow(tclean, flow, cfg.nblocks, isize=pisize)
    _, flow_at = align_fxn(at_clean.to(0))
    aaclean = align_from_flow(at_clean, flow_at, cfg.nblocks, isize=pisize)
    aaclean_img = tvF.center_crop(tiled_to_img(aaclean, img_ps), isize_l)
    save_image("aclean_rs1_fixed.png", aaclean_img)
    apsnr = align_psnr(aaclean_img, psize3)
    print("[fixed] noisy: ", apsnr)

    exit()

    cfg.patchsize = 1  #cfg_patchsize

    align_fxn = get_align_method(cfg, "pair_l2_local")
    # clusters = cluster_flow(flow,H,nclusters=4)
    cflow = flow  #replace_flow_median(flow,clusters,H,cfg.nblocks)
    # save_image("clusters.png",clusters.type(torch.float))
    cflow_img = flow2img(cflow, H, T)
    save_image("cflow.png", cflow_img)
    aclean = align_from_flow(clean, cflow, cfg.nblocks, isize=isize)
    save_image("aclean_rs1_cf.png", aclean)

    print(cflow[:, 64 * 64 + 64])
    apsnr = align_psnr(aclean, psize)
    print("noisy_cf: ", apsnr)
    print(flow.shape)

    # flow = rearrange(flow,'i (h w) t two -> t i two h w',h=H)
    # print_stats(flow)
    flow_img = flow2img(flow, H, T)
    save_image("flow.png", flow_img)
    print(torch.histc(flow.type(torch.float)))

    factor = 2
    cfg.nblocks = max(cfg.nblocks // 2, 3)
    cfg.patchsize = 1
    # cfg.patchsize = max(cfg.patchsize//2,3)
    noisy_rs = tvF.resize(noisy[:, 0], [H // factor, W // factor],
                          interpolation=InterpMode.BILINEAR)[:, None]
    _, flow_rs = align_fxn(noisy_rs.to(0))

    clean_rs = tvF.resize(clean[:, 0], [H // factor, W // factor],
                          interpolation=InterpMode.BILINEAR)[:, None]
    isize = edict({'h': H // factor, 'w': W // factor})
    aclean = align_from_flow(clean_rs, flow_rs, cfg.nblocks, isize=isize)
    save_image("aclean_rs2.png", aclean)
    apsnr = align_psnr(aclean, psize)
    print("rs2", apsnr, cfg.nblocks, cfg.patchsize)

    clusters = cluster_flow(flow_rs, H // factor, nclusters=3)
    save_image("clusters_rs.png", clusters.type(torch.float))
    # cflow_rs = cluster_flow(flow_rs,H//factor,nclusters=5)
    # print(cflow_rs)

    aclean = align_from_flow(clean_rs, cflow_rs, cfg.nblocks, isize=isize)
    save_image("aclean_rs2_cl.png", aclean)
    apsnr = align_psnr(aclean, psize)
    print("rs2_cl", apsnr, cfg.nblocks, cfg.patchsize)
    exit()

    print(flow_rs.shape)
    # flow_rs = rearrange(flow_rs,'i (h w) t two -> t i two h w',h=H//factor)
    print(flow_rs.shape)
    flow_img = flow2img(flow_rs, H // factor, T)
    save_image("flow_rs2.png", flow_img)
    fmin, fmax, fmean = print_stats(flow_rs)
    print(torch.histc(flow_rs.type(torch.float), max=50, min=-50))

    factor = 4
    cfg.nblocks = max(cfg.nblocks // 2, 3)
    # cfg.patchsize = max(cfg.patchsize//2,3)
    noisy_rs = tvF.resize(noisy[:, 0], [H // factor, W // factor],
                          interpolation=InterpMode.BILINEAR)[:, None]
    _, flow_rs = align_fxn(noisy_rs.to(0))

    clean_rs = tvF.resize(clean[:, 0], [H // factor, W // factor],
                          interpolation=InterpMode.BILINEAR)[:, None]
    isize = edict({'h': H // factor, 'w': W // factor})
    aclean = align_from_flow(clean_rs, flow_rs, cfg.nblocks, isize=isize)
    save_image("aclean_rs4.png", aclean)
    apsnr = align_psnr(aclean, psize)
    print(apsnr, cfg.nblocks, cfg.patchsize)

    print(flow_rs.shape)
    # flow_rs = rearrange(flow_rs,'i (h w) t two -> t i two h w',h=H//factor)
    print(flow_rs.shape)
    flow_img = flow2img(flow_rs, H // factor, T)
    save_image("flow_rs4.png", flow_img)
    fmin, fmax, fmean = print_stats(flow_rs)
    print(torch.histc(flow_rs.type(torch.float), max=50, min=-50))
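
The multiscale loop above resizes the burst by a factor of 2 or 4 and re-estimates flow at each scale. When a flow estimated at a coarse scale is brought back to full resolution, both its spatial size and its values must be multiplied by the factor; a small sketch of that rule (the helper name is illustrative):

import torch
import torch.nn.functional as F

def upscale_flow(flow_coarse, factor):
    # flow_coarse: (T, 2, H//factor, W//factor) pixel offsets
    up = F.interpolate(flow_coarse, scale_factor=factor,
                       mode='bilinear', align_corners=False)
    return up * factor   # offsets are measured in pixels, so they scale too

print(upscale_flow(torch.randn(3, 2, 16, 16), 2).shape)   # torch.Size([3, 2, 32, 32])
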
Code example #7
def test_global_dynamics():

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    nbatches = 20

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    print("load image dataset.")
    data, loaders = load_image_dataset(cfg)
    image_iter = iter(loaders.tr)

    # -- save path for viz --
    save_dir = Path(
        f"{settings.ROOT_PATH}/output/tests/datasets/test_global_dynamics/")
    if not save_dir.exists(): save_dir.mkdir(parents=True)

    # -- sample data --
    for image_index in range(nbatches):

        # -- sample image --
        index = -1
        # while index != 3233:
        #     sample = next(image_iter)
        #     convert_keys(sample)
        #     index = sample['image_index'][0][0].item()

        sample = next(image_iter)
        # batch_dim0(sample)
        convert_keys(sample)

        # -- extract info --
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow = sample['flow_gt']
        index = sample['image_index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks
        print(f"Image Index {index}")

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists(): image_dir.mkdir()

        #
        # -- Compute NNF to Ensure things are OKAY --
        #

        isize = edict({'h': h, 'w': w})
        # pad = cfg.patchsize//2 if cfg.patchsize > 1 else 1
        pad = cfg.nblocks // 2 + 1
        psize = edict({'h': h - 2 * pad, 'w': w - 2 * pad})
        flow_gt = repeat(flow, 'i fm1 two -> i s fm1 two', s=h * w)
        pix_gt = flow_to_pix(flow_gt.clone(), nframes, isize=isize)

        def cc(image):
            return tvF.center_crop(image, (psize.h, psize.w))

        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
        # aligned_gt = warp_burst_flow(clean, flow_global)
        aligned_gt = align_from_pix(clean, pix_gt, cfg.nblocks)
        # isize = edict({'h':h,'w':w})
        # aligned_gt = align_from_flow(clean,flow_global,cfg.nblocks,isize=isize)
        # psnr = compute_aligned_psnr(sclean[[nframes//2]],clean[[nframes//2]],psize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize)
        print(f"[GT Alignment] PSNR: {psnr}")

        # -- compute with nvidia's opencv optical flow --
        nd_clean = rearrange(clean.numpy(), 't 1 c h w -> t h w c')
        ref_t = nframes // 2
        frames, flows = [], []
        for t in range(nframes):
            if t == ref_t:
                frames.append(nd_clean[t][None, :])
                flows.append(torch.zeros(flows[-1].shape))
                continue
            from_frame = 255. * cv2.cvtColor(nd_clean[ref_t],
                                             cv2.COLOR_RGB2GRAY)
            to_frame = 255. * cv2.cvtColor(nd_clean[t], cv2.COLOR_RGB2GRAY)
            _flow = cv2.calcOpticalFlowFarneback(to_frame, from_frame, None,
                                                 0.5, 3, 3, 10, 5, 1.2, 0)
            _flow = np.round(_flow).astype(np.float32)  # not good for later
            w_frame = warp_flow(nd_clean[t], -_flow)
            _flow[..., 0] = -_flow[..., 0]  # my OF is probably weird.
            # print("w_frame.shape ",w_frame.shape)
            flows.append(torch.FloatTensor(_flow))
            frames.append(torch.FloatTensor(w_frame[None, :]))
        flows = torch.stack(flows)
        flows = rearrange(flows, 't h w two -> 1 (h w) t two')
        frames = torch.FloatTensor(np.stack(frames))
        frames = rearrange(frames, 't i h w c -> t i c h w')
        # print("flows.shape ",flows.shape)
        # print("frames.shape ",frames.shape)
        # print("sclean.shape ",sclean.shape)
        psnr = compute_aligned_psnr(sclean, frames, psize)
        print(f"[NVOF Alignment] PSNR: {psnr}")

        pix_nvof = flow_to_pix(flows.clone(), nframes, isize=isize)
        aligned_nvof = align_from_pix(clean, pix_nvof, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_nvof, psize)
        print(f"[NVOF Alignment v2] PSNR: {psnr}")

        psnr = compute_aligned_psnr(frames, aligned_nvof, psize)
        print(f"[NVOF Alignment Methods] PSNR: {psnr}")

        print(pix_global[0, mid_pix])
        print(pix_gt[0, mid_pix])
        print(flow_global[0, mid_pix])
        print(flow_gt[0, mid_pix])
        print(flows[0, mid_pix])

        #
        # -- Save Images to Qualitative Inspect --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        print(cc(sclean).shape)
        fn = image_dir / "diff.png"
        save_image(cc(sclean) - cc(aligned_gt),
                   fn,
                   normalize=True,
                   vrange=None)

        fn = image_dir / "aligned_gt.png"
        save_image(cc(aligned_gt), fn, normalize=True, vrange=None)

        # return

        # -- NNF Global --
        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
        # aligned_global = align_from_flow(clean,flow_gt,cfg.nblocks)
        aligned_global = align_from_pix(clean, pix_global, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_global, psize)
        print(f"[NNF Global] PSNR: {psnr}")

        # -- NNF Local (old) --
        iterations, K, subsizes = 0, 1, []
        optim = AlignOptimizer("v3")
        score_fxn_ave = get_score_function("ave")
        eval_ave = EvalBlockScores(score_fxn_ave, "ave", cfg.patchsize, 256,
                                   None)
        flow_local = optim.run(clean, cfg.patchsize, eval_ave, cfg.nblocks,
                               iterations, subsizes, K)
        pix_local = flow_to_pix(flow_local.clone(), nframes, isize=isize)
        # aligned_local = align_from_flow(clean,flow_gt,cfg.nblocks)
        aligned_local = align_from_pix(clean, pix_local, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_local, psize)
        print(f"[NNF Local (Old)] PSNR: {psnr}")

        # -- NNF Local (new) --
        _, pix_local = nnf_utils.runNnfBurst(clean,
                                             cfg.patchsize,
                                             cfg.nblocks,
                                             1,
                                             valMean=0.,
                                             blockLabels=None)
        pix_local = rearrange(pix_local, 't i h w 1 two -> i (h w) t two')
        flow_local = pix_to_flow(pix_local.clone())
        # aligned_local = align_from_flow(clean,flow_gt,cfg.nblocks)
        aligned_local = align_from_pix(clean, pix_local, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_local, psize)
        print(f"[NNF Local (New)] PSNR: {psnr}")

        # -- remove boundary from pix --
        pixes = {'gt': pix_gt, 'global': pix_global, 'local': pix_local}
        for field, pix in pixes.items():
            pix_img = rearrange(pix, 'i (h w) t two -> (i t) two h w', h=h)
            pix_cc = cc(pix_img)
            pixes[field] = pix_cc

        # -- pairwise diffs --
        field2 = "gt"
        for field1 in pixes.keys():
            if field1 == field2: continue
            delta = pixes[field1] - pixes[field2]
            delta = delta.type(torch.float)
            delta_fn = image_dir / f"delta_{field1}_{field2}.png"
            save_image(delta, delta_fn, normalize=True, vrange=None)
        print(pix_gt[0, mid_pix])
        print(pix_global[0, mid_pix])
        print(pix_local[0, mid_pix])

        print(flow_gt[0, mid_pix])
        print(flow_global[0, mid_pix])
        print(flow_local[0, mid_pix])

        #
        # -- Save Images to Qualitative Inspect --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        fn = image_dir / "aligned_global.png"
        save_image(cc(aligned_global), fn, normalize=True, vrange=None)

        fn = image_dir / "aligned_local.png"
        save_image(cc(aligned_local), fn, normalize=True, vrange=None)
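
The Farneback block above relies on warp_flow to backward-warp a frame with the estimated flow. A typical implementation of that helper (a sketch; the project's warp_flow is assumed to be of this form) uses cv2.remap:

import cv2
import numpy as np

def warp_flow_sketch(img, flow):
    # img: (H, W, C) float image, flow: (H, W, 2) pixel offsets
    h, w = flow.shape[:2]
    grid = flow.copy()
    grid[:, :, 0] += np.arange(w, dtype=np.float32)              # absolute x coordinates
    grid[:, :, 1] += np.arange(h, dtype=np.float32)[:, None]     # absolute y coordinates
    return cv2.remap(img, grid.astype(np.float32), None, cv2.INTER_LINEAR)

img = np.random.rand(64, 64, 3).astype(np.float32)
zero_flow = np.zeros((64, 64, 2), dtype=np.float32)
assert np.allclose(warp_flow_sketch(img, zero_flow), img, atol=1e-5)   # identity flow is a no-op
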
Code example #8
File: main.py  Project: gauenk/cl_gen
def run():

    # -- get experiment config --
    cfg = get_main_config()
    cfg.batch_size = 100
    cfg.nframes = 3
    cfg.frame_size = 350
    cfg.N = cfg.nframes
    cfg.dynamic.frame_size = cfg.frame_size
    cfg.dynamic.frames = cfg.nframes
    cfg.gpuid = 0
    cfg.random_seed = 0
    T = cfg.nframes

    # -- setup seed --
    np.random.seed(cfg.random_seed)
    torch.manual_seed(cfg.random_seed)

    # -- load image data --
    data, loader = load_image_dataset(cfg)
    data_iter = iter(loader.tr)

    # -- get image sample --
    N = 2
    for i in range(N):
        sample = next(data_iter)
    dyn_noisy = sample['noisy']  # dynamics and noise
    dyn_clean = sample['burst']  # dynamics and no noise
    static_noisy = sample['snoisy']  # no dynamics and noise
    static_clean = sample['sburst']  # no dynamics and no noise
    flow = sample['flow']
    # save_image(dyn_clean[T//2],"samples.png")
    pick = 26
    save_image(dyn_clean[T // 2, pick], "samples.png")
    cropped = True

    # -- get picks --
    noisy = dyn_noisy[T // 2, pick] + 0.5
    clean = dyn_clean[T // 2, pick] + 0.5

    # -- optionally crop --
    if cropped:
        noisy = tvF.crop(noisy, 150, 0, 275, 125)
        clean = tvF.crop(clean, 150, 0, 275, 125)

    # -- noise model misspecification --
    save_noise_model_misspecification(cfg, clean)

    # -- pic list --
    pics = OrderedDict()
    psnrs = OrderedDict()

    # -- get clean ref --
    pics['Clean'] = clean
    psnrs['Clean'] = None

    # -- apply noise to image --
    noise_type = "g-100p0"
    noise_xform = get_noise_xform(cfg, noise_type)
    noisy = noise_xform(clean) + 0.5
    pics['Noisy'] = noisy
    psnrs['Noisy'] = None

    # -- RAFT --
    noise_type = "g-25p0"
    noise_xform = get_noise_xform(cfg, noise_type)
    raft = noise_xform(clean) + 0.5
    pics['RAFT'] = raft
    psnrs['RAFT'] = 25.

    # -- LSRMTF --
    noise_type = "g-30p0"
    noise_xform = get_noise_xform(cfg, noise_type)
    lsrmtf = noise_xform(clean) + 0.5
    # pics['lsrmtf'] = lsrmtf

    # -- NNF --
    noise_type = "g-10p0"
    noise_xform = get_noise_xform(cfg, noise_type)
    nnf = noise_xform(clean) + 0.5
    pics['LDOF'] = nnf
    psnrs['LDOF'] = 29.

    # -- OURS --
    noise_type = "g-5p0"
    noise_xform = get_noise_xform(cfg, noise_type)
    ours = noise_xform(clean) + 0.5
    pics['Ours'] = ours
    psnrs['Ours'] = 32.

    # -- OURS+SEG --
    noise_type = "g-2p0"
    noise_xform = get_noise_xform(cfg, noise_type)
    ours_seg = noise_xform(clean) + 0.5
    pics['Ours+Seg'] = ours_seg
    psnrs['Ours+Seg'] = 36.

    # -- ALL PICS --
    names = list(pics.keys())
    M = len(names)
    dpi = mpl.rcParams['figure.dpi']
    H, W = clean.shape[-2:]
    figsize = M * (W / float(dpi)), H / float(dpi)
    fig, axes = plt.subplots(1, M, figsize=figsize, sharex=True, sharey=True)
    fig.subplots_adjust(hspace=0.05, wspace=0.05)
    for i, ax in enumerate(axes):
        name = names[i]
        pic = rearrange(pics[name], 'c h w -> h w c')
        pic = torch.clip(pic, 0, 1)
        ax.imshow(pic, interpolation='none', aspect='auto')
        ax.set_ylabel('')
        ax.set_yticks([])
        ax.set_xticks([])
        ax.set_yticklabels([])
        ax.set_xticklabels([])
        # label = textwrap.fill(name + '\n' + '1.2',15)
        label = name
        if not (psnrs[name] is None): label += '\n' + str(psnrs[name])
        ax.set_xlabel(label, fontsize=12)

    # -- save plot --
    DIR = Path("./output/pretty_plots")
    if not DIR.exists(): DIR.mkdir()
    if cropped: fn = DIR / "./example_denoised_images_cropped.png"
    else: fn = DIR / "./example_denoised_images.png"
    plt.savefig(fn, transparent=True, bbox_inches='tight', dpi=300)
    plt.close('all')
    print(f"Wrote plot to [{fn}]")
Code example #9
def create_weight_burst_plot(input_fig, input_ax, noisy, clean, burst, weights,
                             nblocks, nframes, patchsize, postfix, bcolor):

    # -- create colormap --
    weights = np.copy(weights)
    weights_nmlz = weights - weights.min()
    cmap = 'Greys'

    # -- create subplots --
    fig = plt.figure(figsize=(6, 10))
    fig.canvas.draw()

    # if input_ax is None:
    #     height,width = 8,8
    #     fig,ax = plt.subplots(figsize=(height,width))
    # else: fig,ax = input_fig,input_ax

    # -- create gridspec --
    nrows = weights.shape[0]
    ncols = 2 * nframes + 1
    axes = create_paired_gridspec(nrows,
                                  ncols,
                                  patchsize,
                                  nframes,
                                  left=None,
                                  right=None,
                                  wspace=None,
                                  hspace=None)

    # -- create row --
    aves = []
    vmin, vmax = -1. / nframes, 1.
    for row in range(nrows):
        ave = 0
        weight_row = weights[row, :]
        weight_row_nmlz = weights_nmlz[row, :]
        print(weight_row)
        for t in range(nframes):
            frame = burst[t][:patchsize, :patchsize]
            weight_nmlz = weight_row_nmlz[t].reshape(1, 1, 1)
            weight = weight_row[t].reshape(1, 1, 1)

            #ave += weight_row[t] * frame
            ave += weight_row[t] * noisy[t].cpu()

            w_axis = axes[row][2 * t]
            w_axis.imshow(weight, cmap='Greys', vmin=vmin, vmax=1.0)
            w_axis.set_xticklabels([])
            w_axis.set_yticklabels([])
            w_axis.set_aspect('equal')
            no_pointy_tics(w_axis)

            frame_ax = axes[row][2 * t + 1]

            # -- replace weight with image --
            weight = noisy[t] - noisy[t].min()
            weight /= weight.max()
            weight = weight.cpu().numpy()
            frame_ax.imshow(weight, vmin=0, vmax=1.0)
            # -- use weight --
            # frame_ax.imshow(frame, cmap='Greys',vmin=vmin,vmax=1.0)
            # frame_ax.grid(which='major', axis='both',
            #         linestyle='-', color=gcolor, linewidth=2)
            # frame_ax.set_xticks(np.arange(-.5, frame.shape[1], 1));
            # frame_ax.set_yticks(np.arange(-.5, frame.shape[0], 1));
            frame_ax.set_xticklabels([])
            frame_ax.set_yticklabels([])
            frame_ax.set_aspect('equal')
            no_pointy_tics(frame_ax)

        # -- create averaged patch in row --
        aves.append(ave)
        frame_ax = axes[row][-1]
        frame_ax.imshow(ave, cmap='Greys')
        # frame_ax.imshow(ave, cmap='Greys')
        # frame_ax.grid(which='major', axis='both',
        #         linestyle='-', color=gcolor, linewidth=2)
        # frame_ax.set_xticks(np.arange(-.5, frame.shape[1], 1));
        # frame_ax.set_yticks(np.arange(-.5, frame.shape[0], 1));
        frame_ax.set_xticklabels([])
        frame_ax.set_yticklabels([])
        frame_ax.set_aspect('equal')
        no_pointy_tics(frame_ax)

        # -- write plus signs --
        method = "use_image"
        if method == "use_pix":
            if nrows < 5:
                left, top = 5.75, 2.25
            else:
                left, top = 5.25, 2.25
        else:
            left, top = 70.75, 35.25
        for t in range(nframes - 1):
            axes[row][2 * t + 1].text(left, top, r'$+$', fontsize=20)
        axes[row][2 * nframes - 1].text(left, top, r'$=$', fontsize=20)

    # -- save frame - ave image --
    ave = torch.stack(aves, dim=0)
    tosave = rearrange(
        torch.mean(noisy.cpu(), dim=0) - ave, 's h w c -> s c h w')
    save_image(tosave, SAVE_DIR / "./bootstrap_noisy_delta_ave.png")

    # -- save figure --
    fname = SAVE_DIR / f"bootstrapping_{postfix}.png"
    plt.savefig(fname, transparent=True, bbox_inches='tight', dpi=300)
    plt.close("all")
    plt.clf()

    # -- stack and return aves --
    aves = np.stack(aves)
    return aves
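
Each row above accumulates ave as a weighted sum of the burst frames. The same accumulation can be written as one einsum over a (subsets, frames) weight matrix and a (frames, H, W, C) burst; a small sketch with random stand-ins:

import torch

weights = torch.softmax(torch.randn(4, 3), dim=1)   # 4 weight rows over 3 frames
burst = torch.rand(3, 32, 32, 3)                    # (frames, H, W, C)
aves = torch.einsum('st,thwc->shwc', weights, burst)
row0 = sum(weights[0, t] * burst[t] for t in range(3))
assert torch.allclose(aves[0], row0, atol=1e-6)
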
Code example #10
File: exp.py  Project: gauenk/cl_gen
def execute_experiment(cfg):

    # -- init exp! --
    print("RUNNING EXP.")
    print(cfg)

    # -- create results record to save --
    dims = {
        'batch_results': None,
        'batch_to_record': None,
        'record_results': {
            'default': 0
        },
        'stack': {
            'default': 0
        },
        'cat': {
            'default': 0
        }
    }
    record = cache_io.ExpRecord(dims)

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    # data,loaders = load_image_dataset(cfg)
    data, loaders = load_dataset(cfg, cfg.dataset.mode)
    image_iter = iter(loaders.tr)

    # -- get score function --
    score_fxn_ave = get_score_function("ave")
    score_fxn_bs = get_score_function(cfg.score_fxn_name)

    # -- some constants --
    NUM_BATCHES = 10
    nframes, nblocks = cfg.nframes, cfg.nblocks
    patchsize = cfg.patchsize
    ps = patchsize
    ppf = cfg.dynamic_info.ppf
    check_parameters(nblocks, patchsize)

    # -- theory constants --
    std = cfg.noise_params.g.std / 255.
    p = cfg.patchsize**2 * 3
    t = cfg.nframes
    theory = edict()
    theory.c2 = ((t - 1) / t)**2 * std**2 + (t - 1) / t**2 * std**2
    theory.mean = theory.c2
    theory.mode = (1 - 2 / p) * theory.c2
    theory.var = 2 / p * theory.c2**2
    theory.std = np.sqrt(theory.var)
    pp.pprint(theory)

    # npn = no patch normalization
    theory_npn = edict()
    theory_npn.c2 = ((t - 1) / t)**2 * std**2 + (t - 1) / t**2 * std**2
    theory_npn.mean = theory_npn.c2 * p
    theory_npn.mode = (1 - 2 / p) * theory_npn.c2 * p
    theory_npn.var = 2 * theory_npn.c2**2 * p
    theory_npn.std = np.sqrt(theory_npn.var)
    pp.pprint(theory_npn)

    # oracle = clean reference frame
    theory_oracle = edict()
    theory_oracle.c2 = std**2
    theory_oracle.mean = theory_oracle.c2 * p
    theory_oracle.mode = (1 - 2 / p) * theory_oracle.c2 * p
    theory_oracle.var = 2 * theory_oracle.c2**2 * p
    theory_oracle.std = np.sqrt(theory_oracle.var)
    pp.pprint(theory_oracle)

    # -- create evaluator for ave; simple --
    iterations, K = 1, 1
    subsizes = []
    block_batchsize = 32
    eval_ave_simp = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                                    block_batchsize, None)

    # -- create evaluator for ave --
    iterations, K = 1, 1
    subsizes = []
    eval_ave = EvalBlockScores(score_fxn_ave, "ave", patchsize,
                               block_batchsize, None)

    # -- create evaluator for bootstrapping --
    block_batchsize = 32
    eval_prop = EvalBlockScores(score_fxn_bs, "bs", patchsize, block_batchsize,
                                None)

    # -- init flownet model --
    cfg.gpuid = 1 - cfg.gpuid  # flip. flop.
    flownet_align = get_align_method(cfg, "flownet_v2", comp_align=False)
    cfg.gpuid = 1 - cfg.gpuid  # flippity flop.

    # -- get an image transform --
    image_xform = get_image_xform(cfg.image_xform, cfg.gpuid, cfg.frame_size)
    blockLabels, _ = nnf_utils.getBlockLabels(None, nblocks, np.int32,
                                              cfg.device, True)

    # -- iterate over images --
    NUM_BATCHES = min(NUM_BATCHES, len(image_iter))
    for image_bindex in range(NUM_BATCHES):

        print("-=" * 30 + "-")
        print(f"Running image batch index: {image_bindex}")
        print("-=" * 30 + "-")
        torch.cuda.empty_cache()

        # -- sample & unpack batch --
        nwaste = 0
        for w in range(nwaste):
            sample = next(image_iter)  # waste one
        sample = next(image_iter)
        sample_to_cuda(sample)
        convert_keys(sample)
        torch.cuda.synchronize()
        # for key,val in sample.items():
        #     print(key,type(val))
        #     if torch.is_tensor(val):
        #         print(key,val.device)

        dyn_noisy = sample['dyn_noisy']  # dynamics and noise
        dyn_clean = sample['dyn_clean']  # dynamics and no noise
        static_noisy = sample['static_noisy']  # no dynamics and noise
        static_clean = sample['static_clean']  # no dynamics and no noise
        nnf_gt = sample['nnf']
        flow_gt = sample['flow']
        if nnf_gt.ndim == 6:
            nnf_gt = nnf_gt[:, :, 0]  # pick top 1 out of K
        image_index = sample['image_index']
        rng_state = sample['rng_state']

        # TODO: anscombe is a type of image transform
        if not (image_xform is None):
            dyn_clean_ftrs = image_xform(dyn_clean)
            dyn_noisy_ftrs = image_xform(dyn_noisy)
        else:
            dyn_clean_ftrs = dyn_clean
            dyn_noisy_ftrs = dyn_noisy

        if "resize" in cfg.image_xform:
            vprint("Images, Flows, and NNF Modified.")
            dyn_clean = image_xform(dyn_clean)
            dyn_noisy = image_xform(dyn_noisy)
            T, B, C, H, W = dyn_noisy.shape
            flow_gt = torch.zeros((B, 1, T, H, W, 2))
            nnf_gt = torch.zeros((1, T, B, H, W, 2))

        save_image(dyn_clean, "dyn_clean.png")
        # print("SHAPES")
        # print(dyn_noisy.shape)
        # print(dyn_clean.shape)
        # print(nnf_gt.shape)

        # -- shape info --
        pad = cfg.nblocks // 2 + cfg.patchsize // 2
        T, B, C, H, W = dyn_noisy.shape
        isize = edict({'h': H, 'w': W})
        psize = edict({'h': H - 2 * pad, 'w': W - 2 * pad})
        ref_t = nframes // 2
        nimages, npix, nframes = B, H * W, T
        frame_size = [H, W]
        ifsize = [H - 2 * pad, W - 2 * pad]
        print("flow_gt.shape: ", flow_gt.shape)
        print("flow_gt: ", flow_gt[0, 0, :, H // 2, W // 2, :])

        # -- create results dict --
        pixs = edict()
        flows = edict()
        anoisy = edict()
        aligned = edict()
        runtimes = edict()
        optimal_scores = edict()  # score function at optimal

        # -- compute proposed search of nnf --
        # ave = torch.mean(dyn_noisy_ftrs[:,0,:,4:4+ps,4:4+ps],dim=0)
        # frames = dyn_noisy_ftrs[:,0,:,4:4+ps,4:4+ps]
        # gt_offset = torch.sum((frames - ave)**2/nframes).item()
        # print("Optimal: ",gt_offset)
        # gt_offset = -1.

        # -- FIND MODE of BURST --
        vprint("Our Method")
        flow_fmt = rearrange(flow_gt, 'i 1 t h w two -> t i h w 1 two')
        locs_fmt = flow2locs(flow_fmt)
        print("locs_fmt.shape: ", locs_fmt.shape)
        print(dyn_noisy_ftrs.min(), dyn_noisy_ftrs.max())
        vals, _ = evalAtLocs(dyn_noisy_ftrs,
                             locs_fmt,
                             patchsize,
                             nblocks,
                             return_mode=False)
        vals = torch.zeros_like(vals)
        # flow_fmt = rearrange(flow_gt,'i t h w two -> i (h w) t two')
        # vals,_ = bnnf_utils.evalAtFlow(dyn_noisy_ftrs, flow_fmt, patchsize,
        #                                nblocks, return_mode=False)
        mode = mode_vals(vals, ifsize)
        cc_vals = vals[0, 5:29, 5:29, 0].ravel()
        vstd = torch.std(cc_vals).item()
        print("[SubBurst] Computed Mode: ", mode)
        print("[SubBurst] Computed Std: ", vstd)

        # -- compute proposed search of nnf --
        vprint("dyn_noisy_ftrs.shape ", dyn_noisy_ftrs.shape)
        valMean = theory_npn.mode
        vprint("valMean: ", valMean)
        start_time = time.perf_counter()
        if cfg.nframes < 5:
            _, flows.est = bnnf_utils.runBurstNnf(dyn_noisy_ftrs,
                                                  patchsize,
                                                  nblocks,
                                                  k=1,
                                                  valMean=valMean,
                                                  blockLabels=None,
                                                  fmt=True,
                                                  to_flow=True)
        else:
            flows.est = rearrange(flow_gt,
                                  'i 1 t h w two -> 1 i (h w) t two').clone()
        flows.est = flows.est[0]
        runtimes.est = time.perf_counter() - start_time
        pixs.est = flow_to_pix(flows.est.clone(), nframes, isize=isize)
        aligned.est = align_from_flow(dyn_clean,
                                      flows.est,
                                      patchsize,
                                      isize=isize)
        if cfg.nframes > 7: aligned.est = torch.zeros_like(aligned.est)
        anoisy.est = align_from_flow(dyn_noisy,
                                     flows.est,
                                     patchsize,
                                     isize=isize)
        optimal_scores.est = np.zeros((nimages, npix, 1, nframes))

        # -- the proposed method --
        std = cfg.noise_params.g.std
        start_time = time.perf_counter()
        _flow = flow_gt.clone()
        # _,_flow = runKmSearch(dyn_noisy_ftrs, patchsize, nblocks, k = 1,
        #                       std = std/255.,mode="cuda")
        runtimes.kmb = time.perf_counter() - start_time
        flows.kmb = rearrange(_flow, 'i 1 t h w two -> i (h w) t two')
        pixs.kmb = flow_to_pix(flows.kmb.clone(), nframes, isize=isize)
        aligned.kmb = align_from_flow(dyn_clean, flows.kmb, 0, isize=isize)
        optimal_scores.kmb = torch_to_numpy(optimal_scores.est)

        # -- compute proposed search of nnf --
        vprint("Our BpSearch Method")
        # print(flow_gt)
        # std = cfg.noise_params.g.std/255.
        valMean = theory_npn.mode
        start_time = time.perf_counter()
        bp_nblocks = 3
        # _,bp_est,a_noisy = runBpSearch(dyn_noisy_ftrs, dyn_noisy_ftrs,
        #                                patchsize, bp_nblocks, k = 1,
        #                                valMean = valMean, std=std,
        #                                blockLabels=None,
        #                                l2_nblocks=nblocks,
        #                                fmt = True, to_flow=True,
        #                                search_type=cfg.bp_type,
        #                                gt_info={'flow':flow_gt})
        bp_est = flows.est[None, :].clone()
        flows.bp_est = bp_est[0]
        # flows.bp_est = rearrange(flow_gt,'i t h w two -> i (h w) t two')
        runtimes.bp_est = time.perf_counter() - start_time
        pixs.bp_est = flow_to_pix(flows.bp_est.clone(), nframes, isize=isize)
        # aligned.bp_est = a_clean
        aligned.bp_est = align_from_flow(dyn_clean,
                                         flows.bp_est,
                                         patchsize,
                                         isize=isize)
        anoisy.bp_est = align_from_flow(dyn_noisy,
                                        flows.bp_est,
                                        patchsize,
                                        isize=isize)
        optimal_scores.bp_est = np.zeros((nimages, npix, 1, nframes))

        # -- compute proposed search of nnf [with tiling ]--
        vprint("Our Burst Method (Tiled)")
        valMean = 0.
        start_time = time.perf_counter()
        if cfg.nframes < 5:
            _, flows.est_tile = bnnf_utils.runBurstNnf(dyn_noisy_ftrs,
                                                       patchsize,
                                                       nblocks,
                                                       k=1,
                                                       valMean=valMean,
                                                       blockLabels=None,
                                                       fmt=True,
                                                       to_flow=True,
                                                       tile_burst=True)
        else:
            flows.est_tile = rearrange(
                flow_gt, 'i 1 t h w two -> 1 i (h w) t two').clone()
        flows.est_tile = flows.est_tile[0]
        # flows.est_tile = rearrange(flow_gt,'i t h w two -> i (h w) t two')
        runtimes.est_tile = time.perf_counter() - start_time
        pixs.est_tile = flow_to_pix(flows.est_tile.clone(),
                                    nframes,
                                    isize=isize)
        aligned.est_tile = align_from_flow(dyn_clean,
                                           flows.est_tile,
                                           patchsize,
                                           isize=isize)
        if cfg.nframes > 7:
            aligned.est_tile = torch.zeros_like(aligned.est_tile)
        anoisy.est_tile = align_from_flow(dyn_noisy,
                                          flows.est_tile,
                                          patchsize,
                                          isize=isize)
        optimal_scores.est_tile = np.zeros((nimages, npix, 1, nframes))

        # -- compute new est method --
        vprint("[Burst-LK] loss function")
        vprint(flow_gt.shape)
        # print(flow_gt[0,:3,32,32,:])
        vprint(flow_gt.shape)
        start_time = time.perf_counter()
        if frame_size[0] <= 64 and cfg.nblocks < 10 and True:
            flows.blk = burstNnf.run(dyn_noisy_ftrs, patchsize, nblocks)
        else:
            flows.blk = rearrange(flow_gt, 'i 1 t h w two -> i (h w) t two')
        runtimes.blk = time.perf_counter() - start_time
        pixs.blk = flow_to_pix(flows.blk.clone(), nframes, isize=isize)
        aligned.blk = align_from_flow(dyn_clean,
                                      flows.blk,
                                      patchsize,
                                      isize=isize)
        optimal_scores.blk = np.zeros((nimages, npix, 1, nframes))
        # optimal_scores.blk = eval_prop.score_burst_from_flow(dyn_noisy,flows.nnf_local,
        #                                                      patchsize,nblocks)[1]
        optimal_scores.blk = torch_to_numpy(optimal_scores.blk)

        # -- compute new est method --
        vprint("Oracle")
        vprint(flow_gt.shape)
        # print(flow_gt[0,:3,32,32,:])
        vprint(flow_gt.shape)
        valMean = theory_oracle.mode
        oracle_burst = dyn_noisy_ftrs.clone()
        oracle_burst[nframes // 2] = dyn_clean_ftrs[nframes // 2]
        start_time = time.perf_counter()
        vals_oracle, pix_oracle = nnf_utils.runNnfBurst(
            oracle_burst,
            patchsize,
            nblocks,
            1,
            valMean=valMean,
            blockLabels=blockLabels)
        runtimes.oracle = time.perf_counter() - start_time
        pixs.oracle = rearrange(pix_oracle, 't i h w 1 two -> i (h w) t two')
        flows.oracle = pix_to_flow(pixs.oracle.clone())
        aligned.oracle = align_from_flow(dyn_clean,
                                         flows.oracle,
                                         patchsize,
                                         isize=isize)
        optimal_scores.oracle = np.zeros((nimages, npix, 1, nframes))
        optimal_scores.oracle = torch_to_numpy(optimal_scores.blk)

        # -- compute optical flow --
        vprint("[C Flow]")
        vprint(dyn_noisy_ftrs.shape)
        start_time = time.perf_counter()
        # flows.cflow = cflow.runBurst(dyn_clean_ftrs)
        # flows.cflow[...,1] = -flows.cflow[...,1]
        flows.cflow = torch.LongTensor(flows.blk.clone().cpu().numpy())
        # flows.cflow = flows.blk.clone()
        # flows.cflow = torch.round(flows.cflow)
        runtimes.cflow = time.perf_counter() - start_time
        pixs.cflow = flow_to_pix(flows.cflow.clone(), nframes, isize=isize)
        aligned.cflow = align_from_flow(dyn_clean,
                                        flows.cflow,
                                        patchsize,
                                        isize=isize)
        optimal_scores.cflow = np.zeros((nimages, npix, 1, nframes))
        # optimal_scores.blk = eval_prop.score_burst_from_flow(dyn_noisy,flows.nnf_local,
        #                                                      patchsize,nblocks)[1]
        optimal_scores.blk = torch_to_numpy(optimal_scores.blk)

        # -- compute groundtruth flow --
        dsname = cfg.dataset.name
        if "kitti" in dsname or 'bsd_burst' == dsname:
            pix_gt = nnf_gt.type(torch.float)
            if pix_gt.ndim == 3:
                pix_gt_rs = rearrange(pix_gt, 'i tm1 two -> i 1 tm1 two')
                pix_gt = repeat(pix_gt, 'i tm1 two -> i p tm1 two', p=npix)
            if pix_gt.ndim == 5:
                pix_gt = rearrange(pix_gt, 't i h w two -> i (h w) t two')
            pix_gt = torch.LongTensor(pix_gt.cpu().numpy().copy())
            # flows.of = torch.zeros_like(pix_gt)#pix_to_flow(pix_gt.clone())
            flows.of = pix_to_flow(pix_gt.clone())
        else:
            flows.of = flow_gt
            flows.of = rearrange(flow_gt, 'i 1 t h w two -> i (h w) t two')
        # -- align groundtruth flow --
        aligned.of = align_from_flow(dyn_clean, flows.of, nblocks, isize=isize)
        pixs.of = flow_to_pix(flows.of.clone(), nframes, isize=isize)
        runtimes.of = 0.  # given
        optimal_scores.of = np.zeros(
            (nimages, npix, 1, nframes))  # clean target is zero
        aligned.clean = static_clean
        anoisy.clean = static_clean
        # optimal_scores.of = eval_ave.score_burst_from_flow(dyn_noisy,
        #                                                    flows.of,
        #                                                    patchsize,nblocks)[0]

        # -- compute nearest neighbor fields [global] --
        vprint("NNF Global.")
        start_time = time.perf_counter()
        shape_str = 't b h w two -> b (h w) t two'
        nnf_vals, nnf_pix = nnf.compute_burst_nnf(dyn_clean_ftrs, ref_t,
                                                  patchsize)
        runtimes.nnf = time.perf_counter() - start_time
        pixs.nnf = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flows.nnf = pix_to_flow(pixs.nnf.clone())
        vprint(dyn_clean.shape, pixs.nnf.shape, nblocks)
        aligned.nnf = align_from_pix(dyn_clean, pixs.nnf, nblocks)
        anoisy.nnf = align_from_pix(dyn_noisy, pixs.nnf, nblocks)
        # aligned.nnf = align_from_flow(dyn_clean,flows.nnf,nblocks,isize=isize)
        optimal_scores.nnf = np.zeros(
            (nimages, npix, 1, nframes))  # clean target is zero

        # -- compute nearest neighbor fields [local] --
        vprint("NNF Local.")
        start_time = time.perf_counter()
        valMean = 0.
        vals_local, pix_local = nnf_utils.runNnfBurst(dyn_clean_ftrs,
                                                      patchsize,
                                                      nblocks,
                                                      1,
                                                      valMean=valMean,
                                                      blockLabels=blockLabels)
        runtimes.nnf_local = time.perf_counter() - start_time
        torch.cuda.synchronize()
        vprint("pix_local.shape ", pix_local.shape)
        pixs.nnf_local = rearrange(pix_local, 't i h w 1 two -> i (h w) t two')
        flows.nnf_local = pix_to_flow(pixs.nnf_local.clone())
        # aligned_local = align_from_flow(clean,flow_gt,cfg.nblocks)
        # aligned_local = align_from_pix(dyn_clean,pix_local,cfg.nblocks)
        vprint(flows.nnf_local.min(), flows.nnf_local.max())
        aligned.nnf_local = align_from_pix(dyn_clean, pixs.nnf_local, nblocks)
        anoisy.nnf_local = align_from_pix(dyn_noisy, pixs.nnf_local, nblocks)
        optimal_scores.nnf_local = optimal_scores.nnf
        # optimal_scores.nnf_local = eval_ave.score_burst_from_flow(dyn_noisy,
        #                                                           flows.nnf,
        #                                                           patchsize,nblocks)[1]
        optimal_scores.nnf_local = torch_to_numpy(optimal_scores.nnf_local)

        # -----------------------------------
        #
        # -- old way to compute NNF local --
        #
        # -----------------------------------

        # pixs.nnf = torch.LongTensor(rearrange(nnf_pix[...,0,:],shape_str))
        # flows.nnf = pix_to_flow(pixs.nnf.clone())
        # aligned.nnf = align_from_pix(dyn_clean,pixs.nnf,nblocks)
        # aligned.nnf = align_from_flow(dyn_clean,flows.nnf,nblocks,isize=isize)

        # flows.nnf_local = optim.run(dyn_clean_ftrs,patchsize,eval_ave,
        #                             nblocks,iterations,subsizes,K)

        # -----------------------------------
        # -----------------------------------

        # -- compute proposed search of nnf --
        vprint("Global NNF Noisy")
        start_time = time.perf_counter()
        split_vals, split_pix = nnf.compute_burst_nnf(dyn_noisy_ftrs, ref_t,
                                                      patchsize)
        runtimes.split = time.perf_counter() - start_time
        # split_pix = np.copy(nnf_pix)
        split_pix_best = torch.LongTensor(
            rearrange(split_pix[..., 0, :], shape_str))
        split_pix_best = torch.LongTensor(split_pix_best)
        pixs.split = split_pix_best.clone()
        flows.split = pix_to_flow(split_pix_best)
        aligned.split = align_from_pix(dyn_clean, split_pix_best, nblocks)
        anoisy.split = align_from_pix(dyn_noisy, split_pix_best, nblocks)
        optimal_scores.split = optimal_scores.nnf_local
        # optimal_scores.split = eval_ave.score_burst_from_flow(dyn_noisy,flows.nnf_local,
        #                                                       patchsize,nblocks)[1]
        optimal_scores.split = torch_to_numpy(optimal_scores.split)

        # -- compute complex ave --
        iterations, K = 0, 1
        subsizes = []
        vprint("[Ours] Ave loss function")
        start_time = time.perf_counter()
        estVar = torch.std(dyn_noisy_ftrs.reshape(-1)).item()**2
        valMean = 0.  #2 * estVar# * patchsize**2# / patchsize**2
        vals_local, pix_local = nnf_utils.runNnfBurst(dyn_noisy_ftrs,
                                                      patchsize,
                                                      nblocks,
                                                      1,
                                                      valMean=valMean,
                                                      blockLabels=blockLabels)
        runtimes.ave = time.perf_counter() - start_time
        pixs.ave = rearrange(pix_local, 't i h w 1 two -> i (h w) t two')
        flows.ave = pix_to_flow(pixs.ave.clone())
        optimal_scores.ave = optimal_scores.split  # same "ave" function
        aligned.ave = align_from_flow(dyn_clean,
                                      flows.ave,
                                      nblocks,
                                      isize=isize)
        anoisy.ave = align_from_flow(dyn_noisy,
                                     flows.ave,
                                     nblocks,
                                     isize=isize)
        optimal_scores.ave = optimal_scores.split  # same "ave" function

        # -- compute ave with smoothing --
        iterations, K = 0, 1
        subsizes = []
        vprint("[Ours] Ave loss function")
        start_time = time.perf_counter()
        pix_local = smooth_locs(pix_local, nclusters=1)
        runtimes.ave_smooth = time.perf_counter() - start_time + runtimes.ave
        pixs.ave_smooth = rearrange(pix_local,
                                    't i h w 1 two -> i (h w) t two')
        flows.ave_smooth = pix_to_flow(pixs.ave_smooth.clone())
        optimal_scores.ave_smooth = optimal_scores.split  # same "ave" function
        aligned.ave_smooth = align_from_flow(dyn_clean,
                                             flows.ave_smooth,
                                             nblocks,
                                             isize=isize)
        anoisy.ave_smooth = align_from_flow(dyn_noisy,
                                            flows.ave_smooth,
                                            nblocks,
                                            isize=isize)
        optimal_scores.ave_smooth = optimal_scores.split  # same "ave_smooth" function

        # -- compute  flow --
        vprint("L2-Local Recursive")
        start_time = time.perf_counter()
        vals_local, pix_local, wburst = nnf_utils.runNnfBurstRecursive(
            dyn_noisy_ftrs,
            dyn_clean,
            patchsize,
            nblocks,
            isize,
            1,
            valMean=valMean,
            blockLabels=blockLabels)
        runtimes.l2r = time.perf_counter() - start_time
        pixs.l2r = rearrange(pix_local, 't i h w 1 two -> i (h w) t two')
        flows.l2r = pix_to_flow(pixs.l2r.clone())
        aligned.l2r = wburst  #align_from_flow(dyn_clean,flows.l2r,nblocks,isize=isize)
        optimal_scores.l2r = optimal_scores.split  # same "ave" function

        # -- compute nvof flow --
        vprint("NVOF")
        start_time = time.perf_counter()
        # flows.nvof = nvof.nvof_burst(dyn_noisy_ftrs)
        flows.nvof = flows.ave.clone()
        runtimes.nvof = time.perf_counter() - start_time
        pixs.nvof = flow_to_pix(flows.nvof.clone(), nframes, isize=isize)
        aligned.nvof = align_from_flow(dyn_clean,
                                       flows.nvof,
                                       nblocks,
                                       isize=isize)
        anoisy.nvof = align_from_flow(dyn_noisy,
                                      flows.nvof,
                                      nblocks,
                                      isize=isize)
        optimal_scores.nvof = optimal_scores.split  # same "ave" function

        # -- compute flownet --
        vprint("FlowNetv2")
        start_time = time.perf_counter()
        _, flows.flownet = flownet_align(dyn_noisy_ftrs)
        # flows.flownet = flows.ave.clone().cpu()
        runtimes.flownet = time.perf_counter() - start_time
        pixs.flownet = flow_to_pix(flows.flownet.clone(), nframes, isize=isize)
        aligned.flownet = align_from_flow(dyn_clean,
                                          flows.flownet,
                                          nblocks,
                                          isize=isize)
        anoisy.flownet = align_from_flow(dyn_noisy,
                                         flows.flownet,
                                         nblocks,
                                         isize=isize)
        optimal_scores.flownet = optimal_scores.split

        # -- compute simple ave --
        iterations, K = 0, 1
        subsizes = []
        vprint("[simple] Ave loss function")
        start_time = time.perf_counter()
        optim = AlignOptimizer("v3")
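        # NOTE: the trailing "and False" below keeps this branch disabled, so the "ave" flow is reused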
        if cfg.patchsize < 11 and cfg.frame_size[0] <= 64 and False:
            flows.ave_simp = optim.run(dyn_noisy, patchsize, eval_ave_simp,
                                       nblocks, iterations, subsizes, K)
        else:
            flows.ave_simp = flows.ave.clone().cpu()
        runtimes.ave_simp = time.perf_counter() - start_time
        pixs.ave_simp = flow_to_pix(flows.ave_simp.clone(),
                                    nframes,
                                    isize=isize)
        aligned.ave_simp = align_from_flow(dyn_clean,
                                           flows.ave_simp,
                                           nblocks,
                                           isize=isize)
        anoisy.ave_simp = align_from_flow(dyn_noisy,
                                          flows.ave_simp,
                                          nblocks,
                                          isize=isize)
        optimal_scores.ave_simp = optimal_scores.split  # same "ave" function

        # -- format results --
        #pad = 2*(nframes-1)*ppf+4
        # pad = 2*(cfg.nblocks//2)#2*(nframes-1)*ppf+4
        # isize = edict({'h':H-pad,'w':W-pad})

        # -- flows to numpy --
        frame_size = cfg.frame_size[0]
        is_even = frame_size % 2 == 0
        mid_pix = frame_size * frame_size // 2 + (frame_size // 2) * is_even
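        # hardcoded override of the value computed above (appears to target a specific pixel for inspection)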
        mid_pix = 32 * 10 + 23
        flows_np = edict_torch_to_numpy(flows)
        pixs_np = edict_torch_to_numpy(pixs)

        # -- End-Point-Errors --
        epes_of = compute_flows_epe_wrt_ref(flows, "of")
        epes_nnf = compute_flows_epe_wrt_ref(flows, "nnf")
        epes_nnf_local = compute_flows_epe_wrt_ref(flows, "nnf_local")
        nnf_acc = compute_acc_wrt_ref(flows, "nnf")
        nnf_local_acc = compute_acc_wrt_ref(flows, "nnf_local")

        # -- PSNRs --
        aligned = remove_center_frames(aligned)
        psnrs = compute_frames_psnr(aligned, psize)

        # -- denoised PSNRS --
        def burst_mean(in_burst):
            return torch.mean(in_burst, dim=0)[None, :]

        anoisy = remove_center_frames(anoisy)
        anoisy = apply_across_dict(anoisy, burst_mean)
        dn_psnrs = compute_frames_psnr(anoisy, psize)
        vprint(dn_psnrs)

        # -- print report ---
        print("\n" * 3)  # banner
        print("-" * 25 + " Results " + "-" * 25)
        # print_dict_ndarray_0_midpix(flows_np,mid_pix)
        # print_dict_ndarray_0_midpix(pixs_np,mid_pix)
        # print_verbose_psnrs(psnrs)
        # print_delta_summary_psnrs(psnrs)
        # print_verbose_epes(epes_of,epes_nnf)
        # print_nnf_acc(nnf_acc)
        # print_nnf_local_acc(nnf_local_acc)
        # print_summary_epes(epes_of,epes_nnf)
        # print_summary_denoised_psnrs(dn_psnrs)
        print_summary_psnrs(psnrs)
        print_runtimes(runtimes)

        # -- prepare results to be appended --
        psnrs = edict_torch_to_numpy(psnrs)
        epes_of = edict_torch_to_numpy(epes_of)
        epes_nnf = edict_torch_to_numpy(epes_nnf)
        epes_nnf_local = edict_torch_to_numpy(epes_nnf_local)
        nnf_acc = edict_torch_to_numpy(nnf_acc)
        nnf_local_acc = edict_torch_to_numpy(nnf_local_acc)
        image_index = torch_to_numpy(image_index)
        batch_results = {
            'runtimes': runtimes,
            'optimal_scores': optimal_scores,
            'psnrs': psnrs,
            'epes_of': epes_of,
            'epes_nnf': epes_nnf,
            'epes_nnf_local': epes_nnf_local,
            'nnf_acc': nnf_acc,
            'nnf_local_acc': nnf_local_acc
        }

        # -- format results --
        batch_results = flatten_internal_dict(batch_results)
        format_fields(batch_results, image_index, rng_state)

        # print("shape check.")
        # for key,value in batch_results.items():
        #     print(key,value.shape)

        record.append(batch_results)
    # print("\n"*3)
    # print("-"*20)
    # print(record.record)
    # print("-"*20)
    # print("\n"*3)
    # record.stack_record()
    record.cat_record()
    # print("\n"*3)
    # print("-"*20)
    # print(record.record)
    # print("-"*20)
    print("\n" * 3)
    print("-" * 20)
    # df = pd.DataFrame().append(record.record,ignore_index=True)
    for key, val in record.record.items():
        vprint(key, val.shape)
    # vprint(df)
    vprint("-" * 20)
    vprint("\n" * 3)

    return record.record
Code Example #11
def test_davis_dataset():

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    nbatches = 20

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    cfg.nframes = 5
    # cfg.frame_size = None
    # cfg.frame_size = [256,256]
    cfg.frame_size = [96, 96]
    cfg.dataset.name = "davis"
    data, loaders = load_dataset(cfg, "dynamic")
    image_iter = iter(loaders.tr)
    sample = next(image_iter)
    print(len(loaders.tr))
    fn = "./davis_example.png"
    save_image(sample['dyn_noisy'], fn, normalize=True, vrange=(0., 1.))

    # -- save path for viz --
    save_dir = SAVE_PATH
    if not save_dir.exists(): save_dir.mkdir(parents=True)

    # -- sample data --
    for image_index in range(nbatches):

        # -- sample image --
        index = -1
        # while index != 3233:
        #     sample = next(image_iter)
        #     convert_keys(sample)
        #     index = sample['image_index'][0][0].item()

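        # two samples are drawn below; the first is discarded and only the second is used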
        sample = next(image_iter)
        sample = next(image_iter)
        # batch_dim0(sample)
        # convert_keys(sample)

        # -- extract info --
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow_est = sample['ref_flow']
        pix_gt = sample['ref_pix']
        index = sample['index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks
        shape_str = 'b k t h w two -> k b (h w) t two'
        pix_gt = rearrange(pix_gt, shape_str)[0]
        flow_est = rearrange(flow_est, shape_str)[0]

        # -- print shapes --
        print("-" * 50)
        for key, val in sample.items():
            if isinstance(val, list): continue
            print("{}: {}".format(key, val.shape))
        print("-" * 50)
        print(f"Image Index {index}")

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists(): image_dir.mkdir()

        #
        # -- Compute NNF to Ensure things are OKAY --
        #

        isize = edict({'h': h, 'w': w})
        # pad = cfg.patchsize//2 if cfg.patchsize > 1 else 1
        pad = cfg.nblocks // 2 + 1
        psize = edict({'h': h - 2 * pad, 'w': w - 2 * pad})
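        # PSNRs below are computed on a center crop of size psize to ignore alignment boundary effects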

        # flow_gt = rearrange(flow,'i 1 fm1 h w two -> i (h w) fm1 two')
        # pix_gt = flow_gt.clone()
        # pix_gt = flow_to_pix(flow_gt.clone(),nframes,isize=isize)
        def cc(image):
            return tvF.center_crop(image, (psize.h, psize.w))

        # -- align from [pix or flow] --
        pix_gt = pix_gt.type(torch.long)
        aligned_gt = align_from_pix(clean, pix_gt.clone(), 0)  #cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize).T[0]
        print(f"[Dataset Pix Alignment] PSNR: {psnr}")

        flow_gt = pix_to_flow(pix_gt)
        aligned_gt = align_from_flow(clean, flow_gt.clone(), 0, isize=isize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize).T[0]
        print(f"[Dataset Flow Alignment] PSNR: {psnr}")

        aligned_gt = align_from_flow(clean, flow_est.clone(), 0, isize=isize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize).T[0]
        print(f"[(est) Dataset Flow Alignment] PSNR: {psnr}")

        # aligned_gt = warp_burst_flow(clean, flow_global)
        # isize = edict({'h':h,'w':w})
        # flow_est = pix_to_flow_est(pix_gt)
        print(torch.stack([flow_gt, flow_est], -1))
        assert torch.sum((flow_est - flow_gt)**2).item() < 1e-8

        # -- compute the nnf again [for checking] --
        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
        # print(flow_gt.shape,flow_global.shape)

        # -- explore --
        # print("NFrames: ",nframes)

        print(pix_global.shape, pix_gt.shape)
        for t in range(nframes):
            delta = pix_global[:, :, t] != pix_gt[:, :, t]
            delta = delta.type(torch.float)
            delta = 100 * torch.mean(delta)
            print("[%d]: %2.3f" % (t, delta))
        # print(torch.sum(torch.abs(pix_global - pix_gt)))
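        # NOTE: index 41 addresses the frame axis; with cfg.nframes = 5 this indexing would raise an IndexError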
        print(pix_global[:, :, 41])
        print(pix_gt[:, :, 41])

        # print(torch.stack([pix_global,pix_gt],-1))
        # print(torch.where(pix_global!=pix_gt))
        # print(torch.sum((pix_global-pix_gt)**2))
        # print(torch.sum((pix_global!=pix_gt).type(torch.float)))

        agg = torch.stack([pix_global.ravel(), pix_gt.ravel()], -1)
        # print(agg)
        # print(agg.shape)
        # print(torch.sum((agg[:,0]!=agg[:,1]).type(torch.float)))
        # print(torch.where(agg[:,0]!=agg[:,1]))

        # -- create report --
        print(pix_global.shape, pix_gt.shape)
        print(pix_global.min(), pix_gt.min())
        print(pix_global.max(), pix_gt.max())
        print(type(pix_global), type(pix_gt))

        aligned_gt = align_from_pix(clean, pix_global, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize).T[0]
        print(f"[GT Alignment] PSNR: {psnr}")

        #
        # -- Save Images to Qualitative Inspect --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        print(cc(sclean).shape)
        fn = image_dir / "diff.png"
        save_image(cc(sclean) - cc(aligned_gt),
                   fn,
                   normalize=True,
                   vrange=None)

        fn = image_dir / "aligned_gt.png"
        save_image(cc(aligned_gt), fn, normalize=True, vrange=None)
        print(image_dir)

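        # only the first batch is inspected; exit after one iteration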
        return
Code Example #12
def main():

    #
    # -- init experiment --
    #

    cfg = edict()
    cfg.gpuid = 1
    cfg.noise_params = edict()
    cfg.noise_params.g = edict()
    # data = load_dataset(cfg)
    torch.manual_seed(143)  #131 = 80% vs 20%

    #
    # -- pick our noise --
    #

    # -- gaussian noise --
    # cfg.noise_type = 'g'
    # cfg.noise_params['g']['mean'] = 0.
    # cfg.noise_params['g']['stddev'] = 125.
    # cfg.noise_params.ntype = cfg.noise_type

    # -- poisson noise --
    cfg.noise_type = "pn"
    cfg.noise_params['pn'] = edict()
    cfg.noise_params['pn']['alpha'] = 1.0
    cfg.noise_params['pn']['std'] = 0.0
    cfg.noise_params.ntype = cfg.noise_type

    # -- low-light noise --
    # cfg.noise_type = "qis"
    # cfg.noise_params['qis'] = edict()
    # cfg.noise_params['qis']['alpha'] = 4.0
    # cfg.noise_params['qis']['readout'] = 0.0
    # cfg.noise_params['qis']['nbits'] = 3
    # cfg.noise_params['qis']['use_adc'] = True
    # cfg.noise_params.ntype = cfg.noise_type

    #
    # -- setup the dynamics --
    #

    cfg.nframes = 5
    cfg.frame_size = 350
    cfg.nblocks = 5
    T = cfg.nframes

    cfg.dynamic = edict()
    cfg.dynamic.frames = cfg.nframes
    cfg.dynamic.bool = True
    cfg.dynamic.ppf = 1
    cfg.dynamic.mode = "global"
    cfg.dynamic.random_eraser = False
    cfg.dynamic.frame_size = cfg.frame_size
    cfg.dynamic.total_pixels = cfg.dynamic.ppf * (cfg.nframes - 1)

    # -- setup noise and dynamics --
    noise_xform = get_noise_transform(cfg.noise_params, noise_only=True)

    def null(image):
        return image

    dynamics_xform = get_dynamic_transform(cfg.dynamic, null)

    # -- sample data --
    image_path = "./data/512-512-grayscale-image-Cameraman.png"
    image = Image.open(image_path).convert("RGB")
    image = image.crop((0, 0, cfg.frame_size, cfg.frame_size))
    clean, res, raw, flow = dynamics_xform(image)
    clean = clean[:, None]
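    # offset the clean frames by +0.5 before simulating noise; the clean data appears to be zero-centered and Poisson sampling expects non-negative intensities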
    burst = noise_xform(clean + 0.5)
    flow = flow[None, :]
    reference = repeat(clean[[T // 2]], '1 b c h w -> t b c h w', t=T)
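    # the reference burst repeats the center clean frame so aligned results can be compared frame-by-frame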
    print("Flow")
    print(flow)

    # -- our method --
    ref_frame = T // 2
    nblocks = cfg.nblocks
    method = "simple"
    noise_info = cfg.noise_params
    scores, aligned_simp, dacc_simp = lpas_search(burst, ref_frame, nblocks,
                                                  flow, method, clean,
                                                  noise_info)

    # -- split search --
    ref_frame = T // 2
    nblocks = cfg.nblocks
    method = "split"
    noise_info = cfg.noise_params
    scores, aligned_split, dacc_split = lpas_search(burst, ref_frame, nblocks,
                                                    flow, method, clean,
                                                    noise_info)

    # -- quantitative comparison --
    crop_size = 256
    image1, image2 = cc(aligned_simp, crop_size), cc(reference, crop_size)
    psnrs = images_to_psnrs(image1, image2)
    print("Aligned Simple Method: ", psnrs, dacc_simp.item())
    image1, image2 = cc(aligned_split, crop_size), cc(reference, crop_size)
    psnrs = images_to_psnrs(image1, image2)
    print("Aligned Split Method: ", psnrs, dacc_split.item())

    # -- compute noise 2 sim --
    # T,K = cfg.nframes,cfg.nframes
    # patchsize = 31
    # query = burst[[T//2]]
    # database = torch.cat([burst[:T//2],burst[T//2+1:]])
    # clean_db = clean
    # sim_outputs = compute_similar_bursts_analysis(cfg,query,database,clean_db,K,-1.,
    #                                               patchsize=patchsize,shuffle_k=False,
    #                                               kindex=None,only_middle=False,
    #                                               search_method="l2",db_level="burst")
    # sims,csims,wsims,b_dist,b_indx = sim_outputs

    # -- display images --
    print(aligned_simp.shape)
    print(aligned_split.shape)
    print_tensor_stats("aligned", aligned_simp)

    # print(csims.shape)
    save_image(burst, "lpas_demo_burst.png", [-0.5, 0.5])
    save_image(clean, "lpas_demo_clean.png")

    save_image(aligned_simp, "lpas_demo_aligned_simp.png")
    save_image(aligned_split, "lpas_demo_aligned_split.png")
    save_image(cc(aligned_simp, crop_size), "lpas_demo_aligned_simp_ccrop.png")
    save_image(cc(aligned_split, crop_size),
               "lpas_demo_aligned_split_ccrop.png")

    delta_full_simp = aligned_simp - aligned_simp[T // 2]
    delta_full_split = aligned_split - aligned_split[T // 2]
    save_image(delta_full_simp, "lpas_demo_aligned_full_delta_simp.png",
               [-0.5, 0.5])
    save_image(delta_full_split, "lpas_demo_aligned_full_delta_split.png",
               [-0.5, 0.5])

    delta_cc_simp = cc(delta_full_simp, crop_size)
    delta_cc_split = cc(delta_full_split, crop_size)
    save_image(delta_full_simp, "lpas_demo_aligned_cc_delta_simp.png")
    save_image(delta_full_split, "lpas_demo_aligned_cc_delta_split.png")

    top = 75
    size = 64
    simp = tvF.crop(aligned_simp, top, 200, size, size)
    split = tvF.crop(aligned_split, top, 200, size, size)
    print_tensor_stats("delta", simp)
    save_image(simp, "lpas_demo_aligned_simp_inspect.png")
    save_image(split, "lpas_demo_aligned_split_inspect.png")

    delta_simp = simp - simp[T // 2]
    delta_split = split - split[T // 2]
    print_tensor_stats("delta", delta_simp)
    save_image(delta_simp, "lpas_demo_aligned_simp_inspect_delta.png",
               [-1, 1.])
    save_image(delta_split, "lpas_demo_aligned_split_inspect_delta.png",
               [-1, 1.])
Code Example #13
def test_set8_dataset():

    # -- PID --
    pid = os.getpid()
    print(f"PID: {pid}")

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    nbatches = 3

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    cfg.nframes = 0
    cfg.frame_size = [256, 256]
    # cfg.frame_size = [128,128]
    # cfg.frame_size = [32,32]
    cfg.dataset.name = "set8"
    data, loaders = load_dataset(cfg, "dynamic")
    image_iter = iter(loaders.tr)
    sample = next(image_iter)
    print(len(loaders.tr))
    fn = "./set8_example.png"
    save_image(sample['dyn_noisy'], fn, normalize=True, vrange=(0., 1.))

    # -- save path for viz --
    save_dir = SAVE_PATH
    if not save_dir.exists(): save_dir.mkdir(parents=True)

    # -- sample data --
    for image_index in range(nbatches):

        # -- sample image --
        index = -1
        # while index != 3233:
        #     sample = next(image_iter)
        #     convert_keys(sample)
        #     index = sample['image_index'][0][0].item()

        sample = next(image_iter)
        # batch_dim0(sample)
        # convert_keys(sample)

        # -- extract info --
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow = sample['ref_flow']
        index = sample['index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks

        # -- print shapes --
        print("-" * 50)
        for key, val in sample.items():
            if isinstance(val, list): continue
            print("{}: {}".format(key, val.shape))
        print("-" * 50)

        print(f"Image Index {index}")

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists(): image_dir.mkdir()

        #
        # -- Compute NNF to Ensure things are OKAY --
        #

        isize = edict({'h': h, 'w': w})
        # pad = cfg.patchsize//2 if cfg.patchsize > 1 else 1
        pad = cfg.nblocks // 2 + 1
        psize = edict({'h': h - 2 * pad, 'w': w - 2 * pad})
        flow_gt = rearrange(flow, 'i 1 fm1 h w two -> i (h w) fm1 two')
        pix_gt = flow_to_pix(flow_gt.clone(), nframes, isize=isize)

        def cc(image):
            return tvF.center_crop(image, (psize.h, psize.w))

        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
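        # pix_global/flow_global are recomputed here only as a sanity check; the alignment below uses the dataset's pix_gt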
        # aligned_gt = warp_burst_flow(clean, flow_global)
        aligned_gt = align_from_pix(clean, pix_gt, cfg.nblocks)
        # isize = edict({'h':h,'w':w})
        # aligned_gt = align_from_flow(clean,flow_global,cfg.nblocks,isize=isize)
        # psnr = compute_aligned_psnr(sclean[[nframes//2]],clean[[nframes//2]],psize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize)
        print(f"[GT Alignment] PSNR: {psnr}")

        #
        # -- Save Images to Qualitative Inspect --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        print(cc(sclean).shape)
        fn = image_dir / "diff.png"
        save_image(cc(sclean) - cc(aligned_gt),
                   fn,
                   normalize=True,
                   vrange=None)

        fn = image_dir / "aligned_gt.png"
        save_image(cc(aligned_gt), fn, normalize=True, vrange=None)