Example #1
def test_bsdBurst_dataset():

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    cfg.batch_size = 1
    cfg.frame_size = None
    cfg.dataset.name = "bsd_burst"

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- ensure NNF files are created for the frame-size grid --
    create_nnf_for_frame_size_grid(cfg)

    # -- save path for viz --
    save_dir = Path(
        f"{settings.ROOT_PATH}/output/tests/datasets/test_bsdBurst/")
    if not save_dir.exists(): save_dir.mkdir(parents=True)

    # -- load dataset --
    print("load image dataset.")
    data, loaders = load_dataset(cfg, "dynamic")
    print("num of bursts: ", len(loaders.tr))
    nbursts = len(data.tr)

    # -- ensure nnf created --
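    # (assumed) indexing each burst triggers __getitem__, which writes the NNF files if they are missing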
    for burst_index in tqdm.tqdm(range(nbursts)):
        data.tr[burst_index]

    # -- for image bursts --
    image_iter = iter(loaders.tr)
    for burst_index in range(nbursts):

        # -- sample image --
        sample = next(image_iter)
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow = sample['flow']
        index = sample['image_index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks
        print(f"Image Index {index}")

        print(noisy.shape)
        print(clean.shape)

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists(): image_dir.mkdir()

    fn = str(save_dir / "bsdBurst_example.png")
        save_image(sample['dyn_noisy'], fn, normalize=True, vrange=(0., 1.))
Example #2
def test_davis_dataset():

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    nbatches = 20

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    cfg.nframes = 5
    # cfg.frame_size = None
    # cfg.frame_size = [256,256]
    cfg.frame_size = [96, 96]
    cfg.dataset.name = "davis"
    data, loaders = load_dataset(cfg, "dynamic")
    image_iter = iter(loaders.tr)
    sample = next(image_iter)
    print(len(loaders.tr))
    fn = "./davis_example.png"
    save_image(sample['dyn_noisy'], fn, normalize=True, vrange=(0., 1.))

    # -- save path for viz --
    save_dir = SAVE_PATH
    if not save_dir.exists(): save_dir.mkdir(parents=True)

    # -- sample data --
    for image_index in range(nbatches):

        # -- sample image --
        index = -1
        # while index != 3233:
        #     sample = next(image_iter)
        #     convert_keys(sample)
        #     index = sample['image_index'][0][0].item()

        sample = next(image_iter)
        sample = next(image_iter)
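        # note: next() is called twice per loop iteration, so every other batch is skipped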
        # batch_dim0(sample)
        # convert_keys(sample)

        # -- extract info --
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow_est = sample['ref_flow']
        pix_gt = sample['ref_pix']
        index = sample['index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks
        shape_str = 'b k t h w two -> k b (h w) t two'
        pix_gt = rearrange(pix_gt, shape_str)[0]
        flow_est = rearrange(flow_est, shape_str)[0]

        # -- print shapes --
        print("-" * 50)
        for key, val in sample.items():
            if isinstance(val, list): continue
            print("{}: {}".format(key, val.shape))
        print("-" * 50)
        print(f"Image Index {index}")

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists(): image_dir.mkdir()

        #
        # -- Compute NNF to verify the alignment is correct --
        #

        isize = edict({'h': h, 'w': w})
        # pad = cfg.patchsize//2 if cfg.patchsize > 1 else 1
        pad = cfg.nblocks // 2 + 1
        psize = edict({'h': h - 2 * pad, 'w': w - 2 * pad})
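        # psize: interior region with a pad-wide border removed; used for the center crops
        # and (presumably) to restrict the PSNR computation to valid pixels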

        # flow_gt = rearrange(flow,'i 1 fm1 h w two -> i (h w) fm1 two')
        # pix_gt = flow_gt.clone()
        # pix_gt = flow_to_pix(flow_gt.clone(),nframes,isize=isize)
        def cc(image):
            return tvF.center_crop(image, (psize.h, psize.w))

        # -- align from [pix or flow] --
        pix_gt = pix_gt.type(torch.long)
        aligned_gt = align_from_pix(clean, pix_gt.clone(), 0)  # nblocks = 0 here instead of cfg.nblocks
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize).T[0]
        print(f"[Dataset Pix Alignment] PSNR: {psnr}")

        flow_gt = pix_to_flow(pix_gt)
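        # pix_to_flow maps absolute pixel coordinates back to relative flow (assumed inverse of flow_to_pix)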
        aligned_gt = align_from_flow(clean, flow_gt.clone(), 0, isize=isize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize).T[0]
        print(f"[Dataset Flow Alignment] PSNR: {psnr}")

        aligned_gt = align_from_flow(clean, flow_est.clone(), 0, isize=isize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize).T[0]
        print(f"[(est) Dataset Flow Alignment] PSNR: {psnr}")

        # aligned_gt = warp_burst_flow(clean, flow_global)
        # isize = edict({'h':h,'w':w})
        # flow_est = pix_to_flow_est(pix_gt)
        print(torch.stack([flow_gt, flow_est], -1))
        assert torch.sum((flow_est - flow_gt)**2).item() < 1e-8

        # -- compute the nnf again [for checking] --
        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
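        # the recomputed top-1 NNF matches (nnf_pix[..., 0, :]) give an independent check against the dataset-provided pix_gt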
        # print(flow_gt.shape,flow_global.shape)

        # -- explore --
        # print("NFrames: ",nframes)

        print(pix_global.shape, pix_gt.shape)
        for t in range(nframes):
            delta = pix_global[:, :, t] != pix_gt[:, :, t]
            delta = delta.type(torch.float)
            delta = 100 * torch.mean(delta)
            print("[%d]: %2.3f" % (t, delta))
        # print(torch.sum(torch.abs(pix_global - pix_gt)))
        print(pix_global[:, :, nframes // 2])
        print(pix_gt[:, :, nframes // 2])

        # print(torch.stack([pix_global,pix_gt],-1))
        # print(torch.where(pix_global!=pix_gt))
        # print(torch.sum((pix_global-pix_gt)**2))
        # print(torch.sum((pix_global!=pix_gt).type(torch.float)))

        agg = torch.stack([pix_global.ravel(), pix_gt.ravel()], -1)
        # print(agg)
        # print(agg.shape)
        # print(torch.sum((agg[:,0]!=agg[:,1]).type(torch.float)))
        # print(torch.where(agg[:,0]!=agg[:,1]))

        # -- create report --
        print(pix_global.shape, pix_gt.shape)
        print(pix_global.min(), pix_gt.min())
        print(pix_global.max(), pix_gt.max())
        print(type(pix_global), type(pix_gt))

        aligned_gt = align_from_pix(clean, pix_global, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize).T[0]
        print(f"[GT Alignment] PSNR: {psnr}")

        #
        # -- Save Images for Qualitative Inspection --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        print(cc(sclean).shape)
        fn = image_dir / "diff.png"
        save_image(cc(sclean) - cc(aligned_gt),
                   fn,
                   normalize=True,
                   vrange=None)

        fn = image_dir / "aligned_gt.png"
        save_image(cc(aligned_gt), fn, normalize=True, vrange=None)
        print(image_dir)

        return  # inspect only the first batch
Example #3
def test_burst_kitti_dataset():

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    nbatches = 20

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    print("load image dataset.")
    # cfg.dataset.name = "burst_with_flow_kitti"
    cfg.nframes = 21
    cfg.frame_size = [128, 128]
    # write_burst_with_flow_kitti_nnf(cfg)
    cfg.dataset.name = "burst_kitti"
    # write_burst_kitti_nnf(cfg)
    data, loaders = load_dataset(cfg, "dynamic")
    image_iter = iter(loaders.tr)
    sample = next(image_iter)
    print(len(loaders.tr))
    fn = "./kitti_example.png"
    save_image(sample['dyn_noisy'], fn, normalize=True, vrange=(0., 1.))

    image_iter = iter(loaders.tr)
    for i in range(1000):
        next(image_iter)
    image_iter = iter(loaders.te)
    for i in range(1000):
        next(image_iter)
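    # note: the two loops above only smoke-test iteration; next() raises StopIteration if a loader has fewer than 1000 batches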

    # data,loaders = load_image_dataset(cfg)

    # -- save path for viz --
    save_dir = Path(
        f"{settings.ROOT_PATH}/output/tests/datasets/test_global_dynamics/")
    if not save_dir.exists(): save_dir.mkdir(parents=True)

    # -- sample data --
    for image_index in range(nbatches):

        # -- sample image --
        index = -1
        # while index != 3233:
        #     sample = next(image_iter)
        #     convert_keys(sample)
        #     index = sample['image_index'][0][0].item()

        sample = next(image_iter)
        # batch_dim0(sample)
        convert_keys(sample)

        # -- extract info --
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow = sample['flow_gt']
        index = sample['image_index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks
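        # mid_pix: a flattened pixel index just past the image center, used below to spot-check single-pixel flow values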
        print(f"Image Index {index}")

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists(): image_dir.mkdir()

        #
        # -- Compute NNF to verify the alignment is correct --
        #

        isize = edict({'h': h, 'w': w})
        # pad = cfg.patchsize//2 if cfg.patchsize > 1 else 1
        pad = cfg.nblocks // 2 + 1
        psize = edict({'h': h - 2 * pad, 'w': w - 2 * pad})
        flow_gt = repeat(flow, 'i fm1 two -> i s fm1 two', s=h * w)
        pix_gt = flow_to_pix(flow_gt.clone(), nframes, isize=isize)
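        # the KITTI flow is one vector per frame, so it is broadcast to every pixel before converting to absolute coordinates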

        def cc(image):
            return tvF.center_crop(image, (psize.h, psize.w))

        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
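        # pix_global / flow_global: NNF matches of every frame against the middle frame (nframes // 2)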
        # aligned_gt = warp_burst_flow(clean, flow_global)
        aligned_gt = align_from_pix(clean, pix_gt, cfg.nblocks)
        # isize = edict({'h':h,'w':w})
        # aligned_gt = align_from_flow(clean,flow_global,cfg.nblocks,isize=isize)
        # psnr = compute_aligned_psnr(sclean[[nframes//2]],clean[[nframes//2]],psize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize)
        print(f"[GT Alignment] PSNR: {psnr}")

        # -- compute with nvidia's opencv optical flow --
        nd_clean = rearrange(clean.numpy(), 't 1 c h w -> t h w c')
        ref_t = nframes // 2
        frames, flows = [], []
        for t in range(nframes):
            if t == ref_t:
                # reference frame: identity warp with zero flow
                frames.append(torch.FloatTensor(nd_clean[t][None, :]))
                flows.append(torch.zeros((h, w, 2)))
                continue
            from_frame = 255. * cv2.cvtColor(nd_clean[ref_t],
                                             cv2.COLOR_RGB2GRAY)
            to_frame = 255. * cv2.cvtColor(nd_clean[t], cv2.COLOR_RGB2GRAY)
            _flow = cv2.calcOpticalFlowFarneback(to_frame, from_frame, None,
                                                 0.5, 3, 3, 10, 5, 1.2, 0)
            _flow = np.round(_flow).astype(np.float32)  # rounding drops sub-pixel precision, which hurts later use
            w_frame = warp_flow(nd_clean[t], -_flow)
            _flow[..., 0] = -_flow[..., 0]  # flip the x-component to match this repo's flow sign convention
            # print("w_frame.shape ",w_frame.shape)
            flows.append(torch.FloatTensor(_flow))
            frames.append(torch.FloatTensor(w_frame[None, :]))
        flows = torch.stack(flows)
        flows = rearrange(flows, 't h w two -> 1 (h w) t two')
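        # reshape to the (i, h*w, t, 2) layout used elsewhere for flow_to_pix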
        frames = torch.FloatTensor(np.stack(frames))
        frames = rearrange(frames, 't i h w c -> t i c h w')
        # print("flows.shape ",flows.shape)
        # print("frames.shape ",frames.shape)
        # print("sclean.shape ",sclean.shape)
        psnr = compute_aligned_psnr(sclean, frames, psize)
        print(f"[NVOF Alignment] PSNR: {psnr}")

        pix_nvof = flow_to_pix(flows.clone(), nframes, isize=isize)
        aligned_nvof = align_from_pix(clean, pix_nvof, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_nvof, psize)
        print(f"[NVOF Alignment v2] PSNR: {psnr}")

        psnr = compute_aligned_psnr(frames, aligned_nvof, psize)
        print(f"[NVOF Alignment Methods] PSNR: {psnr}")

        print(pix_global[0, mid_pix])
        print(pix_gt[0, mid_pix])
        print(flow_global[0, mid_pix])
        print(flow_gt[0, mid_pix])
        print(flows[0, mid_pix])

        #
        # -- Save Images for Qualitative Inspection --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        print(cc(sclean).shape)
        fn = image_dir / "diff.png"
        save_image(cc(sclean) - cc(aligned_gt),
                   fn,
                   normalize=True,
                   vrange=None)

        fn = image_dir / "aligned_gt.png"
        save_image(cc(aligned_gt), fn, normalize=True, vrange=None)

        # return

        # -- NNF Global --
        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
        # aligned_global = align_from_flow(clean,flow_gt,cfg.nblocks)
        aligned_global = align_from_pix(clean, pix_global, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_global, psize)
        print(f"[NNF Global] PSNR: {psnr}")

        # -- NNF Local --
        iterations, K, subsizes = 0, 1, []
        optim = AlignOptimizer("v3")
        score_fxn_ave = get_score_function("ave")
        eval_ave = EvalBlockScores(score_fxn_ave, "ave", cfg.patchsize, 256,
                                   None)
        flow_local = optim.run(clean, cfg.patchsize, eval_ave, cfg.nblocks,
                               iterations, subsizes, K)
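        # flow_local: alignment estimated by AlignOptimizer with the "ave" score function (presumably a patch-average score)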
        pix_local = flow_to_pix(flow_local.clone(), nframes, isize=isize)
        # aligned_local = align_from_flow(clean,flow_gt,cfg.nblocks)
        aligned_local = align_from_pix(clean, pix_local, cfg.nblocks)
        psnr = compute_aligned_psnr(sclean, aligned_local, psize)
        print(f"[NNF Local] PSNR: {psnr}")

        # -- remove boundary from pix --
        pixes = {'gt': pix_gt, 'global': pix_global, 'local': pix_local}
        for field, pix in pixes.items():
            pix_img = rearrange(pix, 'i (h w) t two -> (i t) two h w', h=h)
            pix_cc = cc(pix_img)
            pixes[field] = pix_cc
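        # each pix map is now image-shaped with the search boundary cropped away, so the diffs below compare only valid pixels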

        # -- pairwise diffs --
        field2 = "gt"
        for field1 in pixes.keys():
            if field1 == field2: continue
            delta = pixes[field1] - pixes[field2]
            delta = delta.type(torch.float)
            delta_fn = image_dir / f"delta_{field1}_{field2}.png"
            save_image(delta, delta_fn, normalize=True, vrange=None)
        print(pix_gt[0, mid_pix])
        print(pix_global[0, mid_pix])
        print(pix_local[0, mid_pix])

        print(flow_gt[0, mid_pix])
        print(flow_global[0, mid_pix])
        print(flow_local[0, mid_pix])

        #
        # -- Save Images for Qualitative Inspection --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        fn = image_dir / "aligned_global.png"
        save_image(cc(aligned_global), fn, normalize=True, vrange=None)

        fn = image_dir / "aligned_local.png"
        save_image(cc(aligned_local), fn, normalize=True, vrange=None)
Example #4
def test_set8_dataset():

    # -- PID --
    pid = os.getpid()
    print(f"PID: {pid}")

    # -- run exp --
    cfg = get_cfg_defaults()
    cfg.random_seed = 123
    nbatches = 3

    # -- set random seed --
    set_seed(cfg.random_seed)

    # -- load dataset --
    cfg.nframes = 0
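    # (assumed) nframes = 0 means "use every frame of each sequence" for this dataset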
    cfg.frame_size = [256, 256]
    # cfg.frame_size = [128,128]
    # cfg.frame_size = [32,32]
    cfg.dataset.name = "set8"
    data, loaders = load_dataset(cfg, "dynamic")
    image_iter = iter(loaders.tr)
    sample = next(image_iter)
    print(len(loaders.tr))
    fn = "./set8_example.png"
    save_image(sample['dyn_noisy'], fn, normalize=True, vrange=(0., 1.))

    # -- save path for viz --
    save_dir = SAVE_PATH
    if not save_dir.exists(): save_dir.mkdir(parents=True)

    # -- sample data --
    for image_index in range(nbatches):

        # -- sample image --
        index = -1
        # while index != 3233:
        #     sample = next(image_iter)
        #     convert_keys(sample)
        #     index = sample['image_index'][0][0].item()

        sample = next(image_iter)
        # batch_dim0(sample)
        # convert_keys(sample)

        # -- extract info --
        noisy = sample['dyn_noisy']
        clean = sample['dyn_clean']
        snoisy = sample['static_noisy']
        sclean = sample['static_clean']
        flow = sample['ref_flow']
        index = sample['index'][0][0].item()
        nframes, nimages, c, h, w = noisy.shape
        mid_pix = h * w // 2 + 2 * cfg.nblocks

        # -- print shapes --
        print("-" * 50)
        for key, val in sample.items():
            if isinstance(val, list): continue
            print("{}: {}".format(key, val.shape))
        print("-" * 50)

        print(f"Image Index {index}")

        # -- io info --
        image_dir = save_dir / f"index{index}/"
        if not image_dir.exists(): image_dir.mkdir()

        #
        # -- Compute NNF to verify the alignment is correct --
        #

        isize = edict({'h': h, 'w': w})
        # pad = cfg.patchsize//2 if cfg.patchsize > 1 else 1
        pad = cfg.nblocks // 2 + 1
        psize = edict({'h': h - 2 * pad, 'w': w - 2 * pad})
        flow_gt = rearrange(flow, 'i 1 fm1 h w two -> i (h w) fm1 two')
        pix_gt = flow_to_pix(flow_gt.clone(), nframes, isize=isize)
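        # flow_to_pix converts the per-pixel flow into absolute coordinates for align_from_pix below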

        def cc(image):
            return tvF.center_crop(image, (psize.h, psize.w))

        nnf_vals, nnf_pix = compute_burst_nnf(clean, nframes // 2,
                                              cfg.patchsize)
        shape_str = 't b h w two -> b (h w) t two'
        pix_global = torch.LongTensor(rearrange(nnf_pix[..., 0, :], shape_str))
        flow_global = pix_to_flow(pix_global.clone())
        # aligned_gt = warp_burst_flow(clean, flow_global)
        aligned_gt = align_from_pix(clean, pix_gt, cfg.nblocks)
        # isize = edict({'h':h,'w':w})
        # aligned_gt = align_from_flow(clean,flow_global,cfg.nblocks,isize=isize)
        # psnr = compute_aligned_psnr(sclean[[nframes//2]],clean[[nframes//2]],psize)
        psnr = compute_aligned_psnr(sclean, aligned_gt, psize)
        print(f"[GT Alignment] PSNR: {psnr}")

        #
        # -- Save Images for Qualitative Inspection --
        #

        fn = image_dir / "noisy.png"
        save_image(cc(noisy), fn, normalize=True, vrange=None)

        fn = image_dir / "clean.png"
        save_image(cc(clean), fn, normalize=True, vrange=None)

        print(cc(sclean).shape)
        fn = image_dir / "diff.png"
        save_image(cc(sclean) - cc(aligned_gt),
                   fn,
                   normalize=True,
                   vrange=None)

        fn = image_dir / "aligned_gt.png"
        save_image(cc(aligned_gt), fn, normalize=True, vrange=None)