Example #1
def main():

    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = True
    cfg.noise_params.ntype = 'g'
    cfg.noise_params.g.std = 25.
    cfg.nframes = 3
    cfg.num_workers = 0
    cfg.dynamic_info.nframes = cfg.nframes
    cfg.dynamic_info.ppf = 10
    cfg.nblocks = 3
    cfg.patchsize = 10
    cfg.gpuid = 1
    cfg.device = f"cuda:{cfg.gpuid}"

    # -- get exps -- 
    experiments, order, egrid = get_exp_cfgs()

    # -- setup cache --
    cache_name = "quality_v_noisy"
    cache_root = EXP_PATH / cache_name
    cache = cache_io.ExpCache(cache_root, cache_name)

    # -- Run Experiments --
    exp_cfgs = []
    for config in tqdm.tqdm(experiments, total=len(experiments)):
        results = cache.load_exp(config)
        uuid = cache.get_uuid(config)
        print(uuid)
        exp_cfg = setup_exp_cfg(cfg, config)
        exp_cfg.uuid = uuid
        exp_cfgs.append(exp_cfg)
        if results is None:
            results = execute_experiment(exp_cfg)
            print(results)
            cache.save_exp(exp_cfg.uuid, config, results)
    records = cache.load_flat_records(experiments)
    print(records)

    # -- init search methods --
    create_quality_plot(cfg, records)
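
Note: `cache.load_exp` / `cache.save_exp` above implement a load-or-compute pattern keyed by a per-config UUID. A minimal, self-contained sketch of the same pattern with a hypothetical pickle-backed cache (illustrative only, not the actual `cache_io.ExpCache` API):

import pickle
from pathlib import Path

def load_or_run(cache_dir, uuid, config, run_fxn):
    # hypothetical helper illustrating the load-or-compute pattern
    path = Path(cache_dir) / f"{uuid}.pkl"
    if path.exists():
        with path.open("rb") as f:
            return pickle.load(f)
    results = run_fxn(config)  # only executed on a cache miss
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("wb") as f:
        pickle.dump(results, f)
    return results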
Example #2
def main():
    seed = 234
    np.random.seed(seed)
    torch.manual_seed(seed)

    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = True
    cfg.noise_params.ntype = 'g'
    cfg.noise_params.g.std = 25.
    cfg.nframes = 3
    cfg.num_workers = 0
    cfg.dynamic_info.nframes = cfg.nframes
    cfg.dynamic_info.ppf = 10
    cfg.nblocks = 3
    cfg.patchsize = 10
    cfg.gpuid = 1
    cfg.device = f"cuda:{cfg.gpuid}"

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    train_iter = iter(loaders.tr)

    # -- fetch sample --
    sample = next(train_iter)
    sample_to_cuda(sample)

    # -- unpack data --
    noisy, clean = sample['noisy'], sample['burst']

    # -- save ave image --
    save_image(torch.mean(noisy[:, 0], dim=0),
               SAVE_DIR / "bootstrap_noisy_ave.png")

    # -- format for plots --
    print("noisy.shape", noisy.shape)
    noisy = rearrange(noisy[:, 0], 't c h w -> t h w c')
    clean = rearrange(clean[:, 0], 't c h w -> t h w c')

    plot_bootstrapping(noisy, clean)
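
Note: `sample_to_cuda` is not defined in this snippet; a minimal sketch, assuming it simply moves every tensor in the sample dict onto the GPU in place:

import torch

def sample_to_cuda(sample, device="cuda"):
    # hypothetical stand-in for the project's helper
    for key, value in sample.items():
        if torch.is_tensor(value):
            sample[key] = value.to(device, non_blocking=True)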
Example #3
if __name__ == '__main__':
    DEVICE = torch.device('cuda')

    parser = argparse.ArgumentParser()
    parser.set_defaults(entry=lambda cmd_args: parser.print_help())
    parser.add_argument('--entry', type=str, default="train")
    parser.add_argument('--exp-config', type=str, default="")
    parser.add_argument('--model-path', type=str, default="")
    parser.add_argument('--record-file', type=str, default="")

    cmd_args = parser.parse_args()

    if cmd_args.entry == "train":
        assert cmd_args.exp_config != ""

        _cfg = get_cfg_defaults()
        _cfg.merge_from_file(cmd_args.exp_config)
        if _cfg.EXP.EXP_ID == "":
            _cfg.EXP.EXP_ID = str(datetime.now())[:-7].replace(' ', '-')
        _cfg.freeze()
        print(_cfg)

        torch.manual_seed(_cfg.EXP.SEED)
        np.random.seed(_cfg.EXP.SEED)
        random.seed(_cfg.EXP.SEED)

        entry_train(_cfg, cmd_args.record_file)

    elif cmd_args.entry == "test":
        assert cmd_args.exp_config != ""
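
Note: `get_cfg_defaults` is defined elsewhere in the project; a minimal sketch of the usual yacs pattern it follows, limited to the fields this snippet touches (illustrative, not the project's full schema):

from yacs.config import CfgNode as CN

_C = CN()
_C.EXP = CN()
_C.EXP.EXP_ID = ""
_C.EXP.SEED = 0

def get_cfg_defaults():
    # return a clone so callers can merge and freeze without side effects
    return _C.clone()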
Example #4
def run_with_seed(seed):

    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = False
    cfg.noise_params.ntype = 'g'
    cfg.noise_params.g.std = 10.
    cfg.nframes = 5
    cfg.patchsize = 11

    # -- seeds --
    cfg.seed = seed
    # cfg.seed = 123 # sky of a forest
    # cfg.seed = 345 # handrail and stairs
    # cfg.seed = 567 # cloudy blue sky

    # -- set seed --
    set_seed(cfg.seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    train_iter = iter(loaders.tr)

    # -- fetch sample --
    sample = next(train_iter)
    sample_to_cuda(sample)

    # -- unpack data --
    noisy, clean = sample['noisy'], sample['burst']
    nframes, nimages, ncolors, H, W = noisy.shape
    isize = edict({'h': H, 'w': W})

    # -- boxes for plotting --
    boxes = edict()
    aligned = edict()

    # -- compute clean nnf --
    vprint("[start] clean nnf.")
    align_fxn = get_align_method(cfg, "l2_global")
    aligned.gt, flow = align_fxn(clean, None, None)
    boxes.gt = boxes_from_flow(flow, H, W)
    vprint("[done] clean nnf.")

    # -- compute nnf --
    vprint("[start] global nnf.")
    align_fxn = get_align_method(cfg, "l2_global")
    _, flow = align_fxn(noisy, None, None)
    aligned.global_l2 = align_from_flow(clean, flow, cfg.nblocks, isize=isize)
    boxes.global_l2 = boxes_from_flow(flow, H, W)
    vprint("[done] global nnf.")

    # -- compute local nnf --
    vprint("[start] local nnf.")
    align_fxn = get_align_method(cfg, "l2_local")
    _, flow = align_fxn(noisy, None, None)
    aligned.local_l2 = align_from_flow(clean, flow, cfg.nblocks, isize=isize)
    boxes.local_l2 = boxes_from_flow(flow, H, W)
    vprint("[done] local nnf.")

    # -- compute proposed score --
    vprint("[start] bootstrapping.")
    align_fxn = get_align_method(cfg, "bs_local_v2")
    _, flow = align_fxn(noisy, None, None)
    aligned.local_bs = align_from_flow(clean, flow, cfg.nblocks, isize=isize)
    boxes.local_bs = boxes_from_flow(flow, H, W)
    vprint("[done] bootstrapping.")

    # -- reshape to image --
    noisy = rearrange(noisy, 't b c h w -> t b h w c')
    clean = rearrange(clean, 't b c h w -> t b h w c')

    # -- normalize to [0,1] --
    noisy -= noisy.min()
    clean -= clean.min()
    noisy /= noisy.max()
    clean /= clean.max()

    # -- clamp to [0,1] --
    # noisy = noisy.clamp(0,1)
    # clean = clean.clamp(0,1)

    # print_tensor_stats("noisy",noisy)
    # print_tensor_stats("clean",clean)

    # -- cuda to cpu --
    noisy = noisy.cpu()
    clean = clean.cpu()
    for field in boxes.keys():
        boxes[field] = boxes[field].cpu().numpy()

    # -- plot boxes for middle pix --
    ref_pix = edict({'x': H // 2, 'y': W // 2})
    field = 'global_l2'
    plot_boxes(noisy, clean, aligned, field, boxes, ref_pix, cfg.patchsize,
               seed)
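
Note: `set_seed` and `vprint` are external helpers; a plausible sketch, assuming `set_seed` seeds Python, NumPy, and PyTorch from one value and `vprint` is a verbosity-gated print:

import random
import numpy as np
import torch

VERBOSE = True

def set_seed(seed):
    # hypothetical helper; the project's version may differ
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

def vprint(*args, **kwargs):
    # hypothetical helper gating the "[start]/[done]" status messages
    if VERBOSE:
        print(*args, **kwargs)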
Example #5
def run_with_seed(seed):

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #             Settings
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = False
    cfg.noise_params.ntype = 'g'
    cfg.noise_params.g.std = 25.
    cfg.nframes = 3
    cfg.dynamic_info.nframes = cfg.nframes
    cfg.nblocks = 3
    cfg.patchsize = 11
    cfg.gpuid = 1
    cfg.device = f"cuda:{cfg.gpuid}"

    # -- seeds --
    cfg.seed = seed
    # cfg.seed = 123 # sky of a forest
    # cfg.seed = 345 # handrail and stairs
    # cfg.seed = 567 # cloudy blue sky

    # -- set seed --
    set_seed(cfg.seed)

    # -- load dataset --
    data, loaders = load_image_dataset(cfg)
    train_iter = iter(loaders.tr)

    # -- fetch sample --
    sample = next(train_iter)
    sample_to_cuda(sample)

    # -- unpack data --
    noisy, clean = sample['noisy'], sample['burst']
    nframes, nimages, ncolors, H, W = noisy.shape
    isize = edict({'h': H, 'w': W})

    # -- setup results --
    scores = edict()
    scores.noisy = edict()
    scores.clean = edict()
    blocks = edict()
    blocks.noisy = edict()
    blocks.clean = edict()

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #        Setup For Searches
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    # -- tile image to patches --
    pad = 2 * (cfg.nblocks // 2)
    h, w = cfg.patchsize + pad, cfg.patchsize + pad
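    # (assumption) the extra 2 * (nblocks // 2) pixels leave a (nblocks // 2)
    # border on each side of the patch, so every candidate block shift in the
    # nblocks-wide search grid stays inside the tile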

    noisy_patches = tile_patches(noisy, cfg.patchsize + pad).pix
    noisy_patches = rearrange(noisy_patches,
                              'b t s (h w c) -> b s t c h w',
                              h=h,
                              w=w)
    nimages, npix, nframes, c, psH, psW = noisy_patches.shape

    clean_patches = tile_patches(clean, cfg.patchsize + pad).pix
    clean_patches = rearrange(clean_patches,
                              'b t s (h w c) -> b s t c h w',
                              h=h,
                              w=w)
    nimages, npix, nframes, c, psH, psW = clean_patches.shape

    masks = torch.ones(nimages, npix, nframes, c, psH, psW).to(cfg.device)

    # -- create constants --
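    # frames lists every non-reference frame; the middle frame (nframes // 2)
    # is the reference and is excluded from the search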
    frames = np.r_[np.arange(cfg.nframes // 2),
                   np.arange(cfg.nframes // 2 + 1, cfg.nframes)]
    frames = repeat(frames, 'z -> i s z', i=nimages, s=npix)
    brange = exh_block_range(nimages, npix, cfg.nframes, cfg.nblocks)
    curr_blocks = init_optim_block(nimages, npix, cfg.nframes, cfg.nblocks)
    srch_blocks = get_search_blocks(frames, brange, curr_blocks, cfg.device)
    np_srch_blocks = torch_to_numpy(srch_blocks[0])
    S = len(srch_blocks[0, 0])

    # -- create constants --
    frames_pair = np.array([0])
    frames = repeat(frames_pair, 'z -> i s z', i=nimages, s=npix)
    brange = exh_block_range(nimages, npix, cfg.nframes, cfg.nblocks)
    curr_blocks_pair = init_optim_block(nimages, npix, cfg.nframes,
                                        cfg.nblocks)
    srch_blocks_pair = get_search_blocks(frames, brange, curr_blocks_pair,
                                         cfg.device)
    S_pair = len(srch_blocks_pair[0, 0])

    # -- encode blocks --
    single_search_block = srch_blocks[0, 0].cpu().numpy()
    block_strings = search_blocks_to_str(single_search_block)
    labels = search_blocks_to_labels(single_search_block, block_strings)

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #        Execute Searches
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    #
    # --- run PAIRED split search ---
    #

    ave_fxn = get_score_function("ave")
    block_batchsize = 128
    evaluator = EvalBlockScores(ave_fxn, "ave", cfg.patchsize, block_batchsize,
                                None)
    get_topK = evaluator.compute_topK_scores

    # -- a) run clean --
    clean_scores, clean_blocks = get_topK(clean_patches, masks,
                                          srch_blocks_pair, cfg.nblocks,
                                          S_pair)
    scores_full = torch_to_numpy(clean_scores[0])
    blocks_full = torch_to_numpy(clean_blocks[0])

    # -- b) tile results to full blocks --
    scores_full, blocks_full = tile_pair_to_full(scores_full, blocks_full,
                                                 np_srch_blocks, frames_pair,
                                                 cfg.nframes, cfg.nblocks)
    scores.clean.ave = scores_full
    blocks.clean.ave = batch_search_blocks_to_labels(blocks_full,
                                                     block_strings)

    # -- a) run noisy --
    noisy_scores, noisy_blocks = get_topK(noisy_patches, masks,
                                          srch_blocks_pair, cfg.nblocks,
                                          S_pair)
    scores_full = torch_to_numpy(noisy_scores[0])
    blocks_full = torch_to_numpy(noisy_blocks[0])

    # -- b) tile results to full blocks --
    scores_full, blocks_full = tile_pair_to_full(scores_full, blocks_full,
                                                 np_srch_blocks, frames_pair,
                                                 cfg.nframes, cfg.nblocks)
    scores.noisy.ave = scores_full
    blocks.noisy.ave = batch_search_blocks_to_labels(blocks_full,
                                                     block_strings)

    #
    # --- run FULL split search ---
    #

    ave_fxn = get_score_function("ave")
    block_batchsize = 128
    evaluator = EvalBlockScores(ave_fxn, "ave", cfg.patchsize, block_batchsize,
                                None)
    get_topK = evaluator.compute_topK_scores

    # -- run clean --
    clean_scores, clean_blocks = get_topK(clean_patches, masks, srch_blocks,
                                          cfg.nblocks, S)

    clean_scores = torch_to_numpy(clean_scores)
    scores.clean.full_ave = clean_scores[0]

    clean_blocks = torch_to_numpy(clean_blocks)
    batch_blocks = clean_blocks[0, :, :, :]
    blocks.clean.full_ave = batch_search_blocks_to_labels(
        batch_blocks, block_strings)

    # -- run noisy --
    noisy_scores, noisy_blocks = get_topK(noisy_patches, masks, srch_blocks,
                                          cfg.nblocks, S)

    noisy_scores = torch_to_numpy(noisy_scores)
    scores.noisy.full_ave = noisy_scores[0]

    noisy_blocks = torch_to_numpy(noisy_blocks)
    batch_blocks = noisy_blocks[0, :, :, :]
    blocks.noisy.full_ave = batch_search_blocks_to_labels(
        batch_blocks, block_strings)

    #
    # --- run bootstrapping ---
    #

    bs_fxn = get_score_function("bootstrapping_mod2")
    block_batchsize = 32
    evaluator = EvalBlockScores(bs_fxn, "bs_mod2", cfg.patchsize,
                                block_batchsize, None)
    get_topK = evaluator.compute_topK_scores

    # -- run noisy --
    noisy_scores, noisy_blocks = get_topK(noisy_patches, masks, srch_blocks,
                                          cfg.nblocks, S)

    noisy_scores = torch_to_numpy(noisy_scores)
    scores.noisy.bs = noisy_scores[0]

    noisy_blocks = torch_to_numpy(noisy_blocks)
    batch_blocks = noisy_blocks[0, :, :, :]
    blocks.noisy.bs = batch_search_blocks_to_labels(batch_blocks,
                                                    block_strings)

    # -- run clean --
    clean_scores, clean_blocks = get_topK(clean_patches, masks, srch_blocks,
                                          cfg.nblocks, S)

    clean_scores = torch_to_numpy(clean_scores)
    scores.clean.bs = clean_scores[0]

    clean_blocks = torch_to_numpy(clean_blocks)
    batch_blocks = clean_blocks[0, :, :, :]
    blocks.clean.bs = batch_search_blocks_to_labels(batch_blocks,
                                                    block_strings)

    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    #
    #        Plot Results
    #
    # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-

    print("Plotting Results.")
    plot_landscape(scores, blocks, seed)
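
Note: `torch_to_numpy`, used throughout this example, is likewise an external helper; a minimal sketch, assuming it just detaches a tensor and returns a NumPy copy on the CPU:

import torch

def torch_to_numpy(tensor):
    # hypothetical stand-in for the project's conversion helper
    return tensor.detach().cpu().numpy()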