def run_me():
    """Run the n2a NNF experiment mesh, caching one result per config.

    Clears any existing cache first, so every experiment re-executes.
    Relies on project helpers (`get_main_config`, `create_mesh`,
    `get_setup_fxn`, `execute_nnf_experiment`, `cache_io`) defined elsewhere.
    """
    # -- Load Default Config --
    cfg = get_main_config()
    cfg.gpuid = 0
    cfg.device = f"cuda:{cfg.gpuid}"
    # torch.cuda(device=cfg.gpuid)

    # -- data loader config --
    cfg.drop_last = {'tr': True, 'val': True, 'te': True}

    # -- Init Experiment Cache --
    cache_root = Path("./output/n2a/exp_cache/")
    cache_version = "1"
    cache = cache_io.ExpCache(cache_root, cache_version)
    # NOTE: clearing here means load_exp below always misses and every
    # experiment is re-run from scratch.
    cache.clear()

    # -- Load Experiment Mesh --
    mesh_version = "1"
    experiments, order = create_mesh(mesh_version)
    config_setup = get_setup_fxn(mesh_version)

    # -- Run Experiment --
    for config in tqdm(experiments):
        results = cache.load_exp(config)
        uuid = cache.get_uuid(config)
        if results is None:
            exp_cfg = config_setup(cfg, config)
            exp_cfg.uuid = uuid
            results = execute_nnf_experiment(exp_cfg)
            cache.save_exp(exp_cfg.uuid, config, results)
    records = cache.load_records(experiments)
def main():
    """Run the "quality_v_noisy" experiment grid and plot the results.

    Loads cached results when available (cache is NOT cleared here),
    executes missing experiments, then builds a quality plot from the
    flattened records. Depends on project helpers (`get_cfg_defaults`,
    `get_exp_cfgs`, `setup_exp_cfg`, `execute_experiment`,
    `create_quality_plot`, `cache_io`, `EXP_PATH`).
    """
    # -- settings --
    cfg = get_cfg_defaults()
    cfg.use_anscombe = True
    cfg.noise_params.ntype = 'g'  # Gaussian noise; std below
    cfg.noise_params.g.std = 25.
    cfg.nframes = 3
    cfg.num_workers = 0
    cfg.dynamic_info.nframes = cfg.nframes
    cfg.dynamic_info.ppf = 10
    cfg.nblocks = 3
    cfg.patchsize = 10
    cfg.gpuid = 1
    cfg.device = f"cuda:{cfg.gpuid}"

    # -- get exps --
    experiments, order, egrid = get_exp_cfgs()

    # -- setup cache --
    cache_name = "quality_v_noisy"
    cache_root = EXP_PATH / cache_name
    cache = cache_io.ExpCache(cache_root, cache_name)

    # -- Run Experiments --
    exp_cfgs = []
    for config in tqdm.tqdm(experiments, total=len(experiments)):
        results = cache.load_exp(config)
        uuid = cache.get_uuid(config)
        print(uuid)
        exp_cfg = setup_exp_cfg(cfg, config)
        exp_cfg.uuid = uuid
        exp_cfgs.append(exp_cfg)
        if results is None:
            results = execute_experiment(exp_cfg)
            print(results)
            cache.save_exp(exp_cfg.uuid, config, results)
    records = cache.load_flat_records(experiments)
    print(records)

    # -- init search methods --
    create_quality_plot(cfg, records)
def run_exp(exp_info):
    """Run one experiment set described by `exp_info` and plot its results.

    Parameters
    ----------
    exp_info : dict
        Must provide keys 'exec', 'plot', 'cache_name', 'config_name',
        'get_cfg_defaults', 'get_exp_cfgs', and 'setup_exp_cfg' — the
        callables/names that parameterize this driver.

    Clears the experiment cache, delegates execution to
    `run_experiment_set`, post-processes the flat records, and plots.
    """
    # -- Experiment Picker --
    execute_experiment = exp_info['exec']
    plot_experiment = exp_info['plot']
    cache_name = exp_info['cache_name']
    config_name = exp_info['config_name']
    get_cfg_defaults = exp_info['get_cfg_defaults']
    get_exp_cfgs = exp_info['get_exp_cfgs']
    setup_exp_cfg = exp_info['setup_exp_cfg']

    # -- Load Default Config --
    cfg = get_cfg_defaults()
    cfg.gpuid = 1
    cfg.device = f"cuda:{cfg.gpuid}"
    cfg.pid = os.getpid()
    # torch.cuda(device=cfg.gpuid)

    # -- Init Experiment Cache --
    cache_root = EXP_PATH / cache_name
    cache = cache_io.ExpCache(cache_root, cache_name)
    # NOTE: clearing forces a full re-run of every experiment below.
    cache.clear()

    # -- Load Experiment Mesh --
    experiments, order, exp_grids = get_exp_cfgs(config_name)

    # -- Run experiments --
    exec_exps = {'exec': execute_experiment, 'setup': setup_exp_cfg}
    run_experiment_set(cfg, cache, experiments, exec_exps)
    records = cache.load_flat_records(experiments)

    # -- g-75p0 and pn-20p0 -> {std:75,alpha:-1},{std:-1,alpha:20}, respectively --
    expand_noise_nums(records)

    # -- psnrs,epes_of,epes_nnf means --
    fields = ['psnrs', 'epes_of', 'epes_nnf']
    compute_field_means(records, fields)

    # -- Run Plots --
    plot_experiment(records, exp_grids)
def run_exp(exp_info):
    """Run an experiment set (cache version "_v6") and plot its results.

    Parameters
    ----------
    exp_info : dict
        Must provide keys 'exec', 'plot', 'cache_name', 'config_name',
        'get_cfg_defaults', 'get_exp_cfgs', and 'setup_exp_cfg'.

    Unlike the sibling driver, this variant runs the experiment loop
    inline (printing per-result shapes) and passes the assembled
    `exp_cfgs` list through to the plotting function.

    NOTE(review): this function shares the name `run_exp` with another
    definition in this file; if both live in one module, the later
    definition shadows the earlier — confirm the intended split.
    """
    # -- Experiment Picker --
    execute_experiment = exp_info['exec']
    plot_experiment = exp_info['plot']
    cache_name = exp_info['cache_name']
    config_name = exp_info['config_name']
    get_cfg_defaults = exp_info['get_cfg_defaults']
    get_exp_cfgs = exp_info['get_exp_cfgs']
    setup_exp_cfg = exp_info['setup_exp_cfg']

    # -- Load Default Config --
    cfg = get_cfg_defaults()
    cfg.gpuid = 0
    cfg.device = f"cuda:{cfg.gpuid}"
    torch.cuda.set_device(device=cfg.gpuid)

    # -- Init Experiment Cache --
    # "v5" has current results
    cache_name += "_v6"
    cache_root = EXP_PATH / cache_name
    cache = cache_io.ExpCache(cache_root, cache_name)
    # NOTE: clearing forces a full re-run of every experiment below.
    cache.clear()

    # -- Load Experiment Mesh --
    experiments, order, exp_grids = get_exp_cfgs(config_name)

    # -- Run Experiment --
    exp_cfgs = []
    for config in tqdm(experiments, total=len(experiments)):
        results = cache.load_exp(config)
        uuid = cache.get_uuid(config)
        print(uuid)
        exp_cfg = setup_exp_cfg(cfg, config)
        exp_cfg.uuid = uuid
        exp_cfgs.append(exp_cfg)
        if results is None:
            results = execute_experiment(exp_cfg)
            for key, val in results.items():
                print("r", key, val.shape)
            cache.save_exp(exp_cfg.uuid, config, results)
    records = cache.load_flat_records(experiments)
    # print(records)
    print(records.columns)

    # -- g-75p0 and pn-20p0 -> {std:75,alpha:-1},{std:-1,alpha:20}, respectively --
    expand_noise_nums(records)

    # -- psnrs,epes_of,epes_nnf means --
    fields = ['psnrs', 'epes_of', 'epes_nnf']
    compute_field_means(records, fields)

    # frecords = records[records['methods'].isin(['ave','est','split'])]
    # frecords = records[records['methods'].isin(['est','ave'])]
    # for img_index,igroup in frecords.groupby('image_index'):
    #     print(img_index)
    #     for nframes,fgroup in igroup.groupby('nframes'):
    #         print(nframes)
    #         print(fgroup[['std','patchsize','psnrs_mean','methods','random_seed']])
    # for elem in records[['std','bsname','psnrs_mean']].iterrows():
    #     print(elem)

    # -- Run Plots --
    plot_experiment(records, exp_grids, exp_cfgs)
def test_exps():
    """Smoke-test the cache_io.ExpCache constructor against a local path.

    NOTE(review): the original source was truncated after a bare
    `cache.` attribute access (a syntax error). Completed here as
    `cache.clear()` — the call the sibling drivers make right after
    constructing a cache — but the author's intent should be confirmed.
    """
    root = "./output/tests/cache_io/"
    version = "1.0"
    cache = cache_io.ExpCache(root, version)
    cache.clear()
def read_subburst_files(idir, fdir, sdir, split, isize, ps, nframes):
    """Build per-crop sub-burst path groups from the full frame listing.

    Reads all frames via `read_files`, then for every burst group takes
    each length-`nframes` window and each image crop, producing one
    "sgroup" entry (group name + "-<frame>-<h>-<w>") in `spaths`.
    Results are cached via `cache_io.ExpCache` under `idir/.cache/` and
    returned directly on a cache hit.

    Returns
    -------
    (spaths, out_nframes, all_eq)
        `spaths` maps 'images'/'flows'/'crops'/'vals'/'frame_ids' to
        per-sgroup dicts; `out_nframes` and `all_eq` come straight from
        `read_files`.
    """
    # -- try from cache --
    cache_root, cache_name = idir / ".cache/", "v1"
    cache = cache_io.ExpCache(cache_root, cache_name)
    config = create_cache_config(fdir, sdir, split, isize, ps, nframes)
    # cache.clear()
    results = cache.load_exp(config)
    if not (results is None):
        spaths = results['spaths']
        out_nframes = results['out_nframes']
        all_eq = results['all_eq']
        return spaths, out_nframes, all_eq
    uuid = cache.get_uuid(config)

    # -- we read all frames --
    # Temporarily disable validation while listing every frame (-1).
    global CHECK_VALID
    CHECK_VALID = False
    paths, out_nframes, all_eq = read_files(idir, fdir, sdir, split,
                                            isize, ps, -1)
    CHECK_VALID = True

    # -- init output var --
    spaths = edict({
        'images': {},
        'flows': {},
        'crops': {},
        'vals': {},
        'frame_ids': {}
    })

    # -- we take each subset of "nframes" from each burst --
    groups = list(paths['images'].keys())
    for group in groups:
        print(group)

        # -- take frame range for each step --
        tframes = len(paths['images'][group])
        for fstart in range(tframes - nframes):
            print(fstart)

            # -- primary logic does not depend on image patches --
            ref_frame = fstart + nframes // 2
            index = slice(fstart, fstart + nframes)
            ipaths = paths['images'][group][index]
            fpaths = paths['flows'][group][index]
            vpaths = paths['vals'][group][index]
            frame_ids = [fstart + i for i in range(nframes)]

            # -- take frame for each patch --
            icrops = read_icrops(ipaths, isize)
            ncrops = len(icrops)
            for crop_idx in range(ncrops):

                # -- modify the fpaths and vpaths --
                icrop = icrops[crop_idx]
                vpaths, fpaths = get_flow_paths(fdir, group, isize, ps,
                                                ipaths, frame_ids, icrop)

                # -- "sgroup" defines how many samples our dataset contains --
                start_frame, start_height, start_width = fstart, icrop[0], icrop[1]
                start_str = "-%d-%d-%d" % (start_frame, start_height, start_width)
                sgroup = group + start_str

                # -- fill in results --
                spaths['images'][sgroup] = ipaths
                spaths['flows'][sgroup] = fpaths
                spaths['vals'][sgroup] = vpaths
                spaths['frame_ids'][sgroup] = frame_ids
                spaths['crops'][sgroup] = icrop

    # -- save results to cache output --
    results = {'spaths': spaths, 'out_nframes': out_nframes, 'all_eq': all_eq}
    cache.save_exp(uuid, config, results)
    return spaths, out_nframes, all_eq