def all_models(cls, env):
    """Yield one instance of this class per parameter combination.

    Parameter choices come from ``cls.param_choices(env)``; every
    combination produced by ``dict_product`` becomes one instance.
    When there are no choices at all, yield a single default instance.
    """
    choices = cls.param_choices(env)
    if not choices:
        # No tunable parameters: the default construction is the only model.
        yield cls()
        return
    for combo in dict_product(choices):
        yield cls(**combo)
def generate_configs(output_dir,
                     ngen=100000,
                     pixDepth=1,
                     pixWidth=1.5,
                     npix=5000,
                     spectra_path=None,
                     jobs=1):
    """Build generator configs, one per (background particle, job index).

    Scans ``../data/background_spectra`` for ``.root`` flux histograms,
    prints the total flux and per-particle priors, and returns the
    ``dict_product`` over a config template whose ``particle_meta`` entry
    enumerates every particle/job pair.

    Parameters
    ----------
    output_dir : str
        Directory prefix for each particle/job output path.
    ngen : int
        Number of events to generate (default 100000).
    pixDepth, pixWidth, npix :
        Detector geometry parameters copied verbatim into each config.
    spectra_path : str or None
        Directory the histograms will live at *run time*; defaults to the
        local histogram directory when None.
    jobs : int
        Number of jobs per particle.

    Raises
    ------
    AssertionError
        If no ``.root`` histograms are found.
    """
    hist_dir = get_dir('../data/background_spectra')
    hists = [
        osp.join(hist_dir, item)
        for item in os.listdir(hist_dir)
        if item.endswith('.root')
    ]
    # Validate up front: previously this assert ran only after fluxes had
    # been computed and a misleading "Total flux: 0.000e+00" was printed.
    assert len(hists), 'there is no data for cosmic background spectra!'

    particle_names = [osp.basename(hist)[:-len('.root')] for hist in hists]
    fluxes = [get_total_flux(hist) for hist in hists]
    # Hoisted: np.sum(fluxes) was recomputed three times.
    total_flux = np.sum(fluxes)
    print("Total flux: %.3e" % total_flux)
    priors = [flux / total_flux for flux in fluxes]
    print("Priors:\n %s" % '\n '.join(
        "%s: %.3e" % (particle, prior)
        for particle, prior in zip(particle_names, priors)
    ))

    if spectra_path is None:
        spectra_path = hist_dir
    # Histogram paths as they will be visible when the jobs actually run.
    runtime_hists = [
        osp.join(spectra_path, osp.basename(hist)) for hist in hists
    ]

    configs = dict(
        beamEnergy=-1,  # -1: energy drawn from the histogram, not a fixed beam
        particle_meta=[
            dict(
                output=osp.join(
                    output_dir,
                    '%s_%04d' % (osp.basename(hist)[:-len('.root')], job)),
                particle=particle_name,
                energyHisto=hist,
                job=job,
            )
            for particle_name, hist in zip(particle_names, runtime_hists)
            for job in range(jobs)
        ],
        ngen=ngen,
        pixDepth=pixDepth,
        pixWidth=pixWidth,
        npix=npix,
    )
    return dict_product(configs)
def conditions(self):
    """Yield one experiment-condition dict per parameter combination.

    ``dict_product`` expands list-valued entries (here only
    ``encourage_planning``) into the cross product; scalar entries are
    carried through unchanged.  The reward distribution (``mu``,
    ``sigma``) is then attached based on the planning manipulation.
    """
    spec = {
        'creation_date': str(datetime.now()),
        'clickDelay': 0,
        'moveDelay': 500,
        'encourage_planning': [False, True],
        # Disabled options kept for reference:
        # 'timeLimit': 240,
        # 'energy': 200,
        # 'moveEnergy': 2,
        # 'clickEnergy': 1,
        'depth': 3,
        'breadth': 2,
        'inspectCost': 1,
        'bonus_rate': 0.001
    }
    for cond in dict_product(spec):
        if cond['encourage_planning']:
            cond['mu'], cond['sigma'] = -4, 16
        else:
            cond['mu'], cond['sigma'] = 8, 3
        yield cond
def trials(self, params):
    """Yield the full ``dict_product`` expansion of *params*, repeated
    ``params['n_trial']`` times."""
    remaining = params['n_trial']
    while remaining > 0:
        yield from dict_product(params)
        remaining -= 1
# Build one agent config per hyper-parameter combination and write each to
# agent_configs/<i>.json.  `f` (the base-config file handle) is opened by the
# surrounding code.
BASE_CONFIG = json.load(f)
PARAMS = {
    "game": ["Humanoid-v2"],
    "mode": ["ppo"],
    "clip_eps": [1e32],
    "out_dir": ["results/ppo_noclip_humanoid/agents"],
    "norm_rewards": ["returns"],
    "initialization": ["xavier"],
    "anneal_lr": [False],
    "value_clipping": [True],
    "entropy_coeff": [0.005],
    # 40 identical learning rates -> 40 runs (seeds) of the same setting.
    "ppo_lr_adam": [2e-5] * 40,
    "clip_grad_norm": [0.5],
    "val_lr": [5e-5],
    "lambda": [0.85],
    "cpu": [True],
    "advanced_logging": [True]
}
# Each product entry overrides the base config.
all_configs = [{**BASE_CONFIG, **p} for p in dict_product(PARAMS)]

# Refuse to clobber the output of a previous run.
if any(os.path.isdir(d) for d in ("agent_configs/", "agents/")):
    raise ValueError(
        "Please delete the 'agent_configs/' and 'agents/' directories")
for d in ("agent_configs/", "agents/"):
    os.makedirs(d)

for i, config in enumerate(all_configs):
    with open(f"agent_configs/{i}.json", "w") as f:
        json.dump(config, f)
# print(f'exact solution computed in {t.elapsed} seconds') # agents['optimal'] = FunctionPolicy(pi) for name, pol in agents.items(): np.random.seed(45) df = pd.DataFrame(Agent(env, pol).run_many(5000, pbar=False)) df['name'] = name df['n_city'] = n_city df['max_sims'] = max_sims df = df.set_index(['n_city', 'max_sims', 'name']) df.to_csv(f'data/weather/sims/{n_city}_{max_sims}_{name}.pkl') print(f'wrote {fn}') from utils import dict_product params = list( dict_product({ 'n_city': [10, 20, 30], 'max_sims': [ 1, 2, 3, 4, ] })) print(f'running {len(params)} simulations') jobs = [delayed(simulate)(**prm) for prm in params] Parallel(min(len(jobs), 45))(jobs)