def setUp(self):
    """Create the product and experiment fixtures shared by the tests."""
    # How many trials each experiment-based test will run.
    self.trials = 1000
    # Two products with distinct price/quality so comparisons are non-trivial.
    self.test_product_a = prod.Product("a", price=2.00, quality=4.5)
    self.test_product_b = prod.Product("b", price=3.00, quality=4.9)
    # A single-product experiment and a two-product A/B experiment.
    self.experiment = exp.Experiment(self.test_product_a)
    self.ab_test = exp.Experiment(self.test_product_a, self.test_product_b)
def arq_test_shell(
        arg_list: list,
        tries: int,
        arq_mode: str,    # 'gbn' or 'sr'
        test_mode: str,   # 'time' or 'number'
        window_size=-1,
        probability=-1.0,
        session_seconds=-1,
        transfer_packs=-1) -> list:
    """Sweep one ARQ parameter over *arg_list*, averaging repeated runs.

    At most one of window_size / probability may be fixed; the values in
    arg_list fill whichever one was left at its sentinel default.

    Args:
        arg_list: values to sweep (window sizes or loss probabilities).
        tries: number of repetitions averaged for each swept value.
        arq_mode: ARQ scheme, 'gbn' or 'sr'.
        test_mode: 'number' accumulates calc_efficiency() over a fixed
            session length; 'time' accumulates calc_time() for a fixed
            packet count.
        window_size: fixed window size, or -1 to sweep it via arg_list.
        probability: fixed loss probability, or -1.0 to sweep it.
        session_seconds: session length used when test_mode == 'number'.
        transfer_packs: packet count used when test_mode == 'time'.

    Returns:
        A list of truncated integer averages, one per entry of arg_list.

    Raises:
        ValueError: if both window_size and probability are fixed, or if
            either mode string is unrecognized.
    """
    if window_size != -1 and probability != -1:
        raise ValueError("fix only one of window_size or probability")
    if test_mode not in ('time', 'number'):
        raise ValueError("test_mode must be 'time' or 'number'")
    if arq_mode not in ('gbn', 'sr'):
        raise ValueError("arq_mode must be 'gbn' or 'sr'")

    ws, prob = window_size, probability
    output_list = []
    for arg in arg_list:
        result = 0
        for _ in range(tries):  # repetition index itself is unused
            # The swept value fills whichever parameter kept its sentinel.
            if window_size == -1:
                ws = arg
            else:
                prob = arg
            # NOTE(review): 'number' mode pairs with seconds= /
            # calc_efficiency() and 'time' mode with transfer_number= /
            # calc_time(); preserved as-is — confirm against expr.Experiment.
            if test_mode == 'number':
                trial = expr.Experiment(arq_mode, ws, prob,
                                        seconds=session_seconds)
                result += trial.calc_efficiency()
            else:
                trial = expr.Experiment(arq_mode, ws, prob,
                                        transfer_number=transfer_packs)
                result += trial.calc_time()
        print('done', arg)
        # Truncated integer average, matching the original contract.
        result = int(result / tries)
        output_list.append(result)
    return output_list
def __init__(self, log_file, sim_file, exp_dir):
    """Bootstrap the simulation: logging, parameters, and helper objects.

    Construction order matters: logging first, then parameter stores,
    then the Loader/Physics/Noise helpers, then the Experiment /
    Sensitivity / Display objects, each of which receives ``self``.

    Args:
        log_file: path handed to lg.Log for run logging.
        sim_file: simulation input file (stored, read elsewhere).
        exp_dir: experiment input directory (stored, read elsewhere).
    """
    # Store experiment input file
    self.exp_dir = exp_dir
    self._sim_file = sim_file
    # Set up logging
    self.log = lg.Log(log_file)
    # Latest atm file
    self._atm_log = 'atm_log.txt'
    self._check_atm()
    # Store standard parameter values
    self._store_standard_params()
    self._store_output_units()
    # Build simulation-wide objects (Noise depends on Physics)
    self.log.log("Generating Simulation object")
    self.load = ld.Loader(self)
    self.phys = ph.Physics()
    self.noise = ns.Noise(self.phys)
    # Store parameter values
    self._store_param_dict()
    # Length of status bar
    self._bar_len = 100
    # Generate simulation objects; each takes this Simulation as context
    self.log.log("Generating Experiment object")
    self.exp = ex.Experiment(self)
    self.log.log("Generating Sensitivity object")
    self.sns = sn.Sensitivity(self)
    self.log.log("Generating Display object")
    self.dsp = dp.Display(self)
    # Output arrays, filled during later runs
    self.senses = []
    self.opt_pows = []
def vary(self):
    """Run parameter-vary simulation.

    First generates the "fiducial" experiment realizations and their
    sensitivities, then loops over the parameter sets in self._set_arr,
    adjusting each realization via self._vary_exp(). Results are
    concatenated into self.adj_sns and written out by self._save().
    """
    # Start by generating "fiducial" experiments
    tot_sims = (self._sim.param("nexp") *
                self._sim.param("ndet") *
                self._sim.param("nobs"))
    # FIX: corrected "Simulting" -> "Simulating" in the log message.
    self._log.out(("Simulating %d experiment realizations each with "
                   "%d detector realizations and %d sky realizations. "
                   "Total sims = %d"
                   % (self._sim.param("nexp"), self._sim.param("ndet"),
                      self._sim.param("nobs"), tot_sims)))
    self._exps = []
    self._sens = []
    # NOTE(review): the loop bound is self._nexp while the messages use
    # self._sim.param("nexp") — presumably equal; confirm upstream.
    for n in range(self._nexp):
        self._status(n, self._nexp)
        exp = ex.Experiment(self._sim)
        exp.evaluate()
        sns = self._sim.sns.sensitivity(exp)
        self._exps.append(exp)
        self._sens.append(sns)
    self._done()
    # Loop over parameter set and adjust sensitivities
    adj_sns = []
    tot_adjs = self._nexp * len(self._set_arr)
    self._log.out(
        ("Looping over %d parameter sets for %d realizations. "
         "Number of experiment realizations to adjust = %d"
         % (len(self._set_arr), self._sim.param("nexp"), tot_adjs)))
    for n, (exp, sens) in enumerate(zip(self._exps, self._sens)):
        adj_sns.append(self._vary_exp(exp, sens, n, tot_adjs))
    self._done()
    # Combine along the last axis and save experiment realizations
    self.adj_sns = np.concatenate(adj_sns, axis=-1)
    self._save()
    return
# Build one AdaptiveDiscretization agent per iteration.
agent_list_adap = [
    AdaptiveDiscretization(epLen, nEps, scaling) for _ in range(numIters)
]

# Experiment configuration (renamed from 'dict' to stop shadowing the builtin).
exp_settings = {
    'seed': 1,
    'epFreq': 1,
    'targetPath': './tmp.csv',
    'deBug': False,
    'nEps': nEps,
    'recFreq': 10,
    'numIters': numIters
}
exp = experiment.Experiment(env, agent_list_adap, exp_settings)
adap_fig = exp.run()
dt_adapt_data = exp.save_data()

# Mean reward of the final episode — computed once instead of twice.
final_ep_reward = (dt_adapt_data.groupby(
    ['episode']).mean().tail(1))['epReward'].iloc[0]
if final_ep_reward > max_reward_adapt:
    # New best scaling: remember the reward, scaling, data, and agents.
    max_reward_adapt = final_ep_reward
    opt_adapt_scaling = scaling
    dt_adapt = dt_adapt_data
    opt_adapt_agent_list = agent_list_adap

# RUNNING EXPERIMENT FOR EPSILON NET ALGORITHM
epsilon = (nEps * epLen)**(-1 / 4)
action_net = np.arange(start=0, stop=1, step=epsilon)
# Build one MultipleAmbulanceAgent per iteration.
for _ in range(numIters):
    agent_list_adap.append(
        multiple_ambulance_agent.MultipleAmbulanceAgent(
            epLen, nEps, scaling))
# NOTE(review): 'dict' shadows the builtin; rename in a later pass.
dict = {
    'seed': 1,
    'epFreq': 1,
    'targetPath': './tmp.csv',
    'deBug': False,
    'nEps': nEps,
    'recFreq': 10,
    'numIters': numIters
}
exp = experiment.Experiment(env1, agent_list_adap, dict)
adap_fig = exp.run()
dt_adapt_data = exp.save_data()
# NOTE(review): the final-episode mean reward below is computed twice with
# identical groupby chains — candidates for hoisting into one variable.
if (dt_adapt_data.groupby([
        'episode'
]).mean().tail(1))['epReward'].iloc[0] > max_reward_adapt:
    # New best scaling: remember the reward, scaling, data, and agents.
    max_reward_adapt = (dt_adapt_data.groupby(
        ['episode']).mean().tail(1))['epReward'].iloc[0]
    opt_adapt_scaling = scaling
    dt_adapt = dt_adapt_data
    opt_adapt_agent_list = agent_list_adap

# RUNNING EXPERIMENT FOR ADAPTIVE ALGORITHM - STOCHASTIC VERSION
agent_list_adap_stochastic = []
for _ in range(numIters):