def test_basic_state_update():
    initial_state = {
        'a': 0
    }
    state_update_blocks = [
        {
            'policies': {},
            'variables': {
                'a': update_a
            }
        },
    ]
    params = {}
    TIMESTEPS = 10
    RUNS = 1

    model = Model(initial_state=initial_state, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulation)

    result = experiment.run()

    assert result[0]['a'] == 0
    assert result[1]['a'] == 1
    assert result[-2]['a'] == 9
    assert result[-1]['a'] == 10
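
# For reference, a minimal sketch of the `update_a` helper assumed above (the
# real definition lives in the shared test fixtures). radCAD state update
# functions receive (params, substep, state_history, previous_state,
# policy_input) and return a (variable_name, new_value) tuple:
def update_a(params, substep, state_history, previous_state, policy_input):
    return 'a', previous_state['a'] + 1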
def test_regression_policy_names():
    initial_state = {'a': 0}
    state_update_blocks = [
        {
            'policies': {
                'policy': policy_a
            },
            'variables': {
                'a': update_a
            }
        },
    ]
    params = {'param_a': [0]}
    TIMESTEPS = 10
    RUNS = 1

    model = Model(initial_state=initial_state, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulation)

    assert isinstance(experiment.run(), list)
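
# A sketch of the `policy_a` helper assumed above. Policy functions receive
# (params, substep, state_history, previous_state) and return a dict of
# policy signals; the signal name and value here are illustrative assumptions:
def policy_a(params, substep, state_history, previous_state):
    return {'signal_a': params['param_a']}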
def test_backend_single_process():
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = basic.params
    TIMESTEPS = 10  # basic.TIMESTEPS
    RUNS = basic.RUNS

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulation)

    processes = 1

    experiment.engine = Engine(backend=Backend.MULTIPROCESSING, processes=processes)
    df_multiprocessing = pd.DataFrame(experiment.run())

    if _has_ray_extension:
        experiment.engine = Engine(backend=Backend.RAY, processes=processes)
        df_ray = pd.DataFrame(experiment.run())

    experiment.engine = Engine(backend=Backend.PATHOS, processes=processes)
    df_pathos = pd.DataFrame(experiment.run())

    experiment.engine = Engine(backend=Backend.SINGLE_PROCESS)
    df_single_process = pd.DataFrame(experiment.run())

    if _has_ray_extension:
        assert df_multiprocessing.equals(df_ray)
    assert df_multiprocessing.equals(df_pathos)
    assert df_multiprocessing.equals(df_single_process)
def test_raise_exceptions_false():
    initial_state = {
        'state_a': 0
    }
    state_update_blocks = [
        {
            'policies': {},
            'variables': {
                'state_a': update_state_invalid_result
            }
        },
    ]
    params = {}
    TIMESTEPS = 10
    RUNS = 1

    model = Model(initial_state=initial_state, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulation)
    # Continue the simulation when a state update raises, collecting exceptions instead
    experiment.engine = Engine(raise_exceptions=False)

    results = experiment.run()
    _results = experiment.results
    assert len(results) > 0
    assert results == _results

    exceptions = experiment.exceptions
    print(exceptions)
    assert any(isinstance(exception['exception'], Exception) for exception in exceptions)
    assert isinstance(results, list)
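
# One possible shape for the `update_state_invalid_result` fixture assumed
# above (the real definition lives in the shared test fixtures): any update
# that doesn't return the expected (variable_name, new_value) tuple makes the
# engine record an exception for that run rather than a state update.
def update_state_invalid_result(params, substep, state_history, previous_state, policy_input):
    return previous_state['state_a'] + 1  # invalid: bare value, no variable name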
def test_regression_state_names():
    # Test that state names of more than one character don't fail!
    initial_state = {'state_a': 0}
    state_update_blocks = [
        {
            'policies': {},
            'variables': {
                'state_a': update_state_a
            }
        },
    ]
    params = {}
    TIMESTEPS = 10
    RUNS = 1

    model = Model(initial_state=initial_state, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulation)

    assert isinstance(experiment.run(), list)
def test_add_simulations():
    experiment = Experiment()

    experiment.add_simulations([simulation, simulation])
    assert experiment.get_simulations() == [simulation, simulation]

    experiment.add_simulations(simulation)
    assert experiment.get_simulations() == [simulation, simulation, simulation]

    with pytest.raises(Exception):
        experiment.add_simulations(None)
def test_run():
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = basic.params
    TIMESTEPS = basic.TIMESTEPS
    RUNS = basic.RUNS

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulations=[simulation])

    experiment.run()
    assert True
def test_clear_simulations():
    experiment = Experiment()
    experiment.add_simulations([simulation, simulation, simulation])

    assert experiment.clear_simulations()
    assert experiment.get_simulations() == []
    # Clearing an already-empty experiment returns a falsy value
    assert not experiment.clear_simulations()
    assert experiment.get_simulations() == []
def test_to_HDF5():
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = basic.params
    TIMESTEPS = basic.TIMESTEPS
    RUNS = basic.RUNS

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)

    HDF5_store_file = 'experiment_results.hdf5'

    experiment = Experiment(simulations=[simulation])
    experiment.after_experiment = lambda experiment: save_to_HDF5(experiment, HDF5_store_file, 'experiment_0')
    raw_result = experiment.run()
    df = pd.read_hdf(HDF5_store_file, 'experiment_0')
    assert df.equals(pd.DataFrame(raw_result))

    experiment = Experiment(simulations=[simulation])
    experiment.after_experiment = lambda experiment: save_to_HDF5(experiment, HDF5_store_file, 'experiment_1')
    raw_result = experiment.run()
    df = pd.read_hdf(HDF5_store_file, 'experiment_1')
    assert df.equals(pd.DataFrame(raw_result))

    assert len(raw_result) > 0
    assert raw_result == experiment.results
    assert simulation.run() == raw_result
def test_run():
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = basic.params
    TIMESTEPS = basic.TIMESTEPS
    RUNS = basic.RUNS

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulations=[simulation])

    raw_result = experiment.run()

    assert len(raw_result) > 0
    assert raw_result == experiment.results
    assert simulation.run() == raw_result
def test_regression_deepcopy():
    initial_state = {'state_a': 0}
    state_update_blocks = [
        {
            'policies': {},
            'variables': {
                'state_a': update_state_a
            }
        },
    ]
    params = {}
    TIMESTEPS = 10
    RUNS = 1

    model = Model(initial_state=initial_state, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model)
    experiment = Experiment(simulation)

    simulation.run()
    # Regression check: a simulation that has already run must still be deep-copyable
    _ = copy.deepcopy(simulation)
def test_base_results():
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = basic.params
    TIMESTEPS = basic.TIMESTEPS
    RUNS = basic.RUNS

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment([simulation])

    simulation_results = simulation.run()
    experiment_results = experiment.run()

    # Check Executable results & exceptions
    assert simulation_results == experiment_results
    assert simulation.results == experiment.results
    assert simulation.exceptions == experiment.exceptions
def test_simulation_dataframe_structure():
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = basic.params
    TIMESTEPS = basic.TIMESTEPS
    RUNS = basic.RUNS

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment([simulation, simulation, simulation])

    data_radcad = experiment.run()
    df_radcad = pd.DataFrame(data_radcad)

    c = config_sim({"N": RUNS, "T": range(TIMESTEPS), "M": params})
    exp = cadCADExperiment()
    exp.append_configs(model_id='a', initial_state=states, partial_state_update_blocks=state_update_blocks, sim_configs=c)
    exp.append_configs(model_id='b', initial_state=states, partial_state_update_blocks=state_update_blocks, sim_configs=c)
    exp.append_configs(model_id='c', initial_state=states, partial_state_update_blocks=state_update_blocks, sim_configs=c)

    exec_mode = ExecutionMode()
    local_mode_ctx = ExecutionContext(context=exec_mode.local_mode)
    simulation = Executor(exec_context=local_mode_ctx, configs=exp.configs)
    data_cadcad, tensor_field, sessions = simulation.execute()
    df_cadcad = pd.DataFrame(data_cadcad)

    assert_frame_equal(df_radcad, df_cadcad)
    assert df_radcad.equals(df_cadcad)
def test_run_ray_remote():
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = basic.params
    TIMESTEPS = basic.TIMESTEPS
    RUNS = basic.RUNS

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    engine = Engine(backend=Backend.RAY_REMOTE)
    experiment = Experiment(simulation, engine=engine)

    if not RAY_ADDRESS or not RAY_REDIS_PASSWORD:
        assert False, "RAY_ADDRESS or RAY_REDIS_PASSWORD not set"
    ray.init(_redis_password=RAY_REDIS_PASSWORD)

    result = experiment.run()
    df = pd.DataFrame(result)
    print(df)
    assert True
def test_multiple_partial_state_updates():
    initial_state = {
        'a': 0
    }
    state_update_blocks = [
        {
            'policies': {},
            'variables': {
                'a': lambda params, substep, state_history, previous_state, policy_input: ('a', previous_state['a'] + 1),
            }
        },
        {
            'policies': {},
            'variables': {
                'a': lambda params, substep, state_history, previous_state, policy_input: ('a', previous_state['a'] + 1),
            }
        },
        {
            'policies': {},
            'variables': {
                'a': lambda params, substep, state_history, previous_state, policy_input: ('a', previous_state['a'] + 1),
            }
        },
    ]
    params = {}
    TIMESTEPS = 10
    RUNS = 1

    model = Model(initial_state=initial_state, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulation)
    experiment.engine = Engine(backend=Backend.SINGLE_PROCESS)

    result = experiment.run()
    df = pd.DataFrame(result)
    # Each of the 3 substeps increments 'a' by 1, so after 10 timesteps: 3 * 10 = 30
    assert df.query('timestep == 10 and substep == 3')['a'].item() == 30
def test_model_generator():
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = basic.params
    TIMESTEPS = 10
    RUNS = basic.RUNS

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulations=[simulation])

    raw_result_experiment = experiment.run()
    # The Model generator doesn't handle parameter sweeps, and only executes one run
    df_experiment = pd.DataFrame(raw_result_experiment).query('run == 1 and subset == 0')

    assert len(raw_result_experiment) > 0
    assert raw_result_experiment == experiment.results
    assert simulation.run() == raw_result_experiment

    # Create a generator from the Model instance
    model_generator = iter(model)
    raw_result_model = []

    # Set initial state
    raw_result_model.append(model.state)

    # Emulate the behaviour of the radCAD Engine
    for t in range(TIMESTEPS):
        _model = next(model_generator)
        raw_result_model.append(_model.substeps)

    # Flatten the results
    raw_result_model = utils.flatten(raw_result_model)
    df_model = pd.DataFrame(raw_result_model)

    assert_frame_equal(df_experiment, df_model)
def test_run():
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = basic.params
    TIMESTEPS = 10
    RUNS = 1

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulations=[simulation])

    experiment.engine = Engine(drop_substeps=True)
    drop_substeps_result = pd.DataFrame(experiment.run())

    simulation_result = pd.DataFrame(simulation.run())
    # Keep only the initial state (substep 0) and the final substep of each timestep
    keep = (simulation_result.substep == simulation_result['substep'].max())
    keep |= (simulation_result.substep == 0)
    simulation_result = simulation_result.loc[keep]

    assert simulation_result.reset_index(drop=True).equals(drop_substeps_result.reset_index(drop=True))
def test_invalid_state_update_function():
    states = basic.states
    # Mismatched update: variable 'a' is mapped to the update function for 'b'
    state_update_blocks = [
        {
            'policies': {
                'p': basic.policy,
            },
            'variables': {
                'a': basic.update_b
            }
        },
    ]
    params = basic.params
    TIMESTEPS = basic.TIMESTEPS
    RUNS = basic.RUNS

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulation)

    with pytest.raises(KeyError) as err:
        experiment.run()
def execute(self, engine=Engine()):
    simulations = []
    for config in self.configs:
        initial_state = config.initial_state
        state_update_blocks = config.partial_state_update_blocks
        timesteps = max(list(config.sim_config["T"])) + 1
        runs = config.sim_config["N"]
        params = config.sim_config["M"]  # {key: [value] for key, value in config.sim_config['M'].items()}

        model = Model(
            initial_state=initial_state,
            state_update_blocks=state_update_blocks,
            params=params,
        )
        simulation = Simulation(model=model, timesteps=timesteps, runs=1)
        simulations.append(simulation)

    experiment = Experiment(simulations=simulations)
    experiment.engine = engine
    result = experiment.run()
    # Return a cadCAD-style (results, tensor_field, sessions) tuple
    return result, None, None
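
# Hypothetical usage of the compatibility shim above, assuming the enclosing
# class accepts cadCAD-style configs (the class name `CompatExecutor` and the
# `exp.configs` source are illustrative assumptions, not the library's API):
#
# executor = CompatExecutor(configs=exp.configs)
# raw_result, tensor_field, sessions = executor.execute(engine=Engine(backend=Backend.SINGLE_PROCESS))
# df = pd.DataFrame(raw_result)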
import pytest
import pandas as pd

from radcad import Model, Simulation, Experiment
from tests.test_cases import benchmark_model

states = benchmark_model.states
state_update_blocks = benchmark_model.state_update_blocks
params = benchmark_model.params
TIMESTEPS = 100_000
RUNS = 3

model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
simulation_radcad = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
experiment = Experiment([simulation_radcad, simulation_radcad, simulation_radcad])


def test_benchmark_radcad(benchmark):
    benchmark.pedantic(radcad_simulation, iterations=1, rounds=3)


def radcad_simulation():
    data_radcad = experiment.run()
def test_hooks(capsys):
    states = basic.states
    state_update_blocks = basic.state_update_blocks
    params = {'a': [1, 2], 'b': [1]}
    TIMESTEPS = 1
    RUNS = 2

    model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
    experiment = Experiment(simulations=[simulation, simulation])

    experiment.before_experiment = lambda experiment=None: print('before_experiment')
    experiment.before_simulation = lambda simulation=None: print(f'before_simulation {simulation.index}')
    experiment.before_run = lambda context=None: print(f'before_run {context.run}')
    experiment.before_subset = lambda context=None: print(f'before_subset {context.subset}')
    experiment.after_subset = lambda context=None: print(f'after_subset {context.subset}')
    experiment.after_run = lambda context=None: print(f'after_run {context.run}')
    experiment.after_simulation = lambda simulation=None: print(f'after_simulation {simulation.index}')
    experiment.after_experiment = lambda experiment=None: print('after_experiment')

    experiment.run()

    captured = capsys.readouterr()
    assert captured.out.replace('\n', '').replace(' ', '') == """
    before_experiment
    before_simulation 0
    before_run 0
    before_subset 0
    after_subset 0
    before_subset 1
    after_subset 1
    after_run 0
    before_run 1
    before_subset 0
    after_subset 0
    before_subset 1
    after_subset 1
    after_run 1
    after_simulation 0
    before_simulation 1
    before_run 0
    before_subset 0
    after_subset 0
    before_subset 1
    after_subset 1
    after_run 0
    before_run 1
    before_subset 0
    after_subset 0
    before_subset 1
    after_subset 1
    after_run 1
    after_simulation 1
    after_experiment
    """.replace('\n', '').replace(' ', '')
def test_hooks(capfd):
    simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=3)
    experiment = Experiment(simulation)

    experiment.before_experiment = lambda experiment=None: print(f"Before experiment with {len(experiment.simulations)} simulations")
    experiment.after_experiment = lambda experiment=None: print(f"After experiment with {len(experiment.simulations)} simulations")
    experiment.before_simulation = lambda simulation=None: print(f"Before simulation {simulation.index} with params {simulation.model.params}")
    experiment.after_simulation = lambda simulation=None: print(f"After simulation {simulation.index} with params {simulation.model.params}")
    experiment.before_run = lambda context=None: print(f"Before run {context}")
    experiment.after_run = lambda context=None: print(f"After run {context}")
    experiment.before_subset = lambda context=None: print(f"Before subset {context}")
    experiment.after_subset = lambda context=None: print(f"After subset {context}")

    experiment.run()
    # out, err = capfd.readouterr()
    assert True
from radcad import Model, Simulation, Experiment
from radcad.engine import Engine, Backend
from cadCAD.configuration import Experiment as cadCADExperiment
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs

from tests.test_cases import benchmark_model

states = benchmark_model.states
state_update_blocks = benchmark_model.state_update_blocks
params = benchmark_model.params
TIMESTEPS = benchmark_model.TIMESTEPS
RUNS = benchmark_model.RUNS

model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
simulation_radcad = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
experiment = Experiment(simulation_radcad)
experiment.engine = Engine(backend=Backend.BASIC)

c = config_sim({"N": RUNS, "T": range(TIMESTEPS), "M": params})
exp = cadCADExperiment()
exp.append_configs(initial_state=states, partial_state_update_blocks=state_update_blocks, sim_configs=c)
exec_mode = ExecutionMode()
local_mode_ctx = ExecutionContext(context=exec_mode.single_proc)
simulation_cadcad = Executor(exec_context=local_mode_ctx, configs=configs)


def test_benchmark_radcad(benchmark):
    # Body truncated in the source; assumed here, mirroring the
    # benchmark.pedantic pattern used by the other benchmarks in this suite
    benchmark.pedantic(experiment.run, iterations=1, rounds=3)
def test_experiment_init():
    experiment = Experiment(simulations=[simulation])
    assert experiment.get_simulations() == [simulation]

    experiment = Experiment(simulations=simulation)
    assert experiment.get_simulations() == [simulation]

    experiment = Experiment(simulation)
    assert experiment.get_simulations() == [simulation]

    experiment = Experiment([simulation])
    assert experiment.get_simulations() == [simulation]

    with pytest.raises(Exception):
        Experiment(invalid_arg=None)
import pytest
import pandas as pd

from radcad import Model, Simulation, Experiment
from radcad.engine import Engine, Backend
from tests.test_cases import benchmark_model

states = benchmark_model.states
state_update_blocks = benchmark_model.state_update_blocks
params = benchmark_model.params
TIMESTEPS = 100_000
RUNS = 5

model = Model(initial_state=states, state_update_blocks=state_update_blocks, params=params)
simulation = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
experiment = Experiment([simulation])
experiment.engine = Engine(backend=Backend.SINGLE_PROCESS)

if __name__ == "__main__":
    results = experiment.run()
    assert len(results) > 0
from radcad import Model, Simulation, Experiment
from radcad.engine import Engine, Backend
from cadCAD.configuration import Experiment as cadCADExperiment
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs

import tests.test_cases.predator_prey_model as benchmark_model

initial_state = benchmark_model.initial_state
state_update_blocks = benchmark_model.state_update_blocks
params = benchmark_model.params
TIMESTEPS = benchmark_model.TIMESTEPS
RUNS = benchmark_model.MONTE_CARLO_RUNS

model = Model(initial_state=initial_state, state_update_blocks=state_update_blocks, params=params)
simulation_radcad = Simulation(model=model, timesteps=TIMESTEPS, runs=RUNS)
experiment = Experiment(simulation_radcad)
experiment.engine = Engine(backend=Backend.SINGLE_PROCESS)

c = config_sim({"N": RUNS, "T": range(TIMESTEPS), "M": params})
exp = cadCADExperiment()
exp.append_configs(initial_state=initial_state, partial_state_update_blocks=state_update_blocks, sim_configs=c)
exec_mode = ExecutionMode()
local_mode_ctx = ExecutionContext(context=exec_mode.local_mode)
simulation_cadcad = Executor(exec_context=local_mode_ctx, configs=configs)


def test_benchmark_radcad(benchmark):
    # Body truncated in the source; assumed here, mirroring the
    # benchmark.pedantic pattern used by the other benchmarks in this suite
    benchmark.pedantic(experiment.run, iterations=1, rounds=3)