def create_data(problem, new_path):
    """
    problem: dict specifying the input variables with ranges (not discrete
    values), otherwise saltelli sampling will not work.
    Runs batch iterations over all samples obtained from saltelli and saves
    the results at the end of the run, time-stamped, as .csv and .pickle.
    """
    # Set the repetitions, the number of steps, and the number of distinct values per variable
    replicates = 10
    max_steps = 100
    distinct_samples = 10

    # Define output parameters
    model_reporters = {
        'step_data': lambda m: m.datacollector.get_model_vars_dataframe(),
        'obstacle_density': lambda m: m.obstacle_density,
        'food_density': lambda m: m.food_density,
        'nr_hives': lambda m: m.nr_hives
    }

    data = {}

    # Sample all interactions; computationally expensive but gives all combinations
    params_values = saltelli.sample(problem, N=distinct_samples)

    # Cast to int in place; set copy=True if a copy of the array is needed
    params_values = params_values.astype(int, copy=False)

    batch = BatchRunnerMP(BeeForagingModel,
                          nr_processes=os.cpu_count(),
                          max_steps=max_steps,
                          variable_parameters={val: [] for val in problem['names']},
                          model_reporters=model_reporters,
                          display_progress=True)

    counter = 0

    # Progress bar over all replicates and samples
    pbar = tqdm(total=replicates * len(params_values))

    for _ in range(replicates):
        for values in params_values:
            # Collect all data samples from the saltelli sampling
            var_parameters = {n: v for n, v in zip(problem['names'], values)}
            batch.run_iteration(var_parameters, tuple(values), counter)
            counter += 1
            pbar.update(1)

    pbar.close()
    data = batch.get_model_vars_dataframe()
    data.to_csv(f'pickles/analysis_{new_path}.csv')
    data.to_pickle(f'pickles/analysis_{new_path}.p')
    return data
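# Hedged follow-up sketch (not part of create_data): feed the saved results to
# SALib's Sobol analysis. The pickle path and the output column 'bee_count' are
# assumptions; replace them with the actual new_path value and a reporter column
# present in the saved dataframe. `problem` must be the same dict that was passed
# to saltelli.sample, and the rows must line up with the saltelli sample
# (i.e. replicates aggregated first).
import pandas as pd
from SALib.analyze import sobol

results = pd.read_pickle('pickles/analysis_example.p')   # assumed path
Y = results['bee_count'].values                          # hypothetical output column

Si = sobol.analyze(problem, Y, print_to_console=True)
print(Si['S1'])   # first-order indices
print(Si['ST'])   # total-order indices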
def perform_analysis():
    problem = {
        'num_vars': 1,
        'names': ['tolerance'],  # available parameters: 'tolerance', 'social_extroversion', 'mobility', 'decay'
        'bounds': [[0.01, 0.99]]
    }

    # Set the repetitions, the number of steps, and the number of distinct values per variable
    replicates = 10
    max_steps = 10
    distinct_samples = 10

    # Set the outputs
    model_reporters = {
        "Friends score": lambda m: m.avg_friends_score(),
        "Friends distance": lambda m: m.avg_friends_social_distance(),
        "Friends spatial distance": lambda m: m.avg_friends_spatial_distance()
    }

    data = {}

    begin = time.time()
    for i, var in enumerate(problem['names']):
        # Get the bounds for this variable and take <distinct_samples> uniform samples within this space
        samples = np.linspace(*problem['bounds'][i], num=distinct_samples)

        batch = BatchRunnerMP(Friends,
                              max_steps=max_steps,
                              iterations=replicates,
                              variable_parameters={var: samples},
                              model_reporters=model_reporters,
                              display_progress=True,
                              nr_processes=multiprocessing.cpu_count() - 1)

        batch.run_all()
        end = time.time()
        print("Performed", replicates * distinct_samples, "model runs in",
              np.round(end - begin), "seconds.")

        data[var] = batch.get_model_vars_dataframe()

    return [problem, data]
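# Hedged usage sketch for perform_analysis(): plot one reporter against the swept
# parameter. Grouping by the parameter value and averaging over replicates is an
# assumed way to summarise the runs, not something the function above prescribes.
import matplotlib.pyplot as plt

problem, data = perform_analysis()

for var, df in data.items():
    grouped = df.groupby(var)['Friends score'].mean()
    plt.plot(grouped.index, grouped.values, marker='o')
    plt.xlabel(var)
    plt.ylabel('Friends score (mean over replicates)')
    plt.title(f'OFAT sweep of {var}')
    plt.show()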
def run_ofat(self):
    """
    Collects model data for OFAT analysis.
    Writes the raw data as a nested pandas dataframe to pickle and csv.
    """
    # We define our variables and their values
    params = {
        'obstacle_density': [0, 15, 30],
        'food_density': [5, 15, 25],
        'nr_hives': [1, 3, 5]
    }

    # Set the repetitions and the number of steps
    replicates = 1
    max_steps = 30

    # Define output parameters
    model_reporters = {
        'step_data': lambda m: m.datacollector.get_model_vars_dataframe(),
        'obstacle_density': lambda m: m.obstacle_density,
        'food_density': lambda m: m.food_density,
        'nr_hives': lambda m: m.nr_hives
    }

    data = {}

    for var in params:
        batch = BatchRunnerMP(BeeForagingModel,
                              max_steps=max_steps,
                              nr_processes=os.cpu_count(),
                              iterations=replicates,
                              variable_parameters={var: params[var]},
                              model_reporters=model_reporters,
                              display_progress=True)

        batch.run_all()
        data = batch.get_model_vars_dataframe()
        data.to_csv(f'pickles/{self.time_stamp}_{var}.csv')
        data.to_pickle(f'pickles/{self.time_stamp}_{var}.p')
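# Hedged follow-up sketch (assumed usage, not part of run_ofat): load one of the
# pickles written above and summarise the final step of each run per parameter
# value. The time stamp in the file name is a placeholder.
import pandas as pd

df = pd.read_pickle('pickles/20200101_obstacle_density.p')   # assumed file name
for density, runs in df.groupby('obstacle_density'):
    # each row's 'step_data' cell holds the per-step model dataframe for that run
    final_steps = [step_df.iloc[-1] for step_df in runs['step_data']]
    print(density, pd.DataFrame(final_steps).mean(numeric_only=True))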
import matplotlib.pyplot as plt
from mesa.batchrunner import BatchRunnerMP

from tictactoe import get_game_grid, TicTacToe

runner = BatchRunnerMP(
    TicTacToe,
    nr_processes=2,
    iterations=256,
    model_reporters={
        'endgame': lambda m: m.endgame,
        'endgrid': get_game_grid
    },
)
runner.run_all()

df = runner.get_model_vars_dataframe()
print(df.endgame.value_counts())

fig = plt.figure(figsize=(16, 16))
for i in range(256):
    subplot = fig.add_subplot(16, 16, i + 1)
    subplot.imshow(df.endgrid[i])
    subplot.axis('off')
    subplot.set_title(df.endgame[i].name, size=8)
fig.tight_layout()
plt.subplots_adjust(wspace=-.9)
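# The next snippet references variable_params, fixed_params and CPU_COUNT without
# defining them. A hedged sketch of what they might look like follows; only
# 'ingreso_inicial' is grounded in the plotting code below, the other names and
# values are illustrative placeholders, not the actual EconomiaSocialista signature.
import multiprocessing

CPU_COUNT = multiprocessing.cpu_count()
variable_params = {'ingreso_inicial': range(10, 110, 10)}
fixed_params = {'num_agents': 50}   # hypothetical fixed parameter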
# The variable parameters will be invoked along with the fixed parameters, allowing either or both to be honored.
# The BatchRunner won't collect data at every step of the model, only at the end of each run.
batch_run = BatchRunnerMP(model_cls=EconomiaSocialista,
                          nr_processes=CPU_COUNT,
                          variable_parameters=variable_params,
                          fixed_parameters=fixed_params,
                          iterations=5,
                          max_steps=100,
                          model_reporters=model_reporters,
                          agent_reporters=agent_reporters,
                          display_progress=True)
batch_run.run_all()
run_model_data = batch_run.get_model_vars_dataframe()

# Gini
plt.title('Gini coefficient per iteration')
plt.ylabel('Gini coefficient')
plt.xlabel('Iteration')
plt.scatter(run_model_data.index.array, run_model_data.Gini)
plt.show()

plt.title('Gini coefficient vs. initial income')
plt.ylabel('Gini coefficient')
plt.xlabel('Initial income')
plt.scatter(run_model_data.ingreso_inicial, run_model_data.Gini)
plt.show()

plt.title('Gini coefficient vs. cost of living')
"recovered_seed_pc": [0.01, 0.1, 0.23], "high_risk_pc": [0.25], "grid_area": ["Small", "Large"], "house_init": ["Neighborhood"], "release_strat": [ "Everyone release", "Random individual houses", "Low risk individuals", "Low risk houses" ], "mobility_speed": ["low"], "weeks_to_second_release": [2, 4] } br = BatchRunnerMP( Virus, nr_processes=4, variable_parameters=br_params, iterations=3, # number of times to run each parameter combination max_steps=600, # number of steps for each model run model_reporters={"Data Collector": lambda m: m.datacollector}) if __name__ == '__main__': br.run_all() br_df = br.get_model_vars_dataframe() br_step_data = pd.DataFrame() for i in range(len(br_df["Data Collector"])): if isinstance(br_df["Data Collector"][i], DataCollector): i_run_data = br_df["Data Collector"][i].get_model_vars_dataframe() br_step_data = br_step_data.append(i_run_data, ignore_index=True) br_step_data.to_csv( "/Users/shwu2259/GroupRotation/VirusModel_lowmob_lowfrac.csv")
# if var == 'wolf_gain_from_food':
#     samples = np.linspace(*problem['bounds'][i], num=distinct_samples, dtype=int)

batch = BatchRunnerMP(Friends,
                      max_steps=max_steps,
                      iterations=replicates,
                      variable_parameters={var: samples},
                      model_reporters=model_reporters,
                      display_progress=True,
                      nr_processes=multiprocessing.cpu_count() - 1)

batch.run_all()
end = time.time()
print("Model run-time:", end - begin)

data[var] = batch.get_model_vars_dataframe()


def perform_analysis():
    problem = {
        'num_vars': 1,
        'names': ['tolerance'],  # available parameters: 'tolerance', 'social_extroversion', 'mobility', 'decay'
        'bounds': [[0.01, 0.99]]
    }

    # Set the repetitions, the number of steps, and the number of distinct values per variable
    replicates = 10
    max_steps = 10
    distinct_samples = 10