Example no. 1
fixed_params = {
    "width": width,
    "height": height,
    "NumMidges": num_agents,
    "NumDeer": num_deer,
    "mapfile": mapfile,
    "trapfile": trapfile
}

variable_params = {"dps": np.arange(0.40, 0.50, 0.02)}

batch_run = BatchRunner(WorldModel,
                        variable_params,
                        fixed_params,
                        iterations=1,
                        max_steps=40,
                        model_reporters={
                            "MidgePop": nummidges,
                            "DeerBites": deerbites
                        })


def run_batch():
    batch_run.run_all()
    data = batch_run.get_model_vars_dataframe()
    data.head()
    fig, axs = plt.subplots(2)
    fig.suptitle('Midge Run')
    axs[0].set_title("Midge Population with Different DPS")
    axs[0].set_xlabel("DPS")
    axs[0].set_ylabel("Population")
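    # Hypothetical continuation (not part of the original snippet): scatter the
    # swept dps values against each reporter and display the figure.
    axs[0].scatter(data.dps, data.MidgePop)
    axs[1].set_title("Deer Bites with Different DPS")
    axs[1].set_xlabel("DPS")
    axs[1].set_ylabel("Bites")
    axs[1].scatter(data.dps, data.DeerBites)
    plt.show()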
Example no. 2
sys.path.append("/Users/rafael/Documents/GitHub/InCES-model/Industrial_communities")
from mesa.datacollection import DataCollector
from Model import Modelrun
##Batchrun
from mesa.batchrunner import BatchRunner
import pandas as pd 

#Run parameters 
m_step_data = pd.DataFrame()
n_communities = [25]
n_industries = [75]

model_param = {"n_industries": n_industries, "n_communities": n_communities} #All variables in place - Everything that can be changed enters here

#Batchrun settings      
br = BatchRunner(Modelrun, model_param, iterations = 500, max_steps = 20, model_reporters = {"Data Collector": lambda m: m.datacollector})
br.run_all()


#Data generation
m_df = br.get_model_vars_dataframe()
m_step_data = pd.DataFrame()
print(range(len(m_df["Data Collector"])))
for i in range(len(m_df["Data Collector"])):
    if isinstance(m_df["Data Collector"][i], DataCollector):
        i_run_data = m_df["Data Collector"][i].get_model_vars_dataframe()
       #a_run_data = a_df["data Collector"][i].get_agent_vars_dataframe()
        m_step_data = m_step_data.append(i_run_data, ignore_index=True)
m_step_data.to_csv("Model_run_USA_S3-03.csv")
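# Note: DataFrame.append was removed in pandas 2.0. A sketch of the same
# collection loop written with pd.concat (same objects as above) would be:
frames = [
    dc.get_model_vars_dataframe()
    for dc in m_df["Data Collector"]
    if isinstance(dc, DataCollector)
]
m_step_data = pd.concat(frames, ignore_index=True)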
        
        
Example no. 3
iterations = 10
max_steps = 200

fixed_params = {'grass': True}

variable_params = {
    'initial_hunter': range(5, 30, 5),
    'hunting_season_end': range(1, 11, 1)
}

batchrun = BatchRunner(model_cls=HuntersModel,
                       fixed_parameters=fixed_params,
                       variable_parameters=variable_params,
                       iterations=iterations,
                       max_steps=max_steps,
                       model_reporters={
                           'average_welfare': lambda m: m.average_welfare(),
                       },
                       agent_reporters=None,
                       display_progress=True)
batchrun.run_all()

#batchdata = batchrun.get_model_vars_dataframe()
data2 = batchrun.get_model_vars_dataframe()

#print(batchdata.head())
#batchdata.head()

plt.scatter(data2.hunting_season_end, data2.average_welfare)
plt.xlabel('hunting_season_end')
plt.ylabel('average_welfare')
Example no. 4
max_steps = 2400
distinct_samples = 10

# check that the directory exists
if not os.path.exists('results_SOBOL'):
    os.makedirs('results_SOBOL')

while True:
    # We get all our samples here
    param_values = saltelli.sample(problem,
                                   distinct_samples,
                                   calc_second_order=False)

    batch = BatchRunner(
        FishingModel,
        max_steps=max_steps,
        variable_parameters={name: []
                             for name in problem['names']},
        model_reporters=model_reporters)

    count = 0
    data = pd.DataFrame(index=range(replicates * len(param_values)),
                        columns=[
                            'energy_gain', 'full_catch_reward',
                            'initial_wallet_survival', 'catch_rate',
                            'fish_reproduction_number', 'beta_fisherman_spawn'
                        ])
    data['Run'] = None
    data['Fish mean'] = None
    data['Fish slope'] = None
    data['Fish variance'] = None
    data['Cumulative_gain'] = None

    for vals in param_values:
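        # Hypothetical loop body (the snippet is cut off here), following the
        # run_iteration pattern used in the other SALib examples in this collection.
        variable_parameters = {name: val for name, val in zip(problem['names'], vals)}
        batch.run_iteration(variable_parameters, tuple(vals), count)
        count += 1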
Example no. 5
    "Recovered: 10-19": count_age_10_19_recovered,
    "Recovered: 20-29": count_age_20_29_recovered,
    "Recovered: 30-39": count_age_30_39_recovered,
    "Recovered: 40-49": count_age_40_49_recovered,
    "Recovered: 50-59": count_age_50_59_recovered,
    "Recovered: 60-69": count_age_60_69_recovered,
    "Recovered: 70-79": count_age_70_79_recovered,
    "Recovered: 80+": count_age_80_up_recovered,
    "Peak Infected": max_active_cases_counter,
    "Peak Date": max_active_cases_counter
}

param_sweep_default_scenario = BatchRunner(
    GridSimulationEnvironment,
    variable_parameters=variable_params,
    fixed_parameters=default_scenario_model_params,
    iterations=1,
    max_steps=1,
    model_reporters=model_reporters)

param_sweep_default_scenario.run_all()

param_sweep_default_scenario_df = param_sweep_default_scenario.get_model_vars_dataframe(
)
param_sweep_default_scenario_df.to_csv("param_sweep_default_scenario_df.csv")

param_sweep_default_vaccination_scenario = BatchRunner(
    GridSimulationEnvironment,
    variable_parameters=variable_params,
    fixed_parameters=default_vaccination_model_params,
    iterations=1,
    max_steps=1,
    model_reporters=model_reporters)  # completion assumed to mirror the default-scenario sweep above

Example no. 6
max_steps = 100
distinct_samples = 512

# generating samples
param_values = saltelli.sample(problem, distinct_samples)
variable_parameters = {name: [] for name in problem['names']}
#print(variable_parameters)
#print(len(param_values))
#print(param_values[450])

# In[35]:

batch = BatchRunner(BoltzmannWealthModelNetwork,
                    variable_parameters=variable_parameters,
                    fixed_parameters=fixed_parameters,
                    max_steps=max_steps,
                    iterations=2,
                    model_reporters=model_reporters,
                    display_progress=True)
count = 0
for i in range(replicates):
    for vals in param_values:
        vals = list(vals)
        #print(vals)

        # Transform to dict with parameter names and their values
        variable_parameters = {}
        for name, val in zip(problem['names'], vals):
            variable_parameters[name] = val
        #print(variable_parameters)
        batch.run_iteration(variable_parameters, tuple(vals), count)
        count += 1  # advance the run index so each sample gets a distinct key

Example no. 7
method_types = [
    'Random',
    'Front-to-back',
    'Front-to-back (4 groups)',
    'Back-to-front',
    'Back-to-front (4 groups)',
    'Window-Middle-Aisle',
    'Steffen Perfect',
    'Steffen Modified'
]
colors = ["blue", "red", "purple", "yellow", "green", "cyan", "gold", "magenta"]
plot_design = []

for i in method_types:
    fixed_params = {"method": i}
    batch_run = BatchRunner(PlaneModel, None, fixed_params, iterations=100,
                            model_reporters={"method_time": lambda m: m.schedule.time}, display_progress=False,max_steps=1500)
    batch_run.run_all()
    all_times = (batch_run.get_model_vars_dataframe()['method_time'])
    c_times = collections.Counter(all_times)
    common = sorted(c_times.elements())
    plot_design.append(common)
    average_time = mean(batch_run.get_model_vars_dataframe()['method_time'])
    print("{}: {}".format(i, average_time))

df = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/diamonds.csv')
kwargs = dict(hist_kws={'alpha':.4}, kde_kws={'linewidth':2})

for j in range(len(plot_design)):
    a = np.asarray(plot_design[j])
    #x1 = df.loc[df.cut == "Good", "depth"]
    #plt.hist(a, bins, alpha=0.3, color=colors[j])
Example no. 8
 batch_run = BatchRunner(
     ABM_CE_PV,
     variable_params,
     fixed_params,
     iterations=1,
     max_steps=30,
     model_reporters={
         "Year":
         lambda c: ABM_CE_PV.report_output(c, "year"),
         "Agents repairing":
         lambda c: ABM_CE_PV.count_EoL(c, "repairing"),
         "Agents selling":
         lambda c: ABM_CE_PV.count_EoL(c, "selling"),
         "Agents recycling":
         lambda c: ABM_CE_PV.count_EoL(c, "recycling"),
         "Agents landfilling":
         lambda c: ABM_CE_PV.count_EoL(c, "landfilling"),
         "Agents storing":
         lambda c: ABM_CE_PV.count_EoL(c, "hoarding"),
         "Agents buying new":
         lambda c: ABM_CE_PV.count_EoL(c, "buy_new"),
         "Agents buying used":
         lambda c: ABM_CE_PV.count_EoL(c, "buy_used"),
         "Agents buying certified":
         lambda c: ABM_CE_PV.count_EoL(c, "certified"),
         "Total product":
         lambda c: ABM_CE_PV.report_output(c, "product_stock"),
         "New product":
         lambda c: ABM_CE_PV.report_output(c, "product_stock_new"),
         "Used product":
         lambda c: ABM_CE_PV.report_output(c, "product_stock_used"),
         "New product_mass":
         lambda c: ABM_CE_PV.report_output(c, "prod_stock_new_mass"
                                           ),
         "Used product_mass":
         lambda c: ABM_CE_PV.report_output(c, "prod_stock_used_mass"
                                           ),
         "End-of-life - repaired":
         lambda c: ABM_CE_PV.report_output(c, "product_repaired"),
         "End-of-life - sold":
         lambda c: ABM_CE_PV.report_output(c, "product_sold"),
         "End-of-life - recycled":
         lambda c: ABM_CE_PV.report_output(c, "product_recycled"),
         "End-of-life - landfilled":
         lambda c: ABM_CE_PV.report_output(c, "product_landfilled"),
         "End-of-life - stored":
         lambda c: ABM_CE_PV.report_output(c, "product_hoarded"),
         "eol - new repaired weight":
         lambda c: ABM_CE_PV.report_output(c, "product_new_repaired"
                                           ),
         "eol - new sold weight":
         lambda c: ABM_CE_PV.report_output(c, "product_new_sold"),
         "eol - new recycled weight":
         lambda c: ABM_CE_PV.report_output(c, "product_new_recycled"
                                           ),
         "eol - new landfilled weight":
         lambda c: ABM_CE_PV.report_output(
             c, "product_new_landfilled"),
         "eol - new stored weight":
         lambda c: ABM_CE_PV.report_output(c, "product_new_hoarded"
                                           ),
         "eol - used repaired weight":
         lambda c: ABM_CE_PV.report_output(c,
                                           "product_used_repaired"),
         "eol - used sold weight":
         lambda c: ABM_CE_PV.report_output(c, "product_used_sold"),
         "eol - used recycled weight":
         lambda c: ABM_CE_PV.report_output(c,
                                           "product_used_recycled"),
         "eol - used landfilled weight":
         lambda c: ABM_CE_PV.report_output(
             c, "product_used_landfilled"),
         "eol - used stored weight":
         lambda c: ABM_CE_PV.report_output(c, "product_used_hoarded"
                                           ),
         "Average landfilling cost":
         lambda c: ABM_CE_PV.report_output(c,
                                           "average_landfill_cost"),
         "Average storing cost":
         lambda c: ABM_CE_PV.report_output(c,
                                           "average_hoarding_cost"),
         "Average recycling cost":
         lambda c: ABM_CE_PV.report_output(
             c, "average_recycling_cost"),
         "Average repairing cost":
         lambda c: ABM_CE_PV.report_output(
             c, "average_repairing_cost"),
         "Average selling cost":
         lambda c: ABM_CE_PV.report_output(
             c, "average_second_hand_price"),
         "Recycled material volume":
         lambda c: ABM_CE_PV.report_output(c, "recycled_mat_volume"
                                           ),
         "Recycled material value":
         lambda c: ABM_CE_PV.report_output(c, "recycled_mat_value"),
         "Producer costs":
         lambda c: ABM_CE_PV.report_output(c, "producer_costs"),
         "Consumer costs":
         lambda c: ABM_CE_PV.report_output(c, "consumer_costs"),
         "Recycler costs":
         lambda c: ABM_CE_PV.report_output(c, "recycler_costs"),
         "Refurbisher costs":
         lambda c: ABM_CE_PV.report_output(c, "refurbisher_costs")
     })
Example no. 9
# tested scenarios, and the values to test.  Here, we'll test different
# numbers of agents in the model between 2 and 100 (in increments of 1), and 
# different levels of movement from 0 to 1 in increments of 0.1
variable_params = {"N":range(2,100,1), 
                   "level_of_movement":arange(0.0,1.0,0.1)}

# Run each combination 5 times, for 365 days of simulated time
num_iterations = 5
num_steps = 365

# Set up a BatchRunner with the above information
batch_run = BatchRunner(Disease_Model, 
                        fixed_parameters=fixed_params,
                        variable_parameters=variable_params,
                        iterations=num_iterations,
                        max_steps=num_steps,
                        model_reporters=
                        {"Total_Infected":calculate_number_infected,
                         "Total_Imm":calculate_number_immunised}
                        )

# Tell the BatchRunner to run
batch_run.run_all()

# Store the results in a Pandas DataFrame
run_data = batch_run.get_model_vars_dataframe()

# Plot a scatter plot of level of movement vs total infected
plt.scatter(run_data.level_of_movement, run_data.Total_Infected)

Example no. 10
variable_params = {
    "subset_no": np.arange(a),
    "type_no": (1, 2),
}

batch_run = BatchRunner(
    NormModel,
    variable_params,
    fixed_params,
    iterations=1000,
    max_steps=255,
    model_reporters={
        "PerHate": percent_haters,
        "NetworkType": network_type,
        "AveSensitivity": average_sensitivity,
        "MeanDeg": net_avg_deg,
        "MaxDeg": net_max_deg,
        "NetConnect": net_conn,
        "NetClust": net_clust,
        "FinalStep": final_step,
    },
    # agent_reporters={"Hate": "behavior",
    #                  "Step": "step_no"}
)

batch_run.run_all()

# agent_data = batch_run.get_agent_vars_dataframe()
run_data = batch_run.get_model_vars_dataframe()
run_data.to_csv('counting_steps.csv')
Example no. 11
#                 'visibility': [True, False],
#                 "initial_explorers": range(1, 5),
#                 "initial_competitors": range(0, 5),
#                 "initial_predators": range(0, 5)
#                 }

br = BatchRunnerMP(
    PsyRTSGame,
    model_paramsT,
    iterations=1,
    max_steps=150,
    model_reporters={"Data Collector": lambda m: m.datacollector})

brP = BatchRunner(
    PsyRTSGame,
    model_paramsP,
    iterations=5,
    max_steps=150,
    model_reporters={"Data Collector": lambda m: m.datacollector})

if __name__ == '__main__':
    br.run_all()
    br_df = br.get_model_vars_dataframe()
    #br_step_data =  br_df
    br_step_data = pd.DataFrame()

    for i in range(len(br_df["Data Collector"])):
        if isinstance(br_df["Data Collector"][i], DataCollector):
            i_run_data = br_df["Data Collector"][i].get_model_vars_dataframe()
            br_step_data = br_step_data.append(i_run_data, ignore_index=True)
    #
    # brP.run_all()
Example no. 12
#           "max_vision": 10,
#           "max_init_sugar": 5,
#           "min_age": 30,
#           "max_age": 60,
#           "init_poll": np.linspace(1,10, 10),
#           "ex_ratio": 2,
#           "poll_growth_rule": False,
#           "inheritance_rule": True}

# ### Create Batch Runs With Specific Rule Sets

# In[12]:

sweep_growth = BatchRunner(SugarscapeModel,
                           params_growth,
                           iterations=3,
                           max_steps=200,
                           model_reporters=model_reporters)

sweep_amenity = BatchRunner(SugarscapeModel,
                            params_amenity,
                            iterations=3,
                            max_steps=200,
                            model_reporters=model_reporters)

sweep_inh_growth = BatchRunner(SugarscapeModel,
                               params_inh_growth,
                               iterations=3,
                               max_steps=200,
                               model_reporters=model_reporters)
Example no. 13
fixed_parameters = {'N': 100}
model_reporters = {'TotalTrap': lambda m: m.Count, 'TotalSwitch': lambda m: m.total}

# Set the repetitions, the number of steps, and the number of distinct values per variable
replicates = 10
max_steps = 100
distinct_samples = 512
# generating samples
param_values = saltelli.sample(problem, distinct_samples)
variable_parameters = {name: [] for name in problem['names']}

data = {}

for i, var in enumerate(problem['names']):
    # Get the bounds for this variable and get <distinct_samples> samples within this space (uniform)
    samples = np.linspace(*problem['bounds'][i], num=distinct_samples)
    batch = BatchRunner(BoltzmannWealthModelNetwork, 
                        max_steps=max_steps,
                        iterations=replicates,
                        variable_parameters={var: samples},
                        model_reporters=model_reporters,
                        display_progress=True)
    
    batch.run_all()
    
    data[var] = batch.get_model_vars_dataframe()

header = data.keys()
for head in header:
    data[head].to_csv("OFAT_{}.csv".format(head))
Example no. 14
    'Initial_Outbreak': 0.1,
    'TR': 0.5,
    'RT': 28,
    'MR': 0.02,
    'Policy': 0.0
}
variable_params = None

iters = 20
steps = 100
#
# %%
batch_run_0 = BatchRunner(
    SocialDistancing_Model,
    variable_params,
    fixed_params,
    iterations=iters,
    max_steps=steps,
    model_reporters={"Data Collector": lambda m: m.datacollector})

batch_run_0.run_all()
run_data_0 = batch_run_0.get_model_vars_dataframe()

# %%

fixed_params['Policy'] = 0.25

batch_run_25 = BatchRunner(
    SocialDistancing_Model,
    variable_params,
    fixed_params,
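    # Completion sketch: the original snippet is cut off here; the remaining
    # arguments presumably mirror batch_run_0 above (run_data_25 is an assumed name).
    iterations=iters,
    max_steps=steps,
    model_reporters={"Data Collector": lambda m: m.datacollector})

batch_run_25.run_all()
run_data_25 = batch_run_25.get_model_vars_dataframe()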
Example no. 15
# -*- coding: utf-8 -*-
from model import miModelo
from mesa.batchrunner import BatchRunner
from EjemplosClase.mesa.ejemplo2.model import contarAgentes
import matplotlib.pyplot as plt

fixed_params = {}
variable_params = {"N": range(4, 15, 1), "seed": range(0, 10)}

batchRun = BatchRunner(miModelo,
                       fixed_parameters=fixed_params,
                       variable_parameters=variable_params,
                       iterations=1,
                       max_steps=5000,
                       model_reporters={"numTicks": contarAgentes})

batchRun.run_all()

batch_data = batchRun.get_model_vars_dataframe()

print(batch_data)

plt.scatter(batch_data.N, batch_data.numTicks)
plt.show()
        "nrobots":
        6,
        "radar_radius":
        6,
        "wifi_range":
        3,
        "alpha":
        8.175,
        "gamma":
        gamma,
        "dump_datas":
        False,
        "optimization_task":
        False,
        "load_file":
        "./robot_exploration/maps/30_maps/random{}.py".format(maps_index),
        "gamma_variation":
        True,  # record datas for alpha variation studies
        "gamma_csv":
        "./robot_exploration/results/gamma_variations_high_alpha/gamma_variation{}.csv"
        .format(gamma)
    }

    batch_run = BatchRunner(ExplorationArea,
                            None,
                            fixed_params,
                            iterations=10,
                            max_steps=10000)

    batch_run.run_all()
Example no. 17
}

# TODO Create a BatchRun with the right parameters

batch_run = BatchRunner(
    NormModel,
    variable_params,
    fixed_params,
    iterations=2,  # TODO How many iterations per set of parameters?
    max_steps=30,  # TODO How many steps per simulation run?

    # TODO Create a traditional model (no batch run), based on this one. Run it and see how many runs we'll need.

    # TODO Decide, which network parameters and which agent parameters are to be collected.
    # TODO Write reporters, which will collect the data in a usable fashion.
    model_reporters={
        "PerHate": percent_haters,
        "AveKnowing": percent_hate_knowing,
        "MeanDeg": net_avg_deg,
        "Culling": net_culling,
        "AveHate": average_hate,
        "MaxDeg": max_deg,
        #                  "AveCont": average_contempt,
        #                  "CorHatCon": cor_hate_cont
    },
    # agent_reporters={"Hate": "behavior",
    #                  "Step": "step_no"}
)

batch_run.run_all()

# TODO Save collected data to files for further analysis
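# A minimal sketch for the TODO above, reusing the get_model_vars_dataframe()
# pattern from the other examples; the output filename is only a placeholder.
run_data = batch_run.get_model_vars_dataframe()
run_data.to_csv("norm_model_batch_results.csv")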
Example no. 18
}




model_paramsFixed = {

        "impactPartialVisibility": .25,
        "impactTotalVisibility": .1,

 }

nombreCSV = "model_paramsTEx3cond6"+".csv"
br = BatchRunner(PsyRTSGame,
                 variable_parameters=model_paramsTEx3cond6,
                 fixed_parameters= model_paramsFixed,
                 iterations= 50,
                 max_steps=150,
                 model_reporters={"Data Collector": lambda m: m.datacollector})

if __name__ == '__main__':
    br.run_all()
    br_df = br.get_model_vars_dataframe()
    br_step_data = pd.DataFrame()

    for i in range(len(br_df["Data Collector"])):
        if isinstance(br_df["Data Collector"][i], DataCollector):
            i_run_data = br_df["Data Collector"][i].get_model_vars_dataframe()
            br_step_data = br_step_data.append(i_run_data, ignore_index=True)

    concat = br_step_data
    df = concat
Example no. 19
variables = {'num_vars': 1, 'names': ['human_count'], 'bounds': [[10, 80]]}

# Set the repetitions, the number of steps, and the number of distinct values per variable
replicates = 5
max_steps = 1000
distinct_samples = 8

# We get all our samples here
param_values = [[10], [20], [30], [40], [50], [60], [70], [80]]

# Set the outputs
model_reporters = {'Total_steps': lambda m: m.schedule_Human.get_steps()}

batch = BatchRunner(
    Classroom,
    max_steps=max_steps,
    variable_parameters={name: []
                         for name in variables['names']},
    model_reporters=model_reporters)

count = 0
for i in range(replicates):
    for vals in param_values:
        # Change parameters that should be integers
        vals = list(vals)
        vals[0] = int(vals[0])

        # Transform to dict with parameter names and their values
        variable_parameters = {}
        for name, val in zip(variables['names'], vals):
            variable_parameters[name] = val
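        # Hypothetical continuation (the snippet ends here): run this sample and
        # advance the counter, matching the run_iteration pattern used above.
        batch.run_iteration(variable_parameters, tuple(vals), count)
        count += 1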
Example no. 20
                human_count) * 100)
}

# Create the batch runner
print(
    "Running batch test with %i runs for each parameter and %i human agents." %
    (runs, human_count))

batch_start = time.time()  # Time the batch run

# Run the batch runner 'runs' times (the number of iterations to make) and output a dataset and graphs each iteration
for i in range(1, runs + 1):
    iteration_start = time.time()  # Time the iteration

    param_run = BatchRunner(FireEvacuation,
                            variable_parameters=variable_params,
                            fixed_parameters=fixed_params,
                            model_reporters=model_reporter)

    param_run.run_all()  # Run all simulations

    iteration_end = time.time()
    end_timestamp = time.strftime("%Y%m%d-%H%M%S")

    # Save the dataframe to a file so we have the opportunity to concatenate separate dataframes from separate runs
    dataframe = param_run.get_model_vars_dataframe()
    dataframe.to_pickle(path=OUTPUT_DIR + "/batch_results/dataframe_" +
                        end_timestamp + ".pickle")

    elapsed = iteration_end - iteration_start  # Get the elapsed time in seconds
    print("Batch runner finished iteration %i. Took: %s" %
          (i, str(timedelta(seconds=elapsed))))
Example no. 21
    ],
    "startingBehav": [
        'C',
        #'D',
    ],
    #"sensitivity": [0],
    "sensitive_agents": [
        (0, ),  #(0, 13]),
    ],  # This will get clunky if we want to randomly distribute them every time, or if we want to include all agents
}
""" For collecting training data for kNN, please run one init_ppD at a time.
    Otherwise, it doesn't export the ppD variable correctly to the pickle! """

br = BatchRunner(PDModel,
                 br_params,
                 iterations=5,
                 max_steps=10000,
                 model_reporters={"Data Collector": lambda m: m.datacollector})

if __name__ == '__main__':
    br.run_all()
    br_df = br.get_model_vars_dataframe()
    br_step_data = pd.DataFrame()
    for i in range(len(br_df["Data Collector"])):
        if isinstance(br_df["Data Collector"][i], DataCollector):
            i_run_data = br_df["Data Collector"][i].get_model_vars_dataframe()
            br_step_data = br_step_data.append(i_run_data, ignore_index=True)
    br_step_data.to_csv(
        "PDModel_Step_Data_%s.csv" % (str(random.randint(
            1, 200000))))  # this might not be as useful for importing
Example no. 22
import datetime

import numpy as np
from mesa.batchrunner import BatchRunner

from LanguageShift.LanguageModel import LanguageModel

model_params = {'diffusivity': [0.013, 0.009], 'timestep': 1, 'filename': 'doctoreddata.csv',
                'grid_pickle': 'neighbor.pkl'}

model_reporters = {}
agent_reporters = {"pop": lambda x: x.population,
                   "p_p_german": lambda x: x.p_probability[0],
                   "p_p_slovene": lambda x: x.p_probability[1],
                   "p_german": lambda x: x.probability[0],
                   "p_slovene": lambda x: x.probability[1],
                   "p_diff": lambda x: np.sum(np.abs(x.probability - x.p_probability)),
                   "lat": lambda x: x.pos[0],
                   "long": lambda x: x.pos[1]}

batch = BatchRunner(LanguageModel, parameter_values=model_params, agent_reporters=agent_reporters, max_steps=30,
                    iterations=1)
batch.run_all()

batch.get_agent_vars_dataframe().to_csv('batch_output' + str(datetime.date.today()))
Example no. 23
replicates = 10
max_steps = 50
distinct_samples = 10

model_reporters = {"Sheep deaths": lambda m: Sheep.sheepdeaths}

data = {}

for i, var in enumerate(problem['names']):
    samples = np.linspace(*problem['bounds'][i],
                          num=distinct_samples,
                          dtype=int)

    batch = BatchRunner(TotC,
                        max_steps=max_steps,
                        iterations=replicates,
                        variable_parameters={var: samples},
                        model_reporters=model_reporters,
                        display_progress=True)
    batch.run_all()
    data[var] = batch.get_model_vars_dataframe()

# Running  ^^^^^
# Plotting vvvvv


def plot_param_var_conf(ax, df, var, param, i):
    """
    Helper function for plot_all_vars. Plots the individual parameter vs
    variables passed.

    Args:
Example no. 24
              "phage_mutation_step" : 0.1,
              "phage_mutation_freq" : [0.1, 1],
              "re_degrade_foreign_0": [0, 0.99, 0.999],
              "re_degrade_foreign_1": [0, 0.99, 0.999],
              "epi_inheritance" : [-2, -1, 1, 0.5, 0.1], #-1 = genetic, -2 = random
              "phage_inactivation_time" : 3}



batch_run = BatchRunner(BaseModel, 
                        parameters, 
                        iterations=10, 
                        max_steps=200,
                        agent_reporters = {
                                "breed" : lambda a : a.breed,
                                "methylation" : lambda a: a.methylation,
                                "genotype" : lambda a: a.genotype,
                                "affinity_0" : helper_functions.get_affinity(0),
                                "affinity_1" : helper_functions.get_affinity(1)},
                       model_reporters={
                                "phage" : lambda m : m.schedule.get_breed_count(Phage),
                                "bacteria" : lambda m : m.schedule.get_breed_count(Bacteria),
                                "bacteria_meth_1" : lambda m: get_breed_filtered_count(Bacteria,by_methylation(1))(m),
                                "phage_meth_1" : lambda m: get_breed_filtered_count(Phage,by_methylation(1))(m),
                                "avg_affinity" : helper_functions.avg_phage_affinity
        })

batch_run.run_all()
run_data_agents = batch_run.get_agent_vars_dataframe()
run_data_agents.to_csv("evolvable-phage-endpoint.csv")
Example no. 25
    # Show the Gini Wealth Distribution
    gini = model.datacollector.get_model_vars_dataframe()
    gini.plot()
    plt.show()

    # Show all agents wealth as a histogram
    agent_wealth = model.datacollector.get_agent_vars_dataframe()
    agent_wealth.head()
    end_wealth = agent_wealth.xs(99, level="Step")["Wealth"]
    end_wealth.hist(bins=range(agent_wealth.Wealth.max() + 1))
    plt.show()

    # Show a single agent's wealth over each time step
    one_agent_wealth = agent_wealth.xs(14, level="AgentID")
    one_agent_wealth.Wealth.plot()
    plt.show()

    # Use BatchRunner to run multiple instantiations at the same time
    parameters = {"width": 10, "height": 10, "N": range(10, 500, 10)}
    batch_run = BatchRunner(MoneyModel,
                            parameters,
                            iterations=5,
                            max_steps=100,
                            model_reporters={"Gini": compute_gini})
    batch_run.run_all()
    # show BatchRunner data as a scatter plot
    run_data = batch_run.get_model_vars_dataframe()
    run_data.head()
    plt.scatter(run_data.N, run_data.Gini)
    plt.show()
Example no. 26
from mesa.datacollection import DataCollector
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np


fixed_params = {'num_agents': 50,
                'percent_trolls': 0.1}

variable_params = {'percent_mods': np.arange(0.0, 0.8, 0.02),
                   'mod_power': range(0,30,1),
                   }

batch_run = BatchRunner(TrollModNetwork,
                        variable_params,
                        fixed_params,
                        iterations=3,
                        max_steps=20,
                        model_reporters={"average_trolling_delta": compute_avg_delta})


if __name__ == '__main__':
    
    batch_run.run_all()
    run_data = batch_run.get_model_vars_dataframe()
    plt.scatter(run_data.mod_power, run_data.percent_mods, c=run_data.average_trolling_delta, cmap='nipy_spectral')
    plt.clim(0,1.75)
    cbar = plt.colorbar()
    cbar.set_label("Average trolling delta")
    plt.xlabel('Mod Power')
    plt.ylabel('Percent mods')
    plt.show()
Example no. 27


#regular batch runner input
fixed_params = {"avg_node_degree":10,
                "num_nodes": 400,
               "initial_outbreak_size" : 1,
                "threshold" : 3
                }
variable_params = {
                      "rewire_prob": [0.000001, 0.00001,0.0001,0.001,0.01,0.1,1]
                  }  #  {"rewire_prob": range(0.05, 1.0, 0.05)}

batch_run = BatchRunner(InfoSpread,
                        variable_params,
                        fixed_params,
                        iterations=1,
                        max_steps=100,
                        model_reporters={"infection_list": infected_list})

#Batch runner GUI set up 
if __name__ == '__main__':
    # running the full iteration of model: round = variable parameter x rounds of iteration
    batch_run.run_all() 
    
    # getting the pandas data frame
    run_data = batch_run.get_model_vars_dataframe()
    
    #data processing
    #This instance shows only the 
    x = run_data.rewire_prob
    time = calculate_infection_time(run_data)
Example no. 28
gini = model.datacollector.get_model_vars_dataframe()
gini.plot()
plt.close()
##Agent wealth data
agent_wealth = model.datacollector.get_agent_vars_dataframe()
agent_wealth.head()

plt.show()
one_agent_wealth = agent_wealth.xs(14, level="AgentID")
one_agent_wealth.Wealth.plot()

##Batch runner
fixed_params = {"width": 10, "height": 10}
variable_params = {"N": range(10, 500, 10)}

batch_run = BatchRunner(MoneyModel,
                        fixed_parameters=fixed_params,
                        variable_parameters=variable_params,
                        iterations=5,
                        max_steps=100,
                        model_reporters={"Gini": compute_gini})
batch_run.run_all()

run_data = batch_run.get_model_vars_dataframe()
run_data.head()
plt.scatter(run_data.N, run_data.Gini)

###Server for visualization
server.port = 8521  # The default
server.launch()
Example no. 29
'''
# =============================================================================
# When running this file the batchrunner will be used for the model. 
# No visualization will happen.
# =============================================================================
'''
from mesa.batchrunner import BatchRunner
from formation_flying.model import FormationFlying
from formation_flying.parameters import model_params, max_steps, n_iterations, model_reporter_parameters, agent_reporter_parameters, variable_params

batch_run = BatchRunner(FormationFlying,
                        fixed_parameters=model_params,
                        variable_parameters=variable_params,
                        iterations=n_iterations,
                        max_steps=max_steps,
                        model_reporters=model_reporter_parameters,
                        agent_reporters=agent_reporter_parameters)

batch_run.run_all()

run_data = batch_run.get_model_vars_dataframe()
run_data.head()
Example no. 30
from mesa.batchrunner import BatchRunner
from AntModel import AntModel
from DataCollection import ant_state_collector

# Default settings
STEP_COUNT = 300
NUM_LNIGER = 300
NUM_FJAPON = 30
NUM_MK_COL = 5
NUM_FT_COL = 5
GRID_WIDTH = 150
GRID_HEIGHT = 150

fixed_params = {"width": GRID_WIDTH,
                "height": GRID_HEIGHT,
                "num_ln": NUM_LNIGER,
                "num_mk_col": NUM_MK_COL,
                "num_ft_col": NUM_FT_COL}
variable_params = {"num_fj": range(0, 50)}

batch_run = BatchRunner(AntModel,
                        fixed_parameters=fixed_params,
                        variable_parameters=variable_params,
                        iterations=5,
                        max_steps=STEP_COUNT,
                        agent_reporters={"State:": ant_state_collector})
batch_run.run_all()
df = batch_run.get_agent_vars_dataframe()
df.to_csv(path_or_buf="out.csv")