Example #1
    def run_world_with_demand_and_power_plants(self):

        number_of_steps = 40
        data_folder = "minimum_sized_country_3"

        fixed_params = {
            "initialization_year": 2018,
            "number_of_steps": number_of_steps,
            "demand_change": [1.0] * 50,
            "carbon_price_scenario": [10] * 50,
            "data_folder": data_folder,
            "time_run": False
        }

        variable_params = {
            "total_demand":
            [1000, 5000, 10000, 20000, 30000, 40000, 50000, 75000, 100000]
        }

        batch_run = BatchRunnerMP(World,
                                  fixed_parameters=fixed_params,
                                  variable_parameters=variable_params,
                                  iterations=1,
                                  max_steps=number_of_steps,
                                  nr_processes=3)

        batch_run.run_all()
Example #2
def create_data(problem, new_path):
    """
    problem : dict with specified input variables and range instead of discrete values otherwise saltelli will not work

    Run each batch iterations with all the samples obtained from saltelli and save at the end of the run

    Saves data with time stamp as .csv and .pickle
    """

    # Set the repetitions, the amount of steps, and the amount of distinct values per variable
    replicates = 10
    max_steps = 100
    distinct_samples = 10

    # Define output parameters
    model_reporters = {
        'step_data': lambda m: m.datacollector.get_model_vars_dataframe(),
        'obstacle_density': lambda m: m.obstacle_density,
        'food_density': lambda m: m.food_density,
        'nr_hives': lambda m: m.nr_hives
    }

    data = {}

    # Sample with saltelli over the full parameter space: computationally expensive,
    # but it covers all parameter interactions
    params_values = saltelli.sample(problem, N=distinct_samples)


    # Transform the samples to int values, overwriting the array in place
    # (set copy=True if a separate copy is needed)
    params_values = params_values.astype(int, copy=False)


    batch = BatchRunnerMP(BeeForagingModel,
                          nr_processes=os.cpu_count(),
                          max_steps=max_steps,
                          variable_parameters={val: [] for val in problem['names']},
                          model_reporters=model_reporters,
                          display_progress=True)
    counter = 0

    # progress bar over all replicate * sample runs
    pbar = tqdm(total=replicates * len(params_values))
    for _ in range(replicates):
        for values in params_values:
            var_parameters = {}

            # collect the parameter values for this saltelli sample
            for n, v in zip(problem['names'], values):
                var_parameters[n] = v
            batch.run_iteration(var_parameters, tuple(values), counter)
            counter += 1
            pbar.update(1)
    pbar.close()
    data = batch.get_model_vars_dataframe()

    data.to_csv(f'pickles/analysis_{new_path}.csv')
    data.to_pickle(f'pickles/analysis_{new_path}.p')
    return data
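
A usage sketch (not from the original source; the bounds are illustrative): create_data expects a SALib problem definition with a continuous range per variable, here the three BeeForagingModel parameters that also appear in the OFAT example below.

problem = {
    'num_vars': 3,
    'names': ['obstacle_density', 'food_density', 'nr_hives'],
    'bounds': [[0, 30], [5, 25], [1, 5]]  # illustrative ranges, not from the source
}
data = create_data(problem, new_path='sobol_run')  # 'sobol_run' is a placeholder name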
Example #3
    def launch_batch_processing(self):
        batch = BatchRunnerMP(
            self.mock_model,
            nr_processes=None,
            variable_parameters=self.variable_params,
            fixed_parameters=self.fixed_params,
            iterations=self.iterations,
            max_steps=self.max_steps,
            model_reporters=self.model_reporters,
            agent_reporters=self.agent_reporters,
        )

        batch.run_all()
        return batch
Example #4
    def launch_batch_processing_debug(self):
        """
        Tests with one processor for debugging purposes
        """

        batch = BatchRunnerMP(
            self.mock_model,
            nr_processes=1,
            variable_parameters=self.variable_params,
            fixed_parameters=self.fixed_params,
            iterations=self.iterations,
            max_steps=self.max_steps,
            model_reporters=self.model_reporters,
            agent_reporters=self.agent_reporters,
        )

        batch.run_all()
        return batch
Example #5
def perform_analysis():
    problem = {
        'num_vars': 1,
        'names': [
            'tolerance'
        ],  # available parameters: 'tolerance','social_extroversion','mobility','decay'
        'bounds': [[0.01, 0.99]]
    }

    # Set the repetitions, the amount of steps, and the amount of distinct values per variable
    replicates = 10
    max_steps = 10
    distinct_samples = 10

    # Set the outputs
    model_reporters = {
        "Friends score": lambda m: m.avg_friends_score(),
        "Friends distance": lambda m: m.avg_friends_social_distance(),
        "Friends spatial distance": lambda m: m.avg_friends_spatial_distance()
    }

    data = {}
    begin = time.time()

    for i, var in enumerate(problem['names']):
        # Get the bounds for this variable and get <distinct_samples> samples within this space (uniform)
        samples = np.linspace(*problem['bounds'][i], num=distinct_samples)

        batch = BatchRunnerMP(Friends,
                              max_steps=max_steps,
                              iterations=replicates,
                              variable_parameters={var: samples},
                              model_reporters=model_reporters,
                              display_progress=True,
                              nr_processes=multiprocessing.cpu_count() - 1)

        batch.run_all()
        end = time.time()
        print("Performed", replicates * distinct_samples, "model runs in",
              np.round(end - begin), "seconds.")

        data[var] = batch.get_model_vars_dataframe()
    return [problem, data]
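
A follow-up sketch (not part of the original source) that runs the analysis and plots the mean of the "Friends score" reporter against the swept parameter; it assumes matplotlib is available.

import matplotlib.pyplot as plt

problem, data = perform_analysis()
for var, df in data.items():
    # average the reporter over replicates for each sampled value of the parameter
    mean_score = df.groupby(var)["Friends score"].mean()
    plt.plot(mean_score.index, mean_score.values, marker='o', label=var)
plt.xlabel('parameter value')
plt.ylabel('mean Friends score')
plt.legend()
plt.show()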
Example #6
    def run_ofat(self):
        """
        Collects model data for OFAT analysis.
        Writes the raw data as a nested pandas DataFrame to pickle and csv.
        """

        # We define our variables and bounds
        params = {
            'obstacle_density': [0, 15, 30],
            'food_density': [5, 15, 25],
            'nr_hives': [1, 3, 5]
        }

        # Set the repetitions, the amount of steps, and the amount of distinct values per variable

        replicates = 1
        max_steps = 30

        # Define output parameters
        model_reporters = {
            'step_data': lambda m: m.datacollector.get_model_vars_dataframe(),
            'obstacle_density': lambda m: m.obstacle_density,
            'food_density': lambda m: m.food_density,
            'nr_hives': lambda m: m.nr_hives
        }

        data = {}

        for var in params:

            batch = BatchRunnerMP(BeeForagingModel,
                                  max_steps=max_steps,
                                  nr_processes=os.cpu_count(),
                                  iterations=replicates,
                                  variable_parameters={var: params[var]},
                                  model_reporters=model_reporters,
                                  display_progress=True)

            batch.run_all()
            data = batch.get_model_vars_dataframe()
            data.to_csv(f'pickles/{self.time_stamp}_{var}.csv')
            data.to_pickle(f'pickles/{self.time_stamp}_{var}.p')
Example #7
    "recovered_seed_pc": [0.01, 0.1, 0.23],
    "high_risk_pc": [0.25],
    "grid_area": ["Small", "Large"],
    "house_init": ["Neighborhood"],
    "release_strat": [
        "Everyone release", "Random individual houses", "Low risk individuals",
        "Low risk houses"
    ],
    "mobility_speed": ["low"],
    "weeks_to_second_release": [2, 4]
}

br = BatchRunnerMP(
    Virus,
    nr_processes=4,
    variable_parameters=br_params,
    iterations=3,  # number of times to run each parameter combination
    max_steps=600,  # number of steps for each model run
    model_reporters={"Data Collector": lambda m: m.datacollector})

if __name__ == '__main__':
    br.run_all()
    br_df = br.get_model_vars_dataframe()
    br_step_data = pd.DataFrame()
    for i in range(len(br_df["Data Collector"])):
        if isinstance(br_df["Data Collector"][i], DataCollector):
            i_run_data = br_df["Data Collector"][i].get_model_vars_dataframe()
            # DataFrame.append was removed in pandas 2.0; pd.concat is the supported equivalent
            br_step_data = pd.concat([br_step_data, i_run_data],
                                     ignore_index=True)
    br_step_data.to_csv(
        "/Users/shwu2259/GroupRotation/VirusModel_lowmob_lowfrac.csv")
Example #8
}

start_date = datetime.datetime(2020, 2, 20)  # Setting
num_iterations = 1  # Setting
num_max_steps_in_reality = 95  # Setting
num_max_steps_in_simulation = 165  # Setting
end_date_in_reality = start_date + datetime.timedelta(
    days=num_max_steps_in_reality)  # 2020-05-25
end_date_in_simulation = start_date + datetime.timedelta(
    days=num_max_steps_in_simulation
)  # 2020-09-22 if num_max_steps_in_simulation == 215

try:
    br = BatchRunnerMP(
        BatchHostNetwork,
        br_params,
        iterations=num_iterations,
        max_steps=num_max_steps_in_simulation,
        model_reporters={'Data Collector': lambda m: m.datacollector})
except Exception as e:
    print('Multiprocessing batch run not applied, reason as:', e)
    br = CustomBatchRunner(
        BatchHostNetwork,
        br_params,
        iterations=num_iterations,
        max_steps=num_max_steps_in_simulation,
        model_reporters={'Data Collector': lambda m: m.datacollector})


def main(on_switch=False,
         graph_switch=False,
         stats_test_switch=False,
Example #9
        "market_time_splices": MARKET_TIME_SPLICES,
        "data_folder": data_folder,
        "number_of_steps": number_of_steps,
        "long_term_fitting_params": prices_individual,
        "highest_demand": 63910,
        "nuclear_subsidy": beis_params[-3],
        "future_price_uncertainty_m": beis_params[-2],
        "future_price_uncertainty_c": beis_params[-1],
        "dropbox": False
    }

    variable_params = {
        "demand_change": [[0.99] * number_of_steps, [0.98] * number_of_steps,
                          [0.97] * number_of_steps, [1.00] * number_of_steps,
                          [1.01] * number_of_steps, [1.02] * number_of_steps,
                          [1.025] * number_of_steps]
    }

    #    variable_params = {"carbon_price_scenario": [[30] * 50],
    #    "demand_change": [[1.01] * 50]
    #    }

    batch_run = BatchRunnerMP(World,
                              fixed_parameters=fixed_params,
                              variable_parameters=variable_params,
                              iterations=10,
                              max_steps=number_of_steps,
                              nr_processes=7)

    batch_run.run_all()
Example #10
import matplotlib.pyplot as plt
from mesa.batchrunner import BatchRunnerMP

from tictactoe import get_game_grid, TicTacToe

runner = BatchRunnerMP(
    TicTacToe,
    nr_processes=2,
    iterations=256,
    model_reporters={
        'endgame': lambda m: m.endgame,
        'endgrid': get_game_grid
    },
)

runner.run_all()

df = runner.get_model_vars_dataframe()
print(df.endgame.value_counts())

fig = plt.figure(figsize=(16, 16))
for i in range(256):
    subplot = fig.add_subplot(16, 16, i + 1)
    subplot.imshow(df.endgrid[i])
    subplot.axis('off')
    subplot.set_title(df.endgame[i].name, size=8)

fig.tight_layout()
plt.subplots_adjust(wspace=-.9)
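
The excerpt stops after adjusting the subplot spacing; presumably the script ends by displaying (or saving) the assembled grid of finished boards, e.g.:

plt.show()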
Example #11
        #     len_t = self.same_grid + self.other_grid
        #     print("Step={}: {} ({}%) crashes on same grid, {} ({}%) crashes on other grid".format(self.schedule.steps, self.same_grid, 100*self.same_grid/len_t, self.other_grid, 100*self.other_grid/len_t))
        # self.datacollector.collect(self)


## Initialize maps
''' Batch run '''
# Parameters
if __name__ == "__main__":

    if MP:
        batch_run = BatchRunnerMP(
            DeliveryModel,
            nr_processes=30,
            fixed_parameters=fixed_params,
            variable_parameters=variable_params,
            iterations=n_iterations,
            max_steps=max_steps,
            model_reporters=model_reporter_parameters,
        )
    else:
        batch_run = BatchRunner(
            DeliveryModel,
            fixed_parameters=fixed_params,
            variable_parameters=variable_params,
            iterations=n_iterations,
            max_steps=max_steps,
            model_reporters=model_reporter_parameters,
        )
    batch_run.run_all()
Example #12
from model import WhaleModel
from mesa.batchrunner import BatchRunnerMP

fixed_params = {}
variable_params = {"mate_choice": [None, "Male", "Female"], "grandmother_effect": [True, False]}

# fixed_params = {"mate_choice": None}
# variable_params = {"grandmother_effect": [True, False]}

def get_data_collector(model):
    return model.datacollector

batch_run = BatchRunnerMP(WhaleModel,
                        fixed_parameters=fixed_params,
                        variable_parameters=variable_params,
                        iterations=10,
                        max_steps=10000,
                        model_reporters={"data": get_data_collector},
                        display_progress=True,
                        nr_processes=4)
batch_run.run_all()

# all_dfs = {x:[] for x in variable_params["mate_choice"]} #key = mate_choice; value = list of dataframes with that mate_choice

# run_data = batch_run.get_model_vars_dataframe()

# for index, row in run_data.iterrows():
#     df = pd.DataFrame.from_dict(row["data"].model_vars)
#     all_dfs[row["mate_choice"]].append(df)

# data = {}
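
The commented-out lines above sketch the intended post-processing; a runnable version (assuming pandas is imported as pd) would look like this.

import pandas as pd

# key = mate_choice value; value = list of per-run DataCollector frames with that mate_choice
all_dfs = {x: [] for x in variable_params["mate_choice"]}

run_data = batch_run.get_model_vars_dataframe()
for index, row in run_data.iterrows():
    df = pd.DataFrame.from_dict(row["data"].model_vars)
    all_dfs[row["mate_choice"]].append(df)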
Example #13
    CPU_COUNT = os.cpu_count() or 2
    config = args.config
    config_slice = args.slice
    run_config = simulation_config[config]
    variable_params = run_config["variable_params"]
    fixed_slice = run_config["slice"][config_slice]
    fixed_params = run_config["fixed_params"]
    fixed_params.update(fixed_slice)

    # create multi-process runner
    batch_run = BatchRunnerMP(
        (BaseModel if MODEL_NAME == "base" else AlternativeModel),
        nr_processes=CPU_COUNT,
        variable_parameters=variable_params,
        fixed_parameters=fixed_params,
        iterations=run_config["num_iterations"],
        max_steps=run_config["num_steps"],
        display_progress=True,
        model_reporters={
            "history": track_model_steps,
        },
    )

    # run simulation
    total_iter, num_conf, num_iter = get_info(
        batch_run, variable_params)
    print(f'Starting simulation with the following setup:')
    print(f'- Simulation model: {batch_run.model_cls.__name__}')
    print(f'- Configuration: {config}')
    print(f'- Configuration slice: {config_slice}')
    print(f'- Fixed parameters: {fixed_params}')
    print(f'- Variable parameters: {variable_params}')
Example #14
 batch_run = BatchRunnerMP(
     ABM_CE_PV,
     nr_processes=6,
     variable_parameters=variable_params,
     fixed_parameters=fixed_params,
     iterations=1,
     max_steps=30,
     model_reporters={
         "Year":
         lambda c: ABM_CE_PV.report_output(c, "year"),
         "Agents repairing":
         lambda c: ABM_CE_PV.count_EoL(c, "repairing"),
         "Agents selling":
         lambda c: ABM_CE_PV.count_EoL(c, "selling"),
         "Agents recycling":
         lambda c: ABM_CE_PV.count_EoL(c, "recycling"),
         "Agents landfilling":
         lambda c: ABM_CE_PV.count_EoL(c, "landfilling"),
         "Agents storing":
         lambda c: ABM_CE_PV.count_EoL(c, "hoarding"),
         "Agents buying new":
         lambda c: ABM_CE_PV.count_EoL(c, "buy_new"),
         "Agents buying used":
         lambda c: ABM_CE_PV.count_EoL(c, "buy_used"),
         "Agents buying certified":
         lambda c: ABM_CE_PV.count_EoL(c, "certified"),
         "Total product":
         lambda c: ABM_CE_PV.report_output(c, "product_stock"),
         "New product":
         lambda c: ABM_CE_PV.report_output(c, "product_stock_new"),
         "Used product":
         lambda c: ABM_CE_PV.report_output(c, "product_stock_used"),
         "New product_mass":
         lambda c: ABM_CE_PV.report_output(c, "prod_stock_new_mass"
                                           ),
         "Used product_mass":
         lambda c: ABM_CE_PV.report_output(c, "prod_stock_used_mass"
                                           ),
         "End-of-life - repaired":
         lambda c: ABM_CE_PV.report_output(c, "product_repaired"),
         "End-of-life - sold":
         lambda c: ABM_CE_PV.report_output(c, "product_sold"),
         "End-of-life - recycled":
         lambda c: ABM_CE_PV.report_output(c, "product_recycled"),
         "End-of-life - landfilled":
         lambda c: ABM_CE_PV.report_output(c, "product_landfilled"),
         "End-of-life - stored":
         lambda c: ABM_CE_PV.report_output(c, "product_hoarded"),
         "eol - new repaired weight":
         lambda c: ABM_CE_PV.report_output(c, "product_new_repaired"
                                           ),
         "eol - new sold weight":
         lambda c: ABM_CE_PV.report_output(c, "product_new_sold"),
         "eol - new recycled weight":
         lambda c: ABM_CE_PV.report_output(c, "product_new_recycled"
                                           ),
         "eol - new landfilled weight":
         lambda c: ABM_CE_PV.report_output(
             c, "product_new_landfilled"),
         "eol - new stored weight":
         lambda c: ABM_CE_PV.report_output(c, "product_new_hoarded"
                                           ),
         "eol - used repaired weight":
         lambda c: ABM_CE_PV.report_output(c,
                                           "product_used_repaired"),
         "eol - used sold weight":
         lambda c: ABM_CE_PV.report_output(c, "product_used_sold"),
         "eol - used recycled weight":
         lambda c: ABM_CE_PV.report_output(c,
                                           "product_used_recycled"),
         "eol - used landfilled weight":
         lambda c: ABM_CE_PV.report_output(
             c, "product_used_landfilled"),
         "eol - used stored weight":
         lambda c: ABM_CE_PV.report_output(c, "product_used_hoarded"
                                           ),
         "Average landfilling cost":
         lambda c: ABM_CE_PV.report_output(c,
                                           "average_landfill_cost"),
         "Average storing cost":
         lambda c: ABM_CE_PV.report_output(c,
                                           "average_hoarding_cost"),
         "Average recycling cost":
         lambda c: ABM_CE_PV.report_output(
             c, "average_recycling_cost"),
         "Average repairing cost":
         lambda c: ABM_CE_PV.report_output(
             c, "average_repairing_cost"),
         "Average selling cost":
         lambda c: ABM_CE_PV.report_output(
             c, "average_second_hand_price"),
         "Recycled material volume":
         lambda c: ABM_CE_PV.report_output(c, "recycled_mat_volume"
                                           ),
         "Recycled material value":
         lambda c: ABM_CE_PV.report_output(c, "recycled_mat_value"),
         "Producer costs":
         lambda c: ABM_CE_PV.report_output(c, "producer_costs"),
         "Consumer costs":
         lambda c: ABM_CE_PV.report_output(c, "consumer_costs"),
         "Recycler costs":
         lambda c: ABM_CE_PV.report_output(c, "recycler_costs"),
         "Refurbisher costs":
         lambda c: ABM_CE_PV.report_output(c, "refurbisher_costs"),
         "Refurbisher costs w margins":
         lambda c: ABM_CE_PV.report_output(
             c, "refurbisher_costs_w_margins")
     })
Example #15
    "initial_predators": [0, 3, 5]
    # "initial_competitors": [ 0, 3, 5],
    # "initial_predators": [ 0, 3, 5]
    #
}

# var_model_params = {
#                 'visibility': [True, False],
#                 "initial_explorers": range(1, 5),
#                 "initial_competitors": range(0, 5),
#                 "initial_predators": range(0, 5)
#                 }

br = BatchRunnerMP(
    PsyRTSGame,
    model_paramsT,
    iterations=1,
    max_steps=150,
    model_reporters={"Data Collector": lambda m: m.datacollector})

brP = BatchRunner(
    PsyRTSGame,
    model_paramsP,
    iterations=5,
    max_steps=150,
    model_reporters={"Data Collector": lambda m: m.datacollector})

if __name__ == '__main__':
    br.run_all()
    br_df = br.get_model_vars_dataframe()
    #br_step_data =  br_df
    br_step_data = pd.DataFrame()
Example #16
}

# variable parameters defining each configuration
variable_params = {
    "p_1": [0.1, 0.5, 0.9],
    "p_2": [0.1, 0.5, 0.9],
}

batch_run = BatchRunnerMP(
    OriginalMarchModel,
    nr_processes=CPU_COUNT,
    variable_parameters=variable_params,
    fixed_parameters=fixed_params,
    iterations=80,
    max_steps=100,
    display_progress=True,
    model_reporters={
        "history": track_model_steps,
        "ACK": calc_code_kl,
        "AHK": calc_human_kl,
        "VHK": calc_kl_var,
        "DISSIM": calc_dissim,
    },
)

# simulation batch run
total_iter, num_conf, num_iter = get_info(batch_run)
print(f'Starting simulation with the following setup:')
print(f'- Total number of iterations: {total_iter}')
print(f'- Number of configurations: {num_conf}')
print(f'- Iterations per configuration: {num_iter}')
print(f'- Number of processing cores: {CPU_COUNT}')
Example #17
def run_model(
    input_file,
    output_file,
    n_processors,
    class_id=None,
    all_classes=True,
    webserver=False,
    model_params=None,
    test_mode=False,
    speedup=1,
):
    input_filepath = os.path.join(os.getcwd(), input_file)
    all_data = InputData(input_filepath)

    class_ids = all_data.get_class_ids()
    if webserver:
        if all_classes:
            click.echo("Cannot run over all classes in webserver mode (yet!)")
            sys.exit(2)
    else:
        if not all_classes:
            if class_id not in class_ids:
                click.echo(
                    f"Invalid class ID {class_id}. Valid classes are: {class_ids}"
                )
                sys.exit(1)
            class_ids = [class_id]

    output_data_writer = OutputDataWriter(output_file)

    # Get data first to determine grid size
    model_initial_state = ModelState(0, 0, 0, 0, 0)
    logging.info("Running on classes: %s",
                 ", ".join([str(i) for i in class_ids]))

    if not model_params:
        model_params = DEFAULT_MODEL_PARAMS

    if test_mode:
        model_params.maths_ticks_mean = 10
        model_params.maths_ticks_sd = 0.1
        model_params.ticks_per_home_day = 10

    # To ensure each thread in the BatchProcessor gets a different random
    # number generator, we use a seed sequence to generate a new seed for
    # each instance of SimModel (one per class), as they run on parallel
    # processors in batch mode, and we need to ensure they don't all produce
    # the same numbers

    # We use a non-reproducible seed sequence to ensure changes to parameters
    # are not masked by the random numbers generated
    ss = np.random.SeedSequence()

    # If we want the rngs to be reproducible in batch mode, we can use the
    # following to get a SeedSequence (see
    # https://albertcthomas.github.io/good-practices-random-number-generators/)
    # random_number_generator = np.random.default_rng(2021)
    # ss = random_number_generator.bit_generator._seed_seq

    # Create an rng for each class
    rngs = [np.random.default_rng(s) for s in ss.spawn(len(class_ids))]

    if webserver:
        canvas_grid = create_canvas_grid(14, 14)
        css_element = CssElement()
        pupil_element = PupilMonitorElement()
        class_element = ClassMonitorElement()
        max_speedup = round(model_params.maths_ticks_mean / 100) * 100

        summary_filepath = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            "classes_input",
            "sample_class_summaries.csv",
        )
        summary_data = None
        if os.path.isfile(summary_filepath):
            summary_data = pd.read_csv(summary_filepath)

        server = ModularServer(
            SimModel,
            [
                sim_element,
                class_element,
                pupil_element,
                canvas_grid,
                sim_chart,
                css_element,
            ],
            "SimulatED",
            {
                "all_data":
                all_data,
                "model_initial_state":
                model_initial_state,
                "output_data_writer":
                output_data_writer,
                "model_params":
                model_params,
                "summary_data":
                summary_data,
                "canvas_grid":
                canvas_grid,
                "instructions":
                UserSettableParameter(
                    "static_text",
                    value=
                    "<p>Classrooms in school are places when students learn and this model examines how this "
                    "learning in mathematics changes over a year. As time passes students can be attentive (green), "
                    "passive (yellow) or disruptive (red).</p>"
                    "<p>There are some variables related to the classroom which you can change in the model using the "
                    "option on the left. Then click Reset before setting it going. (Setting <em>Frames Per Second</em> "
                    "to 0 runs the model at maximum speed.)</p>"
                    "<p>Whilst the model runs you can watch individual students or the average score for the class."
                    "Try changing the value of the parameters to improve class learning.</p>",
                ),
                "class_id":
                UserSettableParameter("choice",
                                      "Class ID",
                                      value=class_ids[0],
                                      choices=class_ids),
                "teacher_quality_mean":
                UserSettableParameter(
                    "slider",
                    "Teaching quality mean",
                    model_params.teacher_quality_mean,
                    0,
                    5.0,
                    0.1,
                ),
                "teacher_control_mean":
                UserSettableParameter(
                    "slider",
                    "Teaching control mean",
                    model_params.teacher_control_mean,
                    0.00,
                    5.0,
                    0.1,
                ),
                "random_select":
                UserSettableParameter(
                    "slider",
                    "Mean for random number used at each step",
                    model_params.random_select,
                    0.00,
                    10.0,
                    0.1,
                ),
                "group_size":
                UserSettableParameter(
                    "slider",
                    "Size of each group of pupils",
                    model_params.group_size,
                    1,
                    40,
                    1,
                ),
                "group_by_ability":
                UserSettableParameter(
                    "checkbox",
                    "Group pupils by ability (rather than at random)",
                    model_params.group_by_ability,
                ),
                "speedup":
                UserSettableParameter(
                    "slider",
                    "How much to speed up (and approximate) the simulation",
                    1,
                    1,
                    max_speedup,
                ),
            },
        )

        port = int(os.getenv("PORT", 4200))
        server.launch(port=port, open_browser=False)

    else:
        print(f"BatchRunnerMP will use {n_processors} processors")
        batch_run = BatchRunnerMP(
            SimModel,
            variable_parameters={
                "class_id_and_rng": list(zip(class_ids, rngs)),
            },
            fixed_parameters={
                "all_data": all_data,
                "model_initial_state": model_initial_state,
                "output_data_writer": output_data_writer,
                "model_params": model_params,
                "speedup": speedup,
            },
            nr_processes=n_processors,
            iterations=1,
            max_steps=1000000,
        )

        batch_run.run_all()
Example #18
    "ingreso_inicial": rng.integers(0, 1000, 10)
}

model_reporters = {"Gini": compute_gini, "S80/S20": compute_s80_s20}

agent_reporters = {"Ingreso total": "ingreso_total"}

CPU_COUNT = int(multiprocessing.cpu_count() / 2) + 2

# The variable parameters will be invoked along with the fixed parameters, allowing either or both to be honored.
# The BatchRunner won't collect data at every step of the model, only at the end of each run.
batch_run = BatchRunnerMP(model_cls=EconomiaSocialista,
                          nr_processes=CPU_COUNT,
                          variable_parameters=variable_params,
                          fixed_parameters=fixed_params,
                          iterations=5,
                          max_steps=100,
                          model_reporters=model_reporters,
                          agent_reporters=agent_reporters,
                          display_progress=True)

batch_run.run_all()

run_model_data = batch_run.get_model_vars_dataframe()

# Gini
plt.title('Coeficiente de Gini por iteración')
plt.ylabel('Coeficiente de Gini')
plt.xlabel('Iteración')
plt.scatter(run_model_data.index.array, run_model_data.Gini)
plt.show()
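
A parallel sketch (not in the original excerpt) for the second reporter defined above, the S80/S20 ratio; the plot labels are illustrative.

plt.title('Razón S80/S20 por iteración')
plt.ylabel('S80/S20')
plt.xlabel('Iteración')
plt.scatter(run_model_data.index.array, run_model_data["S80/S20"])
plt.show()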
Example #19
# parameter lists for each parameter to be tested in batch run
model_params = {
    "initial_population": [100, 500, 1000, 2000],
    "share_knowledge": [False, True],
    "solidarity": [False, True],
    "recreate": [0, 1],
}

fixed_parameters = {}

br = BatchRunnerMP(
    model_cls=SugarscapeCg,
    nr_processes=8,
    variable_parameters=model_params,
    fixed_parameters={},
    iterations=5,
    max_steps=1000,
    model_reporters={"Data Collector": lambda m: m.datacollector},
    display_progress=True,
)

if __name__ == "__main__":
    br.run_all()
    br_df = br.get_model_vars_dataframe()
    br_step_data = pd.DataFrame()
    for i in range(len(br_df["Data Collector"])):
        if isinstance(br_df["Data Collector"][i], DataCollector):
            i_run_data = br_df["Data Collector"][i].get_model_vars_dataframe()
            br_step_data = pd.concat([br_step_data, i_run_data.tail(1)],
                                     ignore_index=True)
    br_step_data.to_csv("ants.csv")
Example #20
}

# variable parameters defining each configuration
variable_params = {
    "num_data_scientist": [5, 15, 30],
    "p_1": [0.1, 0.5, 0.9],
    "p_2": [0.1, 0.5, 0.9],
    "p_ml": [0.2, 0.5, 0.8],
}

batch_run = BatchRunnerMP(
    AlternativeMLModel,
    nr_processes=CPU_COUNT,
    variable_parameters=variable_params,
    fixed_parameters=fixed_params,
    iterations=2,
    max_steps=100,
    display_progress=True,
    model_reporters={
        "history": track_model_steps,
    },
)

# simulation batch run
total_iter, num_conf, num_iter = get_info(batch_run)
print(f'Starting simulation with the following setup:')
print(f'- Total number of iterations: {total_iter}')
print(f'- Number of configurations: {num_conf}')
print(f'- Iterations per configuration: {num_iter}')
print(f'- Number of processing cores: {CPU_COUNT}')
start = time.time()
batch_run.run_all()
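
The excerpt ends at run_all(); a plausible continuation, mirroring the timing print-outs above (the wording is illustrative), would be:

end = time.time()
print(f'Simulation finished after {round(end - start)} seconds.')
results = batch_run.get_model_vars_dataframe()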
Example #21
data = {}

for i, var in enumerate(problem['names']):
    # Get the bounds for this variable and get <distinct_samples> samples within this space (uniform)
    samples = np.linspace(*problem['bounds'][i], num=distinct_samples)

    # Keep in mind that wolf_gain_from_food should be an integer. You will have to change
    # your code to accommodate this, or sample in such a way that you only get integers.
    # if var == 'wolf_gain_from_food':
    #     samples = np.linspace(*problem['bounds'][i], num=distinct_samples, dtype=int)

    batch = BatchRunnerMP(Friends,
                          max_steps=max_steps,
                          iterations=replicates,
                          variable_parameters={var: samples},
                          model_reporters=model_reporters,
                          display_progress=True,
                          nr_processes=multiprocessing.cpu_count() - 1)

    batch.run_all()
    end = time.time()
    print("Model run-time:", end - begin)

    data[var] = batch.get_model_vars_dataframe()


def perform_analysis():
    problem = {
        'num_vars': 1,
        'names': [