import csv
import math

import numpy as np
import scipy.stats

# project-level imports; module paths follow the companion snippets further
# down (sir, plot_sir and control_variate_conf are project helpers)
import sir
from simulator import State, Simulator
from world import regions, routes


def execute_simulation(add_rio=False,
                       ol_start=0,
                       rio_length=18,
                       rio_visitors=380e3,
                       n_simulations=5):

    sol_global = []
    sol_rio = []
    sol_moscow = []
    sol_berlin = []
    sol_beijing = []
    sol_sydney = []
    sol_new_york = []
    params = {}
    params['global'] = []
    params['rio'] = []
    params['moscow'] = []
    params['berlin'] = []
    params['beijing'] = []
    params['sydney'] = []
    params['new'] = []
    for j in range(n_simulations):
        print("running simulation {0} / {1}".format(j + 1, n_simulations))
        state = State(regions, routes, verbose=True)
        state.set_outbreak('Rio De Janeiro', 1e3)
        sim = Simulator(state,
                        transfer_prob=0.005,
                        beta=2,
                        gamma=0.5,
                        verbose=True)

        sol_global.append([])
        sol_rio.append([])
        sol_moscow.append([])
        sol_berlin.append([])
        sol_beijing.append([])
        sol_sydney.append([])
        sol_new_york.append([])
        state_list = []
        for i, state in enumerate(sim.run(iterations=120)):
            state_list.append(state)
            if i == ol_start and add_rio:
                # at day ol_start, start the Olympics influx: rio_visitors
                # travel to Rio (region id 2560) over rio_length days
                sim.add_event(2560,
                              days=rio_length,
                              total_transfer=rio_visitors)

            sol_global[j].append(state.total_sir().as_tuple(total=True))
            sol_rio[j].append(state.region_sir[2560].as_tuple(total=True))
            sol_moscow[j].append(state.region_sir[4029].as_tuple(total=True))
            sol_berlin[j].append(state.region_sir[351].as_tuple(total=True))
            sol_beijing[j].append(state.region_sir[3364].as_tuple(total=True))
            sol_sydney[j].append(state.region_sir[3361].as_tuple(total=True))
            sol_new_york[j].append(state.region_sir[3797].as_tuple(total=True))

        params['global'].append(
            sir.ParameterEstimator(iter([x.total_sir() for x in state_list]),
                                   method='max').beta)
        params['rio'].append(
            sir.ParameterEstimator(iter(
                [x.region_sir[2560] for x in state_list]),
                                   method='max').beta)
        params['moscow'].append(
            sir.ParameterEstimator(iter(
                [x.region_sir[4029] for x in state_list]),
                                   method='max').beta)
        params['berlin'].append(
            sir.ParameterEstimator(iter(
                [x.region_sir[351] for x in state_list]),
                                   method='max').beta)
        params['beijing'].append(
            sir.ParameterEstimator(iter(
                [x.region_sir[3364] for x in state_list]),
                                   method='max').beta)
        params['sydney'].append(
            sir.ParameterEstimator(iter(
                [x.region_sir[3361] for x in state_list]),
                                   method='max').beta)
        params['new'].append(
            sir.ParameterEstimator(iter(
                [x.region_sir[3797] for x in state_list]),  # New York
                                   method='max').beta)

    if add_rio:
        fig_name = "rio-{0}-{1}-{2:d}.pdf".format(ol_start, rio_length,
                                                  int(rio_visitors))
    else:
        fig_name = "no_rio.pdf"

    plot_sir([
        sol_global, sol_rio, sol_new_york, sol_berlin, sol_moscow, sol_beijing,
        sol_sydney
    ], [
        'Global', 'Rio De Janeiro', 'New York', 'Berlin', 'Moscow', 'Beijing',
        'Sydney'
    ], fig_name)

    # peak day (argmax of the infected series, x[1]) and peak infected
    # count for each simulation run
    peak_times_global = [np.argmax([x[1] for x in y]) for y in sol_global]
    peak_amount_global = [
        y[peak][1] for peak, y in zip(peak_times_global, sol_global)
    ]

    peak_times_rio = [np.argmax([x[1] for x in y]) for y in sol_rio]
    peak_times_new_york = [np.argmax([x[1] for x in y]) for y in sol_new_york]
    peak_times_berlin = [np.argmax([x[1] for x in y]) for y in sol_berlin]
    peak_times_moscow = [np.argmax([x[1] for x in y]) for y in sol_moscow]
    peak_times_beijing = [np.argmax([x[1] for x in y]) for y in sol_beijing]
    peak_times_sydney = [np.argmax([x[1] for x in y]) for y in sol_sydney]

    # two-sided 95% t quantile with n - 1 degrees of freedom
    t_deviations = scipy.stats.t.ppf(0.975, n_simulations - 1)

    # estimate variance with control variates
    with open('control-{0}.csv'.format(add_rio), 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow([
            'global_amount', 'global_amount_control', 'global_peak_time',
            'global_peak_time_control', 'rio_time', 'rio_time_control',
            'new_york_time', 'new_york_time_control', 'berlin_time',
            'berlin_time_control', 'moscow_time', 'moscow_time_control',
            'beijing_time', 'beijing_time_control', 'sydney_time',
            'sydney_time_control'
        ])
        for i in range(n_simulations):
            writer.writerow([
                peak_amount_global[i], params['global'][i],
                peak_times_global[i], params['global'][i],
                peak_times_rio[i], params['rio'][i],
                peak_times_new_york[i], params['new'][i],
                peak_times_berlin[i], params['berlin'][i],
                peak_times_moscow[i], params['moscow'][i],
                peak_times_beijing[i], params['beijing'][i],
                peak_times_sydney[i], params['sydney'][i]
            ])
    amount_global_control_conf = control_variate_conf(peak_amount_global,
                                                      params['global'])
    time_global_control_conf = control_variate_conf(peak_times_global,
                                                    params['global'])
    time_rio_control_conf = control_variate_conf(peak_times_rio, params['rio'])
    time_new_york_control_conf = control_variate_conf(peak_times_new_york,
                                                      params['new'])
    time_berlin_control_conf = control_variate_conf(peak_times_berlin,
                                                    params['berlin'])
    time_moscow_control_conf = control_variate_conf(peak_times_moscow,
                                                    params['moscow'])
    time_beijing_control_conf = control_variate_conf(peak_times_beijing,
                                                     params['beijing'])
    time_sydney_control_conf = control_variate_conf(peak_times_sydney,
                                                    params['sydney'])

    return [(np.mean(peak_amount_global), t_deviations *
             np.std(peak_amount_global, ddof=1) / math.sqrt(n_simulations),
             amount_global_control_conf),
            (np.mean(peak_times_global), t_deviations *
             np.std(peak_times_global, ddof=1) / math.sqrt(n_simulations),
             time_global_control_conf),
            (np.mean(peak_times_rio), t_deviations *
             np.std(peak_times_rio, ddof=1) / math.sqrt(n_simulations),
             time_rio_control_conf),
            (np.mean(peak_times_new_york), t_deviations *
             np.std(peak_times_new_york, ddof=1) / math.sqrt(n_simulations),
             time_new_york_control_conf),
            (np.mean(peak_times_berlin), t_deviations *
             np.std(peak_times_berlin, ddof=1) / math.sqrt(n_simulations),
             time_berlin_control_conf),
            (np.mean(peak_times_moscow), t_deviations *
             np.std(peak_times_moscow, ddof=1) / math.sqrt(n_simulations),
             time_moscow_control_conf),
            (np.mean(peak_times_beijing), t_deviations *
             np.std(peak_times_beijing, ddof=1) / math.sqrt(n_simulations),
             time_beijing_control_conf),
            (np.mean(peak_times_sydney), t_deviations *
             np.std(peak_times_sydney, ddof=1) / math.sqrt(n_simulations),
             time_sydney_control_conf)]
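
# sir.ParameterEstimator is a project helper not shown in this snippet. For
# intuition only, here is a hypothetical sketch of how a per-region beta can
# be recovered from a discrete SIR time series: the SIR model gives
# dS = -beta * S * I / N per unit step, so beta = -dS * N / (S * I), and a
# 'max'-style estimator could take the largest such ratio over the series.
def estimate_beta_sketch(sir_series):
    """sir_series: iterable of (S, I, R) tuples sampled once per day."""
    series = list(sir_series)
    if len(series) < 2:
        return 0.0
    n = sum(series[0])  # total population, conserved in a closed SIR model
    betas = [(s0 - s1) * n / (s0 * i0)
             for (s0, i0, _), (s1, _, _) in zip(series, series[1:])
             if s0 > 0 and i0 > 0]
    return max(betas) if betas else 0.0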
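
# control_variate_conf is another project helper not shown here. A minimal
# sketch, assuming it returns the half-width of a 95% confidence interval for
# the mean of `values` after variance reduction with the control variate
# `controls` (above, the per-run beta estimates play that role):
def control_variate_conf(values, controls):
    """95% CI half-width of mean(values), variance-reduced via controls."""
    x = np.asarray(values, dtype=float)
    y = np.asarray(controls, dtype=float)
    n = len(x)
    cov = np.cov(x, y, ddof=1)
    c_star = cov[0, 1] / cov[1, 1]    # optimal coefficient Cov(X,Y)/Var(Y)
    z = x - c_star * (y - y.mean())   # same mean as X, lower variance
    t = scipy.stats.t.ppf(0.975, n - 1)
    return t * np.std(z, ddof=1) / math.sqrt(n)

# usage mirrors the calls above, e.g.
# control_variate_conf(peak_times_rio, params['rio'])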
Example #3
import argparse
import logging
import signal

# project-level imports (Simulator, YarnGenerator, SymbexRunner, the Yarn*
# enum types, verify_user_config, get_enumparser, parsenumlist and
# signal_handler are defined elsewhere in this project)


def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("sls_trace_file", help="Path to SLS trace file (JSON format).")
    parser.add_argument("network_topology_file", help="Path to topo file (JSON format).")
    parser.add_argument("node_mem_mb", type=int, help="Amount of memory in MB for each node.")
    parser.add_argument("node_cores", type=int, help="Amount of virtual cores for each node.")
    parser.add_argument("node_hb_ms", type=int, help="Period of the NM heartbeats in milliseconds.")
    parser.add_argument("am_hb_ms", type=int, help="Period of the AM heartbeats in milliseconds.")
    parser.add_argument("am_container_mb", type=int, help="Amount of memory in MB for each AM container.")
    parser.add_argument("am_container_cores", type=int, help="Number of cores taken by each AM container.")
    parser.add_argument("scheduler_type", type=get_enumparser(YarnSchedulerType, YarnSchedulerType.SYMBEX),
                        choices=filter(lambda x: x is not YarnSchedulerType.SYMBEX, YarnSchedulerType),
                        help="Type of scheduler to run", default=YarnSchedulerType.REGULAR.name, nargs="?")

    elastic_args = parser.add_argument_group("ELASTIC arguments")
    elastic_args.add_argument("penalty", type=get_enumparser(YarnPenalty), choices=YarnPenalty,
                              help="The performance penalty model to use", nargs="?")
    elastic_args.add_argument("ib", type=float, help="The initial bump of the penalty model", nargs="?")
    elastic_args.add_argument("--ep", type=int,
                              help="Percentage of nodes which are always elastic (works only for GREEDY & SMARTG)")

    peek_args = parser.add_argument_group("PEEK arguments")
    peek_args.add_argument("-p", "--peek-pushback-strategy", type=get_enumparser(PushbackStrategy),
                           choices=PushbackStrategy, nargs="?", help="Strategy to use to avoid pushback in PEEK.",
                           default=None)

    race_args = parser.add_argument_group("RACE arguments")
    from events.yarn.elastic.race import YarnRaceMetric
    race_args.add_argument("--race-metric", type=get_enumparser(YarnRaceMetric), choices=YarnRaceMetric,
                           help="Metric by which to compare REGULAR vs. SMARTG in RACE.",
                           default=YarnRaceMetric.TOTAL_JRT.name)
    race_args.add_argument("--race-lockstep-regular", action="store_true",
                           help="Switch flag to REGULAR until end of simulation.", default=False)
    race_args.add_argument("--race-duration-range", type=parsenumlist,
                           help="Duration range (min, max) in ms of the " +
                                "RACE simulations w.r.t. the simulation timeline.",
                           default=(0, 0))
    race_args.add_argument("--race-never-degrade", action="store_true",
                           help="Always favour REGULAR if any job gets worse running times by using SMARTG.",
                           default=False)
    race_args.add_argument("--race-cutoff-perc", type=int,
                           help="Only simulate a percentage of the remaining containers.",
                           default=0)

    symbex_args = parser.add_argument_group("SYMBEX arguments")
    symbex_args.add_argument("--symbex", action="store_true",
                             help="Run a symbex-type exploration on SMARTG vs REGULAR scheduling decisions.",
                             default=False)
    symbex_args.add_argument("--symbex-mode", type=get_enumparser(SymbexMode), choices=SymbexMode,
                             help="Symbex exploration to run.", default=SymbexMode.DECISION.name)
    symbex_args.add_argument("--symbex-dfs", action="store_true",
                             help="Do a DFS-type exploration rather than a BFS one.",
                             default=False)
    symbex_args.add_argument("--symbex-workers", type=int, help="Number of concurrent symbex worker threads to run.",
                             default=1)

    errinj_args = parser.add_argument_group("Error injection arguments")
    errinj_args.add_argument("--duration-error", type=int,
                             help="Percentage by which to mis-estimate the running times of the containers.")
    errinj_args.add_argument("--duration-error-type", type=get_enumparser(YarnErrorType), choices=YarnErrorType,
                             help="Whether duration error is positive, negative or either.",
                             default=YarnErrorType.MIXED.name)
    errinj_args.add_argument("--duration-error-mode", type=get_enumparser(YarnErrorMode), choices=YarnErrorMode,
                             help="Whether duration error is constant or random.",
                             default=YarnErrorMode.CONSTANT.name)
    el_errinj_args = parser.add_argument_group("ELASTIC Error injection arguments")
    el_errinj_args.add_argument("--duration-error-only-elastic", action="store_true",
                                help="Inject duration error only for ELASTIC containers", default=False)
    el_errinj_args.add_argument("--mem-error", type=int, help="Percentage by which to mis-estimate the ideal memory " +
                                                              "of the containers.")
    el_errinj_args.add_argument("--mem-error-type", type=get_enumparser(YarnErrorType), choices=YarnErrorType,
                                help="Whether the memory misestimation error is positive, negative or either.",
                                default=YarnErrorType.MIXED.name)
    el_errinj_args.add_argument("--mem-error-mode", type=get_enumparser(YarnErrorMode), choices=YarnErrorMode,
                                help="Whether the memory misestimation error is constant or random.",
                                default=YarnErrorMode.CONSTANT.name)
    el_errinj_args.add_argument("--ib-error", type=int, help="Percentage by which to mis-estimate the IB of tasks.")
    el_errinj_args.add_argument("--ib-error-type", type=get_enumparser(YarnErrorType), choices=YarnErrorType,
                                help="Whether IB error is positive, negative or either.",
                                default=YarnErrorType.MIXED.name)
    el_errinj_args.add_argument("--ib-error-mode", type=get_enumparser(YarnErrorMode), choices=YarnErrorMode,
                                help="Whether IB error is constant or random.", default=YarnErrorMode.CONSTANT.name)

    yarn_args = parser.add_argument_group("YARN arguments")
    yarn_args.add_argument("-r", "--use-reservations", action="store_true",
                           help="Whether an app can place a reservation on a node to guarantee its priority there.",
                           default=False)
    yarn_args.add_argument("-g", "--use-gaps", action="store_true",
                           help="Used in conjunction with reservations, allows a container to be scheduled on a " +
                                "reserved node if the duration is less than the reservation duration.", default=False)
    yarn_args.add_argument("--gaps-allow-ams", action="store_true",
                           help="Used in conjunction with reservations and gaps, allows AM containers to be scheduled" +
                                " on a reserved node.", default=False)
    yarn_args.add_argument("--assign-multiple", action="store_true",
                           help="Run the simulation as if the assignMultiple flag is set (multiple assignments per " +
                                "heartbeat are allowed).", default=False)

    parser.add_argument("--oracle-debug", action="store_true",
                        help="Display DEBUG messages for all oracle simulations (generates A LOT of output).")
    parser.add_argument("--mem-overestimate-range", type=parsenumlist, help="Range (min, max) of multipliers for " +
                                                                            "memory overestimates by users.",
                        default=(1, 1))
    parser.add_argument("--mem-overestimate-assume", type=float, help="Multiplier by which the system assumes " +
                                                                      "that users overestimate memory.", default=1)
    parser.add_argument("--meganode", action="store_true", help="Pool all node resources together and " +
                                                                "run simulation on this one node.", default=False)
    parser.add_argument("--occupancy-stats-interval", type=int,
                        help="Gather occupancy stats each interval (in seconds).", default=0)
    parser.add_argument("--occupancy-stats-file", type=str, help="File in which to output occupancy stats.",
                        default=None)
    parser.add_argument("-v", "--verbose", action="count")

    args = parser.parse_args()

    verify_user_config(args)

    if args.verbose == 1:
        logging.getLogger().setLevel(logging.INFO)
    elif args.verbose is not None and args.verbose >= 2:
        logging.getLogger().setLevel(logging.DEBUG)

    # Start a new simulator
    simulator = Simulator()

    # Create and run the new YarnGenerator
    yarn_generator = YarnGenerator()
    state = yarn_generator.generate_state(simulator, args)

    # Add the event to the simulator
    simulator.add_event(yarn_generator.get_simulation_start_event())

    # Catch SIGINT and drop to a console for debugging purposes.
    signal.signal(signal.SIGINT, signal_handler)

    # Run the simulation
    if not args.symbex:
        simulator.run()
    else:
        SymbexRunner(state, args.symbex_workers).run()
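
# get_enumparser and parsenumlist are used as argparse type= converters above
# but are not part of this snippet. Minimal sketches under those assumptions
# (case-insensitive enum lookup; "min,max" pairs):
def get_enumparser(enum_class, *excluded):
    """Return a callable mapping a name to a member of enum_class."""
    def parse(name):
        try:
            member = enum_class[name.upper()]
        except KeyError:
            raise argparse.ArgumentTypeError(
                "invalid choice: {0} (choose from {1})".format(
                    name, ", ".join(m.name for m in enum_class)))
        if member in excluded:
            raise argparse.ArgumentTypeError("choice not allowed: {0}".format(name))
        return member
    return parse


def parsenumlist(value):
    """Parse a comma-separated 'min,max' pair into a (min, max) tuple."""
    parts = [float(p) for p in value.split(",")]
    if len(parts) != 2 or parts[0] > parts[1]:
        raise argparse.ArgumentTypeError(
            "expected 'min,max' with min <= max, got: {0}".format(value))
    return tuple(parts)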
import os.path as path

from display import WorldMap
from simulator import State, Simulator
from world import regions, routes

this_dir = path.dirname(path.realpath(__file__))

len_simulation = 90

state = State(regions, routes, verbose=True)
state.set_outbreak('Rio De Janeiro', 1000)

sim = Simulator(
    state,
    transfer_prob=0.005,
    beta=2,
    gamma=0.5,
    verbose=True,
)
sim.add_event(2560, days=18, total_transfer=380e3)
base_map = WorldMap(resolution="c")
base_map.animate(sim, frames=len_simulation, max_infected=0.1)
base_map.ani.save(path.join(this_dir, '../../report/plots/gifs/rio.mp4'),
                  writer="mencoder_file",
                  fps=3,
                  savefig_kwargs={'bbox_inches': 'tight'})
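# note: matplotlib's "mencoder_file" animation writer needs mencoder on the
# PATH and was deprecated and later removed in newer matplotlib releases;
# the "ffmpeg" writer is the usual substitute.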

state = State(regions, routes, verbose=True)
state.set_outbreak('Rio De Janeiro', 1000)
sim = Simulator(state, transfer_prob=0.005, beta=2, gamma=0.5, verbose=True)
base_map = WorldMap(resolution="c")
base_map.animate(sim, frames=len_simulation, max_infected=0.1)
base_map.ani.save(path.join(this_dir, '../../report/plots/gifs/no_rio.mp4'),
                  writer="mencoder_file",
                  fps=3,
                  savefig_kwargs={'bbox_inches': 'tight'})
import os.path as path
import time

from display import WorldMap
from simulator import State, Simulator
from world import regions, routes

this_dir = path.dirname(path.realpath(__file__))

len_simulation = 67

state = State(regions, routes, verbose=True)
state.set_outbreak('Rio De Janeiro', 1000)

sim = Simulator(state, transfer_prob=0.005, beta=2, gamma=0.5, verbose=True)
sim.add_event(2560, days=18, total_transfer=380e3)

for i, state in enumerate(sim.run(len_simulation)):
    fig_name = path.join(this_dir, '../../report/plots/gifs/frames/rio-{0}.pdf'.format(i))
    if i == 46:
        base_map = WorldMap(resolution="c")
        base_map.scatter_infections(state, max_infected=0.1, time=i)
        base_map.save_fig(fig_name)
        break


state = State(regions, routes, verbose=True)
state.set_outbreak('Rio De Janeiro', 1000)
sim = Simulator(state, transfer_prob=0.005, beta=2, gamma=0.5, verbose=True)

for i, state in enumerate(sim.run(len_simulation)):
    # mirrors the rio loop above, saving the corresponding no_rio frame
    fig_name = path.join(this_dir, '../../report/plots/gifs/frames/no_rio-{0}.pdf'.format(i))
    if i == 46:
        base_map = WorldMap(resolution="c")
        base_map.scatter_infections(state, max_infected=0.1, time=i)
        base_map.save_fig(fig_name)
        break