Example #1
    def run(self):

        demand_model_configs_list = EFFCS_SimConfGrid(
            self.demand_model_configs_grid).conf_list
        print("demand_model_configs_list", demand_model_configs_list)
        for demand_model_config in demand_model_configs_list:

            print("demand_model_config", demand_model_config)

            # demand_model_path = os.path.join(
            #     os.path.dirname(os.path.dirname(__file__)),
            #     "demand_modelling",
            #     "demand_models",
            #     demand_model_config["city"],
            # )
            ROOT_DIR = os.path.abspath(os.curdir)

            demand_model_path = os.path.join(
                ROOT_DIR,
                "odysseus",
                "demand_modelling",
                "demand_models",
                demand_model_config["city"],
            )
            os.makedirs(demand_model_path, exist_ok=True)

            if not os.path.exists(
                    os.path.join(demand_model_path, "city_obj.pickle")):
                demand_model = DemandModel(demand_model_config["city"],
                                           demand_model_config,
                                           int(self.train_range[0]),
                                           int(self.train_range[1]),
                                           int(self.train_range[2]),
                                           int(self.train_range[3]),
                                           int(self.test_range[0]),
                                           int(self.test_range[1]),
                                           int(self.test_range[2]),
                                           int(self.test_range[3]))
                demand_model.save_results()
                if self.in_flow:
                    demand_model.save_in_flow_count()
                if self.out_flow:
                    demand_model.save_out_flow_count()
                # Note: returns after the first grid entry, so only one
                # configuration is processed per call.
                return {"status": "complete"}
            else:
                # A pickled city object already exists; skip estimation.
                return {"status": "not_run"}
Example #2
def multiple_runs(conf_dict):
	sim_general_conf = conf_dict["sim_general_conf"]
	sim_scenario_conf_grid = conf_dict["sim_scenario_conf"]
	sim_scenario_name = conf_dict["sim_scenario_name"]
	supply_model_object = conf_dict["supply_model_object"]
	demand_model_folder = conf_dict["demand_model_folder"]

	if "n_cpus" not in conf_dict:
		n_cpus = mp.cpu_count()
	else:
		n_cpus = conf_dict["n_cpus"]

	sim_technique = sim_general_conf["sim_technique"]
	city = sim_general_conf["city"]

	results_path = os.path.join(
		os.path.dirname(os.path.dirname(__file__)),
		"results",
		city,
		"multiple_runs",
		sim_scenario_name,
	)
	os.makedirs(results_path, exist_ok=True)

	with mp.Pool(n_cpus, maxtasksperchild=1) as pool:

		sim_conf_grid = EFFCS_SimConfGrid(sim_scenario_conf_grid)

		pool_stats_dict = {}
		conf_tuples = []

		for conf_id, sim_scenario_conf in enumerate(sim_conf_grid.conf_list):
			sim_scenario_conf["conf_id"] = conf_id
			# When a constant load factor is requested, keep only the
			# configurations whose requests/vehicles ratio matches it.
			const_load_factor = sim_general_conf.get("const_load_factor", False)
			if const_load_factor:
				round_lambda = round(sim_scenario_conf["requests_rate_factor"], 2)
				round_vehicles_factor = round(sim_scenario_conf["n_vehicles_factor"], 2)
				if round(round_lambda / round_vehicles_factor, 2) != const_load_factor:
					continue
			conf_tuples += [(
				sim_general_conf,
				sim_scenario_conf,
				demand_model_folder,
				supply_model_object
			)]

		with tqdm(total=len(conf_tuples), unit="sim", postfix=str(n_cpus)+" cpu(s)", smoothing=0, dynamic_ncols=True) as pbar:

			def collect_result(res):
				res_id = res["conf_id"]
				pool_stats_dict[res_id] = res
				pbar.update()

			def print_error(err):
				tqdm.write(str(datetime.datetime.now()) + " ERROR: Simulation failed! Cause: " + str(err), file=sys.stderr)
				pbar.update()

			async_results = []

			if sim_technique == "eventG":
				for conf_tuple in conf_tuples:
					async_result = pool.apply_async(
						get_eventG_sim_stats, (conf_tuple,), callback=collect_result, error_callback=print_error
					)
					async_results.append(async_result)
			elif sim_technique == "traceB":
				for conf_tuple in conf_tuples:
					async_result = pool.apply_async(
						get_traceB_sim_stats, (conf_tuple,), callback=collect_result, error_callback=print_error
					)
					async_results.append(async_result)

			# Block until every simulation has finished.
			for result in async_results:
				result.wait()

	print(datetime.datetime.now(), city, "multiple runs finished!")

	sim_stats_df = pd.concat([pool_stats_dict[res_id] for res_id in sorted(pool_stats_dict)], axis=1, ignore_index=True).T
	sim_stats_df.to_csv(os.path.join(results_path, "sim_stats.csv"))
	pd.Series(sim_general_conf).to_csv(os.path.join(results_path, "sim_general_conf.csv"), header=True)
	pd.Series(sim_scenario_conf_grid).to_csv(os.path.join(results_path, "sim_scenario_conf_grid.csv"), header=True)

	sim_stats_df.to_pickle(os.path.join(results_path, "sim_stats.pickle"))
	pd.Series(sim_general_conf).to_pickle(os.path.join(results_path, "sim_general_conf.pickle"))
	pd.Series(sim_scenario_conf_grid).to_pickle(os.path.join(results_path, "sim_scenario_conf_grid.pickle"))
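
A hypothetical invocation of this multiple_runs variant; the keys mirror the lookups at the top of the function, and every value below is a placeholder rather than a real ODySSEUS configuration:

conf_dict = {
    "sim_general_conf": {"sim_technique": "eventG", "city": "Torino",
                         "const_load_factor": False},
    # Grid of lists: every combination becomes one simulation run.
    "sim_scenario_conf": {"requests_rate_factor": [0.8, 1.0],
                          "n_vehicles_factor": [1.0]},
    "sim_scenario_name": "baseline",
    "supply_model_object": None,                    # placeholder
    "demand_model_folder": "demand_models/Torino",  # placeholder
    "n_cpus": 4,  # optional; defaults to mp.cpu_count()
}
multiple_runs(conf_dict)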
Example #3
            exit(2)
        else:
            print("Existing object. I am recovering it...")
            with open(os.path.join(folder_path, "supply_model.pickle"),
                      "rb") as f:
                supply_model = pickle.load(f)

else:
    # No existing supply model
    supply_model = None

confs_dict = {}
confs_dict["multiple_runs"] = sim_scenario_conf_grid
confs_dict["single_run"] = sim_scenario_conf

sim_general_conf_list = EFFCS_SimConfGrid(sim_general_conf_grid).conf_list
for sim_general_conf in sim_general_conf_list:
    sim_run_mode = sim_general_conf["sim_run_mode"]
    print(sim_general_conf)

    if sim_run_mode == "single_run":
        single_run(
            (sim_general_conf, confs_dict[sim_general_conf["sim_run_mode"]],
             sim_general_conf["sim_scenario_name"], supply_model))

    elif sim_run_mode == "multiple_runs":
        if args.n_cpus is not None:
            multiple_runs(sim_general_conf,
                          confs_dict[sim_general_conf["sim_run_mode"]],
                          sim_general_conf["sim_scenario_name"],
                          n_cpus=int(args.n_cpus))
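
The recovery branch above expects a supply_model.pickle written in an earlier run. A minimal sketch of the matching save/load round trip (the file name comes from the snippet; the saving side is an assumption):

import os
import pickle

def save_supply_model(supply_model, folder_path):
    # Assumed counterpart of the recovery branch above.
    os.makedirs(folder_path, exist_ok=True)
    with open(os.path.join(folder_path, "supply_model.pickle"), "wb") as f:
        pickle.dump(supply_model, f)

def load_supply_model(folder_path):
    with open(os.path.join(folder_path, "supply_model.pickle"), "rb") as f:
        return pickle.load(f)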
Example #4
def main(argv):
    """
    Test pick-up and drop-off Rainbow agents on the ESBDQN API.
    """
    del argv  # Unused arguments

    # Load configuration
    sim_conf = importlib.import_module('api.configs.{}.{}'.format(
        DEFAULT_sim_scenario_name, FLAGS.conf_filename))

    # Extract a single conf pair
    sim_general_conf  = EFFCS_SimConfGrid(sim_conf.General)       \
                                          .conf_list[0]
    sim_scenario_conf = EFFCS_SimConfGrid(sim_conf.Multiple_runs) \
                                          .conf_list[0]

    logging.info('Rainbow agents on ODySSEUS running on %s.',
                 jax.lib.xla_bridge.get_backend().platform.upper())

    checkpoint = PickleCheckpoint(FLAGS.checkpoint_dirpath,
                                  'ODySSEUS-' + sim_general_conf['city'])

    if not checkpoint.can_be_restored():
        raise IOError('Cannot load the trained Rainbow agents.')

    logging.info('Restoring checkpoint...')

    checkpoint.restore()

    # Generate RNG key
    rng_state = np.random.RandomState(FLAGS.seed)
    rng_state.set_state(checkpoint.state.rng_state)

    rng_key = jax.random.PRNGKey(
        rng_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64))

    def environment_builder() -> EscooterSimulator:
        """
        Create the ODySSEUS environment.
        """
        return EscooterSimulator((sim_general_conf, sim_scenario_conf),
                                 None,
                                 rt=True)

    def preprocessor_builder():
        """
        Create the ODySSEUS input preprocessor.
        """
        return processor(max_abs_reward=FLAGS.max_abs_reward,
                         zero_discount_on_life_loss=True)

    env = environment_builder()

    logging.info('Action spec: %s', env.action_spec())
    logging.info('Observation spec: %s', env.observation_spec())

    # Take [0] as both Rainbow agents
    # have the same number of actions
    num_actions = env.action_spec()[0].num_values
    support = jnp.linspace(-FLAGS.vmax, FLAGS.vmax, FLAGS.num_atoms)

    network = hk.transform(
        rainbow_odysseus_network(num_actions, support,
                                 FLAGS.noisy_weight_init))

    _, eval_rng_key = jax.random.split(rng_key)

    # Create pick-up/drop-off agents
    P_eval_agent = parts.EpsilonGreedyActor(
        preprocessor=preprocessor_builder(),
        network=copy.deepcopy(network),
        exploration_epsilon=0,
        rng_key=eval_rng_key,
    )

    D_eval_agent = parts.EpsilonGreedyActor(
        preprocessor=preprocessor_builder(),
        network=copy.deepcopy(network),
        exploration_epsilon=0,
        rng_key=eval_rng_key,
    )

    P_eval_agent.set_state(checkpoint.state.P_agent['eval'])
    D_eval_agent.set_state(checkpoint.state.D_agent['eval'])

    env.run(P_eval_agent, D_eval_agent)
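
The seeding pattern above is worth isolating: a NumPy RandomState, whose state can be stored in and restored from a checkpoint, deterministically seeds the JAX PRNG. A self-contained sketch of the same idea, independent of the checkpoint machinery:

import numpy as np
import jax

rng_state = np.random.RandomState(42)
saved = rng_state.get_state()      # what a checkpoint would persist

rng_state.set_state(saved)         # restore on resume
seed = rng_state.randint(-2**63, 2**63, dtype=np.int64)
rng_key = jax.random.PRNGKey(seed)
train_key, eval_key = jax.random.split(rng_key)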
Example #5
def main(argv):
    """
    Train pick-up and drop-off Rainbow agents on ODySSEUS.
    """
    del argv # Unused arguments

    # Metadata configuration
    parent_dir = pathlib.Path(__file__).parent.absolute()

    sim_input_conf_dir = parent_dir / 'configs' / DEFAULT_sim_scenario_name

    # Load configuration
    sim_conf = importlib.import_module('esbdqn.configs.{}.{}'
                                       .format(DEFAULT_sim_scenario_name,
                                               FLAGS.conf_filename))

    # Extract a single conf pair
    sim_general_conf  = EFFCS_SimConfGrid(sim_conf.General)       \
                                          .conf_list[0]
    sim_scenario_conf = EFFCS_SimConfGrid(sim_conf.Multiple_runs) \
                                          .conf_list[0]

    experiment_dir = parent_dir                     \
                        / 'experiments'             \
                        / DEFAULT_sim_scenario_name \
                        / FLAGS.exp_name            \
                        / sim_general_conf['city']

    if pathlib.Path.exists(experiment_dir):
        # Ensure configuration has not changed
        if not filecmp.cmp(str(sim_input_conf_dir
                               / FLAGS.conf_filename)   + '.py',
                           str(experiment_dir
                               / DEFAULT_conf_filename) + ".py",
                           shallow=False):
            raise IOError('Configuration changed at: {}'
                          .format(str(experiment_dir)))
    else:
        pathlib.Path.mkdir(experiment_dir, parents=True,
                           exist_ok=True)

        # Copy configuration files
        shutil.rmtree(experiment_dir)
        shutil.copytree(sim_input_conf_dir, experiment_dir)

        # Rename to the default name
        conf_filepath = experiment_dir / (FLAGS.conf_filename + ".py")
        conf_filepath.rename(experiment_dir
                             / (DEFAULT_conf_filename + ".py"))

        # Delete all other potential conf files
        for filename in experiment_dir.glob(
                DEFAULT_conf_filename + "_*.py"):
            filename.unlink()

    # Create results files
    results_dir = experiment_dir / 'results'

    pathlib.Path.mkdir(results_dir, parents=True,
                       exist_ok=True)

    results_filepath = results_dir / DEFAULT_resu_filename

    logging.info('Rainbow agents on ODySSEUS running on %s.',
                 jax.lib.xla_bridge.get_backend().platform.upper())

    if FLAGS.checkpoint:
        checkpoint = PickleCheckpoint(
            experiment_dir / 'models',
            'ODySSEUS-' + sim_general_conf['city'])
    else:
        checkpoint = parts.NullCheckpoint()

    checkpoint_restored = False

    if FLAGS.checkpoint:
        if checkpoint.can_be_restored():
            logging.info('Restoring checkpoint...')

            checkpoint.restore()
            checkpoint_restored = True

    # Generate RNG key
    rng_state = np.random.RandomState(FLAGS.seed)

    if checkpoint_restored:
        rng_state.set_state(checkpoint.state
                                      .rng_state)

    rng_key   = jax.random.PRNGKey(
        rng_state.randint(-sys.maxsize - 1,
                          sys.maxsize + 1,
                          dtype=np.int64))

    # Generate results file writer
    if sim_general_conf['save_history']:
        writer = parts.CsvWriter(str(results_filepath))

        if checkpoint_restored:
            writer.set_state(checkpoint.state
                                       .writer)
    else:
        writer = parts.NullWriter()

    def environment_builder() -> ConstrainedEnvironment:
        """
        Create the ODySSEUS environment.
        """
        return EscooterSimulator(
                        (sim_general_conf,
                         sim_scenario_conf),
                    FLAGS.n_lives)

    def preprocessor_builder():
        """
        Create the ODySSEUS input preprocessor.
        """
        return processor(
            max_abs_reward=FLAGS.max_abs_reward,
            zero_discount_on_life_loss=True
        )

    env = environment_builder()

    logging.info('Environment: %s', FLAGS.exp_name)
    logging.info('Action spec: %s', env.action_spec())
    logging.info('Observation spec: %s', env.observation_spec())

    # Take [0] as both Rainbow agents
    # have the same number of actions
    num_actions = env.action_spec()[0].num_values
    support = jnp.linspace(-FLAGS.vmax, FLAGS.vmax,
                           FLAGS.num_atoms)

    network = hk.transform(rainbow_odysseus_network(
                           num_actions, support,
                           FLAGS.noisy_weight_init))

    # Create sample network input from reset.
    sample_processed_timestep = preprocessor_builder()(env.reset())
    sample_processed_timestep = t.cast(dm_env.TimeStep,
                                       sample_processed_timestep)

    sample_processed_network_input = sample_processed_timestep.observation

    # Note that t in the replay is not exactly
    # aligned with the Rainbow agents' t.
    importance_sampling_exponent_schedule = parts.LinearSchedule(
        begin_t=int(FLAGS.min_replay_capacity_fraction * FLAGS.replay_capacity),
        end_t=(FLAGS.num_iterations * FLAGS.num_train_frames),
        begin_value=FLAGS.importance_sampling_exponent_begin_value,
        end_value=FLAGS.importance_sampling_exponent_end_value)

    if FLAGS.compress_state:
        def encoder(transition):
            return transition._replace(
                s_tm1=replay.compress_array(transition.s_tm1),
                s_t=replay.compress_array(transition.s_t))

        def decoder(transition):
            return transition._replace(
                s_tm1=replay.uncompress_array(transition.s_tm1),
                s_t=replay.uncompress_array(transition.s_t))
    else:
        encoder = None
        decoder = None

    replay_struct = replay.Transition(
        s_tm1=None,
        a_tm1=None,
        r_t=None,
        discount_t=None,
        s_t=None,
    )

    transition_accumulator = replay.NStepTransitionAccumulator(FLAGS.n_steps)

    transition_replay = replay.PrioritizedTransitionReplay(
        FLAGS.replay_capacity, replay_struct,
        FLAGS.priority_exponent,
        importance_sampling_exponent_schedule,
        FLAGS.uniform_sample_probability,
        FLAGS.normalize_weights,
        rng_state, encoder, decoder)

    optimizer = optax.adam(
        learning_rate=FLAGS.learning_rate,
        eps=FLAGS.optimizer_epsilon)

    if FLAGS.max_global_grad_norm > 0:
        optimizer = optax.chain(
            optax.clip_by_global_norm(
                FLAGS.max_global_grad_norm),
            optimizer)

    train_rng_key, eval_rng_key = jax.random.split(rng_key)

    # Create pick-up/drop-off agents
    P_train_agent = agent.Rainbow(
        preprocessor=preprocessor_builder(),
        sample_network_input=copy.deepcopy(sample_processed_network_input),
        network=copy.deepcopy(network),
        support=copy.deepcopy(support),
        optimizer=copy.deepcopy(optimizer),
        transition_accumulator=copy.deepcopy(transition_accumulator),
        replay=copy.deepcopy(transition_replay),
        batch_size=FLAGS.batch_size,
        min_replay_capacity_fraction=FLAGS.min_replay_capacity_fraction,
        learn_period=FLAGS.learn_period,
        target_network_update_period=FLAGS.target_network_update_period,
        rng_key=train_rng_key,
    )

    D_train_agent = agent.Rainbow(
        preprocessor=preprocessor_builder(),
        sample_network_input=copy.deepcopy(sample_processed_network_input),
        network=copy.deepcopy(network),
        support=copy.deepcopy(support),
        optimizer=copy.deepcopy(optimizer),
        transition_accumulator=copy.deepcopy(transition_accumulator),
        replay=copy.deepcopy(transition_replay),
        batch_size=FLAGS.batch_size,
        min_replay_capacity_fraction=FLAGS.min_replay_capacity_fraction,
        learn_period=FLAGS.learn_period,
        target_network_update_period=FLAGS.target_network_update_period,
        rng_key=train_rng_key,
    )

    P_eval_agent = parts.EpsilonGreedyActor(
        preprocessor=preprocessor_builder(),
        network=copy.deepcopy(network),
        exploration_epsilon=0,
        rng_key=eval_rng_key,
    )

    D_eval_agent = parts.EpsilonGreedyActor(
        preprocessor=preprocessor_builder(),
        network=copy.deepcopy(network),
        exploration_epsilon=0,
        rng_key=eval_rng_key,
    )

    if checkpoint_restored:
        P_train_agent.set_state(checkpoint.state.P_agent['train'])
        D_train_agent.set_state(checkpoint.state.D_agent['train'])

        P_eval_agent.set_state(checkpoint.state.P_agent['eval'])
        D_eval_agent.set_state(checkpoint.state.D_agent['eval'])

    state = checkpoint.state

    if not checkpoint_restored:
        state.iteration = 0

    state.P_agent = {}
    state.D_agent = {}

    state.rng_state = rng_state
    state.writer = writer

    state.P_agent['train'] = P_train_agent
    state.D_agent['train'] = D_train_agent

    state.P_agent['eval'] = P_eval_agent
    state.D_agent['eval'] = D_eval_agent

    while state.iteration < FLAGS.num_iterations:
        # Create a new environment at each new iteration
        # to allow for determinism if preempted.
        env = environment_builder()

        # Leave some spacing
        print('\n')

        logging.info('Training iteration: %d', state.iteration)

        train_trackers = make_odysseus_trackers(FLAGS.max_abs_reward)
        eval_trackers  = make_odysseus_trackers(FLAGS.max_abs_reward)

        train_seq = run_loop(P_train_agent, D_train_agent,
                             env, FLAGS.max_steps_per_episode)

        num_train_frames = 0        \
            if state.iteration == 0 \
            else FLAGS.num_train_frames

        train_seq_truncated = it.islice(train_seq, num_train_frames)

        train_stats = generate_statistics(train_trackers,
                                          train_seq_truncated)

        logging.info('Evaluation iteration: %d', state.iteration)

        # Synchronize evaluation networks with their training counterparts
        P_eval_agent.network_params = P_train_agent.online_params
        D_eval_agent.network_params = D_train_agent.online_params

        eval_seq = run_loop(P_eval_agent, D_eval_agent,
                            env, FLAGS.max_steps_per_episode)

        eval_seq_truncated = it.islice(eval_seq, FLAGS.num_eval_frames)

        eval_stats = generate_statistics(eval_trackers,
                                         eval_seq_truncated)

        # Logging and checkpointing
        L = [
            # Simulation metadata
            ('iteration', state.iteration, '%3d'),

            # ODySSEUS metadata
            ('n_charging_workers', sim_scenario_conf['n_workers'], '%3d'),
            ('n_relocation_workers', sim_scenario_conf['n_relocation_workers'], '%3d'),
            ('n_vehicles', sim_scenario_conf['n_vehicles'], '%3d'),
            ('pct_incentive_willingness', sim_scenario_conf['incentive_willingness'], '%2.2f'),
            ('zone_side_m', sim_general_conf['bin_side_length'], '%3d'),

            # Validation agents
            ('eval_num_episodes', eval_stats['num_episodes'], '%3d'),

            ('eval_P_episode_return', eval_stats['episode_return'][0], '%2.2f'),
            ('eval_D_episode_return', eval_stats['episode_return'][1], '%2.2f'),

            ('eval_min_n_accepted_incentives',
             np.min(eval_stats['episodes_n_accepted_incentives']), '%2.2f'),
            ('eval_avg_n_accepted_incentives',
             np.mean(eval_stats['episodes_n_accepted_incentives']), '%2.2f'),
            ('eval_max_n_accepted_incentives',
             np.max(eval_stats['episodes_n_accepted_incentives']), '%2.2f'),

            ('eval_min_n_lives',
             np.min(eval_stats['episodes_n_lives']), '%2.2f'),
            ('eval_avg_n_lives',
             np.mean(eval_stats['episodes_n_lives']), '%2.2f'),
            ('eval_max_n_lives',
             np.max(eval_stats['episodes_n_lives']), '%2.2f'),

            ('eval_min_pct_satisfied_demand',
             np.min(eval_stats['pct_satisfied_demands']), '%2.2f'),
            ('eval_avg_pct_satisfied_demand',
             np.mean(eval_stats['pct_satisfied_demands']), '%2.2f'),
            ('eval_max_pct_satisfied_demand',
             np.max(eval_stats['pct_satisfied_demands']), '%2.2f'),

            # Training agents
            ('train_num_episodes', train_stats['num_episodes'], '%3d'),

            ('train_P_episode_return', train_stats['episode_return'][0], '%2.2f'),
            ('train_D_episode_return', train_stats['episode_return'][1], '%2.2f'),

            ('train_min_n_accepted_incentives',
             np.min(train_stats['episodes_n_accepted_incentives']), '%2.2f'),
            ('train_avg_n_accepted_incentives',
             np.mean(train_stats['episodes_n_accepted_incentives']), '%2.2f'),
            ('train_max_n_accepted_incentives',
             np.max(train_stats['episodes_n_accepted_incentives']), '%2.2f'),

            ('train_min_n_lives',
             np.min(train_stats['episodes_n_lives']), '%2.2f'),
            ('train_avg_n_lives',
             np.mean(train_stats['episodes_n_lives']), '%2.2f'),
            ('train_max_n_lives',
             np.max(train_stats['episodes_n_lives']), '%2.2f'),

            ('train_min_pct_satisfied_demand',
             np.min(train_stats['pct_satisfied_demands']), '%2.2f'),
            ('train_avg_pct_satisfied_demand',
             np.mean(train_stats['pct_satisfied_demands']), '%2.2f'),
            ('train_max_pct_satisfied_demand',
             np.max(train_stats['pct_satisfied_demands']), '%2.2f'),

            ('P_importance_sampling_exponent',
             P_train_agent.importance_sampling_exponent, '%.3f'),
            ('D_importance_sampling_exponent',
             D_train_agent.importance_sampling_exponent, '%.3f'),

            ('P_max_seen_priority', P_train_agent.max_seen_priority, '%.3f'),
            ('D_max_seen_priority', D_train_agent.max_seen_priority, '%.3f'),
        ]

        L_str = '\n'.join(('%s: ' + f) % (n, v) for n, v, f in L)

        logging.info(L_str)

        if state.iteration == \
                FLAGS.num_iterations - 1:
            print('\n')

        writer.write(collections.OrderedDict(
            (n, v) for n, v, _ in L))

        state.iteration += 1

        if state.iteration \
                % FLAGS.checkpoint_period == 0:
            checkpoint.save()

    writer.close()
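
The compress_state branch above swaps the observation arrays of each transition for compressed blobs before they enter replay, and restores them on sampling. A self-contained round-trip sketch, with zlib standing in for replay.compress_array / replay.uncompress_array:

import collections
import zlib
import numpy as np

Transition = collections.namedtuple(
    "Transition", ["s_tm1", "a_tm1", "r_t", "discount_t", "s_t"])

def compress_array(a):
    return (zlib.compress(a.tobytes()), a.shape, a.dtype)

def uncompress_array(packed):
    blob, shape, dtype = packed
    return np.frombuffer(zlib.decompress(blob), dtype=dtype).reshape(shape)

t = Transition(s_tm1=np.zeros((4, 4), np.float32), a_tm1=1, r_t=0.5,
               discount_t=0.99, s_t=np.ones((4, 4), np.float32))
enc = t._replace(s_tm1=compress_array(t.s_tm1), s_t=compress_array(t.s_t))
dec = enc._replace(s_tm1=uncompress_array(enc.s_tm1),
                   s_t=uncompress_array(enc.s_t))
assert np.array_equal(dec.s_t, t.s_t)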
Example #6
                    test_range=["2020", "1", "2020", "1"],
                    valid_zones_thresh=["0"])

args = parser.parse_args()

demand_model_configs_grid = {
    "city": args.cities,
    "data_source_id": args.data_source_ids,
    "sim_technique": args.sim_techniques,
    "bin_side_length": list(map(int, args.bin_side_lengths)),
    "k_zones_factor": list(map(int, args.zones_factors)),
    "kde_bandwidth": list(map(int, args.kde_bandwidths)),
    "valid_zones_thresh": list(map(int, args.valid_zones_thresh))
}

demand_model_configs_list = EFFCS_SimConfGrid(
    demand_model_configs_grid).conf_list

for demand_model_config in demand_model_configs_list:
    print(demand_model_config)

    demand_model_path = os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        "demand_modelling",
        "demand_models",
        demand_model_config["city"],
    )
    os.makedirs(demand_model_path, exist_ok=True)

    if not os.path.exists(os.path.join(demand_model_path, "city_obj.pickle")):
        demand_model = DemandModel(demand_model_config["city"],
                                   demand_model_config,
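
The truncated lines at the top of this example look like the tail of a defaults declaration. A hypothetical reconstruction of the surrounding argparse setup, with flag names guessed from the args.* attributes (none of this is confirmed by the snippet):

import argparse

parser = argparse.ArgumentParser()
for flag in ("--cities", "--data-source-ids", "--sim-techniques",
             "--bin-side-lengths", "--zones-factors",
             "--kde-bandwidths", "--valid-zones-thresh"):
    parser.add_argument(flag, nargs="*")
# Visible tail of the snippet: string-valued defaults for the grid.
parser.set_defaults(test_range=["2020", "1", "2020", "1"],
                    valid_zones_thresh=["0"])
args = parser.parse_args()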
Example #7
def multiple_runs(sim_general_conf,
                  sim_scenario_conf_grid,
                  sim_scenario_name,
                  exp_name,
                  conf_id,
                  n_cpus=mp.cpu_count()):
    """
    Parameters
    ----------
    sim_general_conf : dict
        A combination from sim_conf.General

    sim_scenario_conf_grid : dict[list]
        Lists of parameters to experiment with, i.e., sim_conf.Multiple_runs

    sim_scenario_name : str
        Name of the scenario, i.e., sim_general_conf['sim_scenario_name']

    exp_name : str
        Name of the experiment

    conf_id : int
        General configuration Id

    n_cpus : int
        Number of cores for parallel execution. The default is mp.cpu_count()
    """

    sim_technique = sim_general_conf["sim_technique"]
    city = sim_general_conf["city"]

    results_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                "experiments", exp_name, "results",
                                city, "multiple_runs", sim_scenario_name,
                                str(conf_id))

    os.makedirs(results_path, exist_ok=True)

    with mp.Pool(n_cpus) as pool:

        sim_conf_grid = EFFCS_SimConfGrid(sim_scenario_conf_grid)

        # pool_stats_dict = {}
        conf_tuples = []

        for scenario_id, sim_scenario_conf in enumerate(
                sim_conf_grid.conf_list):  # List of conf dicts
            sim_scenario_conf["conf_id"] = scenario_id
            # sim_scenario_conf["n_workers"] = sim_scenario_conf["n_vehicles"]

            # When a constant load factor is requested, keep only the
            # configurations whose requests/vehicles ratio matches it.
            const_load_factor = sim_general_conf.get("const_load_factor", False)
            if const_load_factor:
                round_lambda = round(
                    sim_scenario_conf["requests_rate_factor"], 2)
                round_vehicles_factor = round(
                    sim_scenario_conf["n_vehicles_factor"], 2)

                if round(round_lambda / round_vehicles_factor,
                         2) != const_load_factor:
                    continue

            conf_tuples += [(
                sim_general_conf,
                sim_scenario_conf,
            )]

        with tqdm(total=len(conf_tuples),
                  unit="sim",
                  postfix=str(n_cpus) + " CPU(s)",
                  smoothing=0,
                  dynamic_ncols=True) as pbar:

            def collect_result(res):
                stats, sim_output = res
                _scenario_id = stats["conf_id"]

                # Extract the scenario conf matching this result's conf_id
                _sim_scenario_conf = next(
                    d for d in sim_conf_grid.conf_list
                    if d["conf_id"] == _scenario_id)

                # Make a folder for every scenario conf
                scenario_path = os.path.join(results_path, str(_scenario_id))
                os.makedirs(scenario_path, exist_ok=True)

                stats.to_pickle(os.path.join(scenario_path,
                                             "sim_stats.pickle"))

                stats.to_csv(os.path.join(scenario_path, "sim_stats.csv"))

                pd.Series(_sim_scenario_conf).to_pickle(
                    os.path.join(scenario_path, "sim_scenario_conf.pickle"))

                pd.Series(_sim_scenario_conf).to_csv(os.path.join(
                    scenario_path, "sim_scenario_conf.csv"),
                                                     header=True)

                with open(os.path.join(scenario_path, "sim_output.pickle"),
                          "wb") as f:
                    pickle.dump(sim_output, f)

                sim_output.grid.to_pickle(
                    os.path.join(scenario_path, "grid.pickle"))

                sim_output.grid.to_file(os.path.join(scenario_path,
                                                     "grid.dbf"))

                if sim_general_conf["save_history"]:
                    sim_output.sim_booking_requests.to_csv(
                        os.path.join(scenario_path,
                                     "sim_booking_requests.csv"))

                    sim_output.sim_bookings.to_pickle(
                        os.path.join(scenario_path, "sim_bookings.pickle"))

                    sim_output.sim_charges.to_pickle(
                        os.path.join(scenario_path, "sim_charges.pickle"))

                    sim_output.sim_not_enough_energy_requests.to_pickle(
                        os.path.join(scenario_path,
                                     "sim_unsatisfied_no-energy.pickle"))

                    sim_output.sim_no_close_vehicle_requests.to_pickle(
                        os.path.join(
                            scenario_path,
                            "sim_unsatisfied_no_close_vehicle.pickle"))

                    sim_output.sim_unsatisfied_requests.to_pickle(
                        os.path.join(scenario_path,
                                     "sim_unsatisfied_requests.pickle"))

                    sim_output.sim_system_charges_bookings.to_pickle(
                        os.path.join(scenario_path,
                                     "sim_system_charges_bookings.pickle"))

                    sim_output.sim_users_charges_bookings.to_pickle(
                        os.path.join(scenario_path,
                                     "sim_users_charges_bookings.pickle"))

                    sim_output.sim_unfeasible_charge_bookings.to_pickle(
                        os.path.join(scenario_path,
                                     "sim_unfeasible_charge_bookings.pickle"))

                    sim_output.sim_charge_deaths.to_pickle(
                        os.path.join(scenario_path,
                                     "sim_unfeasible_charges.pickle"))

                    sim_output.vehicles_history.to_csv(
                        os.path.join(scenario_path, "vehicles_history.csv"))

                    sim_output.stations_history.to_csv(
                        os.path.join(scenario_path, "stations_history.csv"))

                    sim_output.zones_history.to_csv(
                        os.path.join(scenario_path, "zones_history.csv"))

                    if _sim_scenario_conf["scooter_relocation"]:
                        sim_output.relocation_history.to_csv(
                            os.path.join(scenario_path,
                                         "relocation_history.csv"))

                    plotter = EFFCS_SimOutputPlotter(sim_output, city,
                                                     sim_scenario_name,
                                                     scenario_path)

                    plotter.plot_events_profile_barh()
                    plotter.plot_events_t()
                    plotter.plot_fleet_status_t()
                    plotter.plot_events_hourly_count_boxplot(
                        "bookings_train", "start")
                    plotter.plot_events_hourly_count_boxplot(
                        "charges", "start")
                    plotter.plot_events_hourly_count_boxplot(
                        "unsatisfied", "start")
                    plotter.plot_n_vehicles_charging_hourly_mean_boxplot()
                    plotter.plot_city_zones()

                    for col in [
                            "origin_count",
                            "destination_count",
                            "charge_needed_system_zones_count",
                            "charge_needed_users_zones_count",
                            "unsatisfied_demand_origins_fraction",
                            "not_enough_energy_origins_count",
                            "charge_deaths_origins_count",
                    ]:
                        if col in sim_output.grid:
                            plotter.plot_choropleth(col)

                pbar.update()

            def print_error(e):
                tqdm.write(str(datetime.datetime.now()) +
                           " Error: Simulation failed! Cause: " +
                           "-->{}<--".format(e.__cause__),
                           file=sys.stderr)

                pbar.update()

            futures = []

            run_func = get_eventG_sim_stats \
                if sim_technique == "eventG" \
                else get_traceB_sim_stats

            for conf_tuple in conf_tuples:
                future = pool.apply_async(run_func, (conf_tuple, ),
                                          callback=collect_result,
                                          error_callback=print_error)
                futures.append(future)

            # Block until every simulation has finished.
            for future in futures:
                future.wait()

    print(datetime.datetime.now(), city, f'#{conf_id}',
          "multiple runs finished!")

    # # Convert the stats dict into a list ordered by key (configuration Id)
    # # and concatenates them row by row in a DataFrame
    # sim_stats_df = pd.concat([pool_stats_dict[res_id]
    # 						 	for res_id in sorted(pool_stats_dict)],
    # 						 axis=1, ignore_index=True).T
    # sim_stats_df.to_csv(os.path.join(results_path, "sim_stats.csv"))
    #
    # pd.Series(sim_general_conf).to_csv(os.path.join(results_path, "sim_general_conf.csv"), header=True)
    # pd.Series(sim_scenario_conf_grid).to_csv(os.path.join(results_path, "sim_scenario_conf_grid.csv"), header=True)
    #
    # sim_stats_df.to_pickle(os.path.join(results_path, "sim_stats.pickle"))
    # pd.Series(sim_general_conf).to_pickle(os.path.join(results_path, "sim_general_conf.pickle"))
    # pd.Series(sim_scenario_conf_grid).to_pickle(os.path.join(results_path, "sim_scenario_conf_grid.pickle"))

    # Store the general conf in the higher folder
    pd.Series(sim_general_conf).to_csv(os.path.join(results_path,
                                                    "sim_general_conf.csv"),
                                       header=True)
    pd.Series(sim_general_conf).to_pickle(
        os.path.join(results_path, "sim_general_conf.pickle"))
sim_conf = importlib.import_module(
    'odysseus.simulator.experiments.{}.{}'.format(args.exp_name,
                                                  default_conf_filename))

# Custom simulator imports
from odysseus.simulator.single_run.single_run import single_run
from odysseus.simulator.multiple_runs.multiple_runs import multiple_runs

from odysseus.simulator.simulation_input.sim_config_grid import EFFCS_SimConfGrid

confs_dict = {
    "single_run": sim_conf.Single_run,
    "multiple_runs": sim_conf.Multiple_runs
}

sim_general_conf_list = EFFCS_SimConfGrid(sim_conf.General).conf_list

# Launch a simulation for each conf
for general_conf_id, sim_general_conf in enumerate(sim_general_conf_list):
    sim_run_mode = sim_general_conf["sim_run_mode"]

    if sim_run_mode == "single_run":
        single_run(
            (sim_general_conf, confs_dict[sim_general_conf["sim_run_mode"]],
             sim_general_conf["sim_scenario_name"]))
    elif sim_run_mode == "multiple_runs":
        if args.n_cpus is not None:
            multiple_runs(sim_general_conf,
                          confs_dict[sim_general_conf["sim_run_mode"]],
                          sim_general_conf["sim_scenario_name"],
                          args.exp_name,