def run_simulation(self, simulation_instance=None):
    """Run the model system over this runner's configuration.

    A fresh ModelSystem is constructed when the caller does not supply
    one.  After the run, the location of the data cache is logged.
    """
    if simulation_instance is None:
        simulation_instance = ModelSystem()
    simulation_instance.run(self.config)
    #simulation_instance.run_multiprocess(self.config, is_run_subset=True)
    cache_dir = self.simulation_state.get_cache_directory()
    logger.log_status("Data cache in %s" % cache_dir)
def run_simulation(self, simulation_instance=None):
    """Run the model system inside a logger block naming the scenario database.

    A fresh ModelSystem is constructed when the caller does not supply one.
    The logger block is always closed, even if the run raises; the data
    cache location is logged afterwards.
    """
    database_name = self.config['scenario_database_configuration'].database_name
    logger.start_block('Simulation on database %s' % database_name)
    try:
        if simulation_instance is None:
            simulation_instance = ModelSystem()
        simulation_instance.run(self.config)
        #simulation_instance.run_multiprocess(self.config, is_run_subset=True)
    finally:
        logger.end_block()
    cache_dir = self.simulation_state.get_cache_directory()
    logger.log_status("Data cache in %s" % cache_dir)
def __init__(self, config=None, save_estimation_results=False):
    """Set up simulation state and session, then resolve the model to estimate.

    Parameters:
      config -- dict-like configuration; must contain a non-None
          'cache_directory' entry, and normally 'models',
          'dataset_pool_configuration', and optionally 'model_name',
          'base_year', 'debuglevel', 'config_changes_for_estimation'.
      save_estimation_results -- whether estimation results should be
          persisted (stored on the instance for later use).

    Raises:
      KeyError -- if 'cache_directory' is missing or None.
    """
    if 'cache_directory' not in config or config['cache_directory'] is None:
        raise KeyError("The cache directory must be specified in the "
            "given configuration, giving the filesystem path to the cache "
            "directory containing the data with which to estimate. Please "
            "check that your configuration contains the 'cache_directory' "
            "entry and that it is not None.")
    self.simulation_state = SimulationState(new_instance=True,
                                            start_time=config.get('base_year', 0))
    self.simulation_state.set_cache_directory(config['cache_directory'])
    # Session configuration is created for its side effects (global registry);
    # the instance itself is not retained.
    SessionConfiguration(new_instance=True,
                         package_order=config['dataset_pool_configuration'].package_order,
                         in_storage=AttributeCache())
    self.config = Resources(config)
    self.save_estimation_results = save_estimation_results
    self.debuglevel = self.config.get("debuglevel", 4)
    self.model_system = ModelSystem()
    self.agents_index_for_prediction = None

    models = self.config.get('models', [])
    self.model_name = None
    if "model_name" in config:
        self.model_name = config["model_name"]
    else:
        # No explicit model name: pick the first model configured to run in
        # "estimate" mode (either directly or within its mode list).
        for model in models:
            if isinstance(model, dict):
                # next(iter(...)) instead of model.keys()[0], which only
                # works on Python 2 (dict views are not indexable on Py3).
                model_name = next(iter(model))
                if (model[model_name] == "estimate") or \
                        (isinstance(model[model_name], list) and
                         ("estimate" in model[model_name])):
                    self.model_name = model_name
                    break

    estimate_config_changes = self.config.get(
        'config_changes_for_estimation', {}).get('estimate_config', {})
    if len(estimate_config_changes) > 0:
        change = Resources({'models_configuration': {self.model_name:
                            {'controller': {'init': {'arguments': {}}}}}})
        estimate_config_str = self.config['models_configuration'].get(
            self.model_name, {}).get('controller', {}).get('init', {}).get(
            'arguments', {}).get('estimate_config', '{}')
        estimate_config = Resources({})
        try:
            # SECURITY: estimate_config_str comes from the configuration and
            # is evaluated as Python code -- only run trusted configurations.
            estimate_config = eval(estimate_config_str)
        except Exception:
            # Narrowed from a bare except; on any evaluation failure we
            # deliberately fall back to the empty Resources built above.
            pass
        estimate_config.merge(estimate_config_changes)
        self.config.merge(change)
        # Stored as source text; it is re-evaluated when the controller
        # constructs the model's init arguments.
        self.config['models_configuration'][self.model_name]['controller'][
            'init']['arguments']['estimate_config'] = 'Resources(%s)' % estimate_config
def __init__(self, models, configuration, datasets_to_preload=None):
    """Prepare a model-system runner restricted to the given models.

    'models' -- list of model-name strings to run; each must appear in
        configuration['models_configuration'].
    'configuration' -- dictionary-based configuration passed to ModelSystem.
    'datasets_to_preload' -- optional list of dataset names to pre-load for
        these models.  When None, every dataset listed in
        configuration['datasets_to_preload'] is loaded before each run.
        Restricting this list can speed up runs, since all pre-loaded
        datasets are also cached after each iteration.
    """
    self.config = Resources(configuration)
    self.config['models'] = models
    if datasets_to_preload is not None:
        # Keep only the requested datasets, preserving any per-dataset
        # settings already present in the configuration.
        existing = self.config['datasets_to_preload']
        self.config['datasets_to_preload'] = dict(
            (name, existing.get(name, {})) for name in datasets_to_preload)
    self.model_system = ModelSystem()
def run(self):
    """Build a fresh model system and run it, skipping end-of-year caching
    and dataset cleanup; logs the data cache location when done."""
    model_system = ModelSystem()
    self.model_system = model_system
    model_system.run(self.config,
                     write_datasets_to_cache_at_end_of_year=False,
                     cleanup_datasets=False)
    cache_dir = self.simulation_state.get_cache_directory()
    logger.log_status("Data cache in %s" % cache_dir)