Example #1
    def _model_converged(self, model, data_handle):
        """Check a single model's output for convergence

        Compare data output for each param over recent iterations.

        Parameters
        ----------
        model: :class:`smif.model.Model`
        data_handle: :class:`smif.data_layer.DataHandle`

        Returns
        -------
        bool
            True if converged, otherwise False
        """
        prev_data_handle = DataHandle(
            data_handle._store,
            data_handle._modelrun_name,
            data_handle._current_timestep,
            data_handle._timesteps,
            model,
            self._current_iteration - 1,  # access previous iteration
            data_handle._decision_iteration)
        return all(
            np.allclose(data_handle.get_data(param.name),
                        prev_data_handle.get_data(param.name),
                        rtol=self.relative_tolerance,
                        atol=self.absolute_tolerance)
            for param in model.inputs.metadata)
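
For reference, np.allclose treats two arrays as equal when every element satisfies |a - b| <= atol + rtol * |b|, so the check above passes only if each parameter's output changed negligibly between the previous and current iteration. A minimal standalone illustration (the values and tolerances below are arbitrary, not taken from the example):

import numpy as np

previous = np.array([[100.0, 200.0]])
current = np.array([[100.04, 200.1]])
# True when every element satisfies |current - previous| <= atol + rtol * |previous|
print(np.allclose(current, previous, rtol=1e-03, atol=1e-05))  # True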
Example #2
    def solve_model(self, model_run, store):
        """Solve a ModelRun

        This method first calls :func:`smif.model.SosModel.before_model_run`
        with parameter data, then steps through the model horizon, calling
        :func:`smif.model.SosModel.simulate` with parameter data at each
        timestep.

        Arguments
        ---------
        model_run : :class:`smif.modelrun.ModelRun`
        """
        self.logger.debug("Initialising each of the sector models")
        # Initialise each of the sector models
        data_handle = DataHandle(
            store, model_run.name, None, model_run.model_horizon,
            model_run.sos_model)
        model_run.sos_model.before_model_run(data_handle)

        self.logger.debug("Solving the models over all timesteps: %s",
                          model_run.model_horizon)
        # Solve the models over all timesteps
        for timestep in model_run.model_horizon:
            self.logger.debug('Running model for timestep %s', timestep)

            data_handle = DataHandle(
                store, model_run.name, timestep, model_run.model_horizon,
                model_run.sos_model)
            model_run.sos_model.simulate(data_handle)
        return data_handle
Example #3
    def test_set_data(self, mock_store, mock_model):
        """should allow write access to output data
        """
        expected = np.array([[1.0]])
        data_handle = DataHandle(mock_store, 1, 2015, [2015, 2020], mock_model)

        data_handle.set_results("test", expected)
        assert data_handle["test"] == expected

        mock_store.write_results.assert_called_with(1, 'test_model', 'test',
                                                    expected, 'test_regions',
                                                    'test_intervals', 2015,
                                                    None, None)
Example #4
    def load_network(self, data_handle: DataHandle) -> NetworkManager:
        """Implement this method to conduct pre-model run tasks

        Arguments
        ---------
        data_handle: smif.data_layer.DataHandle
            Access parameter values (before any model is run, no dependency
            input data or state is guaranteed to be available)
        """
        # Get wrapper configuration
        path_main = os.path.dirname(os.path.abspath(__file__))
        config = configparser.ConfigParser()
        config.read(os.path.join(path_main, 'wrapperconfig.ini'))
        data_path = config['PATHS']['path_local_data']

        # Get modelrun configuration
        read_only_parameters = data_handle.get_parameters()

        parameters = {}
        for name, data_array in read_only_parameters.items():
            parameters[name] = float(data_array.data)

        self.logger.debug(parameters)

        # Load assets
        assets = read_assets(data_path)

        # Load links
        links = read_links(data_path)

        return NetworkManager(assets, links, parameters)
Example #5
    def test_previous_timestep_error(self):
        """should raise error if there's no previous timestep in the list
        """
        data_handle = DataHandle(Mock(), 1, 2015, [2015, 2020], Mock())
        with raises(TimestepResolutionError) as ex:
            data_handle.previous_timestep
        assert 'no previous timestep' in str(ex)
Example #6
    def simulate(self, data_handle):
        """Run the SosModel

        Arguments
        ---------
        data_handle: smif.data_layer.DataHandle
            Access state, parameter values, dependency inputs

        Returns
        -------
        results : smif.data_layer.DataHandle
            Access model outputs

        """
        self.check_dependencies()
        run_order = self._get_model_sets_in_run_order()
        self.logger.info("Determined run order as %s",
                         [x.name for x in run_order])
        for model in run_order:
            # get custom data handle for the Model
            model_data_handle = DataHandle(data_handle._store,
                                           data_handle._modelrun_name,
                                           data_handle._current_timestep,
                                           list(data_handle.timesteps), model,
                                           data_handle._modelset_iteration,
                                           data_handle._decision_iteration)
            model.simulate(model_data_handle)
        return data_handle
Example #7
def _get_model_and_handle(store,
                          model_run_id,
                          model_name,
                          timestep=None,
                          decision=None):
    """Helper method to read model and set up appropriate data handle
    """
    try:
        model_run_config = store.read_model_run(model_run_id)
    except SmifDataNotFoundError:
        logging.error(
            "Model run %s not found. Run 'smif list' to see available model runs.",
            model_run_id)
        sys.exit(1)

    loader = ModelLoader()
    sector_model_config = store.read_model(model_name)
    # absolute path to be crystal clear for ModelLoader when loading python class
    sector_model_config['path'] = os.path.normpath(
        os.path.join(store.model_base_folder, sector_model_config['path']))
    model = loader.load(sector_model_config)

    # DataHandle reads
    # - model run from store to find narratives and scenarios selected
    # - sos model from store to find dependencies and parameters
    # all in order to resolve *input* data locations and *parameter* defaults and values
    data_handle = DataHandle(store=store,
                             model=model,
                             modelrun_name=model_run_id,
                             current_timestep=timestep,
                             timesteps=model_run_config['timesteps'],
                             decision_iteration=decision)
    return model, data_handle
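
A sketch of how this helper might be used to run a single model over one timestep; the store setup is omitted, the model run and model names below are placeholders, and the simulate call follows the pattern of Examples #2 and #6:

# hypothetical usage; 'energy_run' and 'energy_demand' are placeholder names
model, data_handle = _get_model_and_handle(
    store, 'energy_run', 'energy_demand', timestep=2015, decision=None)
model.simulate(data_handle)  # run the loaded model against its data handle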
Example #8
    def test_get_data(self, mock_store, mock_model):
        """should allow read access to input data
        """
        data_handle = DataHandle(mock_store, 1, 2015, [2015, 2020], mock_model)
        expected = np.array([[1.0]])
        actual = data_handle.get_data("test")
        assert actual == expected

        mock_store.read_results.assert_called_with(
            1,
            'test_source',  # read from source model
            'test_output',  # using source model output name
            'test_regions',
            'test_intervals',
            2015,
            None,
            None)
Example #9
    def test_get_data_with_conversion(self, mock_store,
                                      mock_model_with_conversion):
        """should convert liters to milliliters (1 -> 0.001)
        """
        data_handle = DataHandle(mock_store, 1, 2015, [2015, 2020],
                                 mock_model_with_conversion)
        expected = np.array([[0.001]])
        actual = data_handle.get_data("test")
        assert actual == expected

        mock_store.read_results.assert_called_with(
            1,
            'test_source',  # read from source model
            'test_output',  # using source model output name
            'test_regions',
            'test_intervals',
            2015,
            None,
            None)
Example #10
def get_data_handle(model):
    """Return a data handle for the model
    """
    store = MemoryInterface()
    store.write_sos_model_run({'name': 'test', 'narratives': {}})
    return DataHandle(
        store,
        'test',  # modelrun_name
        2010,  # current_timestep
        [2010, 2011],  # timesteps
        model)
Example #11
    def before_model_run(self, data_handle):
        """Initialise each model (passing in parameter data only)
        """
        for model in self.sector_models.values():
            # get custom data handle for the Model
            model_data_handle = DataHandle(data_handle._store,
                                           data_handle._modelrun_name,
                                           data_handle._current_timestep,
                                           data_handle._timesteps, model,
                                           data_handle._modelset_iteration,
                                           data_handle._decision_iteration)
            model.before_model_run(model_data_handle)
Example #12
    def save_decision_metrics(self, data_handle: DataHandle):
        """Compute decision metrics for the current system and save to disk

        Expects outputs for this wrapper to be defined with the same names as the
        list of attributes below

        Arguments
        ---------
        data_handle
        """
        technologies = self.outputs['rollout_costs'].dim_coords(
            'technology').ids
        exchanges = self.outputs['rollout_costs'].dim_coords('exchanges').ids

        attributes = [
            'rollout_costs', 'rollout_bcr', 'total_potential_benefit',
            'total_potential_bcr'
        ]

        for attribute in attributes:
            data = self.save_exchange_attribute(technologies, exchanges,
                                                attribute)
            data_handle.set_results(attribute, data)
Example #13
def get_data_handle(model):
    """Return a data handle for the model
    """
    store = MemoryInterface()
    store.write_sos_model_run({
        'name': 'test',
        'narratives': {}
    })
    store.write_scenario_data(
        'Arbitrary Demand Scenario',
        'electricity_demand_output',
        np.array([[123]]),
        'LSOA',
        'annual',
        2010)
    return DataHandle(
        store,
        'test',  # modelrun_name
        2010,  # current_timestep
        [2010],  # timesteps
        model
    )
Example #14
    def _run_iteration(self, i, data_handle):
        """Run all models within the set

        Arguments
        ---------
        i : int
            Iteration counter
        data_handle : smif.data_layer.DataHandle
        """
        for model in self.models.values():
            self.logger.info("Simulating %s, iteration %s", model.name, i)
            model_data_handle = DataHandle(data_handle._store,
                                           data_handle._modelrun_name,
                                           data_handle._current_timestep,
                                           data_handle._timesteps, model, i,
                                           data_handle._decision_iteration)
            # Start by running all models in set with best guess
            # - zeroes
            # - last year's inputs
            if i == 0:
                self._guess_results(model, model_data_handle)
            else:
                model.simulate(model_data_handle)
Example #15
    def _converged(self, data_handle):
        """Check whether the results of a set of models have converged.

        Returns
        -------
        converged: bool
            True if the results have converged to within a tolerance

        Raises
        ------
        DiverganceError
            If the results appear to be diverging
        """
        if self._current_iteration < 2:
            # must have at least two result sets per model to assess convergence
            return False

        # each data output is a dict with
        #   str key (parameter name) =>
        #       np.ndarray value (regions x intervals)
        converged = []
        for model in self.models.values():
            model_data_handle = DataHandle(data_handle._store,
                                           data_handle._modelrun_name,
                                           data_handle._current_timestep,
                                           data_handle._timesteps, model,
                                           self._current_iteration,
                                           data_handle._decision_iteration)
            converged.append(self._model_converged(model, model_data_handle))

        if all(converged):
            # if every model's most recent results are almost equal to the penultimate ones,
            # the set has converged
            return True

        # TODO check for divergence and raise error

        return False
Example #16
    def test_create(self):
        """should be created with a DataInterface
        """
        DataHandle(Mock(), 1, 2015, [2015, 2020], Mock())
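
Comparing this constructor call with Example #7 suggests the positional arguments are (store, modelrun_name, current_timestep, timesteps, model), optionally followed by the model-set and decision iteration counters. A keyword-argument construction mirroring Example #7 (the store, model and 'test' name below are placeholders):

# placeholder objects and names; keyword names follow Example #7
handle = DataHandle(store=store, model=model, modelrun_name='test',
                    current_timestep=2015, timesteps=[2015, 2020],
                    decision_iteration=None)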
Example #17
    def test_previous_timestep(self):
        """should return previous timestep from list
        """
        data_handle = DataHandle(Mock(), 1, 2020, [2015, 2020], Mock())
        assert data_handle.previous_timestep == 2015
Example #18
    def test_base_timestep(self):
        """should return first timestep in list
        """
        data_handle = DataHandle(Mock(), 1, 2015, [2015, 2020], Mock())
        assert data_handle.base_timestep == 2015
Example #19
    def test_current_timestep(self):
        """should return current timestep
        """
        data_handle = DataHandle(Mock(), 1, 2015, [2015, 2020], Mock())
        assert data_handle.current_timestep == 2015
Example #20
    def simulate(self, data_handle: DataHandle):
        """Implement smif.SectorModel simulate
        """
        # -----
        # Start
        # -----
        now = data_handle.current_timestep
        total_cost = 0

        interventions = data_handle.get_current_interventions()

        dc_assets = []

        for name, intervention in interventions.items():
            if intervention['build_year'] == now:
                technology = intervention['technology']
                asset_id = intervention['id']
                exchange = [
                    exchange for exchange in self.system._exchanges
                    if exchange.id == asset_id
                ][0]
                total_cost += exchange.rollout_costs[technology]

                asset = (intervention['id'], intervention['technology'])
                dc_assets.append(asset)

        self.logger.debug(self.system.capacity('exchange'))

        data_handle.set_results('total_cost', total_cost)

        self.logger.debug("Upgrading dc assets with %s", dc_assets)
        self.system.upgrade(dc_assets)

        self.logger.debug(self.system.capacity('exchange'))

        adoption = self.update_adoption_desirability(data_handle)

        self.save_decision_metrics(data_handle)

        # -------------
        # Write outputs
        # -------------
        lad_names = self.outputs['lad_premises_with_fttp'].dim_coords(
            'lad_uk_2016').ids
        num_lads = len(lad_names)
        num_fttp = np.zeros((num_lads))
        num_fttdp = np.zeros((num_lads))
        num_fttc = np.zeros((num_lads))
        num_adsl = np.zeros((num_lads))

        coverage = self.system.coverage()
        for i, lad in enumerate(lad_names):
            if lad not in coverage:
                continue
            stats = coverage[lad]
            num_fttp[i] = stats['num_fttp']
            num_fttdp[i] = stats['num_fttdp']
            num_fttc[i] = stats['num_fttc']
            num_adsl[i] = stats['num_adsl']

        data_handle.set_results('lad_premises_with_fttp', num_fttp)
        data_handle.set_results('lad_premises_with_fttdp', num_fttdp)
        data_handle.set_results('lad_premises_with_fttc', num_fttc)
        data_handle.set_results('lad_premises_with_adsl', num_adsl)

        aggregate_coverage = self.system.aggregate_coverage('lad')

        perc_fttp = np.zeros((num_lads))
        perc_fttdp = np.zeros((num_lads))
        perc_fttc = np.zeros((num_lads))
        perc_docsis3 = np.zeros((num_lads))
        perc_adsl = np.zeros((num_lads))
        sum_of_premises = np.zeros((num_lads))

        for i, lad in enumerate(lad_names):
            if lad not in aggregate_coverage:
                continue
            datum = aggregate_coverage[lad]
            perc_fttp[i] = datum['percentage_of_premises_with_fttp']
            perc_fttdp[i] = datum['percentage_of_premises_with_fttdp']
            perc_fttc[i] = datum['percentage_of_premises_with_fttc']
            perc_docsis3[i] = datum['percentage_of_premises_with_docsis3']
            perc_adsl[i] = datum['percentage_of_premises_with_adsl']
            sum_of_premises[i] = datum['sum_of_premises']

        data_handle.set_results('percentage_of_premises_connected_with_fttp',
                                perc_fttp)
        data_handle.set_results('percentage_of_premises_connected_with_fttdp',
                                perc_fttdp)
        data_handle.set_results('percentage_of_premises_connected_with_fttc',
                                perc_fttc)
        data_handle.set_results(
            'percentage_of_premises_connected_with_docsis3', perc_docsis3)
        data_handle.set_results('percentage_of_premises_connected_with_adsl',
                                perc_adsl)