Example #1
    def __init__(self, first_year=2016, final_year=2040, scenarios_per_year=5):
        self.dual = Dual(first_year, final_year, scenarios_per_year)
        self.data = ModelData()
        self.analysis = AnalyseResults()

        # Common model components. May need to update these values
        self.common = CommonComponents(first_year=first_year,
                                       final_year=final_year,
                                       scenarios_per_year=scenarios_per_year)

        # Model sets
        self.sets = self.get_model_sets()
Example #2
    def __init__(self, output_dir, log_name):
        logging.basicConfig(
            filename=os.path.join(output_dir, f'{log_name}.log'),
            filemode='a',
            format='%(asctime)s %(name)s %(levelname)s %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S',
            level=logging.DEBUG)

        # Used to parse prices and analyse results
        self.analysis = AnalyseResults()

        # Get scheme targets
        self.targets = Targets()
Example #3
    def __init__(self, first_year, final_year, scenarios_per_year,
                 transition_year):
        self.common = CommonComponents(first_year, final_year,
                                       scenarios_per_year)
        self.analysis = AnalyseResults()
        self.prices = PriceSetter()
        self.transition_year = transition_year

        # Solver options
        self.keepfiles = False
        self.solver_options = {}  # 'MIPGap': 0.0005
        self.opt = SolverFactory('cplex', solver_io='lp')
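
A minimal sketch of how these options are typically passed to Pyomo's solve call, enabling the commented-out MIP gap from above. The toy model is illustrative and not part of the original code:

from pyomo.environ import ConcreteModel, Objective, SolverFactory, Var

# Toy model (placeholder for the real planning model)
m = ConcreteModel()
m.x = Var(bounds=(0, 10))
m.OBJ = Objective(expr=m.x)

# Enable the MIP gap option and pass settings through to the solver
opt = SolverFactory('cplex', solver_io='lp')
solver_options = {'MIPGap': 0.0005}  # relative optimality gap tolerance
results = opt.solve(m, options=solver_options, keepfiles=False, tee=True)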
Example #4
class PersistenceForecast:
    def __init__(self):
        self.data = ModelData()
        self.analysis = AnalyseResults()

    def get_energy_forecast_persistence(self, output_dir, year, week,
                                        n_intervals, eligible_generators):
        """
        Get persistence based energy forecast. Energy output in previous week assumed same for following weeks.

        Params
        ------
        output_dir : str
            Path to directory containing output files

        year : int


        """

        # Take into account end-of-year transition
        if week == 1:
            previous_interval_year = year - 1
            previous_interval_week = 52
        else:
            previous_interval_year = year
            previous_interval_week = week - 1

        # Container for energy output DataFrames
        dfs = []

        for day in range(1, 8):
            df_o = self.analysis.get_generator_interval_results(
                output_dir, 'e', previous_interval_year,
                previous_interval_week, day)
            dfs.append(df_o)

        # Concatenate DataFrames
        df_c = pd.concat(dfs)

        # Energy forecast
        energy_forecast = {(g, 1, c): v
                           for g, v in df_c.sum().to_dict().items()
                           for c in range(1, n_intervals + 1)
                           if g in eligible_generators}

        # Assume probability = 1 for each scenario (only one scenario per calibration interval for persistence forecast)
        probabilities = {(g, 1): float(1)
                         for g in df_c.sum().to_dict().keys()
                         if g in eligible_generators}

        return energy_forecast, probabilities
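
A hedged usage sketch of the persistence forecast; the directory and generator IDs below are placeholders, not values from the original code:

# Hypothetical usage of PersistenceForecast
forecast = PersistenceForecast()
energy_forecast, probabilities = forecast.get_energy_forecast_persistence(
    output_dir='output', year=2018, week=10, n_intervals=6,
    eligible_generators=['GEN1', 'GEN2'])

# energy_forecast is keyed by (generator, scenario, interval) and
# probabilities by (generator, scenario), with a single scenario of weight 1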
Example #5
    def __init__(self, output_dir=os.path.join(os.path.dirname(__file__),
                                               os.path.pardir, 'output')):
        self.output_dir = output_dir

        # Object containing model data
        self.data = ModelData()

        # Class used to analyse model results
        self.analysis = AnalyseResults()

        # Solver options
        self.keepfiles = True
        self.solver_options = {}
        self.opt = SolverFactory('cplex', solver_io='lp')
Example #6
class PriceSetter:
    def __init__(self, first_year=2016, final_year=2040, scenarios_per_year=5):
        self.dual = Dual(first_year, final_year, scenarios_per_year)
        self.data = ModelData()
        self.analysis = AnalyseResults()

        # Common model components. May need to update these values
        self.common = CommonComponents(first_year=first_year,
                                       final_year=final_year,
                                       scenarios_per_year=scenarios_per_year)

        # Model sets
        self.sets = self.get_model_sets()

    @staticmethod
    def convert_to_frame(results, index_name, variable_name):
        """Convert dict to pandas DataFrame"""

        # Convert dictionary to DataFrame
        df = pd.Series(
            results[variable_name]).rename_axis(index_name).to_frame(
                name=variable_name)

        return df
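
    # Usage sketch (hypothetical data):
    #   PriceSetter.convert_to_frame({'baseline': {2016: 1.0}}, 'year', 'baseline')
    # returns a DataFrame indexed by 'year' with a single 'baseline' column.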

    def get_model_sets(self):
        """Define sets used in model"""

        # Get all sets used within the model
        m = ConcreteModel()
        m = self.common.define_sets(m)

        return m

    def get_eligible_generators(self):
        """Find generators which are eligible for rebates / penalties"""

        # Eligible generators
        eligible_generators = [
            g for g in self.sets.G_THERM.union(self.sets.G_C_WIND,
                                               self.sets.G_C_SOLAR)
        ]

        return eligible_generators

    def get_generator_cost_parameters(self, results_dir, filename,
                                      eligible_generators):
        """
        Get parameters affecting generator marginal costs, and compute the net marginal cost for a given policy
        """

        # Model results
        results = self.analysis.load_results(results_dir, filename)

        # Price setting algorithm
        costs = pd.Series(results['C_MC']).rename_axis(
            ['generator', 'year']).to_frame(name='marginal_cost')

        # Add emissions intensity baseline
        costs = costs.join(pd.Series(
            results['baseline']).rename_axis('year').to_frame(name='baseline'),
                           how='left')

        # Add permit price
        costs = (costs.join(pd.Series(
            results['permit_price']).rename_axis('year').to_frame(
                name='permit_price'),
                            how='left'))

        # Emissions intensities for existing and candidate units
        existing_emissions = self.analysis.data.existing_units.loc[:, (
            'PARAMETERS', 'EMISSIONS')]
        candidate_emissions = self.analysis.data.candidate_units.loc[:, (
            'PARAMETERS', 'EMISSIONS')]

        # Combine emissions intensities into a single DataFrame
        emission_intensities = (pd.concat([
            existing_emissions, candidate_emissions
        ]).rename_axis('generator').to_frame('emissions_intensity'))

        # Join emissions intensities
        costs = costs.join(emission_intensities, how='left')

        # Total marginal cost (taking into account net cost under policy)
        costs['net_marginal_cost'] = (
            costs['marginal_cost'] +
            (costs['emissions_intensity'] - costs['baseline']) *
            costs['permit_price'])

        def correct_for_ineligible_generators(row):
            """Update costs so only eligible generators have new costs (ineligible generators have unchanged costs)"""

            if row.name[0] in eligible_generators:
                return row['net_marginal_cost']
            else:
                return row['marginal_cost']

        # Correct for ineligible generators
        costs['net_marginal_cost'] = costs.apply(
            correct_for_ineligible_generators, axis=1)

        return costs
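
    # Numeric check of the net-cost rule above (hypothetical values):
    # marginal_cost=40, emissions_intensity=0.9, baseline=0.8, permit_price=25
    # gives net_marginal_cost = 40 + (0.9 - 0.8) * 25 = 42.5.
    # Ineligible generators keep net_marginal_cost = marginal_cost.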

    def get_price_setting_generators(self, results_dir, filename,
                                     eligible_generators):
        """Find price setting generators"""

        # Prices
        prices = self.analysis.parse_prices(results_dir, filename)

        # Generator SRMC and cost parameters (emissions intensities, baselines, permit prices)
        generator_costs = self.get_generator_cost_parameters(
            results_dir, filename, eligible_generators)

        def get_price_setting_generator(row):
            """Get price setting generator, price difference, and absolute real price"""

            # Year and average real price for a given row
            year, price = row.name[0], row['average_price_real']

            # Absolute difference between price and all generator SRMCs
            abs_price_difference = generator_costs.loc[(
                slice(None), year), 'net_marginal_cost'].subtract(price).abs()

            # Price setting generator and absolute price difference for that generator
            generator = abs_price_difference.idxmin()[0]
            difference = abs_price_difference.min()

            # Generator SRMC
            srmc = generator_costs.loc[(generator, year), 'net_marginal_cost']

            # Set generator name to load shedding if the price is very high (indicative of load shedding)
            if difference > 9000:
                generator = 'LOAD-SHEDDING'
                difference = np.nan
                srmc = np.nan

            return pd.Series({
                'generator': generator,
                'difference': difference,
                'price': price,
                'srmc': srmc
            })

        # Find price setting generators
        price_setters = prices.apply(get_price_setting_generator, axis=1)

        # Combine output into single dictionary
        output = {
            'price_setters': price_setters,
            'prices': prices,
            'generator_costs': generator_costs
        }

        return output

    def get_dual_component_existing_thermal(self, results):
        """Get dual variable component of dual constraint for existing thermal units"""

        # Container for dual variable DataFrames
        dfs = []

        for v in ['SIGMA_1', 'SIGMA_2', 'SIGMA_20', 'SIGMA_23']:
            index = ('generator', 'year', 'scenario', 'interval')
            dfs.append(self.convert_to_frame(results, index, v))

        # Place all information in a single DataFrame
        df_c = pd.concat(dfs, axis=1).dropna()

        # Dual variable values offset by one interval (values at t+1)
        df_c['SIGMA_20_PLUS_1'] = df_c['SIGMA_20'].shift(-1)
        df_c['SIGMA_23_PLUS_1'] = df_c['SIGMA_23'].shift(-1)

        return df_c

    def k(self, g):
        """Mapping generator to the NEM zone to which it belongs"""

        if g in self.sets.G_E:
            return self.data.existing_units_dict[('PARAMETERS', 'NEM_ZONE')][g]

        elif g in self.sets.G_C.difference(self.sets.G_STORAGE):
            return self.data.candidate_units_dict[('PARAMETERS', 'ZONE')][g]

        elif g in self.sets.G_STORAGE:
            return self.data.battery_properties_dict['NEM_ZONE'][g]

        else:
            raise Exception(f'Unexpected generator: {g}')

    @staticmethod
    def merge_generator_node_prices(dual_component, zone_prices):
        """Get prices at the node to which a generator is connected"""

        # Merge price information
        df = (pd.merge(dual_component.reset_index(),
                       zone_prices.reset_index(),
                       how='left',
                       left_on=['zone', 'year', 'scenario', 'interval'],
                       right_on=['zone', 'year', 'scenario',
                                 'interval']).set_index([
                                     'generator', 'year', 'scenario',
                                     'interval', 'zone'
                                 ]))

        return df

    def get_generator_cost_information(self, results):
        """Merge generator cost information"""

        # Load results
        delta = self.convert_to_frame(results, 'year', 'DELTA')
        rho = self.convert_to_frame(results, ('year', 'scenario'), 'RHO')
        emissions_rate = self.convert_to_frame(results, 'generator',
                                               'EMISSIONS_RATE')
        baseline = self.convert_to_frame(results, 'year', 'baseline')
        permit_price = self.convert_to_frame(results, 'year', 'permit_price')
        marginal_cost = self.convert_to_frame(results, ('generator', 'year'),
                                              'C_MC')

        # Join information into a single DataFrame
        df_c = marginal_cost.join(emissions_rate, how='left')
        df_c = df_c.join(baseline, how='left')
        df_c = df_c.join(permit_price, how='left')
        df_c = df_c.join(delta, how='left')
        df_c = df_c.join(rho, how='left')

        # Scaling factor for the final year (1 + 1/0.06 resembles a perpetuity factor at a 6% discount rate)
        final_year = df_c.index.levels[0][-1]
        df_c['scaling_factor'] = df_c.apply(
            lambda x: 1 if int(x.name[0]) < final_year else 1 + (1 / 0.06),
            axis=1)

        return df_c

    def merge_generator_cost_information(self, df, results):
        """Merge generator cost information from model"""

        # Get generator cost information
        generator_cost_info = self.get_generator_cost_information(results)

        df = (pd.merge(df.reset_index(),
                       generator_cost_info.reset_index(),
                       how='left').set_index([
                           'generator', 'year', 'scenario', 'interval', 'zone'
                       ]))

        return df

    def get_constraint_body_existing_thermal(self, results):
        """Get body of dual power output constraint for existing thermal generators"""

        # Components of dual power output constraint
        duals = self.get_dual_component_existing_thermal(results)

        # Map between generators and zones
        generators = duals.index.levels[0]
        generator_zone_map = (pd.DataFrame.from_dict(
            {g: self.k(g)
             for g in generators},
            orient='index',
            columns=['zone']).rename_axis('generator'))

        # Add NEM zone to index
        duals = duals.join(generator_zone_map,
                           how='left').set_index('zone', append=True)

        # Power balance dual variables
        var_index = ('zone', 'year', 'scenario', 'interval')
        prices = self.convert_to_frame(results, var_index, 'PRICES')

        # Merge price information
        c = self.merge_generator_node_prices(duals, prices)

        # Merge operating cost information
        c = self.merge_generator_cost_information(c, results)

        return c

    def evaluate_constraint_body_existing_thermal(self, results):
        """Evaluate constraint body information for existing thermal units (should = 0)"""

        # Get values of terms constituting the constraint
        c = self.get_constraint_body_existing_thermal(results)

        # Correct for all intervals excluding the last interval of each scenario
        s_1 = (-c['SIGMA_1'].abs() + c['SIGMA_2'].abs() - c['PRICES'].abs() +
               (c['DELTA'] * c['RHO'] * c['scaling_factor'] *
                (c['C_MC'] +
                 (c['EMISSIONS_RATE'] - c['baseline']) * c['permit_price'])) +
               c['SIGMA_20'].abs() - c['SIGMA_20_PLUS_1'].abs() -
               c['SIGMA_23'].abs() + c['SIGMA_23_PLUS_1'].abs())

        # Set last interval to NaN
        s_1.loc[(slice(None), slice(None), slice(None), 24,
                 slice(None))] = np.nan

        # Last interval of each scenario
        s_2 = (-c['SIGMA_1'].abs() + c['SIGMA_2'].abs() - c['PRICES'].abs() +
               (c['DELTA'] * c['RHO'] * c['scaling_factor'] *
                (c['C_MC'] +
                 (c['EMISSIONS_RATE'] - c['baseline']) * c['permit_price'])) +
               c['SIGMA_20'].abs() - c['SIGMA_23'].abs())

        # Update so corrected values for last interval are accounted for
        s_3 = s_1.to_frame(name='body')
        s_3.update(s_2.to_frame(name='body'), overwrite=False)

        return s_3

    def evaluate_constraint_dual_component_existing_thermal(self, results):
        """Evaluate dual component of constraint"""

        # Get values of terms constituting the constraint
        c = self.get_constraint_body_existing_thermal(results)

        # Dual component - correct for intervals excluding the last interval of each scenario
        s_1 = (-c['SIGMA_1'].abs() + c['SIGMA_2'].abs() + c['SIGMA_20'].abs() -
               c['SIGMA_20_PLUS_1'].abs() - c['SIGMA_23'].abs() +
               c['SIGMA_23_PLUS_1'].abs())

        # Set last interval to NaN
        s_1.loc[(slice(None), slice(None), slice(None), 24,
                 slice(None))] = np.nan

        # Dual component - correct for last interval of each scenario
        s_2 = (-c['SIGMA_1'].abs() + c['SIGMA_2'].abs() +
               c['SIGMA_20'].abs() - c['SIGMA_23'].abs())

        # Combine components
        s_3 = s_1.to_frame(name='body')
        s_3.update(s_2.to_frame(name='body'), overwrite=False)

        return s_3

    def get_price_setting_generators_from_model_results(self, results):
        """Find price setting generators"""

        # Generators eligible for a rebate / penalty under the scheme
        eligible_generators = self.get_eligible_generators()

        # Generator costs
        generator_costs = self.get_generator_cost_information(results)

        # Get prices in each zone for each dispatch interval
        index = ('zone', 'year', 'scenario', 'interval')
        zone_price = self.convert_to_frame(results, index, 'PRICES')

        def correct_permit_prices(row):
            """Only eligible generators face a non-zero permit price"""

            if row.name[1] in eligible_generators:
                return row['permit_price']
            else:
                return 0

        # Update permit prices
        generator_costs['permit_price'] = generator_costs.apply(
            correct_permit_prices, axis=1)

        # Net marginal costs
        generator_costs['net_marginal_cost'] = (
            generator_costs['scaling_factor'] * generator_costs['DELTA'] *
            generator_costs['RHO'] *
            (generator_costs['C_MC'] +
             (generator_costs['EMISSIONS_RATE'] - generator_costs['baseline'])
             * generator_costs['permit_price']))

        def get_price_setter(row):
            """Find generator whose marginal cost is closest to interval marginal cost"""

            # Extract zone, year, scenario, and interval information
            z, y, s, t = row.name

            # Power balance constraint marginal cost for given interval (related to price)
            p = abs(row['PRICES'])

            # Net marginal costs of all generators for the given interval
            generator_mc = generator_costs.loc[(y, slice(None), s), :]

            # Scenario duration and discount factor (arbitrarily selecting YWPS4 to get a single row)
            rho, delta = generator_costs.loc[(y, 'YWPS4', s),
                                             ['RHO', 'DELTA']].values

            # Difference between marginal cost in given interval and all generator marginal costs for that interval
            diff = generator_mc['net_marginal_cost'].subtract(p).abs()

            # Extract generator ID and absolute cost difference
            g, cost_diff = diff.idxmin(), diff.min()

            # Details of the price setting generator
            cols = [
                'EMISSIONS_RATE', 'baseline', 'permit_price', 'C_MC',
                'net_marginal_cost', 'scaling_factor'
            ]
            (emissions_rate, baseline, permit_price, marginal_cost,
             net_marginal_cost, scaling_factor) = generator_costs.loc[g, cols]

            # Compute normalised price and cost differences
            price_normalised = p / (delta * rho * scaling_factor)
            cost_diff_normalised = cost_diff / (delta * rho)
            net_marginal_cost_normalised = net_marginal_cost / (delta * rho *
                                                                scaling_factor)

            return (g[1], p, price_normalised, cost_diff, cost_diff_normalised,
                    emissions_rate, baseline, permit_price, marginal_cost,
                    net_marginal_cost, net_marginal_cost_normalised)

        # Get price setting generator information
        ps = zone_price.apply(get_price_setter, axis=1)

        # Convert column of tuples to DataFrame with separate columns
        columns = [
            'generator', 'price_abs', 'price_normalised', 'difference_abs',
            'difference_normalised', 'emissions_rate', 'baseline',
            'permit_price', 'marginal_cost', 'net_marginal_cost',
            'net_marginal_cost_normalised'
        ]
        ps = pd.DataFrame(ps.to_list(),
                          columns=columns,
                          index=zone_price.index)

        return ps
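
A hedged sketch of the price-setting workflow defined above; the results directory and filename are placeholders:

# Hypothetical usage of PriceSetter
price_setter = PriceSetter()
eligible_generators = price_setter.get_eligible_generators()
output = price_setter.get_price_setting_generators(
    results_dir='output', filename='rep_results.pickle',
    eligible_generators=eligible_generators)

# 'price_setters' identifies, for each row of the price DataFrame, the
# generator whose net marginal cost is closest to the average real price
print(output['price_setters'].head())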
Example #7
    def run_case(self, params, hot_start=None):
        """Run case parameters"""

        # Save case parameters
        with open(os.path.join(params['output_dir'], 'parameters.pickle'),
                  'wb') as f:
            pickle.dump(params, f)

        # Unit commitment and MPC model objects
        uc = UnitCommitment()
        mpc = MPCController()

        # Objects used to generate forecasts for MPC updating model and analyse model results
        persistence_forecast = PersistenceForecast()
        scenario_forecast = MonteCarloForecast()
        analysis = AnalyseResults()

        # Construct UC and MPC models
        m_uc = uc.construct_model(params['overlap_intervals'])
        m_mpc = mpc.construct_model(
            generators=m_uc.G,
            n_intervals=params['calibration_intervals'],
            n_scenarios=params['scenarios'])

        # Activate additional constraints corresponding to different cases
        if params['case_name'] == 'revenue_floor':
            m_mpc.REVENUE_FLOOR_CONS.activate()

        # Initialise policy parameters (baseline and permit price)
        m_uc.PERMIT_PRICE.store_values(params['permit_price'])

        for t in m_uc.T:
            if params['case_name'] == 'carbon_tax':
                m_uc.BASELINE[t] = float(0)
            else:
                m_uc.BASELINE[t] = float(params['baseline_start'])

        # Counter for model windows, and flag used to break loop if model is infeasible
        window = 1
        break_flag = False

        # Years and weeks over which to iterate. Adjusted if using a hot start
        if hot_start is not None:
            years = range(hot_start[0], max(params['years']) + 1)
            weeks = range(hot_start[1], 53)
        else:
            years = params['years']
            weeks = range(1, 53)

        for y in years:
            if break_flag:
                break

            for w in weeks:
                if break_flag:
                    break

                for d in params['days']:
                    print(
                        f'Running window {window}: year={y}, week={w}, day={d}'
                    )

                    # Update model parameters for a given day
                    m_uc = uc.update_parameters(m_uc, y, w, d)

                    if window != 1:
                        # Fix interval start using solution from previous window
                        m_uc = uc.fix_interval_overlap(
                            m_uc, y, w, d, params['overlap_intervals'],
                            params['output_dir'])

                    # Run solve sequence. First solve MILP, then fix integer variables and re-solve to obtain prices.
                    m_uc, break_flag = self.run_solve_sequence(uc, m_uc)

                    # Break loop if model is infeasible
                    if break_flag:
                        break

                    # Save solution
                    uc.save_solution(m_uc, y, w, d, params['output_dir'])

                    # Unfix binary variables
                    m_uc = uc.unfix_binary_variables(m_uc)

                    # Check if next week will be the last calibration interval in the model horizon
                    next_week_is_last_interval = (w == max(
                        params['weeks'])) and (y == max(params['years']))

                    # Update baseline at the end of the week if not the last interval and updates are required
                    if (d == 7) and (not next_week_is_last_interval
                                     ) and params['baseline_update_required']:

                        # Year and week index for next interval. Take into account year changing
                        if w == max(params['weeks']):
                            next_w = 1
                            next_y = y + 1
                        else:
                            next_w = w + 1
                            next_y = y

                        # Get cumulative scheme revenue
                        cumulative_revenue = analysis.get_cumulative_scheme_revenue(
                            params['output_dir'], next_y, next_w)

                        # Get generator energy forecast for next set of calibration intervals
                        if (params['case_name']
                                == 'multi_scenario_forecast') and (next_y
                                                                   == 2018):
                            energy_forecast, probabilities = scenario_forecast.get_scenarios(
                                output_dir=params['output_dir'],
                                year=next_y,
                                week=next_w,
                                start_year=min(params['years']),
                                n_intervals=params['calibration_intervals'],
                                n_random_paths=params['n_random_paths'],
                                n_clusters=params['scenarios'],
                                eligible_generators=m_mpc.G)

                        else:
                            # Use a persistence-based forecast
                            energy_forecast, probabilities = persistence_forecast.get_energy_forecast_persistence(
                                output_dir=params['output_dir'],
                                year=next_y,
                                week=next_w,
                                n_intervals=params['calibration_intervals'],
                                eligible_generators=m_mpc.G)

                        # Update emissions intensities used in MPC model if an anticipated emissions intensity shock
                        if ((params['case_name'].startswith(
                                'anticipated_emissions_intensity_shock'))
                                and (next_y >= params['emissions_shock_year'])
                                and (next_w > params['emissions_shock_week'] -
                                     params['calibration_intervals'])):

                            # Emissions intensities for future calibration intervals when shock is anticipated
                            for g in m_mpc.G:
                                for c in range(
                                        max(
                                            params['emissions_shock_week'] -
                                            next_w + 1, 1),
                                        params['calibration_intervals'] + 1):
                                    # Update emissions intensities if shock week within the forecast horizon
                                    m_mpc.EMISSIONS_RATE[g, c] = (
                                        params['emissions_shock_factor'][g] *
                                        float(self.data.generators.loc[
                                            g, 'EMISSIONS']))

                        # Update emissions intensities in UC model if shock will / has occurred next week
                        if ((params['case_name'].startswith(
                                'anticipated_emissions_intensity_shock'))
                                and (next_y >= params['emissions_shock_year'])
                                and
                            (next_w >= params['emissions_shock_week'])):

                            for g in m_uc.G:
                                m_uc.EMISSIONS_RATE[g] = (
                                    params['emissions_shock_factor'][g] *
                                    float(
                                        self.data.generators.loc[g,
                                                                 'EMISSIONS']))

                        # Compute revenue target to use when updating baselines
                        revenue_target = self.get_mpc_revenue_target_input(
                            next_y, next_w, params['revenue_target'],
                            params['calibration_intervals'])

                        # Get updated baselines
                        mpc_results = mpc.run_baseline_updater(
                            m_mpc,
                            next_y,
                            next_w,
                            baseline_start=m_uc.BASELINE[1].value,
                            revenue_start=cumulative_revenue,
                            revenue_target=revenue_target,
                            revenue_floor=params['revenue_floor'],
                            permit_price={
                                k: v.value
                                for k, v in m_uc.PERMIT_PRICE.items()
                            },
                            energy_forecast=energy_forecast,
                            scenario_probabilities=probabilities)

                        # Save MPC results
                        mpc.save_results(next_y, next_w, mpc_results,
                                         params['output_dir'])

                        # Update baseline (starting at beginning of following day)
                        for h in [t for t in m_uc.T if t > 24]:
                            m_uc.BASELINE[h] = float(
                                mpc_results['baseline_trajectory'][1])

                        # Fix variables up until end of day (beginning of overlap period for next day)
                        m_uc = uc.fix_interval(m_uc, start=1, end=24)

                        # Run solve sequence. First solve MILP, then fix integer variables and re-solve to obtain prices
                        m_uc, break_flag = self.run_solve_sequence(uc, m_uc)

                        # Break loop if model is infeasible
                        if break_flag:
                            break

                        # Save solution (updates previously saved solution for this interval)
                        uc.save_solution(m_uc,
                                         y,
                                         w,
                                         d,
                                         params['output_dir'],
                                         update=True)

                        # Unfix binary variables
                        m_uc = uc.unfix_binary_variables(m_uc)

                        # Unfix remaining variables
                        m_uc = uc.unfix_interval(m_uc, start=1, end=24)

                        # Set baseline for all intervals to the MPC value in preparation for the next iteration
                        for h in m_uc.T:
                            m_uc.BASELINE[h] = float(
                                mpc_results['baseline_trajectory'][1])

                    # Apply unanticipated emissions intensity shock if required
                    if ((params['case_name'].startswith(
                            'unanticipated_emissions_intensity_shock'))
                            and (y == params['emissions_shock_year'])
                            and (w == params['emissions_shock_week'])
                            and (d == 1)):
                        # Applying new emissions intensities for coming calibration interval (misaligned with forecast)
                        for g in m_uc.G:
                            print(
                                f'UC old emissions intensity {g}: {m_uc.EMISSIONS_RATE[g].value}'
                            )
                            m_uc.EMISSIONS_RATE[
                                g] = params['emissions_shock_factor'][
                                    g] * m_uc.EMISSIONS_RATE[g].value
                            print(
                                f'UC new emissions intensity {g}: {m_uc.EMISSIONS_RATE[g].value}'
                            )

                        # Emissions intensities aligned for next calibration interval
                        for g in m_mpc.G:
                            for c in m_mpc.C:
                                print(
                                    f'MPC old emissions intensity {g}: {m_mpc.EMISSIONS_RATE[g, c].value}'
                                )
                                m_mpc.EMISSIONS_RATE[g, c] = (
                                    params['emissions_shock_factor'][g] *
                                    m_mpc.EMISSIONS_RATE[g, c].value)
                                print(
                                    f'MPC new emissions intensity {g}: {m_mpc.EMISSIONS_RATE[g, c].value}'
                                )

                    # Update rolling window counter
                    window += 1
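
For reference, a sketch of the params dictionary consumed by run_case. The keys are those read in the method body; all values are illustrative placeholders:

# Illustrative parameter dictionary for run_case (values are placeholders)
params = {
    'output_dir': 'output',
    'case_name': 'multi_scenario_forecast',
    'years': range(2018, 2019),
    'weeks': range(1, 53),
    'days': range(1, 8),
    'overlap_intervals': 17,
    'calibration_intervals': 6,
    'scenarios': 5,
    'baseline_start': 1.0,
    'baseline_update_required': True,
    'permit_price': {},             # keyed by generator
    'revenue_target': {},           # consumed by get_mpc_revenue_target_input
    'revenue_floor': None,
    'n_random_paths': 500,          # only for multi_scenario_forecast cases
    'emissions_shock_factor': {},   # only for emissions shock cases
    'emissions_shock_year': 2018,
    'emissions_shock_week': 10,
}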
Example #8
    def __init__(self):
        # Object used to analyse results
        self.analysis = AnalyseResults()
Example #9
File: results.py Project: akxen/rep-gep
class ResultsExtractor:
    def __init__(self):
        self.analysis = AnalyseResults()

    def extract_bau_results(self, results_dir, keys, output_dir=None):
        """Extract BAU model results"""

        # All BAU case files
        filenames = [f for f in os.listdir(results_dir) if 'bau_case' in f]

        # Container for results
        results = {}

        for f in filenames:

            # Extract information for each key
            for k in keys:
                if k == 'YEAR_AVERAGE_PRICE':
                    r = self.analysis.get_average_prices(
                        results_dir, f, None, 'PRICES', -1)
                    r = r.loc[:, 'average_price_real']

                else:
                    r = self.analysis.extract_results(results_dir,
                                                      f,
                                                      k,
                                                      stage=None,
                                                      iteration='max',
                                                      model=None)

                # Append results to main container
                results[k] = r.to_dict()

        # Save results
        if output_dir is not None:
            filename = 'bau_results.pickle'
            self.save_results(results, filename, output_dir)

        return results

    def extract_rep_results(self, results_dir, keys, output_dir=None):
        """Extract REP model results"""

        # All REP files
        filenames = [f for f in os.listdir(results_dir) if 'rep' in f]

        # Container for results
        results = {}

        for f in filenames:
            print(f'Processing: {f}')
            # Get carbon price from filename
            carbon_price = int(f.split('-')[1].replace('.pickle', ''))

            if carbon_price not in results.keys():
                results[carbon_price] = {}

            # Extract information for each key
            for k in keys:
                if k == 'YEAR_AVERAGE_PRICE':
                    r = self.analysis.get_average_prices(
                        results_dir, f, 'stage_2_rep', 'PRICES', -1)
                    r = r.loc[:, 'average_price_real']

                else:
                    r = self.analysis.extract_results(results_dir,
                                                      f,
                                                      k,
                                                      stage='stage_2_rep',
                                                      iteration='max',
                                                      model=None)

                # Append results to main container
                results[carbon_price][k] = r.to_dict()

        # Save results
        if output_dir is not None:
            filename = 'rep_results.pickle'
            self.save_results(results, filename, output_dir)

        return results

    def extract_carbon_tax_results(self, results_dir, keys, output_dir=None):
        """Extract carbon tax results"""

        # All REP files
        filenames = [f for f in os.listdir(results_dir) if 'rep' in f]

        # Container for results
        results = {}

        for f in filenames:
            print(f'Processing: {f}')
            # Get carbon price from filename
            carbon_price = int(f.split('-')[1].replace('.pickle', ''))

            if carbon_price not in results.keys():
                results[carbon_price] = {}

            # Extract information for each key
            for k in keys:
                if k == 'YEAR_AVERAGE_PRICE':
                    r = self.analysis.get_average_prices(
                        results_dir, f, 'stage_1_carbon_tax', 'PRICES', -1)
                    r = r.loc[:, 'average_price_real']

                else:
                    r = self.analysis.extract_results(
                        results_dir,
                        f,
                        k,
                        stage='stage_1_carbon_tax',
                        iteration='max',
                        model=None)

                # Append results to main container
                results[carbon_price][k] = r.to_dict()

        # Save results
        if output_dir is not None:
            filename = 'carbon_tax_results.pickle'
            self.save_results(results, filename, output_dir)

        return results

    def extract_price_targeting_results(self,
                                        filename_filter,
                                        results_dir,
                                        keys,
                                        output_dir=None):
        """Extract price targeting scenario model results"""

        # All files matching the filename filter
        filenames = [
            f for f in os.listdir(results_dir) if filename_filter in f
        ]

        # Container for results
        results = {}

        for f in filenames:
            print(f'Processing: {f}')
            # Get carbon price and transition year from filename
            transition_year = int(f.split('-')[1].replace('_cp', ''))
            carbon_price = int(f.split('-')[2].replace('.pickle', ''))

            if transition_year not in results.keys():
                results[transition_year] = {}

            if carbon_price not in results[transition_year].keys():
                results[transition_year][carbon_price] = {}

            # Extract information for each key
            for k in keys:
                if k == 'YEAR_AVERAGE_PRICE':
                    r = self.analysis.get_average_prices(
                        results_dir, f, 'stage_3_price_targeting', 'PRICES',
                        -1)
                    r = r.loc[:, 'average_price_real']

                else:
                    r = self.analysis.extract_results(
                        results_dir,
                        f,
                        k,
                        stage='stage_3_price_targeting',
                        iteration='max',
                        model='primal')

                # Append results to main container
                results[transition_year][carbon_price][k] = r.to_dict()

        # Save results
        if output_dir is not None:
            filename = f'{filename_filter}_results.pickle'
            self.save_results(results, filename, output_dir)

        return results

    @staticmethod
    def save_results(results, filename, output_dir):
        """Save model results"""

        print(f'Saving results: {filename}')
        with open(os.path.join(output_dir, filename), 'wb') as f:
            pickle.dump(results, f)

    def extract_all_results(self, results_dir, output_dir):
        """Extract results for different scenarios"""

        print('Extracting BAU results')
        bau_results_keys = [
            'YEAR_EMISSIONS', 'baseline', 'YEAR_AVERAGE_PRICE', 'x_c'
        ]
        self.extract_bau_results(results_dir, bau_results_keys, output_dir)

        # Extract these keys for all other scenarios
        result_keys = [
            'YEAR_EMISSIONS', 'baseline', 'YEAR_AVERAGE_PRICE',
            'YEAR_CUMULATIVE_SCHEME_REVENUE', 'x_c',
            'YEAR_SCHEME_EMISSIONS_INTENSITY', 'YEAR_EMISSIONS_INTENSITY'
        ]

        print('Extracting tax results')
        self.extract_carbon_tax_results(results_dir, result_keys, output_dir)

        print('Extracting REP results')
        self.extract_rep_results(results_dir, result_keys, output_dir)

        print('Extracting heuristic results')
        for i in ['heuristic_baudev', 'heuristic_ptar', 'heuristic_pdev']:
            self.extract_price_targeting_results(i, results_dir, result_keys,
                                                 output_dir)

    @staticmethod
    def load_results(directory, filename):
        """Load results"""

        with open(os.path.join(directory, filename), 'rb') as f:
            results = pickle.load(f)

        return results

    def combine_results(self, output_dir):
        """Combine extracted results"""

        # Load BAU, tax, and REP results
        bau = self.load_results(output_dir, 'bau_results.pickle')
        tax = self.load_results(output_dir, 'carbon_tax_results.pickle')
        rep = self.load_results(output_dir, 'rep_results.pickle')

        # Load price targeting results
        price_deviation = self.load_results(output_dir,
                                            'heuristic_pdev_results.pickle')
        bau_deviation = self.load_results(output_dir,
                                          'heuristic_baudev_results.pickle')
        trajectory_deviation = self.load_results(
            output_dir, 'heuristic_ptar_results.pickle')

        # Combine into single dictionary
        combined = {
            'bau': bau,
            'tax': tax,
            'rep': rep,
            'pdev': price_deviation,
            'baudev': bau_deviation,
            'ptar': trajectory_deviation
        }

        # Save combined results
        with open(os.path.join(output_dir, 'model_results.pickle'), 'wb') as f:
            pickle.dump(combined, f)

        return combined
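
An end-to-end usage sketch for the extractor; directory paths are placeholders:

# Hypothetical usage of ResultsExtractor
extractor = ResultsExtractor()
extractor.extract_all_results(results_dir='output', output_dir='tmp')
combined = extractor.combine_results(output_dir='tmp')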
Example #10
class ProbabilisticForecast:
    def __init__(self):
        self.data = ModelData()
        self.analysis = AnalyseResults()

    def get_probabilistic_energy_forecast(self, output_dir, year, week,
                                          n_intervals, eligible_generators,
                                          n_clusters):
        """Construct probabilistic forecast for energy output in future weeks for each generator"""

        # Get generator results
        energy = self.get_observed_energy(output_dir, range(2017, year + 1),
                                          week)

        # Construct regression models

        # Generate scenarios from regression model

        # Cluster scenarios

        # Get energy forecasts for each scenario

        # Get probabilities for each scenario

        pass

    def get_observed_energy(self, output_dir, years, week):
        """Get observed energy for all years and weeks up until the defined week and year"""

        # Container for energy output DataFrames
        dfs = []

        for y in years:
            for w in range(1, week + 1):
                for d in range(1, 8):
                    df_o = self.analysis.get_generator_interval_results(
                        output_dir, 'e', y, w, d)
                    dfs.append(df_o)

        # Concatenate DataFrames
        df_c = pd.concat(dfs)

        return df_c

    def construct_dataset(self, output_dir, years, week, lags=6,
                          future_intervals=3):
        """Construct dataset to be used in quantile regression models"""

        # Observed generator energy output for each dispatch interval
        observed = self.get_observed_energy(output_dir, years, week)

        # Aggregate energy output by week
        weekly_energy = observed.groupby(['year', 'week']).sum()

        # Lagged values
        lagged = pd.concat([
            weekly_energy.shift(i).add_suffix(f'_lag_{i}')
            for i in range(0, lags + 1)
        ],
                           axis=1)

        # Future values
        future = pd.concat([
            weekly_energy.shift(-i).add_suffix(f'_future_{i}')
            for i in range(1, future_intervals + 1)
        ],
                           axis=1)

        # Re-index so lagged and future observations have the same index
        # new_index = lagged.index.intersection(future.index).sort_values()
        # lagged = lagged.reindex(new_index)
        # future = future.reindex(new_index)

        return lagged, future

    def fit_model(self, x, y, duid):
        pass

    def construct_quantile_regression_models(self,
                                             output_dir,
                                             years,
                                             week,
                                             lags=6,
                                             future_intervals=3):
        """Construct quantile regression models for each DUID and future interval"""

        # Construct dataset
        lagged, future = self.construct_dataset(output_dir, years, week,
                                                lags=lags,
                                                future_intervals=future_intervals)

        # DUIDs
        duids = list(set([i.split('_future')[0] for i in future.columns]))
        duids.sort()

        # Container for quantile regression results
        results = {}

        # Run model for each quantile
        for duid in duids:
            results[duid] = {}

            # Lagged values
            x = pd.concat(
                [lagged.loc[:, f'{duid}_lag_{i}'] for i in range(0, lags + 1)],
                axis=1)
            x = x.dropna()

            # For each future interval range
            for f in range(1, future_intervals + 1):
                results[duid][f] = {}

                # Split independent and dependent variables
                y = future[f'{duid}_future_{f}']
                y = y.dropna()

                # Ensure index is the same
                new_index = y.index.intersection(x.index).sort_values()
                x = x.reindex(new_index)
                y = y.reindex(new_index)

                # Run model for each quantile
                for q in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9]:
                    # print(f'Fitting model: duid={duid}, future_interval={f}, quantile={q}')

                    try:
                        # Construct and fit model
                        m = sm.QuantReg(y, x)
                        res = m.fit(q=q)

                        # Make prediction for last time point
                        last_observation = lagged.loc[:, [
                            f'{duid}_lag_{i}' for i in range(0, lags + 1)
                        ]].iloc[-1].values
                        pred = res.predict(last_observation)[0]
                        results[duid][f][q] = pred

                    except ValueError:
                        results[duid][f][q] = None
                        # print(f'Failed for: duid={duid}, quantile={q}')

        return results
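
The returned object is a nested dictionary keyed by DUID, future interval, and quantile. A hedged access sketch (arguments and DUID are placeholders):

# Hypothetical usage of ProbabilisticForecast
forecast = ProbabilisticForecast()
results = forecast.construct_quantile_regression_models(
    output_dir='output', years=range(2017, 2019), week=20)

# results[duid][future_interval][quantile] -> predicted energy (None on failure)
prediction = results['GEN1'][1][0.9]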
Example #11
class MonteCarloForecast:
    def __init__(self):
        self.data = ModelData()
        self.analysis = AnalyseResults()

    def get_observed_energy(self, output_dir, years, week):
        """Get observed energy for all years and weeks up until the defined week and year"""

        # Container for energy output DataFrames
        dfs = []

        for y in years:
            # Update final week based on whether or not in final year
            if y == max(years):
                final_week = week
            else:
                final_week = 52

            for w in range(1, final_week + 1):
                for d in range(1, 8):
                    df_o = self.analysis.get_generator_interval_results(
                        output_dir, 'e', y, w, d)
                    dfs.append(df_o)

        # Concatenate DataFrames
        df_c = pd.concat(dfs)

        return df_c

    def get_weekly_energy(self, year, week, output_dir, start_year=2018):
        """Compute weekly generator energy output for all weeks preceding 'year' and 'week'"""

        df = self.get_observed_energy(output_dir, range(start_year, year + 1),
                                      week)
        energy = df.groupby(['year', 'week']).sum()

        return energy

    def get_max_energy(self, duid):
        """Compute max weekly energy output if generator output at max capacity for whole week"""

        # Max weekly energy output
        if duid in self.data.generators.index:
            max_energy = self.data.generators.loc[duid, 'REG_CAP'] * 24 * 7

        # Storage units must spend at least half the week charging (assumes equal charging and discharging rates)
        elif duid in self.data.storage.index:
            max_energy = (self.data.storage.loc[duid, 'REG_CAP'] * 24 * 7) / 2

        else:
            raise Exception(f'Unidentified DUID: {duid}')

        return max_energy

    def get_duid_scenarios(self, energy, duid, n_intervals, n_random_paths,
                           n_clusters):
        """Randomly sample based on difference in energy output between successive weeks"""

        # Max energy output
        max_energy = self.get_max_energy(duid)

        # Last observation for given DUID
        last_observation = energy[duid].iloc[-1]

        # Container for all random paths
        energy_paths = []

        for r in range(1, n_random_paths + 1):
            # Container for randomised calibration interval observations
            interval = [last_observation]
            for c in range(1, n_intervals + 1):
                # Random step sampled from the distribution of week-on-week changes
                update = np.random.normal(energy[duid].diff(1).mean(),
                                          energy[duid].diff(1).std())

                # New observation
                new_observation = last_observation + update

                # Ensure the new observation respects upper and lower energy bounds
                if new_observation > max_energy:
                    new_observation = max_energy
                elif new_observation < 0:
                    new_observation = 0

                # Append to container
                interval.append(new_observation)

            # Append to random paths container
            energy_paths.append(interval)

        # Construct K-means classifier and fit to randomised energy paths
        k_means = KMeans(n_clusters=n_clusters,
                         random_state=0).fit(energy_paths)

        # Get cluster centroids (these will be the energy paths used in the analysis)
        clusters = k_means.cluster_centers_

        # Get scenario energy in format to be consumed by model
        scenario_energy = {(duid, s, c): clusters[s - 1][c]
                           for s in range(1, n_clusters + 1)
                           for c in range(1, n_intervals + 1)}

        # Determine number of randomised paths assigned to each cluster
        assignment = np.unique(k_means.labels_, return_counts=True)

        # Weights depend on the number of paths assigned to each scenario
        scenario_weights = {(duid, k + 1): v / n_random_paths
                            for k, v in zip(assignment[0], assignment[1])}

        # Pad missing weights. May occur if all observations are assigned to one centroid.
        for i in range(1, n_clusters + 1):
            if (duid, i) not in scenario_weights.keys():
                scenario_weights[(duid, i)] = 0

        return scenario_energy, scenario_weights, energy_paths

    def get_scenarios(self, year, week, output_dir, start_year, n_intervals,
                      n_random_paths, n_clusters, eligible_generators):
        """Get scenarios for each DUID"""

        # Take into account end-of-year transition
        if week == 1:
            previous_interval_year = year - 1
            previous_interval_week = 52
        else:
            previous_interval_year = year
            previous_interval_week = week - 1

        # Compute energy output in all weeks prior to current week
        energy = self.get_weekly_energy(previous_interval_year,
                                        previous_interval_week,
                                        output_dir,
                                        start_year=start_year)

        # Containers for forecasts from all generators
        energy_combined, weights_combined = {}, {}

        # Construct scenarios for each DUID
        for duid in eligible_generators:
            print(f'Construct scenarios for: {duid}')
            # Get scenarios for each DUID
            s_energy, s_weights, s_paths = self.get_duid_scenarios(
                energy, duid, n_intervals, n_random_paths, n_clusters)

            # Add to main container
            energy_combined = {**energy_combined, **s_energy}
            weights_combined = {**weights_combined, **s_weights}

        return energy_combined, weights_combined
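
A self-contained sketch of the scenario-reduction idea in get_duid_scenarios: simulate random-walk energy paths, cluster them with K-means, and weight each centroid by the share of paths assigned to it. All numbers are illustrative:

import numpy as np
from sklearn.cluster import KMeans

np.random.seed(0)

# Simulate random-walk paths starting from a last observed energy value
last_observation, n_intervals = 100.0, 6
n_random_paths, n_clusters = 200, 5
paths = []
for _ in range(n_random_paths):
    path, obs = [last_observation], last_observation
    for _ in range(n_intervals):
        obs = min(max(obs + np.random.normal(0, 10), 0), 150)  # clip to [0, max]
        path.append(obs)
    paths.append(path)

# Centroids become scenarios; weights reflect cluster sizes
k_means = KMeans(n_clusters=n_clusters, random_state=0).fit(paths)
labels, counts = np.unique(k_means.labels_, return_counts=True)
weights = {int(k) + 1: c / n_random_paths for k, c in zip(labels, counts)}
print(weights)  # weights sum to 1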
Example #12
        # Plot results
        ax.plot(x, y, color=arg['color'], alpha=0.8)
        legend_labels.append(arg['label'])

    # Set legend and title
    ax.legend(legend_labels)
    ax.set_title(title)
    plt.show()


if __name__ == '__main__':
    output_directory = os.path.join(os.path.dirname(__file__), os.path.pardir,
                                    '3_model', 'linear', 'output', 'local')

    analysis = AnalyseResults()

    # Load results
    # r_bau = load_results(output_directory, 'bau_case.pickle')
    # r_m = load_results(output_directory, 'mppdc_ptar_ty-2025_cp-25.pickle')
    # r_h = load_results(output_directory, 'heuristic_ptar_ty-2025_cp-25.pickle')
    #
    # # Compute average prices
    # p_m = analysis.get_year_average_price(r_m['stage_3_price_targeting'][max(r_m['stage_3_price_targeting'].keys())]['lamb'], factor=1)
    # p_h = analysis.get_year_average_price(r_h['stage_3_price_targeting'][max(r_h['stage_3_price_targeting'].keys())]['primal']['PRICES'], factor=-1)
    # p_bau = analysis.get_year_average_price(r_bau['PRICES'], factor=-1)
    #
    # # Extract baselines
    # b_m = r_m['stage_3_price_targeting'][max(r_m['stage_3_price_targeting'].keys())]['baseline']
    # b_h = r_h['stage_3_price_targeting'][max(r_h['stage_3_price_targeting'].keys())]['primal']['baseline']
    #
Example #13
    plt.show()


if __name__ == '__main__':
    # Output directory
    output_directory = os.path.join(os.path.dirname(__file__), 'output',
                                    'figures')

    # Results directory
    results_directory = os.path.join(os.path.dirname(__file__), os.path.pardir,
                                     '3_model', 'linear', 'output', 'local')
    # results_directory = r'C:\Users\eee\Desktop\local_hold\20191017\3_no_existing_storage'

    # Object used to analyse results
    analysis = AnalyseResults()

    # # Plot merit order (base image)
    # plot_merit_order(output_directory)
    # plt.show()
    #
    # # Plot merit order + demand
    # plot_merit_order_demand(output_directory)
    # plt.show()
    #
    # # Plot merit order + price setter
    # plot_merit_order_price_setter(output_directory)
    # plt.show()
    #
    # # # Plot generation expansion planning model prices
    # plot_gep_prices(results_directory, output_directory)
Example #14
        ax.plot(x, y1, color='r')
        ax.plot(x, y2, color='b')
        ax2.plot(x, y12, color='k')
        plt.show()


if __name__ == '__main__':
    results_directory = os.path.join(os.path.dirname(__file__), os.path.pardir,
                                     '3_model', 'linear', 'output', 'local')
    tmp_directory = os.path.join(os.path.dirname(__file__), 'output', 'tmp',
                                 'local')
    figures_directory = os.path.join(os.path.dirname(__file__), 'output',
                                     'figures')

    # Object used to analyse results and get price target trajectory
    analysis = AnalyseResults()

    # Get plot data
    plot_data = PlotData(tmp_directory)
    plots = CreatePlots(tmp_directory, figures_directory)

    # Get BAU price trajectories
    bau = analysis.load_results(results_directory, 'bau_case.pickle')
    bau_prices = analysis.get_year_average_price(bau['PRICES'], -1)
    bau_price_trajectory = bau_prices['average_price_real'].to_dict()
    bau_first_year_trajectory = {
        y: bau_price_trajectory[2016]
        for y in range(2016, 2031)
    }

    # Create plots
Example #15
    def __init__(self):
        self.data = ModelData()
        self.analysis = AnalyseResults()
Example #16
    def __init__(self, results_dir):
        self.results = self.load_model_results(results_dir)
        self.analysis = AnalyseResults()
Example #17
class Targets:
    def __init__(self):
        # Object used to analyse results
        self.analysis = AnalyseResults()

    @staticmethod
    def get_year_emission_intensity_target(initial_emissions_intensity,
                                           half_life, year, start_year):
        """Get half-life emissions intensity target for each year in model horizon"""

        # Re-index such that first year in model horizon is t = 0
        t = year - start_year

        exponent = (-t / half_life)
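
        # Worked example (hypothetical numbers): with initial_emissions_intensity
        # = 1.0 tCO2/MWh, half_life = 10 and year - start_year = 10, the target
        # is 1.0 * 2 ** (-10 / 10) = 0.5 tCO2/MWh, i.e. the target halves every
        # 10 years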

        return initial_emissions_intensity * (2**exponent)

    def get_emissions_intensity_target(self, half_life):
        """Get sequence of yearly emissions intensity targets"""

        # Get emissions intensities for each year of model horizon - BAU case
        df_bau = self.analysis.get_year_system_emissions_intensities(
            'primal_bau_results.pickle')
        df_bau = df_bau.rename(
            columns={'emissions_intensity': 'bau_emissions_intensity'})

        # First and last years of model horizon
        start, end = df_bau.index[[0, -1]]

        # Initial emissions intensity
        E_0 = df_bau.loc[start, 'bau_emissions_intensity']

        # Emissions intensity target sequence
        target_sequence = {
            y: self.get_year_emission_intensity_target(E_0, half_life, y,
                                                       start)
            for y in range(start, end + 1)
        }

        # Convert to DataFrame
        df_sequence = pd.Series(target_sequence).rename_axis('year').to_frame(
            'emissions_intensity_target')

        # Combine with bau emissions intensities
        df_c = pd.concat([df_bau, df_sequence], axis=1)

        return df_c

    def get_first_year_average_real_bau_price(self):
        """Get average price in first year of model horizon"""

        # Get average price in first year of model horizon (real price)
        prices = self.analysis.get_year_average_price(
            'primal_bau_results.pickle')

        return prices.iloc[0]['average_price_real']

    @staticmethod
    def load_emissions_intensity_target(filename):
        """Load emissions intensity target"""

        # Load emissions target from file
        with open(os.path.join(os.path.dirname(__file__), 'output', filename),
                  'r') as f:
            target = json.load(f)

        # Convert keys from strings to integers
        target = {int(k): v for k, v in target.items()}

        return target

    @staticmethod
    def load_first_year_average_bau_price(filename):
        """Load average price in first year - BAU scenario"""

        # Load price from file
        with open(os.path.join(os.path.dirname(__file__), 'output', filename),
                  'r') as f:
            price = json.load(f)

        return price['first_year_average_price']

    def get_cumulative_emissions_target(self, filename, frac):
        """
        Load emissions target

        Parameters
        ----------
        filename : str
            Name of results file on which emissions target will be based

        frac : float
            Target emissions reduction. E.g. 0.5 would imply emissions should be less than or equal to 50% of total
            emissions observed in results associated with 'filename'
        """

        return float(self.analysis.get_total_emissions(filename) * frac)

    @staticmethod
    def load_cumulative_emissions_target():
        """Load cumulative emissions target"""

        with open(
                os.path.join(os.path.dirname(__file__), 'output',
                             'cumulative_emissions_target.json'), 'r') as f:
            emissions_target = json.load(f)

        return emissions_target['cumulative_emissions_target']

    def get_interim_emissions_target(self, filename):
        """Load total emissions in each year when pursuing a cumulative emissions cap"""

        # Get emissions in each year of model horizon when pursuing cumulative target
        year_emissions = self.analysis.get_year_emissions(filename)

        return year_emissions

    @staticmethod
    def load_interim_emissions_target():
        """Load interim emissions target"""

        with open(
                os.path.join(os.path.dirname(__file__), 'output',
                             'interim_emissions_target.json'), 'r') as f:
            emissions_target = json.load(f)

        # Convert years to integers
        emissions_target = {int(k): v for k, v in emissions_target.items()}

        return emissions_target

    def get_cumulative_emissions_cap_carbon_price(self):
        """Get carbon price from cumulative emissions cap model results"""

        # Results
        results = self.analysis.load_results(
            'cumulative_emissions_cap_results.pickle')

        return results['CUMULATIVE_EMISSIONS_CAP_CONS_DUAL']

    def get_interim_emissions_cap_carbon_price(self):
        """Get carbon price from interim emissions cap model results"""

        # Results
        results = self.analysis.load_results(
            'interim_emissions_cap_results.pickle')

        return results['INTERIM_EMISSIONS_CAP_CONS_DUAL']

    @staticmethod
    def get_envelope(n_0, half_life, first_year, year):
        """Get revenue envelope level for a given year"""

        # Year with first year = 0
        t = year - first_year

        return n_0 * np.power(0.5, (t / half_life))
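
A minimal usage sketch for the Targets class above (the half-life, fraction, envelope level and horizon are illustrative values, and 'primal_bau_results.pickle' is assumed to exist in the expected output directory):

targets = Targets()

# Yearly emissions intensity targets that halve every 15 years, benchmarked to BAU
df_targets = targets.get_emissions_intensity_target(half_life=15)

# Cap cumulative emissions at 50% of those observed in the BAU case
cap = targets.get_cumulative_emissions_target('primal_bau_results.pickle', frac=0.5)

# Revenue envelope starting at 100 and halving every 5 years over 2016-2030
envelope = {y: targets.get_envelope(100, 5, 2016, y) for y in range(2016, 2031)}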
Example #18
File: results.py  Project: akxen/rep-gep
    def __init__(self):
        self.analysis = AnalyseResults()
Example #19
class ModelCases:
    def __init__(self, output_dir, log_name):
        logging.basicConfig(
            filename=os.path.join(output_dir, f'{log_name}.log'),
            filemode='a',
            format='%(asctime)s %(name)s %(levelname)s %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S',
            level=logging.DEBUG)

        # Used to parse prices and analyse results
        self.analysis = AnalyseResults()

        # Get scheme targets
        self.targets = Targets()

    @staticmethod
    def algorithm_logger(function, message, print_message=False):
        """Write message to logfile and optionally print output"""

        # Get logging object
        logger = logging.getLogger(__name__)

        # Message to write to logfile
        log_message = f'{function} - {message}'

        if print_message:
            print(log_message)

        logger.info(log_message)

    @staticmethod
    def extract_result(m, component_name):
        """Extract values associated with model components"""

        model_component = getattr(m, component_name)
        component_type = type(model_component)

        if component_type == pyomo.core.base.expression.IndexedExpression:
            return {k: model_component[k].expr() for k in model_component.keys()}

        elif component_type == pyomo.core.base.expression.SimpleExpression:
            return model_component.expr()

        elif component_type == pyomo.core.base.var.SimpleVar:
            return model_component.value

        elif component_type == pyomo.core.base.var.IndexedVar:
            return model_component.get_values()

        elif component_type == pyomo.core.base.param.IndexedParam:
            try:
                return {k: v.value for k, v in model_component.items()}
            except AttributeError:
                return {k: v for k, v in model_component.items()}

        elif component_type == pyomo.core.base.param.SimpleParam:
            return model_component.value

        elif component_type == pyomo.core.base.objective.SimpleObjective:
            return model_component.expr()

        else:
            raise Exception(f'Unexpected model component: {component_name}')

    @staticmethod
    def save_results(results, output_dir, filename):
        """Save model results"""

        with open(os.path.join(output_dir, filename), 'wb') as f:
            pickle.dump(results, f)

    @staticmethod
    def get_hash(params):
        """Get hash string of model parameters. Used to identify cases in log file."""

        return hashlib.sha224(str(params).encode('utf-8',
                                                 'ignore')).hexdigest()[:10]

    @staticmethod
    def save_hash(case_id, params, output_dir):
        """Save case ID and associated parameters to file"""

        # Include case ID in dictionary
        params['case_id'] = case_id

        # Save case IDs and all associated params to file
        with open(os.path.join(output_dir, 'case_ids.txt'), 'a+') as f:
            f.write(str(params) + '\n')

    @staticmethod
    def save_solution_summary(summary, output_dir):
        """Save solution summary"""

        # Save summary of total solution time + number of iterations (if specified)
        with open(os.path.join(output_dir, 'solution_summary.txt'), 'a+') as f:
            f.write(str(summary) + '\n')

    def get_bau_initial_price(self, output_dir, first_year):
        """Get BAU price in first year"""

        # Load BAU results
        with open(os.path.join(output_dir, 'bau_case.pickle'), 'rb') as f:
            results = pickle.load(f)

        # Get BAU average price in first year
        prices = self.analysis.get_year_average_price(results['PRICES'],
                                                      factor=-1)
        initial_price = prices.loc[first_year, 'average_price_real']

        return initial_price

    @staticmethod
    def get_successive_iteration_difference(i_input, i_output, key):
        """Get max absolute difference between successive iterations for a particular model component"""

        return max([
            abs(i_input[key][k] - i_output[key][k])
            for k in i_input[key].keys()
        ])

    @staticmethod
    def run_mppdc_fixed_policy(final_year,
                               scenarios_per_year,
                               permit_prices,
                               baselines,
                               include_primal_constraints=True):
        """Run MPPDC model with fixed policy parameters"""

        # Initialise object and model used to run MPPDC model
        mppdc = MPPDCModel(final_year, scenarios_per_year)
        m = mppdc.construct_model(
            include_primal_constraints=include_primal_constraints)

        # Fix permit prices and baselines
        for y in m.Y:
            m.permit_price[y].fix(permit_prices[y])
            m.baseline[y].fix(baselines[y])

        # Solve MPPDC model with fixed policy parameters
        m, status = mppdc.solve_model(m)

        return m, status

    @staticmethod
    def run_primal_fixed_policy(start_year, final_year, scenarios_per_year,
                                permit_prices, baselines):
        """Run primal model with fixed policy parameters"""

        # Initialise object and model used to run primal model
        primal = Primal(start_year, final_year, scenarios_per_year)
        m = primal.construct_model()

        # Fix permit prices and baselines to specified levels
        for y in m.Y:
            m.permit_price[y].fix(permit_prices[y])
            m.baseline[y].fix(baselines[y])

        # Solve primal model with fixed policy parameters
        m, status = primal.solve_model(m)

        return m, status

    def run_bau_case(self, params, output_dir, overwrite=False):
        """Run business-as-usual case"""

        # Case filename
        filename = 'bau_case.pickle'

        # Check if case exists
        if (not overwrite) and (filename in os.listdir(output_dir)):
            print(f'Already solved: {filename}')
            return

        # Construct hash for case
        case_id = self.get_hash(params)

        # Save case params and associated hash
        self.save_hash(case_id, params, output_dir)

        # Extract case parameters for model
        start, end, scenarios = params['start'], params['end'], params['scenarios']

        # Start timer for case run
        t_start = time.time()

        message = f"""Starting case: first_year={start}, final_year={end}, scenarios_per_year={scenarios}"""
        self.algorithm_logger('run_bau_case', message)

        # Permit prices and emissions intensity baselines for BAU case (all 0)
        permit_prices = {y: float(0) for y in range(start, end + 1)}
        baselines = {y: float(0) for y in range(start, end + 1)}

        # Run model
        self.algorithm_logger('run_bau_case', 'Starting solve')
        m, status = self.run_primal_fixed_policy(start, end, scenarios,
                                                 permit_prices, baselines)
        log_infeasible_constraints(m)
        self.algorithm_logger('run_bau_case', 'Finished solve')

        # Results to extract
        result_keys = [
            'x_c', 'p', 'p_V', 'p_in', 'p_out', 'p_L', 'baseline',
            'permit_price', 'YEAR_EMISSIONS', 'YEAR_EMISSIONS_INTENSITY',
            'YEAR_SCHEME_REVENUE', 'TOTAL_SCHEME_REVENUE', 'C_MC', 'ETA',
            'DELTA', 'RHO', 'EMISSIONS_RATE', 'OBJECTIVE'
        ]

        # Model results
        results = {k: self.extract_result(m, k) for k in result_keys}

        # Add dual variable from power balance constraint
        results['PRICES'] = {
            k: m.dual[m.POWER_BALANCE[k]]
            for k in m.POWER_BALANCE.keys()
        }

        # Save results
        self.save_results(results, output_dir, filename)

        # Combine output in dictionary. To be returned by method.
        output = {'results': results, 'model': m, 'status': status}

        # Solution summary
        solution_summary = {
            'case_id': case_id,
            'mode': params['mode'],
            'total_solution_time': time.time() - t_start
        }
        self.save_solution_summary(solution_summary, output_dir)

        self.algorithm_logger(
            'run_bau_case',
            f'Finished BAU case: case_id={case_id}, total_solution_time={time.time() - t_start}s'
        )

        return output

    def run_rep_case(self, params, output_dir, overwrite=False):
        """Run carbon tax scenario"""

        # Extract case parameters
        start, end, scenarios = params['start'], params['end'], params['scenarios']
        permit_prices = params['permit_prices']

        # First run carbon tax case
        baselines = {y: float(0) for y in range(start, end + 1)}

        # Check that carbon tax is same for all years in model horizon
        assert len(set(permit_prices.values())) == 1, \
            f'Permit price trajectory is not flat: {permit_prices}'

        # Extract carbon price in first year (the same in all years). Used in the filename.
        carbon_price = permit_prices[start]

        # Filename for REP case
        filename = f'rep_cp-{carbon_price:.0f}.pickle'

        # Check if model has already been solved
        if (not overwrite) and (filename in os.listdir(output_dir)):
            print(f'Already solved: {filename}')
            return

        # Construct hash for case ID
        case_id = self.get_hash(params)

        # Save hash and associated parameters
        self.save_hash(case_id, params, output_dir)

        # Start timer for model run
        t_start = time.time()

        self.algorithm_logger('run_rep_case',
                              'Starting case with params: ' + str(params))

        # Results to extract
        result_keys = [
            'x_c', 'p', 'p_V', 'p_in', 'p_out', 'q', 'p_L', 'baseline',
            'permit_price', 'YEAR_EMISSIONS', 'YEAR_EMISSIONS_INTENSITY',
            'YEAR_SCHEME_REVENUE', 'TOTAL_SCHEME_REVENUE', 'C_MC', 'ETA',
            'DELTA', 'RHO', 'EMISSIONS_RATE',
            'YEAR_SCHEME_EMISSIONS_INTENSITY',
            'YEAR_CUMULATIVE_SCHEME_REVENUE', 'OBJECTIVE'
        ]

        # Run carbon tax case
        self.algorithm_logger('run_rep_case', 'Starting carbon tax case solve')
        m, status = self.run_primal_fixed_policy(start, end, scenarios,
                                                 permit_prices, baselines)
        log_infeasible_constraints(m)
        self.algorithm_logger('run_rep_case', 'Finished carbon tax case solve')

        # Model results
        carbon_tax_results = {
            k: self.extract_result(m, k)
            for k in result_keys
        }

        # Add dual variable from power balance constraint
        carbon_tax_results['PRICES'] = {
            k: m.dual[m.POWER_BALANCE[k]]
            for k in m.POWER_BALANCE.keys()
        }

        # Update baselines so they = emissions intensity of output from participating generators
        baselines = carbon_tax_results['YEAR_SCHEME_EMISSIONS_INTENSITY']

        # Container for iteration results
        i_results = dict()

        # Iteration counter
        counter = 1

        # Initialise iteration input to carbon tax scenario results (used to check stopping criterion)
        i_input = carbon_tax_results

        while True:
            # Re-run model with new baselines
            self.algorithm_logger(
                'run_rep_case', f'Starting solve for REP iteration={counter}')
            m, status = self.run_primal_fixed_policy(start, end, scenarios,
                                                     permit_prices, baselines)
            log_infeasible_constraints(m)
            self.algorithm_logger(
                'run_rep_case', f'Finished solve for REP iteration={counter}')

            # Model results
            i_output = {k: self.extract_result(m, k) for k in result_keys}

            # Get dual variable values from power balance constraint
            i_output['PRICES'] = {
                k: m.dual[m.POWER_BALANCE[k]]
                for k in m.POWER_BALANCE.keys()
            }

            # Add results to iteration results container
            i_results[counter] = copy.deepcopy(i_output)

            # Check max absolute capacity difference between successive iterations
            max_capacity_difference = self.get_successive_iteration_difference(
                i_input, i_output, 'x_c')
            print(
                f'{counter}: Maximum capacity difference = {max_capacity_difference} MW'
            )

            # Max absolute baseline difference between successive iterations
            max_baseline_difference = self.get_successive_iteration_difference(
                i_input, i_output, 'baseline')
            print(
                f'{counter}: Maximum baseline difference = {max_baseline_difference} tCO2/MWh'
            )

            # If max absolute difference between successive iterations is sufficiently small stop iterating
            if max_baseline_difference < 0.05:
                break

            # If iteration limit exceeded
            elif counter > 9:
                message = 'Max iterations exceeded. Exiting loop.'
                print(message)
                self.algorithm_logger('run_rep_case', message)
                break

            # Update iteration inputs (used to check stopping criterion in next iteration)
            i_input = copy.deepcopy(i_output)

            # Update baselines to be used in next iteration
            baselines = i_output['YEAR_SCHEME_EMISSIONS_INTENSITY']

            # Update iteration counter
            counter += 1

        # Combine results into single dictionary
        results = {
            'stage_1_carbon_tax': carbon_tax_results,
            'stage_2_rep': i_results
        }

        # Save results
        self.save_results(results, output_dir, filename)

        # Dictionary to be returned by method
        output = {'results': results, 'model': m, 'status': status}
        self.algorithm_logger('run_rep_case', 'Finished REP case')

        # Total number of iterations processed
        total_iterations = max(i_results.keys())

        # Save summary of the solution time
        solution_summary = {
            'case_id': case_id,
            'mode': params['mode'],
            'carbon_price': carbon_price,
            'total_solution_time': time.time() - t_start,
            'total_iterations': total_iterations,
            'max_capacity_difference': max_capacity_difference,
            'max_baseline_difference': max_baseline_difference
        }

        self.save_solution_summary(solution_summary, output_dir)

        message = 'Finished REP case: ' + str(solution_summary)
        self.algorithm_logger('run_rep_case', message)

        return output

    def run_price_smoothing_heuristic_case(self,
                                           params,
                                           output_dir,
                                           overwrite=False):
        """Smooth prices over entire model horizon using approximated price functions"""

        # Get filename based on run mode
        if params['mode'] == 'bau_deviation_minimisation':
            filename = f"heuristic_baudev_ty-{params['transition_year']}_cp-{params['carbon_price']}.pickle"

        elif params['mode'] == 'price_change_minimisation':
            filename = f"heuristic_pdev_ty-{params['transition_year']}_cp-{params['carbon_price']}.pickle"

        elif params['mode'] == 'price_target':
            filename = f"heuristic_ptar_ty-{params['transition_year']}_cp-{params['carbon_price']}.pickle"

        else:
            raise Exception(f"Unexpected run mode: {params['mode']}")

        # Check if case already solved
        if (not overwrite) and (filename in os.listdir(output_dir)):
            print(f'Already solved: {filename}')
            return

        # Get hash for case
        case_id = self.get_hash(params)

        # Save case ID and associated model parameters
        self.save_hash(case_id, params, output_dir)

        # Start timer for model run
        t_start = time.time()

        self.algorithm_logger('run_price_smoothing_heuristic_case',
                              'Starting case with params: ' + str(params))

        # Load REP results
        with open(os.path.join(output_dir, params['rep_filename']), 'rb') as f:
            rep_results = pickle.load(f)

        # Get results corresponding to last iteration of REP solution
        rep_iteration = rep_results['stage_2_rep'][max(
            rep_results['stage_2_rep'].keys())]

        # Model parameters used to initialise classes that construct and run models
        start, end, scenarios = params['start'], params['end'], params['scenarios']
        bau_initial_price = self.get_bau_initial_price(output_dir, start)

        # Classes used to construct and run primal and MPPDC programs
        primal = Primal(start, end, scenarios)
        baseline = BaselineUpdater(start, end, scenarios,
                                   params['transition_year'])

        # Construct primal program
        m_p = primal.construct_model()

        # Results to extract from primal program
        primal_keys = [
            'x_c', 'p', 'p_V', 'p_in', 'p_out', 'p_L', 'baseline',
            'permit_price', 'YEAR_EMISSIONS', 'YEAR_EMISSIONS_INTENSITY',
            'YEAR_SCHEME_REVENUE', 'TOTAL_SCHEME_REVENUE', 'C_MC', 'ETA',
            'DELTA', 'RHO', 'EMISSIONS_RATE', 'YEAR_CUMULATIVE_SCHEME_REVENUE',
            'YEAR_SCHEME_EMISSIONS_INTENSITY', 'OBJECTIVE'
        ]

        # Results to extract from baseline targeting model
        baseline_keys = [
            'YEAR_AVERAGE_PRICE', 'YEAR_AVERAGE_PRICE_0',
            'YEAR_ABSOLUTE_PRICE_DIFFERENCE',
            'TOTAL_ABSOLUTE_PRICE_DIFFERENCE', 'PRICE_WEIGHTS',
            'YEAR_SCHEME_REVENUE', 'YEAR_CUMULATIVE_SCHEME_REVENUE', 'baseline'
        ]

        # Container for iteration results
        i_results = dict()

        # Initialise price setting generator input as results obtained from final REP iteration
        psg_input = rep_iteration

        # Initialise iteration counter
        counter = 1

        while True:
            self.algorithm_logger('run_price_smoothing_heuristic_case',
                                  f'Starting iteration={counter}')

            # Identify price setting generators
            psg = baseline.prices.get_price_setting_generators_from_model_results(
                psg_input)

            # Construct model used to calibrate baseline
            m_b = baseline.construct_model(psg)

            # Update parameters
            m_b = baseline.update_parameters(m_b, psg_input)
            m_b.YEAR_AVERAGE_PRICE_0 = float(bau_initial_price)
            m_b.PRICE_WEIGHTS.store_values(params['price_weights'])

            # Activate constraints and objectives depending on case being run
            m_b.NON_NEGATIVE_TRANSITION_REVENUE_CONS.activate()

            if params['mode'] == 'bau_deviation_minimisation':
                # Set the price target to be BAU price
                bau_price_target = {y: bau_initial_price for y in m_b.Y}
                m_b.YEAR_AVERAGE_PRICE_TARGET.store_values(bau_price_target)

                # Activate price targeting constraints and objective
                m_b.PRICE_TARGET_DEVIATION_1.activate()
                m_b.PRICE_TARGET_DEVIATION_2.activate()
                m_b.OBJECTIVE_PRICE_TARGET_DIFFERENCE.activate()

                # Append objective name so its value can be extracted with the other results
                baseline_keys.append('OBJECTIVE_PRICE_TARGET_DIFFERENCE')

            elif params['mode'] == 'price_change_minimisation':
                # Activate constraints penalising price deviations over successive years
                m_b.PRICE_CHANGE_DEVIATION_1.activate()
                m_b.PRICE_CHANGE_DEVIATION_2.activate()
                m_b.OBJECTIVE_PRICE_DEVIATION.activate()

                # Append objective name so its value can be extracted with the other results
                baseline_keys.append('OBJECTIVE_PRICE_DEVIATION')

            elif params['mode'] == 'price_target':
                # Set target price trajectory to prices obtained from BAU model over same period
                m_b.YEAR_AVERAGE_PRICE_TARGET.store_values(
                    params['price_target'])

                # Activate price targeting constraints and objective function
                m_b.PRICE_TARGET_DEVIATION_1.activate()
                m_b.PRICE_TARGET_DEVIATION_2.activate()
                m_b.OBJECTIVE_PRICE_TARGET_DIFFERENCE.activate()

                # Append objective name so its value can be extracted with the other results
                baseline_keys.append('OBJECTIVE_PRICE_TARGET_DIFFERENCE')

            else:
                raise Exception(f"Unexpected run mode: {params['mode']}")

            for y in m_b.Y:
                if y >= params['transition_year']:
                    m_b.YEAR_NET_SCHEME_REVENUE_NEUTRAL_CONS[y].activate()

            # Solve model
            m_b, m_b_status = baseline.solve_model(m_b)
            r_b = copy.deepcopy(
                {k: self.extract_result(m_b, k)
                 for k in baseline_keys})

            # Update baselines and permit prices in primal model
            for y in m_p.Y:
                m_p.baseline[y].fix(m_b.baseline[y].value)
                m_p.permit_price[y].fix(m_b.PERMIT_PRICE[y].value)

            # Solve primal program
            m_p, m_p_status = primal.solve_model(m_p)

            # Log all infeasible constraints
            log_infeasible_constraints(m_p)

            # Get results
            r_p = copy.deepcopy(
                {v: self.extract_result(m_p, v)
                 for v in primal_keys})
            r_p['PRICES'] = copy.deepcopy({
                k: m_p.dual[m_p.POWER_BALANCE[k]]
                for k in m_p.POWER_BALANCE.keys()
            })
            i_results[counter] = {'primal': r_p, 'auxiliary': r_b}

            # Max difference in capacity sizing decision between iterations
            max_capacity_difference = self.get_successive_iteration_difference(
                psg_input, r_p, 'x_c')
            print(f'Max capacity difference: {max_capacity_difference} MW')

            # Max absolute baseline difference between successive iterations
            max_baseline_difference = self.get_successive_iteration_difference(
                psg_input, r_p, 'baseline')
            print(
                f'{counter}: Maximum baseline difference = {max_baseline_difference} tCO2/MWh'
            )

            self.algorithm_logger('run_price_smoothing_heuristic_case',
                                  f'Finished iteration={counter}')

            # If baseline difference between successive iterations is sufficiently small then stop
            if max_baseline_difference < 0.05:
                break

            # Stop iterating if max iteration limit exceeded
            elif counter > 9:
                message = 'Max iterations exceeded. Exiting loop.'
                print(message)
                self.algorithm_logger('run_price_smoothing_heuristic_case',
                                      message)
                break

            else:
                # Update dictionary of price setting generator program inputs
                psg_input = r_p

            # Update iteration counter
            counter += 1

        self.algorithm_logger('run_price_smoothing_heuristic_case',
                              'Finished solving model')

        # Combine results into a single dictionary
        results = {
            **rep_results, 'stage_3_price_targeting': i_results,
            'parameters': params
        }

        # Save results
        self.save_results(results, output_dir, filename)

        # Combine output for method (can be used for debugging)
        output = {
            'auxiliary_model': m_b,
            'auxiliary_status': m_b_status,
            'primal_model': m_p,
            'primal_status': m_p_status,
            'results': results
        }

        # Total iterations
        total_iterations = max(i_results.keys())

        # Save summary of the solution time
        solution_summary = {
            'case_id': case_id,
            'mode': params['mode'],
            'carbon_price': params['carbon_price'],
            'transition_year': params['transition_year'],
            'total_solution_time': time.time() - t_start,
            'total_iterations': total_iterations,
            'max_capacity_difference': max_capacity_difference,
            'max_baseline_difference': max_baseline_difference
        }
        self.save_solution_summary(solution_summary, output_dir)

        message = f"Finished heuristic case: " + str(solution_summary)
        self.algorithm_logger('run_price_smoothing_heuristic_case', message)

        return output

    def run_price_smoothing_mppdc_case(self,
                                       params,
                                       output_dir,
                                       overwrite=False):
        """Run case to smooth prices over model horizon, subject to total revenue constraint"""

        # Get case filename based on run mode
        if params['mode'] == 'bau_deviation_minimisation':
            filename = f"mppdc_baudev_ty-{params['transition_year']}_cp-{params['carbon_price']}.pickle"

        elif params['mode'] == 'price_change_minimisation':
            filename = f"mppdc_pdev_ty-{params['transition_year']}_cp-{params['carbon_price']}.pickle"

        elif params['mode'] == 'price_target':
            filename = f"mppdc_ptar_ty-{params['transition_year']}_cp-{params['carbon_price']}.pickle"

        else:
            raise Exception(f"Unexpected run mode: {params['mode']}")

        # Check if case already solved
        if (not overwrite) and (filename in os.listdir(output_dir)):
            print(f'Already solved: {filename}')
            return

        # Construct hash for case ID
        case_id = self.get_hash(params)

        # Save hash and associated parameters
        self.save_hash(case_id, params, output_dir)

        # Start timer for model run
        t_start = time.time()

        self.algorithm_logger(
            'run_price_smoothing_mppdc_case',
            'Starting MPPDC case with params: ' + str(params))

        # Load REP results
        with open(os.path.join(output_dir, params['rep_filename']), 'rb') as f:
            rep_results = pickle.load(f)

        # Get results corresponding to last iteration of REP solution
        rep_iteration = rep_results['stage_2_rep'][max(
            rep_results['stage_2_rep'].keys())]

        # Extract parameters from last iteration of REP program results
        start, end, scenarios = params['start'], params['end'], params['scenarios']
        bau_initial_price = self.get_bau_initial_price(output_dir, start)

        # Classes used to construct and run primal and MPPDC programs
        mppdc = MPPDCModel(start, end, scenarios, params['transition_year'])
        primal = Primal(start, end, scenarios)

        # Construct MPPDC
        m_m = mppdc.construct_model(include_primal_constraints=True)

        # Construct primal model
        m_p = primal.construct_model()

        # Update MPPDC model parameters
        m_m.YEAR_AVERAGE_PRICE_0 = float(bau_initial_price)
        m_m.PRICE_WEIGHTS.store_values(params['price_weights'])

        # Activate necessary constraints depending on run mode
        m_m.NON_NEGATIVE_TRANSITION_REVENUE_CONS.activate()

        if params['mode'] == 'bau_deviation_minimisation':
            m_m.PRICE_BAU_DEVIATION_1.activate()
            m_m.PRICE_BAU_DEVIATION_2.activate()

        elif params['mode'] == 'price_change_minimisation':
            m_m.PRICE_CHANGE_DEVIATION_1.activate()
            m_m.PRICE_CHANGE_DEVIATION_2.activate()

        elif params['mode'] == 'price_target':
            m_m.YEAR_AVERAGE_PRICE_TARGET.store_values(params['price_target'])
            m_m.PRICE_TARGET_DEVIATION_1.activate()
            m_m.PRICE_TARGET_DEVIATION_2.activate()

        else:
            raise Exception(f"Unexpected run mode: {params['mode']}")

        for y in m_m.Y:
            if y >= params['transition_year']:
                m_m.YEAR_NET_SCHEME_REVENUE_NEUTRAL_CONS[y].activate()

        # Primal variables
        primal_vars = [
            'x_c', 'p', 'p_in', 'p_out', 'q', 'p_V', 'p_L', 'permit_price'
        ]
        fixed_vars = {v: rep_iteration[v] for v in primal_vars}

        # Results to extract from MPPDC model
        mppdc_keys = [
            'x_c', 'p', 'p_V', 'p_in', 'p_out', 'p_L', 'q', 'baseline',
            'permit_price', 'lamb', 'YEAR_EMISSIONS',
            'YEAR_EMISSIONS_INTENSITY', 'YEAR_SCHEME_EMISSIONS_INTENSITY',
            'YEAR_SCHEME_REVENUE', 'YEAR_CUMULATIVE_SCHEME_REVENUE',
            'TOTAL_SCHEME_REVENUE', 'YEAR_ABSOLUTE_PRICE_DIFFERENCE',
            'YEAR_AVERAGE_PRICE_0', 'YEAR_AVERAGE_PRICE',
            'YEAR_SUM_CUMULATIVE_PRICE_DIFFERENCE_WEIGHTED', 'OBJECTIVE',
            'YEAR_ABSOLUTE_PRICE_DIFFERENCE_WEIGHTED',
            'TOTAL_ABSOLUTE_PRICE_DIFFERENCE_WEIGHTED',
            'YEAR_CUMULATIVE_PRICE_DIFFERENCE_WEIGHTED', 'sd_1', 'sd_2',
            'STRONG_DUALITY_VIOLATION_COST', 'TRANSITION_YEAR', 'PRICE_WEIGHTS'
        ]

        # Container for iteration results
        i_results = {}

        # Initialise iteration counter
        counter = 1

        # Placeholder for max difference variables
        max_baseline_difference = None
        max_capacity_difference = None

        while True:
            self.algorithm_logger('run_price_smoothing_mppdc_case',
                                  f'Starting iteration={counter}')

            # Fix MPPDC variables
            m_m = mppdc.fix_variables(m_m, fixed_vars)

            # Solve MPPDC
            m_m, m_m_status = mppdc.solve_model(m_m)

            # Model timeout will cause sub-optimal termination condition
            if m_m_status.solver.termination_condition != TerminationCondition.optimal:
                i_results[counter] = None
                self.algorithm_logger('run_price_smoothing_mppdc_case',
                                      'Sub-optimal solution')
                self.algorithm_logger(
                    'run_price_smoothing_mppdc_case',
                    f'User time: {m_m_status.solver.user_time}s')

                # No primal model solved
                m_p, m_p_status = None, None
                break

            # Log infeasible constraints
            log_infeasible_constraints(m_m)

            # Results from MPPDC program
            r_m = copy.deepcopy(
                {v: self.extract_result(m_m, v)
                 for v in mppdc_keys})
            i_results[counter] = r_m

            # Update primal program with baselines and permit prices obtained from MPPDC model
            for y in m_p.Y:
                m_p.baseline[y].fix(m_m.baseline[y].value)
                m_p.permit_price[y].fix(m_m.permit_price[y].value)

            # Solve primal model
            m_p, m_p_status = primal.solve_model(m_p)
            log_infeasible_constraints(m_p)

            # Results from primal program
            p_r = copy.deepcopy(
                {v: self.extract_result(m_p, v)
                 for v in primal_vars})
            p_r['PRICES'] = copy.deepcopy({
                k: m_p.dual[m_p.POWER_BALANCE[k]]
                for k in m_p.POWER_BALANCE.keys()
            })
            i_results[counter]['primal'] = p_r

            # Max absolute capacity difference between MPPDC and primal program
            max_capacity_difference = max(
                abs(m_m.x_c[k].value - m_p.x_c[k].value)
                for k in m_m.x_c.keys())
            print(f'Max capacity difference: {max_capacity_difference} MW')

            # Max absolute baseline difference between MPPDC and primal program
            max_baseline_difference = max(
                abs(m_m.baseline[k].value - m_p.baseline[k].value)
                for k in m_m.baseline.keys())
            print(
                f'Max baseline difference: {max_baseline_difference} tCO2/MWh')

            # Stop if baseline difference between MPPDC and primal programs is sufficiently small
            if max_baseline_difference < 0.05:
                break

            # Check if max iterations exceeded
            elif counter > 9:
                message = 'Max iterations exceeded. Exiting loop.'
                print(message)
                self.algorithm_logger('run_price_smoothing_mppdc_case',
                                      message)
                break

            else:
                # Update dictionary of fixed variables to be used in next iteration
                fixed_vars = {v: p_r[v] for v in primal_vars}

            self.algorithm_logger('run_price_smoothing_mppdc_case',
                                  f'Finished iteration={counter}')
            counter += 1

        self.algorithm_logger('run_price_smoothing_mppdc_case',
                              'Finished solving model')

        # Combine results into a single dictionary
        results = {
            **rep_results, 'stage_3_price_targeting': i_results,
            'parameters': params
        }

        # Save results
        self.save_results(results, output_dir, filename)

        # Method output
        output = {
            'mppdc_model': m_m,
            'mppdc_status': m_m_status,
            'primal_model': m_p,
            'primal_status': m_p_status,
            'results': results
        }

        self.algorithm_logger('run_price_smoothing_mppdc_case',
                              'Finished MPPDC case')

        # Total iterations
        total_iterations = max(i_results.keys())

        # Save summary of the solution time
        solution_summary = {
            'case_id': case_id,
            'mode': params['mode'],
            'carbon_price': params['carbon_price'],
            'transition_year': params['transition_year'],
            'total_solution_time': time.time() - t_start,
            'total_iterations': total_iterations,
            'max_capacity_difference': max_capacity_difference,
            'max_baseline_difference': max_baseline_difference
        }
        self.save_solution_summary(solution_summary, output_dir)

        message = f"Finished MPPDC case: " + str(solution_summary)
        self.algorithm_logger('run_price_smoothing_mppdc_case', message)

        return output
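
A minimal driver sketch for ModelCases (the 'mode' labels, parameter values and directory layout are assumptions; the BAU case must be solved first because later cases read bau_case.pickle):

if __name__ == '__main__':
    output_directory = os.path.join(os.path.dirname(__file__), 'output')

    cases = ModelCases(output_directory, 'cases')

    # Business-as-usual case (permit prices and baselines fixed to zero internally)
    bau_params = {'mode': 'bau', 'start': 2016, 'end': 2040, 'scenarios': 5}
    cases.run_bau_case(bau_params, output_directory)

    # REP case with a flat 25 $/tCO2 permit price trajectory
    rep_params = {'mode': 'rep', 'start': 2016, 'end': 2040, 'scenarios': 5,
                  'permit_prices': {y: 25.0 for y in range(2016, 2041)}}
    cases.run_rep_case(rep_params, output_directory)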