import numpy as np
import pandas as pd

# 'hf' refers to the project's helper functions module (providing
# stack_columns and save_index); it is assumed to be imported as hf.


def bids(variable_ids, price_bids, unit_info):
    """Create the cost coefficients of energy bids in the objective function.

    This function defines the cost associated with each decision variable that
    represents a unit's energy bid. Costs are with reference to the regional node.
    """
    # If no service column is provided assume bids are for energy.
    if 'service' not in price_bids.columns:
        price_bids['service'] = 'energy'

    # Get the list of columns that are bid bands.
    bid_bands = [col for col in price_bids.columns if col not in ['unit', 'service']]
    price_bids = hf.stack_columns(price_bids,
                                  cols_to_keep=['unit', 'service'],
                                  cols_to_stack=bid_bands,
                                  type_name='capacity_band',
                                  value_name='cost')

    # Match bid costs with existing variable ids.
    objective_function = pd.merge(variable_ids, price_bids, how='inner',
                                  on=['unit', 'service', 'capacity_band'])
    objective_function = pd.merge(objective_function,
                                  unit_info.loc[:, ['unit', 'dispatch_type']],
                                  how='inner', on=['unit'])

    # Negate energy bid costs for units whose dispatch type is load.
    objective_function['cost'] = np.where(
        (objective_function['dispatch_type'] == 'load') &
        (objective_function['service'] == 'energy'),
        -1.0 * objective_function['cost'],
        objective_function['cost'])
    return objective_function
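# Illustrative sketch, not part of the module: hf.stack_columns is the
# project's wide-to-long reshaping helper. Based on how it is called in bids()
# above, it appears equivalent to pandas.melt; the hypothetical function below
# shows that assumed equivalence on a tiny price bid table.
def _stack_columns_sketch():
    import pandas as pd

    price_bids = pd.DataFrame({
        'unit': ['A', 'B'],
        'service': ['energy', 'energy'],
        '1': [50.0, 60.0],   # $/MWh offered in bid band 1
        '2': [70.0, 80.0]})  # $/MWh offered in bid band 2

    # Assumed equivalent of:
    # hf.stack_columns(price_bids, cols_to_keep=['unit', 'service'],
    #                  cols_to_stack=['1', '2'], type_name='capacity_band',
    #                  value_name='cost')
    return price_bids.melt(
        id_vars=['unit', 'service'],
        value_vars=['1', '2'],
        var_name='capacity_band',
        value_name='cost')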
def get_dispatch_comparison(self):
    DISPATCHLOAD = self.inputs_manager.DISPATCHLOAD.get_data(self.interval)
    bounds = DISPATCHLOAD.loc[:, ['DUID'] + self.services]
    bounds.columns = ['unit'] + self.services

    bounds = hf.stack_columns(bounds,
                              cols_to_keep=['unit'],
                              cols_to_stack=self.services,
                              type_name='service',
                              value_name='dispatched')
    bounds['service'] = bounds['service'].apply(
        lambda x: self.service_name_mapping[x])

    nempy_dispatch = self.market.get_unit_dispatch()
    comp = pd.merge(bounds, nempy_dispatch, 'inner', on=['unit', 'service'])
    comp['diff'] = comp['dispatch'] - comp['dispatched']
    comp = pd.merge(comp,
                    self.market._unit_info.loc[:, ['unit', 'dispatch_type']],
                    on='unit')

    # Negate the difference for load units providing energy.
    comp['diff'] = np.where(
        (comp['dispatch_type'] == 'load') & (comp['service'] == 'energy'),
        comp['diff'] * -1,
        comp['diff'])
    return comp
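# Illustrative sketch, not part of the module: both bids() and
# get_dispatch_comparison() above negate values on rows where a load unit is
# providing energy. The self-contained example below shows that np.where
# pattern on made-up data; the column names follow the functions above.
def _load_sign_flip_sketch():
    import numpy as np
    import pandas as pd

    comp = pd.DataFrame({
        'unit': ['GEN1', 'LOAD1'],
        'service': ['energy', 'energy'],
        'dispatch_type': ['generator', 'load'],
        'diff': [5.0, 5.0]})

    # Negate the value only for load units providing energy.
    comp['diff'] = np.where(
        (comp['dispatch_type'] == 'load') & (comp['service'] == 'energy'),
        comp['diff'] * -1,
        comp['diff'])
    return comp  # GEN1 keeps 5.0, LOAD1 becomes -5.0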
def all_dispatch_units_and_service_have_decision_variables(self, wiggle_room=0.001):
    DISPATCHLOAD = self.inputs_manager.DISPATCHLOAD.get_data(self.interval)
    bounds = DISPATCHLOAD.loc[:, ['DUID'] + self.services]
    bounds.columns = ['unit'] + self.services

    bounds = hf.stack_columns(bounds,
                              cols_to_keep=['unit'],
                              cols_to_stack=self.services,
                              type_name='service',
                              value_name='dispatched')
    bounds['service'] = bounds['service'].apply(
        lambda x: self.service_name_mapping[x])

    # Only consider unit and service combinations that were dispatched historically.
    bounds = bounds[bounds['dispatched'] > 0.001]

    decision_variables = self.market._decision_variables['bids'].copy()
    decision_variables = decision_variables.groupby(
        ['unit', 'service'], as_index=False).first()
    decision_variables = pd.merge(bounds, decision_variables, how='left',
                                  on=['unit', 'service'])

    # Every historically dispatched unit and service should have at least one
    # decision variable; sorting puts any missing entries first for inspection.
    decision_variables['not_missing'] = ~decision_variables['variable_id'].isna()
    decision_variables = decision_variables.sort_values('not_missing')
    return decision_variables['not_missing'].all()
def set_unit_dispatch_to_historical_values(self, wiggle_room=0.001):
    DISPATCHLOAD = self.inputs_manager.DISPATCHLOAD.get_data(self.interval)
    bounds = DISPATCHLOAD.loc[:, ['DUID'] + self.services]
    bounds.columns = ['unit'] + self.services

    bounds = hf.stack_columns(bounds,
                              cols_to_keep=['unit'],
                              cols_to_stack=self.services,
                              type_name='service',
                              value_name='dispatched')
    bounds['service'] = bounds['service'].apply(
        lambda x: self.service_name_mapping[x])

    decision_variables = self.market._decision_variables['bids'].copy()
    decision_variables = pd.merge(decision_variables, bounds,
                                  on=['unit', 'service'])

    decision_variables_first_bid = decision_variables.groupby(
        ['unit', 'service'], as_index=False).first()

    def last_bids(df):
        return df.iloc[1:]

    decision_variables_remaining_bids = decision_variables.groupby(
        ['unit', 'service'], as_index=False).apply(last_bids)

    # Pin the first bid band of each unit and service to the historical
    # dispatch, within the wiggle room, keeping the bounds non-negative.
    decision_variables_first_bid['lower_bound'] = (
        decision_variables_first_bid['dispatched'] - wiggle_room)
    decision_variables_first_bid['upper_bound'] = (
        decision_variables_first_bid['dispatched'] + wiggle_room)
    decision_variables_first_bid['lower_bound'] = np.where(
        decision_variables_first_bid['lower_bound'] < 0.0, 0.0,
        decision_variables_first_bid['lower_bound'])
    decision_variables_first_bid['upper_bound'] = np.where(
        decision_variables_first_bid['upper_bound'] < 0.0, 0.0,
        decision_variables_first_bid['upper_bound'])

    # Fix all remaining bid bands to zero.
    decision_variables_remaining_bids['lower_bound'] = 0.0
    decision_variables_remaining_bids['upper_bound'] = 0.0

    decision_variables = pd.concat(
        [decision_variables_first_bid, decision_variables_remaining_bids])

    self.market._decision_variables['bids'] = decision_variables
def get_price_comparison(self):
    energy_prices = self.market.get_energy_prices()
    energy_prices['time'] = self.interval
    energy_prices['service'] = 'energy'

    fcas_prices = self.market.get_fcas_prices()
    fcas_prices['time'] = self.interval

    prices = pd.concat([energy_prices, fcas_prices])

    # Map historical price column names to the service names used internally.
    price_to_service = {'ROP': 'energy',
                        'RAISE6SECROP': 'raise_6s',
                        'RAISE60SECROP': 'raise_60s',
                        'RAISE5MINROP': 'raise_5min',
                        'RAISEREGROP': 'raise_reg',
                        'LOWER6SECROP': 'lower_6s',
                        'LOWER60SECROP': 'lower_60s',
                        'LOWER5MINROP': 'lower_5min',
                        'LOWERREGROP': 'lower_reg'}
    price_columns = list(price_to_service.keys())

    historical_prices = self.inputs_manager.DISPATCHPRICE.get_data(self.interval)
    historical_prices = hf.stack_columns(
        historical_prices,
        cols_to_keep=['SETTLEMENTDATE', 'REGIONID'],
        cols_to_stack=price_columns,
        type_name='service',
        value_name='RRP')
    historical_prices['service'] = historical_prices['service'].apply(
        lambda x: price_to_service[x])
    historical_prices = historical_prices.loc[
        :, ['SETTLEMENTDATE', 'REGIONID', 'service', 'RRP']]
    historical_prices.columns = ['time', 'region', 'service', 'hist_price']

    prices = pd.merge(prices, historical_prices,
                      on=['time', 'region', 'service'])
    prices['error'] = prices['price'] - prices['hist_price']
    return prices
def create(definitions, next_variable_id):
    """Create decision variables, and their mapping to constraints, for modeling interconnector flows, as DataFrames.

    Examples
    --------
    Definitions for two interconnectors, one called A, that nominally flows from region X to region Y; note A can
    flow in both directions because of the way max and min are defined. The interconnector B nominally flows from
    Y to Z, but can only flow in the forward direction.

    >>> pd.options.display.width = None

    >>> inter_definitions = pd.DataFrame({
    ...     'interconnector': ['A', 'B'],
    ...     'link': ['A', 'B'],
    ...     'from_region': ['X', 'Y'],
    ...     'to_region': ['Y', 'Z'],
    ...     'max': [100.0, 400.0],
    ...     'min': [-100.0, 50.0],
    ...     'generic_constraint_factor': [1, 1],
    ...     'from_region_loss_factor': [0.9, 1.0],
    ...     'to_region_loss_factor': [1.0, 1.1]})

    >>> print(inter_definitions)
      interconnector link from_region to_region    max    min  generic_constraint_factor  from_region_loss_factor  to_region_loss_factor
    0              A    A           X         Y  100.0 -100.0                          1                      0.9                    1.0
    1              B    B           Y         Z  400.0   50.0                          1                      1.0                    1.1

    Start creating new variable ids from 0.

    >>> next_variable_id = 0

    Run the function and print results.

    >>> decision_variables, constraint_map = create(inter_definitions, next_variable_id)

    >>> print(decision_variables)
      interconnector link  variable_id  lower_bound  upper_bound        type  generic_constraint_factor
    0              A    A            0       -100.0        100.0  continuous                          1
    1              B    B            1         50.0        400.0  continuous                          1

    >>> print(constraint_map)
       variable_id interconnector link region service  coefficient
    0            0              A    A      Y  energy          1.0
    1            1              B    B      Z  energy          1.1
    2            0              A    A      X  energy         -0.9
    3            1              B    B      Y  energy         -1.0

    """
    # Create a variable_id for each interconnector.
    decision_variables = hf.save_index(definitions, 'variable_id', next_variable_id)

    # Create two entries in the constraint_map for each interconnector. This means the variable will be mapped to
    # the demand constraint of both connected regions.
    constraint_map = hf.stack_columns(
        decision_variables,
        ['variable_id', 'interconnector', 'link', 'max', 'min'],
        ['to_region', 'from_region'], 'direction', 'region')

    loss_factors = hf.stack_columns(
        decision_variables, ['variable_id'],
        ['from_region_loss_factor', 'to_region_loss_factor'],
        'direction', 'loss_factor')
    loss_factors['direction'] = loss_factors['direction'].apply(
        lambda x: x.replace('_loss_factor', ''))
    constraint_map = pd.merge(constraint_map, loss_factors,
                              on=['variable_id', 'direction'])

    # Define decision variable attributes.
    decision_variables['type'] = 'continuous'
    decision_variables = decision_variables.loc[:, [
        'interconnector', 'link', 'variable_id', 'min', 'max', 'type',
        'generic_constraint_factor']]
    decision_variables.columns = [
        'interconnector', 'link', 'variable_id', 'lower_bound', 'upper_bound',
        'type', 'generic_constraint_factor']

    # Set a positive coefficient for the to_region so an interconnector flowing in the nominal direction helps meet
    # the to_region demand constraint. Negative for the from_region, same logic.
    constraint_map['coefficient'] = np.where(
        constraint_map['direction'] == 'to_region',
        1.0 * constraint_map['loss_factor'],
        -1.0 * constraint_map['loss_factor'])
    constraint_map['service'] = 'energy'

    constraint_map = constraint_map.loc[:, [
        'variable_id', 'interconnector', 'link', 'region', 'service',
        'coefficient']]

    return decision_variables, constraint_map
def do_fcas_availabilities_match_historical(self):
    DISPATCHLOAD = self.inputs_manager.DISPATCHLOAD.get_data(self.interval)
    availabilities = ['RAISE6SECACTUALAVAILABILITY', 'RAISE60SECACTUALAVAILABILITY',
                      'RAISE5MINACTUALAVAILABILITY', 'RAISEREGACTUALAVAILABILITY',
                      'LOWER6SECACTUALAVAILABILITY', 'LOWER60SECACTUALAVAILABILITY',
                      'LOWER5MINACTUALAVAILABILITY', 'LOWERREGACTUALAVAILABILITY']

    availabilities_mapping = {'RAISEREGACTUALAVAILABILITY': 'raise_reg',
                              'LOWERREGACTUALAVAILABILITY': 'lower_reg',
                              'RAISE6SECACTUALAVAILABILITY': 'raise_6s',
                              'RAISE60SECACTUALAVAILABILITY': 'raise_60s',
                              'RAISE5MINACTUALAVAILABILITY': 'raise_5min',
                              'LOWER6SECACTUALAVAILABILITY': 'lower_6s',
                              'LOWER60SECACTUALAVAILABILITY': 'lower_60s',
                              'LOWER5MINACTUALAVAILABILITY': 'lower_5min'}

    bounds = DISPATCHLOAD.loc[:, ['DUID'] + availabilities]
    bounds.columns = ['unit'] + availabilities

    availabilities = hf.stack_columns(bounds,
                                      cols_to_keep=['unit'],
                                      cols_to_stack=availabilities,
                                      type_name='service',
                                      value_name='availability')

    bounds = DISPATCHLOAD.loc[:, ['DUID'] + self.services]
    bounds.columns = ['unit'] + self.services

    bounds = hf.stack_columns(bounds,
                              cols_to_keep=['unit'],
                              cols_to_stack=self.services,
                              type_name='service',
                              value_name='dispatched')
    bounds['service'] = bounds['service'].apply(
        lambda x: self.service_name_mapping[x])

    availabilities['service'] = availabilities['service'].apply(
        lambda x: availabilities_mapping[x])

    availabilities = pd.merge(availabilities, bounds, on=['unit', 'service'])

    # Exclude rows where historical dispatch exceeded the reported historical
    # availability by more than a small tolerance.
    availabilities = availabilities[
        ~(availabilities['dispatched'] - 0.001 > availabilities['availability'])]

    output = self.market.get_fcas_availability()
    output.columns = ['unit', 'service', 'availability_measured']

    availabilities = pd.merge(availabilities, output, 'left', on=['unit', 'service'])
    availabilities['availability_measured'] = \
        availabilities['availability_measured'].fillna(0)
    availabilities['error'] = (availabilities['availability_measured'] -
                               availabilities['availability'])
    availabilities['match'] = availabilities['error'].abs() < 0.1
    availabilities = availabilities.sort_values('match')
    return availabilities
def bids(volume_bids, unit_info, next_variable_id):
    """Create decision variables that correspond to unit bids, for use in the linear program.

    This function defines the needed parameters for each variable, with a lower bound equal to zero, an upper bound
    equal to the bid volume, and a variable type of continuous. There is no limit on the number of bid bands and
    each column in the volume_bids DataFrame other than unit or service is treated as a bid band. Volume bids
    should be positive numeric values only.

    Examples
    --------
    >>> import pandas as pd

    A set of capacity bids.

    >>> volume_bids = pd.DataFrame({
    ...     'unit': ['A', 'B'],
    ...     '1': [10.0, 50.0],
    ...     '2': [20.0, 30.0]})

    The locations of the units.

    >>> unit_info = pd.DataFrame({
    ...     'unit': ['A', 'B'],
    ...     'region': ['NSW', 'X'],
    ...     'dispatch_type': ['generator', 'load']})

    >>> next_variable_id = 0

    Create the decision variables and their mapping into constraints.

    >>> decision_variables, unit_level_constraint_map, regional_constraint_map = bids(
    ...     volume_bids, unit_info, next_variable_id)

    >>> print(decision_variables)
      unit capacity_band service  variable_id  lower_bound  upper_bound        type
    0    A             1  energy            0          0.0         10.0  continuous
    1    A             2  energy            1          0.0         20.0  continuous
    2    B             1  energy            2          0.0         50.0  continuous
    3    B             2  energy            3          0.0         30.0  continuous

    >>> print(unit_level_constraint_map)
       variable_id unit service  coefficient
    0            0    A  energy          1.0
    1            1    A  energy          1.0
    2            2    B  energy          1.0
    3            3    B  energy          1.0

    >>> print(regional_constraint_map)
       variable_id region service  coefficient
    0            0    NSW  energy          1.0
    1            1    NSW  energy          1.0
    2            2      X  energy         -1.0
    3            3      X  energy         -1.0

    Parameters
    ----------
    volume_bids : pd.DataFrame
        Bids by unit, in MW, can contain up to n bid bands.

        ========  ===============================================================
        Columns:  Description:
        unit      unique identifier of a dispatch unit (as `str`)
        service   the service being provided, optional, if missing energy assumed (as `str`)
        1         bid volume in the 1st band, in MW (as `float`)
        2         bid volume in the 2nd band, in MW (as `float`)
        n         bid volume in the nth band, in MW (as `float`)
        ========  ===============================================================

    unit_info : pd.DataFrame
        The region each unit is located in, and whether it is a generator or a load.

        =============  ======================================================
        Columns:       Description:
        unit           unique identifier of a dispatch unit (as `str`)
        region         unique identifier of a market region (as `str`)
        dispatch_type  whether the unit is a 'generator' or a 'load' (as `str`)
        =============  ======================================================

    next_variable_id : int
        The next integer to start using for variable ids.
    Returns
    -------
    decision_variables : pd.DataFrame

        =============  ===============================================================
        Columns:       Description:
        unit           unique identifier of a dispatch unit (as `str`)
        capacity_band  the bid band of the variable (as `str`)
        variable_id    the id of the variable (as `int`)
        lower_bound    the lower bound of the variable, is zero for bids (as `np.float64`)
        upper_bound    the upper bound of the variable, the volume bid (as `np.float64`)
        type           the type of variable, is continuous for bids (as `str`)
        =============  ===============================================================

    unit_level_constraint_map : pd.DataFrame

        =============  =============================================================================
        Columns:       Description:
        variable_id    the id of the variable (as `np.int64`)
        unit           the unit level constraints the variable should map to (as `str`)
        service        the service type of the constraints the variable should map to (as `str`)
        coefficient    the coefficient of the variable in the constraints (as `np.float64`)
        =============  =============================================================================

    regional_constraint_map : pd.DataFrame

        =============  =============================================================================
        Columns:       Description:
        variable_id    the id of the variable (as `np.int64`)
        region         the regional constraints the variable should map to (as `str`)
        service        the service type of the constraints the variable should map to (as `str`)
        coefficient    the coefficient of the variable in the constraints (as `np.float64`)
        =============  =============================================================================
    """
    # If no service column is provided assume bids are for energy.
    if 'service' not in volume_bids.columns:
        volume_bids['service'] = 'energy'

    # Get a list of all the columns that contain volume bids.
    bid_bands = [col for col in volume_bids.columns if col not in ['unit', 'service']]

    # Reshape the table so each bid band is on its own row.
    decision_variables = hf.stack_columns(volume_bids,
                                          cols_to_keep=['unit', 'service'],
                                          cols_to_stack=bid_bands,
                                          type_name='capacity_band',
                                          value_name='upper_bound')

    # Drop bands with no volume bid.
    decision_variables = decision_variables[decision_variables['upper_bound'] >= 0.0001]

    # Group units together in the decision variable table.
    decision_variables = decision_variables.sort_values(['unit', 'capacity_band'])

    # Create a unique identifier for each decision variable.
    decision_variables = hf.save_index(decision_variables, 'variable_id', next_variable_id)

    # The lower bound of bidding decision variables will always be zero.
    decision_variables['lower_bound'] = 0.0
    decision_variables['type'] = 'continuous'

    constraint_map = decision_variables.loc[:, ['variable_id', 'unit', 'service']]
    constraint_map = pd.merge(
        constraint_map,
        unit_info.loc[:, ['unit', 'region', 'dispatch_type']],
        'inner', on='unit')

    regional_constraint_map = constraint_map.loc[:, [
        'variable_id', 'region', 'service', 'dispatch_type']]

    # Load energy bids enter the regional constraint with a coefficient of
    # -1.0; all other bids enter with +1.0.
    regional_constraint_map['coefficient'] = np.where(
        (regional_constraint_map['dispatch_type'] == 'load') &
        (regional_constraint_map['service'] == 'energy'),
        -1.0, 1.0)
    regional_constraint_map = regional_constraint_map.drop('dispatch_type', axis=1)

    unit_level_constraint_map = constraint_map.loc[:, ['variable_id', 'unit', 'service']]
    unit_level_constraint_map['coefficient'] = 1.0

    decision_variables = decision_variables.loc[:, [
        'unit', 'capacity_band', 'service', 'variable_id', 'lower_bound',
        'upper_bound', 'type']]

    return decision_variables, unit_level_constraint_map, regional_constraint_map
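# Illustrative sketch, not part of the module: the decision variables built by
# bids() above carry the unit / service / capacity_band / variable_id columns
# that the cost-coefficient bids() at the top of this section merges on, so
# the two functions appear designed to be chained. That pairing is an
# assumption drawn from the matching column names, not a documented workflow;
# because both functions share the name bids and live in separate modules, the
# cost function is passed in here as the hypothetical parameter
# `objective_function_bids`.
def _bid_variables_and_costs_sketch(objective_function_bids):
    import pandas as pd

    volume_bids = pd.DataFrame({
        'unit': ['A', 'B'],
        '1': [10.0, 50.0],
        '2': [20.0, 30.0]})
    price_bids = pd.DataFrame({
        'unit': ['A', 'B'],
        '1': [45.0, 55.0],
        '2': [60.0, 75.0]})
    unit_info = pd.DataFrame({
        'unit': ['A', 'B'],
        'region': ['NSW', 'X'],
        'dispatch_type': ['generator', 'load']})

    # Volume bids become bounded decision variables with ids starting at 0 ...
    decision_variables, unit_map, regional_map = bids(volume_bids, unit_info, 0)

    # ... and the price bids attach a cost to each variable id. Unit B is a
    # load, so its energy costs come back negated.
    return objective_function_bids(decision_variables, price_bids, unit_info)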