def solve_dcopf_losses(model_data, solver, timelimit = None, solver_tee = True, symbolic_solver_labels = False, options = None, dcopf_losses_model_generator = create_btheta_losses_dcopf_model, return_model = False, return_results = False, **kwargs):
    '''
    Create and solve a new dcopf with losses model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    timelimit : float (optional)
        Time limit for dcopf run. Default of None results in no time limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    dcopf_losses_model_generator : function (optional)
        Function for generating the dcopf-with-losses model. Default is
        egret.models.dcopf_losses.create_btheta_losses_dcopf_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''
    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    m, md = dcopf_losses_model_generator(model_data, **kwargs)

    # import duals so LMPs can be read off the power-balance constraints below
    m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    m, results = _solve_model(m,solver,timelimit=timelimit,solver_tee=solver_tee,
                              symbolic_solver_labels=symbolic_solver_labels,solver_options=options)

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g,g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])

    if dcopf_losses_model_generator == create_btheta_losses_dcopf_model:
        # b-theta formulation: one power-balance constraint per bus, so the
        # LMP is simply that constraint's dual.
        for b,b_dict in buses.items():
            b_dict['pl'] = value(m.pl[b])
            # stale reactive-power LMP from a prior (AC) solve is meaningless here
            b_dict.pop('qlmp',None)
            b_dict['lmp'] = value(m.dual[m.eq_p_balance[b]])
            b_dict['va'] = value(m.va[b])
    elif dcopf_losses_model_generator == create_ptdf_losses_dcopf_model:
        # PTDF formulation: one system-wide balance constraint. The bus LMP is
        # the system energy price plus congestion/loss components accumulated
        # from the branch thermal-limit and loss-constraint duals, weighted by
        # the PTDF and loss-distribution-factor (LDF) matrices.
        PTDF = m._PTDF
        ptdf_r = PTDF.PTDFM
        ldf = PTDF.LDF
        buses_idx = PTDF.buses_keys
        branches_idx = PTDF.branches_keys
        for j, b in enumerate(buses_idx):
            b_dict = buses[b]
            b_dict['pl'] = value(m.pl[b])
            b_dict.pop('qlmp',None)
            # energy component (dual of the single system balance constraint)
            b_dict['lmp'] = value(m.dual[m.eq_p_balance])
            for i, k in enumerate(branches_idx):
                # congestion components from both thermal-limit sides,
                # plus the loss component from the branch loss constraint
                b_dict['lmp'] += ptdf_r[i,j]*value(m.dual[m.ineq_pf_branch_thermal_lb[k]])
                b_dict['lmp'] += ptdf_r[i,j]*value(m.dual[m.ineq_pf_branch_thermal_ub[k]])
                b_dict['lmp'] += ldf[i,j]*value(m.dual[m.eq_pfl_branch[k]])
    else:
        raise Exception("Unrecognized dcopf_losses_model_generator {}".format(dcopf_losses_model_generator))

    for k, k_dict in branches.items():
        k_dict['pf'] = value(m.pf[k])

    # convert solution back from per-unit to the ModelData's native units
    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
def solve_scopf(model_data, solver, timelimit=None, solver_tee=True,
                symbolic_solver_labels=False, options=None,
                scopf_model_generator=create_scopf_model,
                return_model=False, return_results=False, **kwargs):
    '''
    Create and solve a new scopf model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    timelimit : float (optional)
        Time limit for scopf run. Default of None results in no time limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    scopf_model_generator : function (optional)
        Function for generating the scopf model. Default is
        egret.models.scopf.create_scopf_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''
    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    m, md = scopf_model_generator(model_data, **kwargs)

    # BUG FIX: this line previously used the undefined alias `pyo`;
    # pyomo.environ is imported as `pe` in this function.
    m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    m, results, solver = _solve_model(
        m, solver, timelimit=timelimit, solver_tee=solver_tee,
        symbolic_solver_labels=symbolic_solver_labels,
        solver_options=options, return_solver=True)

    # lazy PTDF mode: iteratively add violated branch constraints and re-solve
    if m._ptdf_options['lazy']:
        iter_limit = m._ptdf_options['iteration_limit']
        term_cond = _lazy_ptdf_dcopf_model_solve_loop(
            m, md, solver, solver_tee=solver_tee,
            symbolic_solver_labels=symbolic_solver_labels,
            iteration_limit=iter_limit)

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))
    dc_branches = dict(md.elements(element_type='dc_branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])

    ## calculate the power flows from our PTDF matrix for maximum precision
    ## calculate the LMPC (LMP congestion) using numpy
    PTDF = m._PTDF

    PFV, _, VA = PTDF.calculate_PFV(m)

    branches_idx = PTDF.branches_keys
    for i, bn in enumerate(branches_idx):
        branches[bn]['pf'] = PFV[i]

    # net slack (shed load minus over-generation), only present on relaxed models
    if hasattr(m, 'p_load_shed'):
        md.data['system']['p_balance_violation'] = value(m.p_load_shed) - value(m.p_over_generation)

    buses_idx = PTDF.buses_keys
    LMP = PTDF.calculate_LMP(m, m.dual, m.eq_p_balance)
    for i, b in enumerate(buses_idx):
        b_dict = buses[b]
        b_dict['lmp'] = LMP[i]
        b_dict['pl'] = value(m.pl[b])
        # NOTE(review): `degrees` is assumed to come from a module-level
        # `from math import degrees` — confirm against the file header.
        b_dict['va'] = degrees(VA[i])

    for k, k_dict in dc_branches.items():
        k_dict['pf'] = value(m.dcpf[k])

    # record post-contingency flows on monitored branches, grouped by contingency
    contingencies = dict(md.elements(element_type='contingency'))
    contingency_flows = PTDF.calculate_monitored_contingency_flows(m)
    for (cn, bn), flow in contingency_flows.items():
        c_dict = contingencies[cn]
        if 'monitored_branches' not in c_dict:
            c_dict['monitored_branches'] = {}
        c_dict['monitored_branches'][bn] = {'pf': flow}

    # convert solution back from per-unit to the ModelData's native units
    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
def solve_acopf(model_data, solver, timelimit=None, solver_tee=True,
                symbolic_solver_labels=False, options=None,
                acopf_model_generator=create_psv_acopf_model,
                return_model=False, return_results=False, **kwargs):
    '''
    Create and solve a new acopf model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    timelimit : float (optional)
        Time limit for acopf run. Default of None results in no time limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    acopf_model_generator : function (optional)
        Function for generating the acopf model. Default is
        egret.models.acopf.create_psv_acopf_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''
    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    m, md = acopf_model_generator(model_data, **kwargs)

    # import duals so real/reactive LMPs can be read off the balance constraints
    m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    # FIX: pass the user options through the `solver_options` keyword, as every
    # other `_solve_model` call in this module does (the previous `options=`
    # keyword did not match `_solve_model`'s interface).
    m, results = _solve_model(m, solver, timelimit=timelimit,
                              solver_tee=solver_tee,
                              symbolic_solver_labels=symbolic_solver_labels,
                              solver_options=options)

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])
        g_dict['qg'] = value(m.qg[g])

    for b, b_dict in buses.items():
        # real and reactive LMPs from the balance-constraint duals
        b_dict['lmp'] = value(m.dual[m.eq_p_balance[b]])
        b_dict['qlmp'] = value(m.dual[m.eq_q_balance[b]])
        b_dict['pl'] = value(m.pl[b])
        if hasattr(m, 'vj'):
            # rectangular-voltage formulation: recover polar vm/va
            b_dict['vm'] = tx_calc.calculate_vm_from_vj_vr(
                value(m.vj[b]), value(m.vr[b]))
            b_dict['va'] = tx_calc.calculate_va_from_vj_vr(
                value(m.vj[b]), value(m.vr[b]))
        else:
            b_dict['vm'] = value(m.vm[b])
            b_dict['va'] = value(m.va[b])

    for k, k_dict in branches.items():
        if hasattr(m, 'pf'):
            # power-flow variables exist directly on the model
            k_dict['pf'] = value(m.pf[k])
            k_dict['pt'] = value(m.pt[k])
            k_dict['qf'] = value(m.qf[k])
            k_dict['qt'] = value(m.qt[k])
        # FIX: this branch reads m.ifr/m.ifj/m.itr/m.itj, but the guard
        # previously tested hasattr(m, 'irf') — a transposition that made the
        # rectangular-current extraction unreachable.
        if hasattr(m, 'ifr'):
            # rectangular-current formulation: derive branch p/q from the
            # current and voltage components at each end
            b = k_dict['from_bus']
            k_dict['pf'] = value(
                tx_calc.calculate_p(value(m.ifr[k]), value(m.ifj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            k_dict['qf'] = value(
                tx_calc.calculate_q(value(m.ifr[k]), value(m.ifj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            b = k_dict['to_bus']
            k_dict['pt'] = value(
                tx_calc.calculate_p(value(m.itr[k]), value(m.itj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            k_dict['qt'] = value(
                tx_calc.calculate_q(value(m.itr[k]), value(m.itj[k]),
                                    value(m.vr[b]), value(m.vj[b])))

    # convert solution back from per-unit to the ModelData's native units
    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
def solve_unit_commitment(model_data, solver, mipgap=0.001, timelimit=None,
                          solver_tee=True, symbolic_solver_labels=False,
                          options=None,
                          uc_model_generator=create_tight_unit_commitment_model,
                          relaxed=False, return_model=False):
    '''
    Create and solve a new unit commitment model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
        # TODO: describe the required and optional attributes
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    mipgap : float (optional)
        Mipgap to use for unit commitment solve; default is 0.001
    timelimit : float (optional)
        Time limit for unit commitment run. Default of None results in no time
        limit being set -- runs until mipgap is satisfied
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    uc_model_generator : function (optional)
        Function for generating the unit commitment model. Default is
        egret.models.unit_commitment.create_tight_unit_commitment_model
    relaxed : bool (optional)
        If True, creates a relaxed unit commitment model
    return_model : bool (optional)
        If True, returns the pyomo model object
    '''
    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model

    m = uc_model_generator(model_data, relaxed=relaxed)

    # duals (prices) only exist on the LP relaxation
    if relaxed:
        m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    m, results = _solve_model(m, solver, mipgap, timelimit, solver_tee,
                              symbolic_solver_labels, options)

    md = m.model_data

    # save results data to ModelData object
    thermal_gens = dict(
        md.elements(element_type='generator', generator_type='thermal'))
    renewable_gens = dict(
        md.elements(element_type='generator', generator_type='renewable'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))
    storage = dict(md.elements(element_type='storage'))
    zones = dict(md.elements(element_type='zone'))
    areas = dict(md.elements(element_type='area'))

    data_time_periods = md.data['system']['time_indices']
    reserve_requirement = ('reserve_requirement' in md.data['system'])

    # feature flags: each ancillary service is only extracted if the chosen
    # formulation actually built the corresponding model components
    regulation = False
    spin = False
    nspin = False
    supp = False
    flex = False
    if hasattr(m, 'regulation_service'):
        regulation = True
    if hasattr(m, 'spinning_reserve'):
        spin = True
    if hasattr(m, 'non_spinning_reserve'):
        nspin = True
    if hasattr(m, 'supplemental_reserve'):
        supp = True
    if hasattr(m, 'flexible_ramping'):
        flex = True

    fs = False
    if hasattr(m, 'fuel_supply'):
        fs = True

    for g, g_dict in thermal_gens.items():
        pg_dict = {}
        if reserve_requirement:
            rg_dict = {}
        commitment_dict = {}
        commitment_cost_dict = {}
        production_cost_dict = {}
        ramp_up_avail_dict = {}

        ## all of the potential constraints that could limit maximum output
        ## Not all unit commitment models have these constraints, so first
        ## we need check if they're on the model object
        ramp_up_avail_potential_constrs = [
            'EnforceMaxAvailableRampUpRates',
            'AncillaryServiceRampUpLimit',
            'power_limit_from_start',
            'power_limit_from_stop',
            'power_limit_from_start_stop',
            'power_limit_from_start_stops',
            'EnforceMaxAvailableRampDownRates',
            'EnforceMaxCapacity',
        ]
        ramp_up_avail_constrs = []
        for constr in ramp_up_avail_potential_constrs:
            if hasattr(m, constr):
                ramp_up_avail_constrs.append(getattr(m, constr))

        if regulation:
            reg_prov = {}
            reg_up_supp = {}
            reg_dn_supp = {}
        if spin:
            spin_supp = {}
        if nspin:
            nspin_supp = {}
        if supp:
            supp_supp = {}
        if flex:
            flex_up_supp = {}
            flex_dn_supp = {}
        gfs = (fs and (g in m.FuelSupplyGenerators))
        if gfs:
            fuel_consumed = {}

        for dt, mt in zip(data_time_periods, m.TimePeriods):
            pg_dict[dt] = value(m.PowerGenerated[g, mt])
            if reserve_requirement:
                rg_dict[dt] = value(m.ReserveProvided[g, mt])
            commitment_dict[dt] = value(m.UnitOn[g, mt])
            commitment_cost_dict[dt] = value(m.StartupCost[g,mt]+m.ShutdownCost[g,mt]+\
                    m.MinimumProductionCost[g]*m.UnitOn[g,mt]*m.TimePeriodLengthHours)
            production_cost_dict[dt] = value(m.ProductionCost[g, mt])

            if regulation:
                if g in m.AGC_Generators:
                    reg_prov[dt] = value(m.RegulationOn[g, mt])
                    reg_up_supp[dt] = value(m.RegulationReserveUp[g, mt])
                    reg_dn_supp[dt] = value(m.RegulationReserveDn[g, mt])
                    commitment_cost_dict[dt] += value(
                        m.RegulationCostCommitment[g, mt])
                    production_cost_dict[dt] += value(
                        m.RegulationCostGeneration[g, mt])
                else:
                    reg_prov[dt] = 0.
                    reg_up_supp[dt] = 0.
                    reg_dn_supp[dt] = 0.

            if spin:
                spin_supp[dt] = value(m.SpinningReserveDispatched[g, mt])
                production_cost_dict[dt] += value(
                    m.SpinningReserveCostGeneration[g, mt])

            if nspin:
                if g in m.NonSpinGenerators:
                    nspin_supp[dt] = value(m.NonSpinningReserveDispatched[g, mt])
                    production_cost_dict[dt] += value(
                        m.NonSpinningReserveCostGeneration[g, mt])
                else:
                    nspin_supp[dt] = 0.

            if supp:
                supp_supp[dt] = value(m.SupplementalReserveDispatched[g, mt])
                production_cost_dict[dt] += value(
                    m.SupplementalReserveCostGeneration[g, mt])

            if flex:
                flex_up_supp[dt] = value(m.FlexUpProvided[g, mt])
                flex_dn_supp[dt] = value(m.FlexDnProvided[g, mt])

            if gfs:
                fuel_consumed[dt] = value(m.FuelConsumed[g, mt])

            ## pyomo doesn't add constraints that are skipped to the index set,
            ## so we also need check here if the index exists.
            slack_list = []
            for constr in ramp_up_avail_constrs:
                if (g, mt) in constr:
                    slack_list.append(constr[g, mt].slack())

            # headroom is the tightest (smallest) slack over all binding
            # ramp/capacity constraints at this time step
            ramp_up_avail_dict[dt] = min(slack_list)

        g_dict['pg'] = _time_series_dict(pg_dict)
        if reserve_requirement:
            g_dict['rg'] = _time_series_dict(rg_dict)
        g_dict['commitment'] = _time_series_dict(commitment_dict)
        g_dict['commitment_cost'] = _time_series_dict(commitment_cost_dict)
        g_dict['production_cost'] = _time_series_dict(production_cost_dict)
        if regulation:
            g_dict['reg_provider'] = _time_series_dict(reg_prov)
            g_dict['reg_up_supplied'] = _time_series_dict(reg_up_supp)
            g_dict['reg_down_supplied'] = _time_series_dict(reg_dn_supp)
        if spin:
            g_dict['spinning_supplied'] = _time_series_dict(spin_supp)
        if nspin:
            g_dict['non_spinning_supplied'] = _time_series_dict(nspin_supp)
        if supp:
            g_dict['supplemental_supplied'] = _time_series_dict(supp_supp)
        if flex:
            g_dict['flex_up_supplied'] = _time_series_dict(flex_up_supp)
            g_dict['flex_down_supplied'] = _time_series_dict(flex_dn_supp)
        if gfs:
            g_dict['fuel_consumed'] = _time_series_dict(fuel_consumed)
        g_dict['headroom'] = _time_series_dict(ramp_up_avail_dict)

    for g, g_dict in renewable_gens.items():
        pg_dict = {}
        for dt, mt in zip(data_time_periods, m.TimePeriods):
            pg_dict[dt] = value(m.NondispatchablePowerUsed[g, mt])
        g_dict['pg'] = _time_series_dict(pg_dict)

    for s, s_dict in storage.items():
        state_of_charge_dict = {}
        p_discharge_dict = {}
        p_charge_dict = {}
        operational_cost_dict = {}
        for dt, mt in zip(data_time_periods, m.TimePeriods):
            p_discharge_dict[dt] = value(m.PowerOutputStorage[s, mt])
            p_charge_dict[dt] = value(m.PowerInputStorage[s, mt])
            operational_cost_dict[dt] = value(m.StorageCost[s, mt])
            # BUG FIX: was m.SocStorage[s.mt] — attribute access on the string
            # key `s` would raise AttributeError; the index is the (s, mt) pair.
            state_of_charge_dict[dt] = value(m.SocStorage[s, mt])

        s_dict['p_discharge'] = _time_series_dict(p_discharge_dict)
        s_dict['p_charge'] = _time_series_dict(p_charge_dict)
        s_dict['operational_cost'] = _time_series_dict(operational_cost_dict)
        s_dict['state_of_charge'] = _time_series_dict(state_of_charge_dict)

    ## NOTE: UC model currently has no notion of separate loads
    if m.power_balance == 'btheta_power_flow':
        for l, l_dict in branches.items():
            pf_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                pf_dict[dt] = value(m.TransmissionBlock[mt].pf[l])
            l_dict['pf'] = _time_series_dict(pf_dict)

        for b, b_dict in buses.items():
            va_dict = {}
            p_balance_violation_dict = {}
            pl_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                va_dict[dt] = value(m.TransmissionBlock[mt].va[b])
                p_balance_violation_dict[dt] = value(
                    m.LoadGenerateMismatch[b, mt])
                pl_dict[dt] = value(m.TransmissionBlock[mt].pl[b])
            b_dict['va'] = _time_series_dict(va_dict)
            b_dict['p_balance_violation'] = _time_series_dict(
                p_balance_violation_dict)
            b_dict['pl'] = _time_series_dict(pl_dict)
            if relaxed:
                lmp_dict = {}
                for dt, mt in zip(data_time_periods, m.TimePeriods):
                    lmp_dict[dt] = value(
                        m.dual[m.TransmissionBlock[mt].eq_p_balance[b]])
                b_dict['lmp'] = _time_series_dict(lmp_dict)

    elif m.power_balance == 'power_balance_constraints':
        for l, l_dict in branches.items():
            pf_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                pf_dict[dt] = value(m.LinePower[l, mt])
            l_dict['pf'] = _time_series_dict(pf_dict)

        for b, b_dict in buses.items():
            va_dict = {}
            p_balance_violation_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                va_dict[dt] = value(m.Angle[b, mt])
                p_balance_violation_dict[dt] = value(
                    m.LoadGenerateMismatch[b, mt])
            b_dict['va'] = _time_series_dict(va_dict)
            b_dict['p_balance_violation'] = _time_series_dict(
                p_balance_violation_dict)
            if relaxed:
                lmp_dict = {}
                for dt, mt in zip(data_time_periods, m.TimePeriods):
                    lmp_dict[dt] = value(m.dual[m.PowerBalance[b, mt]])
                b_dict['lmp'] = _time_series_dict(lmp_dict)
    else:
        raise Exception("Unrecognized network type " + m.power_balance)

    if reserve_requirement:
        ## populate the system attributes
        sys_dict = md.data['system']
        sr_s_dict = {}
        for dt, mt in zip(data_time_periods, m.TimePeriods):
            sr_s_dict[dt] = value(m.ReserveShortfall[mt])
        sys_dict['reserve_shortfall'] = _time_series_dict(sr_s_dict)
        if relaxed:
            sr_p_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                ## TODO: if the 'relaxed' flag is set, we should automatically
                ## pick a formulation which uses the MLR reserve constraints
                sr_p_dict[dt] = value(m.dual[m.EnforceReserveRequirements[mt]])
            sys_dict['reserve_price'] = _time_series_dict(sr_p_dict)

    ## TODO: Can the code above this be re-factored in a similar way?
    ## as we add more zonal reserve products, they can be added here
    _zonal_reserve_map = dict()
    _system_reserve_map = dict()
    if spin:
        _zonal_reserve_map['spinning_reserve_requirement'] = {
            'shortfall': 'spinning_reserve_shortfall',
            'price': 'spinning_reserve_price',
            'shortfall_m': m.ZonalSpinningReserveShortfall,
            'balance_m': m.EnforceZonalSpinningReserveRequirement,
        }
        _system_reserve_map['spinning_reserve_requirement'] = {
            'shortfall': 'spinning_reserve_shortfall',
            'price': 'spinning_reserve_price',
            'shortfall_m': m.SystemSpinningReserveShortfall,
            'balance_m': m.EnforceSystemSpinningReserveRequirement,
        }
    if nspin:
        _zonal_reserve_map['non_spinning_reserve_requirement'] = {
            'shortfall': 'non_spinning_reserve_shortfall',
            'price': 'non_spinning_reserve_price',
            'shortfall_m': m.ZonalNonSpinningReserveShortfall,
            'balance_m': m.EnforceNonSpinningZonalReserveRequirement,
        }
        _system_reserve_map['non_spinning_reserve_requirement'] = {
            'shortfall': 'non_spinning_reserve_shortfall',
            'price': 'non_spinning_reserve_price',
            'shortfall_m': m.SystemNonSpinningReserveShortfall,
            'balance_m': m.EnforceSystemNonSpinningReserveRequirement,
        }
    if regulation:
        _zonal_reserve_map['regulation_up_requirement'] = {
            'shortfall': 'regulation_up_shortfall',
            'price': 'regulation_up_price',
            'shortfall_m': m.ZonalRegulationUpShortfall,
            'balance_m': m.EnforceZonalRegulationUpRequirements,
        }
        _system_reserve_map['regulation_up_requirement'] = {
            'shortfall': 'regulation_up_shortfall',
            'price': 'regulation_up_price',
            'shortfall_m': m.SystemRegulationUpShortfall,
            'balance_m': m.EnforceSystemRegulationUpRequirement,
        }
        _zonal_reserve_map['regulation_down_requirement'] = {
            'shortfall': 'regulation_down_shortfall',
            'price': 'regulation_down_price',
            'shortfall_m': m.ZonalRegulationDnShortfall,
            'balance_m': m.EnforceZonalRegulationDnRequirements,
        }
        _system_reserve_map['regulation_down_requirement'] = {
            'shortfall': 'regulation_down_shortfall',
            'price': 'regulation_down_price',
            'shortfall_m': m.SystemRegulationDnShortfall,
            'balance_m': m.EnforceSystemRegulationDnRequirement,
        }
    if flex:
        _zonal_reserve_map['flexible_ramp_up_requirement'] = {
            'shortfall': 'flexible_ramp_up_shortfall',
            'price': 'flexible_ramp_up_price',
            'shortfall_m': m.ZonalFlexUpShortfall,
            'balance_m': m.ZonalFlexUpRequirementConstr,
        }
        _system_reserve_map['flexible_ramp_up_requirement'] = {
            'shortfall': 'flexible_ramp_up_shortfall',
            'price': 'flexible_ramp_up_price',
            'shortfall_m': m.SystemFlexUpShortfall,
            'balance_m': m.SystemFlexUpRequirementConstr,
        }
        _zonal_reserve_map['flexible_ramp_down_requirement'] = {
            'shortfall': 'flexible_ramp_down_shortfall',
            'price': 'flexible_ramp_down_price',
            'shortfall_m': m.ZonalFlexDnShortfall,
            'balance_m': m.ZonalFlexDnRequirementConstr,
        }
        _system_reserve_map['flexible_ramp_down_requirement'] = {
            'shortfall': 'flexible_ramp_down_shortfall',
            'price': 'flexible_ramp_down_price',
            'shortfall_m': m.SystemFlexDnShortfall,
            'balance_m': m.SystemFlexDnRequirementConstr,
        }
    if supp:
        _zonal_reserve_map['supplemental_reserve_requirement'] = {
            'shortfall': 'supplemental_shortfall',
            'price': 'supplemental_price',
            'shortfall_m': m.ZonalSupplementalReserveShortfall,
            'balance_m': m.EnforceZonalSupplementalReserveRequirement,
        }
        _system_reserve_map['supplemental_reserve_requirement'] = {
            'shortfall': 'supplemental_shortfall',
            'price': 'supplemental_price',
            'shortfall_m': m.SystemSupplementalReserveShortfall,
            'balance_m': m.EnforceSystemSupplementalReserveRequirement,
        }

    def _populate_zonal_reserves(elements_dict, string_handle):
        # write per-zone/per-area shortfall (and price, if relaxed) time series
        for e, e_dict in elements_dict.items():
            me = string_handle + e
            for req, req_dict in _zonal_reserve_map.items():
                if req in e_dict:
                    req_shortfall_dict = {}
                    for dt, mt in zip(data_time_periods, m.TimePeriods):
                        req_shortfall_dict[dt] = value(
                            req_dict['shortfall_m'][me, mt])
                    e_dict[req_dict['shortfall']] = _time_series_dict(
                        req_shortfall_dict)
                    if relaxed:
                        req_price_dict = {}
                        for dt, mt in zip(data_time_periods, m.TimePeriods):
                            req_price_dict[dt] = value(
                                m.dual[req_dict['balance_m'][me, mt]])
                        e_dict[req_dict['price']] = _time_series_dict(
                            req_price_dict)

    def _populate_system_reserves(sys_dict):
        # write system-wide shortfall (and price, if relaxed) time series
        for req, req_dict in _system_reserve_map.items():
            if req in sys_dict:
                req_shortfall_dict = {}
                for dt, mt in zip(data_time_periods, m.TimePeriods):
                    req_shortfall_dict[dt] = value(req_dict['shortfall_m'][mt])
                sys_dict[req_dict['shortfall']] = _time_series_dict(
                    req_shortfall_dict)
                if relaxed:
                    req_price_dict = {}
                    for dt, mt in zip(data_time_periods, m.TimePeriods):
                        req_price_dict[dt] = value(
                            m.dual[req_dict['balance_m'][mt]])
                    sys_dict[req_dict['price']] = _time_series_dict(
                        req_price_dict)

    _populate_zonal_reserves(areas, 'area_')
    _populate_zonal_reserves(zones, 'zone_')
    _populate_system_reserves(md.data['system'])

    if fs:
        fuel_supplies = dict(md.elements(element_type='fuel_supply'))
        for f, f_dict in fuel_supplies.items():
            fuel_consumed = {}
            fuel_supply_type = f_dict['fuel_supply_type']
            if fuel_supply_type == 'instantaneous':
                for dt, mt in zip(data_time_periods, m.TimePeriods):
                    fuel_consumed[dt] = value(
                        m.TotalFuelConsumedAtInstFuelSupply[f, mt])
            else:
                # best-effort: unknown types get an empty series plus a warning
                print('WARNING: unrecognized fuel_supply_type {} for fuel_supply {}'
                      .format(fuel_supply_type, f))
            f_dict['fuel_consumed'] = _time_series_dict(fuel_consumed)

    md.data['system']['total_cost'] = value(m.TotalCostObjective)

    # convert solution back from per-unit to the ModelData's native units.
    # NOTE(review): assumes unscale_ModelData_to_pu is imported at module
    # level — confirm against the file header.
    unscale_ModelData_to_pu(md, inplace=True)

    if return_model:
        return md, m
    return md
def solve_lpac(model_data, solver, ac_solver=None, timelimit=None,
               solver_tee=True, symbolic_solver_labels=False, options=None,
               ac_options=None, lpac_model_generator=create_cold_start_lpac_model,
               return_model=False, return_results=False,
               kwargs=None, kwargs_for_lpac=None):
    '''
    Create and solve a new lpac model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    ac_solver : str or pyomo.opt.base.solvers.OptSolver (optional)
        Either a string specifying a pyomo solver name, or an instantiated pyomo
        solver. Default is None for the cold start lpac model.
    timelimit : float (optional)
        Time limit for lpac run. Default of None results in no time limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the (LPAC) solver. Default is dict().
    ac_options : dict (optional)
        Other options to pass into the ACOPF solver. Default is dict().
    lpac_model_generator : function (optional)
        Function for generating the lpac model. Default is the cold start lpac model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model.
    kwargs_for_lpac : dictionary (optional)
        Additional arguments for building lpac model (not used in ACOPF model)
    '''
    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    # FIX: the dict parameters previously defaulted to mutable `{}` literals,
    # which are shared across calls; use the None-sentinel idiom instead.
    if kwargs is None:
        kwargs = {}
    if kwargs_for_lpac is None:
        kwargs_for_lpac = {}

    if lpac_model_generator == create_hot_start_lpac_model or lpac_model_generator == create_warm_start_lpac_model:
        # hot/warm start: first solve an ACOPF to obtain bus voltage
        # magnitudes used to seed the LPAC approximation
        if ac_solver is not None:
            ac_md, ac_m, ac_results = solve_acopf(
                model_data, ac_solver, options=ac_options,
                acopf_model_generator=create_psv_acopf_model,
                return_model=True, return_results=True, **kwargs)
        else:
            ac_md, ac_m, ac_results = solve_acopf(
                model_data, solver, options=options,
                acopf_model_generator=create_psv_acopf_model,
                return_model=True, return_results=True, **kwargs)
        voltages = dict({})
        for bus in ac_md.elements(element_type="bus"):
            voltages[bus[0]] = bus[1]['vm']
        m, md = lpac_model_generator(model_data, voltages, **kwargs, **kwargs_for_lpac)
    else:
        m, md = lpac_model_generator(model_data, **kwargs, **kwargs_for_lpac)

    m, results, solver = _solve_model(m, solver, timelimit=timelimit, solver_tee=solver_tee, \
        symbolic_solver_labels = symbolic_solver_labels, solver_options=options, return_solver=True)

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])
        g_dict['qg'] = value(m.qg[g])

    # NOTE: LMP/voltage extraction is not implemented for the LPAC model;
    # only the bus load is recorded.
    for b, b_dict in buses.items():
        b_dict['pl'] = value(m.pl[b])

    for k, k_dict in branches.items():
        if hasattr(m, 'pf'):
            k_dict['pf'] = value(m.pf[k])
            k_dict['pt'] = value(m.pt[k])
            k_dict['qf'] = value(m.qf[k])
            k_dict['qt'] = value(m.qt[k])
        if hasattr(m, 'irf'):
            # rectangular-current formulation: derive branch p/q from the
            # current and voltage components at each end
            b = k_dict['from_bus']
            k_dict['pf'] = value(
                tx_calc.calculate_p(value(m.ifr[k]), value(m.ifj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            k_dict['qf'] = value(
                tx_calc.calculate_q(value(m.ifr[k]), value(m.ifj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            b = k_dict['to_bus']
            k_dict['pt'] = value(
                tx_calc.calculate_p(value(m.itr[k]), value(m.itj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            k_dict['qt'] = value(
                tx_calc.calculate_q(value(m.itr[k]), value(m.itj[k]),
                                    value(m.vr[b]), value(m.vj[b])))

    # convert solution back from per-unit to the ModelData's native units
    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
def solve_copperplate_dispatch(model_data, solver, timelimit=None,
                               solver_tee=True, symbolic_solver_labels=False,
                               options=None,
                               copperplate_dispatch_model_generator=create_copperplate_dispatch_approx_model,
                               return_model=False, return_results=False,
                               **kwargs):
    '''
    Create and solve a new copperplate dispatch model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    timelimit : float (optional)
        Time limit for the dispatch run. Default of None results in no time limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    copperplate_dispatch_model_generator : function (optional)
        Function for generating the copperplate dispatch model. Default is
        egret.models.copperplate_dispatch.create_copperplate_dispatch_approx_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''
    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    model, md = copperplate_dispatch_model_generator(model_data, **kwargs)

    # import duals so the system-wide price can be read off the balance constraint
    model.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    model, results = _solve_model(model, solver,
                                  timelimit=timelimit,
                                  solver_tee=solver_tee,
                                  symbolic_solver_labels=symbolic_solver_labels,
                                  solver_options=options)

    # NOTE(review): md is rebuilt from the caller's model_data here, discarding
    # the object returned by the generator — confirm this is intentional.
    md = model_data.clone_in_service()

    # save results data to ModelData object
    generators = dict(md.elements(element_type='generator'))
    bus_elements = dict(md.elements(element_type='bus'))

    md.data['system']['total_cost'] = value(model.obj)

    # net slack (shed load minus over-generation), only present on relaxed models
    if hasattr(model, 'p_load_shed'):
        violation = value(model.p_load_shed) - value(model.p_over_generation)
        md.data['system']['p_balance_violation'] = violation

    for gen_name, gen_data in generators.items():
        gen_data['pg'] = value(model.pg[gen_name])

    # copperplate: a single system balance constraint, so every bus shares one price
    system_price = value(model.dual[model.eq_p_balance])
    for bus_name, bus_data in bus_elements.items():
        bus_data['pl'] = value(model.pl[bus_name])
        bus_data['lmp'] = system_price

    # convert solution back from per-unit to the ModelData's native units
    unscale_ModelData_to_pu(md, inplace=True)

    extras = []
    if return_model:
        extras.append(model)
    if return_results:
        extras.append(results)
    if extras:
        return (md, *extras)
    return md
def solve_dcopf(model_data,
                solver,
                timelimit=None,
                solver_tee=True,
                symbolic_solver_labels=False,
                options=None,
                dcopf_model_generator=create_btheta_dcopf_model,
                return_model=False,
                return_results=False,
                **kwargs):
    '''
    Create and solve a new dcopf model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an
        instantiated pyomo solver
    timelimit : float (optional)
        Time limit for dcopf run. Default of None results in no time
        limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    dcopf_model_generator : function (optional)
        Function for generating the dcopf model. Default is
        egret.models.dcopf.create_btheta_dcopf_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''
    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    m, md = dcopf_model_generator(model_data, **kwargs)

    # request dual values (needed for the LMP results below)
    m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    # return_solver=True so the lazy-PTDF loop below can re-solve with
    # the same (possibly persistent) solver instance
    m, results, solver = _solve_model(m, solver, timelimit=timelimit, solver_tee=solver_tee,
                                      symbolic_solver_labels=symbolic_solver_labels,
                                      solver_options=options, return_solver=True)

    # for lazy PTDF models, iteratively add violated transmission
    # constraints and re-solve until transmission feasible (or limit hit)
    if dcopf_model_generator == create_ptdf_dcopf_model and m._ptdf_options['lazy']:
        iter_limit = m._ptdf_options['iteration_limit']
        _lazy_ptdf_dcopf_model_solve_loop(m, md, solver, solver_tee=solver_tee,
                                          symbolic_solver_labels=symbolic_solver_labels,
                                          iteration_limit=iter_limit)

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])

    ## calculate the power flows from our PTDF matrix for maximum precision
    ## calculate the LMPC (LMP congestion) using numpy
    if dcopf_model_generator == create_ptdf_dcopf_model:
        PTDF = m._PTDF
        PTDFM = PTDF.PTDFM
        branches_idx = PTDF.branches_keys

        # net withdrawals per bus, adjusted by the reference-bus shift
        NWV = np.array([pe.value(m.p_nw[b]) for b in PTDF.bus_iterator()])
        NWV += PTDF.phi_adjust_array

        PFV = PTDFM.dot(NWV)
        PFV += PTDF.phase_shift_array

        # duals of the (monitored) thermal-limit constraints
        PFD = np.zeros(len(branches_idx))
        for i, bn in enumerate(branches_idx):
            branches[bn]['pf'] = PFV[i]
            if bn in m.ineq_pf_branch_thermal_bounds:
                PFD[i] += value(m.dual[m.ineq_pf_branch_thermal_bounds[bn]])
        ## TODO: PFD is likely to be sparse, implying we just need a few
        ## rows of the PTDF matrix (or columns in its transpose).
        LMPC = -PTDFM.T.dot(PFD)
    else:
        for k, k_dict in branches.items():
            k_dict['pf'] = value(m.pf[k])

    if dcopf_model_generator == create_ptdf_dcopf_model:
        buses_idx = PTDF.buses_keys
        # energy component of the LMP is the system balance dual
        LMPE = value(m.dual[m.eq_p_balance])
        for i, b in enumerate(buses_idx):
            b_dict = buses[b]
            b_dict['lmp'] = LMPE + LMPC[i]
            b_dict['pl'] = value(m.pl[b])
    else:
        for b, b_dict in buses.items():
            b_dict['pl'] = value(m.pl[b])
            if dcopf_model_generator == create_btheta_dcopf_model:
                b_dict['lmp'] = value(m.dual[m.eq_p_balance[b]])
                b_dict['va'] = value(m.va[b])
            else:
                # fixed message typo: previously said "dcopf_mode_generator"
                raise Exception("Unrecognized dcopf_model_generator {}".format(dcopf_model_generator))

    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
def _lazy_ptdf_dcopf_model_solve_loop(m, md, solver, solver_tee=True, symbolic_solver_labels=False, iteration_limit=100000):
    '''
    The lazy PTDF DCOPF solver loop. This function iteratively
    adds violated transmission constraints until either the result is
    transmission feasible or we're tracking every violated constraint
    in the model

    Parameters
    ----------
    m : pyomo.environ.ConcreteModel
        An egret DCOPF model with no transmission constraints
    md : egret.data.ModelData
        An egret ModelData object
    solver : pyomo.opt.solver
        A pyomo solver object
    solver_tee : bool (optional)
        For displaying the solver log (default is True)
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels when writing to the solver (default is False)
    iteration_limit : int (optional)
        Number of iterations before a hard termination (default is 100000)

    Returns
    -------
    egret.common.lazy_ptdf_utils.LazyPTDFTerminationCondition : the termination status
        (NORMAL, FLOW_VIOLATION, or ITERATION_LIMIT)
    '''
    from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver
    from egret.common.solver_interface import _solve_model

    PTDF = m._PTDF
    ptdf_options = m._ptdf_options

    # persistent solvers keep the model loaded; duals/vars must be
    # explicitly pulled back with load_duals()/load_vars()
    persistent_solver = isinstance(solver, PersistentSolver)

    for i in range(iteration_limit):
        # scan all branch flows for limit violations; viol_lazy holds the
        # (capped) subset of violated constraints to add this iteration
        flows, viol_num, mon_viol_num, viol_lazy \
                = lpu.check_violations(m, md, PTDF, ptdf_options['max_violations_per_iteration'])

        iter_status_str = "iteration {0}, found {1} violation(s)".format(i,viol_num)
        if mon_viol_num:
            iter_status_str += ", {} of which are already monitored".format(mon_viol_num)

        logger.info(iter_status_str)

        if viol_num <= 0:
            ## in this case, there are no violations!
            ## load the duals now too, if we're using a persistent solver
            if persistent_solver:
                solver.load_duals()
            return lpu.LazyPTDFTerminationCondition.NORMAL

        elif viol_num == mon_viol_num:
            # every violated constraint is already in the model, so
            # re-solving cannot help; give up with a flow violation
            logger.warning('WARNING: Terminating with monitored violations! Result is not transmission feasible.')
            if persistent_solver:
                solver.load_duals()
            return lpu.LazyPTDFTerminationCondition.FLOW_VIOLATION

        # add the newly-found violated constraints and re-solve
        lpu.add_violations(viol_lazy, flows, m, md, solver, ptdf_options, PTDF)
        total_flow_constr_added = len(viol_lazy)
        logger.info( "iteration {0}, added {1} flow constraint(s)".format(i,total_flow_constr_added))

        if persistent_solver:
            # model already loaded in the solver: skip set_instance, defer
            # variable loading (vars_to_load=[]) and pull values after
            m, results, solver = _solve_model(m, solver, solver_tee=solver_tee, return_solver=True, vars_to_load=[], set_instance=False)
            solver.load_vars()
        else:
            m, results, solver = _solve_model(m, solver, solver_tee=solver_tee, return_solver=True)

    else: # we hit the iteration limit
        logger.warning('WARNING: Exiting on maximum iterations for lazy PTDF model. Result is not transmission feasible.')
        if persistent_solver:
            solver.load_duals()
        return lpu.LazyPTDFTerminationCondition.ITERATION_LIMIT