Code Example #1
def test_scale_unscale():
    md = ModelData.read(scuc_fn)

    ## do type conversions
    original_base_MVA = md.data['system']['baseMVA']
    md.data['system']['baseMVA'] = 1.

    scale_ModelData_to_pu(md, inplace=True)
    md.data['system']['baseMVA'] = original_base_MVA

    md_transformed = scale_ModelData_to_pu(md, inplace=False)

    # test inplace flag
    assert id(md.data) != id(md_transformed.data)

    unscale_ModelData_to_pu(md_transformed, inplace=True)

    assert md.data['system'] == md_transformed.data['system']
    for esn, esd in md.data['elements'].items():
        for en, ed in esd.items():
            assert ed == md_transformed.data['elements'][esn][en]

    for esn, esd in md_transformed.data['elements'].items():
        for en, ed in esd.items():
            assert ed == md.data['elements'][esn][en]
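
A minimal round-trip sketch built from the same calls: it assumes a hypothetical input file 'my_case.json' and checks, as the test above does, that scaling followed by unscaling reproduces the original system data.

from egret.data.model_data import ModelData
from egret.model_library.transmission.tx_utils import (
    scale_ModelData_to_pu, unscale_ModelData_to_pu)

md = ModelData.read('my_case.json')                      # hypothetical case file
md_pu = scale_ModelData_to_pu(md, inplace=False)         # scaled copy; md is untouched
md_back = unscale_ModelData_to_pu(md_pu, inplace=False)

# the round trip should leave the system data unchanged
assert md.data['system'] == md_back.data['system']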
Code Example #2
def reset_unit_commitment_penalties(m):
    scale_ModelData_to_pu(m.model_data, inplace=True)
    _reconstruct_pyomo_component(m.LoadMismatchPenalty)
    for param in m.component_objects(Param):
        if param.mutable and isinstance(param._rule, (ScalarCallInitializer, IndexedCallInitializer)) \
                and (param._rule._fcn.__name__ == 'penalty_rule'):
            _reconstruct_pyomo_component(param)
    unscale_ModelData_to_pu(m.model_data, inplace=True)
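
The scale / mutate / unscale bracket used above can be captured in a small context manager; this is only a sketch of that pattern, not part of Egret itself.

from contextlib import contextmanager
from egret.model_library.transmission.tx_utils import (
    scale_ModelData_to_pu, unscale_ModelData_to_pu)

@contextmanager
def scaled_to_pu(model_data):
    # temporarily put the ModelData in per-unit, then restore engineering units
    scale_ModelData_to_pu(model_data, inplace=True)
    try:
        yield model_data
    finally:
        unscale_ModelData_to_pu(model_data, inplace=True)

# usage sketch: with scaled_to_pu(m.model_data): reconstruct penalty parameters, etc.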
Code Example #3
def solve_dcopf_losses(model_data,
                solver,
                timelimit = None,
                solver_tee = True,
                symbolic_solver_labels = False,
                options = None,
                dcopf_losses_model_generator = create_btheta_losses_dcopf_model,
                return_model = False,
                return_results = False,
                **kwargs):
    '''
    Create and solve a new dcopf with losses model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    timelimit : float (optional)
        Time limit for dcopf run. Default of None results in no time
        limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    dcopf_losses_model_generator : function (optional)
        Function for generating the dcopf with losses model. Default is
        create_btheta_losses_dcopf_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''

    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    m, md = dcopf_losses_model_generator(model_data, **kwargs)

    m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    m, results = _solve_model(m, solver, timelimit=timelimit, solver_tee=solver_tee,
                              symbolic_solver_labels=symbolic_solver_labels, solver_options=options)

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g,g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])

    if dcopf_losses_model_generator == create_btheta_losses_dcopf_model:
        for b,b_dict in buses.items():
            b_dict['pl'] = value(m.pl[b])
            b_dict.pop('qlmp',None)
            b_dict['lmp'] = value(m.dual[m.eq_p_balance[b]])
            b_dict['va'] = value(m.va[b])
    elif dcopf_losses_model_generator == create_ptdf_losses_dcopf_model:
        PTDF = m._PTDF
        ptdf_r = PTDF.PTDFM
        ldf = PTDF.LDF
        buses_idx = PTDF.buses_keys
        branches_idx = PTDF.branches_keys

        for j, b in enumerate(buses_idx):
            b_dict = buses[b]
            b_dict['pl'] = value(m.pl[b])
            b_dict.pop('qlmp',None)

            b_dict['lmp'] = value(m.dual[m.eq_p_balance])
            for i, k in enumerate(branches_idx):
                b_dict['lmp'] += ptdf_r[i,j]*value(m.dual[m.ineq_pf_branch_thermal_lb[k]])
                b_dict['lmp'] += ptdf_r[i,j]*value(m.dual[m.ineq_pf_branch_thermal_ub[k]])
                b_dict['lmp'] += ldf[i,j]*value(m.dual[m.eq_pfl_branch[k]])

    else:
        raise Exception("Unrecognized dcopf_losses_model_generator {}".format(dcopf_losses_model_generator))

    for k, k_dict in branches.items():
        k_dict['pf'] = value(m.pf[k])

    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
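
A usage sketch for solve_dcopf_losses. The case file and solver choice are placeholders, and the import path for the losses models is assumed from the upstream Egret layout.

from egret.data.model_data import ModelData
from egret.models.dcopf_losses import (                  # module path assumed
    solve_dcopf_losses, create_ptdf_losses_dcopf_model)

md_in = ModelData.read('my_grid.json')                   # hypothetical case file
md_out = solve_dcopf_losses(md_in, 'ipopt',              # any NLP-capable pyomo solver
                            dcopf_losses_model_generator=create_ptdf_losses_dcopf_model)

print(md_out.data['system']['total_cost'])
for bus, bus_dict in md_out.elements(element_type='bus'):
    print(bus, bus_dict['lmp'], bus_dict['pl'])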
Code Example #4
def solve_acopf(model_data,
                solver,
                timelimit=None,
                solver_tee=True,
                symbolic_solver_labels=False,
                options=None,
                acopf_model_generator=create_psv_acopf_model,
                return_model=False,
                return_results=False,
                **kwargs):
    '''
    Create and solve a new acopf model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    timelimit : float (optional)
        Time limit for acopf run. Default of None results in no time
        limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    acopf_model_generator : function (optional)
        Function for generating the acopf model. Default is
        egret.models.acopf.create_psv_acopf_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''

    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    m, md = acopf_model_generator(model_data, **kwargs)

    m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    m, results = _solve_model(m,
                              solver,
                              timelimit=timelimit,
                              solver_tee=solver_tee,
                              symbolic_solver_labels=symbolic_solver_labels,
                              solver_options=options)

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])
        g_dict['qg'] = value(m.qg[g])

    for b, b_dict in buses.items():
        b_dict['lmp'] = value(m.dual[m.eq_p_balance[b]])
        b_dict['qlmp'] = value(m.dual[m.eq_q_balance[b]])
        b_dict['pl'] = value(m.pl[b])
        if hasattr(m, 'vj'):
            b_dict['vm'] = tx_calc.calculate_vm_from_vj_vr(
                value(m.vj[b]), value(m.vr[b]))
            b_dict['va'] = tx_calc.calculate_va_from_vj_vr(
                value(m.vj[b]), value(m.vr[b]))
        else:
            b_dict['vm'] = value(m.vm[b])
            b_dict['va'] = value(m.va[b])

    for k, k_dict in branches.items():
        if hasattr(m, 'pf'):
            k_dict['pf'] = value(m.pf[k])
            k_dict['pt'] = value(m.pt[k])
            k_dict['qf'] = value(m.qf[k])
            k_dict['qt'] = value(m.qt[k])
        if hasattr(m, 'irf'):
            b = k_dict['from_bus']
            k_dict['pf'] = value(
                tx_calc.calculate_p(value(m.ifr[k]), value(m.ifj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            k_dict['qf'] = value(
                tx_calc.calculate_q(value(m.ifr[k]), value(m.ifj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            b = k_dict['to_bus']
            k_dict['pt'] = value(
                tx_calc.calculate_p(value(m.itr[k]), value(m.itj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            k_dict['qt'] = value(
                tx_calc.calculate_q(value(m.itr[k]), value(m.itj[k]),
                                    value(m.vr[b]), value(m.vj[b])))

    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
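
A usage sketch for solve_acopf with both the model and results returned; the file name and solver choice are placeholders.

from egret.data.model_data import ModelData
from egret.models.acopf import solve_acopf               # module referenced in the docstring above

md_in = ModelData.read('my_grid.json')                   # hypothetical case file
md_out, m, results = solve_acopf(md_in, 'ipopt',
                                 return_model=True, return_results=True)

# voltage solution written back per bus, as in the loop above
for bus, bus_dict in md_out.elements(element_type='bus'):
    print(bus, bus_dict['vm'], bus_dict['va'])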
Code Example #5
File: scopf.py  Project: DLWoodruff/Egret
def solve_scopf(model_data,
                solver,
                timelimit=None,
                solver_tee=True,
                symbolic_solver_labels=False,
                options=None,
                scopf_model_generator=create_scopf_model,
                return_model=False,
                return_results=False,
                **kwargs):
    '''
    Create and solve a new scopf model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    timelimit : float (optional)
        Time limit for scopf run. Default of None results in no time
        limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    scopf_model_generator : function (optional)
        Function for generating the scopf model. Default is
        create_scopf_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''

    import pyomo.environ as pe
    import pyomo.opt as po
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    m, md = scopf_model_generator(model_data, **kwargs)

    m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    m, results, solver = _solve_model(
        m,
        solver,
        timelimit=timelimit,
        solver_tee=solver_tee,
        symbolic_solver_labels=symbolic_solver_labels,
        solver_options=options,
        return_solver=True)

    if m._ptdf_options['lazy']:
        iter_limit = m._ptdf_options['iteration_limit']
        term_cond = _lazy_ptdf_dcopf_model_solve_loop(
            m,
            md,
            solver,
            solver_tee=solver_tee,
            symbolic_solver_labels=symbolic_solver_labels,
            iteration_limit=iter_limit)

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    dc_branches = dict(md.elements(element_type='dc_branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])

    ## calculate the power flows from our PTDF matrix for maximum precision
    ## calculate the LMPC (LMP congestion) using numpy
    PTDF = m._PTDF

    PFV, _, VA = PTDF.calculate_PFV(m)

    branches_idx = PTDF.branches_keys
    for i, bn in enumerate(branches_idx):
        branches[bn]['pf'] = PFV[i]

    if hasattr(m, 'p_load_shed'):
        md.data['system']['p_balance_violation'] = value(
            m.p_load_shed) - value(m.p_over_generation)
    buses_idx = PTDF.buses_keys
    LMP = PTDF.calculate_LMP(m, m.dual, m.eq_p_balance)
    for i, b in enumerate(buses_idx):
        b_dict = buses[b]
        b_dict['lmp'] = LMP[i]
        b_dict['pl'] = value(m.pl[b])
        b_dict['va'] = degrees(VA[i])

    for k, k_dict in dc_branches.items():
        k_dict['pf'] = value(m.dcpf[k])

    contingencies = dict(md.elements(element_type='contingency'))
    contingency_flows = PTDF.calculate_monitored_contingency_flows(m)
    for (cn, bn), flow in contingency_flows.items():
        c_dict = contingencies[cn]
        if 'monitored_branches' not in c_dict:
            c_dict['monitored_branches'] = {}
        c_dict['monitored_branches'][bn] = {'pf': flow}

    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
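
A sketch of reading the security-constrained results written above; the module path mirrors the scopf.py file named in the header, and the case file and solver are placeholders.

from egret.data.model_data import ModelData
from egret.models.scopf import solve_scopf               # module path assumed from scopf.py

md_out = solve_scopf(ModelData.read('my_grid.json'),     # hypothetical case file
                     'gurobi')                           # any LP/MILP solver pyomo can reach

# monitored contingency flows are nested under each contingency record
for cname, c_dict in md_out.elements(element_type='contingency'):
    for branch, rec in c_dict.get('monitored_branches', {}).items():
        print(cname, branch, rec['pf'])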
Code Example #6
def solve_unit_commitment(
        model_data,
        solver,
        mipgap=0.001,
        timelimit=None,
        solver_tee=True,
        symbolic_solver_labels=False,
        options=None,
        uc_model_generator=create_tight_unit_commitment_model,
        relaxed=False,
        return_model=False):
    '''
    Create and solve a new unit commitment model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
        # TODO: describe the required and optional attributes
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    mipgap : float (optional)
        Mipgap to use for unit commitment solve; default is 0.001
    timelimit : float (optional)
        Time limit for unit commitment run. Default of None results in no time
        limit being set -- runs until mipgap is satisfied
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    uc_model_generator : function (optional)
        Function for generating the unit commitment model. Default is 
        egret.models.unit_commitment.create_tight_unit_commitment_model
    relaxed : bool (optional)
        If True, creates a relaxed unit commitment model
    return_model : bool (optional)
        If True, returns the pyomo model object
    '''

    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model

    m = uc_model_generator(model_data, relaxed=relaxed)

    if relaxed:
        m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    m, results = _solve_model(m, solver, mipgap, timelimit, solver_tee,
                              symbolic_solver_labels, options)

    md = m.model_data

    # save results data to ModelData object
    thermal_gens = dict(
        md.elements(element_type='generator', generator_type='thermal'))
    renewable_gens = dict(
        md.elements(element_type='generator', generator_type='renewable'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))
    storage = dict(md.elements(element_type='storage'))
    zones = dict(md.elements(element_type='zone'))
    areas = dict(md.elements(element_type='area'))

    data_time_periods = md.data['system']['time_indices']
    reserve_requirement = ('reserve_requirement' in md.data['system'])

    regulation = False
    spin = False
    nspin = False
    supp = False
    flex = False
    if hasattr(m, 'regulation_service'):
        regulation = True
    if hasattr(m, 'spinning_reserve'):
        spin = True
    if hasattr(m, 'non_spinning_reserve'):
        nspin = True
    if hasattr(m, 'supplemental_reserve'):
        supp = True
    if hasattr(m, 'flexible_ramping'):
        flex = True

    fs = False
    if hasattr(m, 'fuel_supply'):
        fs = True

    for g, g_dict in thermal_gens.items():
        pg_dict = {}
        if reserve_requirement:
            rg_dict = {}
        commitment_dict = {}
        commitment_cost_dict = {}
        production_cost_dict = {}
        ramp_up_avail_dict = {}

        ## all of the potential constraints that could limit maximum output.
        ## Not all unit commitment models have these constraints, so first
        ## we need to check whether they're on the model object
        ramp_up_avail_potential_constrs = [
            'EnforceMaxAvailableRampUpRates',
            'AncillaryServiceRampUpLimit',
            'power_limit_from_start',
            'power_limit_from_stop',
            'power_limit_from_start_stop',
            'power_limit_from_start_stops',
            'EnforceMaxAvailableRampDownRates',
            'EnforceMaxCapacity',
        ]
        ramp_up_avail_constrs = []
        for constr in ramp_up_avail_potential_constrs:
            if hasattr(m, constr):
                ramp_up_avail_constrs.append(getattr(m, constr))

        if regulation:
            reg_prov = {}
            reg_up_supp = {}
            reg_dn_supp = {}
        if spin:
            spin_supp = {}
        if nspin:
            nspin_supp = {}
        if supp:
            supp_supp = {}
        if flex:
            flex_up_supp = {}
            flex_dn_supp = {}
        gfs = (fs and (g in m.FuelSupplyGenerators))
        if gfs:
            fuel_consumed = {}

        for dt, mt in zip(data_time_periods, m.TimePeriods):
            pg_dict[dt] = value(m.PowerGenerated[g, mt])
            if reserve_requirement:
                rg_dict[dt] = value(m.ReserveProvided[g, mt])
            commitment_dict[dt] = value(m.UnitOn[g, mt])
            commitment_cost_dict[dt] = value(
                m.StartupCost[g, mt] + m.ShutdownCost[g, mt]
                + m.MinimumProductionCost[g] * m.UnitOn[g, mt] * m.TimePeriodLengthHours)
            production_cost_dict[dt] = value(m.ProductionCost[g, mt])

            if regulation:
                if g in m.AGC_Generators:
                    reg_prov[dt] = value(m.RegulationOn[g, mt])
                    reg_up_supp[dt] = value(m.RegulationReserveUp[g, mt])
                    reg_dn_supp[dt] = value(m.RegulationReserveDn[g, mt])
                    commitment_cost_dict[dt] += value(
                        m.RegulationCostCommitment[g, mt])
                    production_cost_dict[dt] += value(
                        m.RegulationCostGeneration[g, mt])
                else:
                    reg_prov[dt] = 0.
                    reg_up_supp[dt] = 0.
                    reg_dn_supp[dt] = 0.

            if spin:
                spin_supp[dt] = value(m.SpinningReserveDispatched[g, mt])
                production_cost_dict[dt] += value(
                    m.SpinningReserveCostGeneration[g, mt])

            if nspin:
                if g in m.NonSpinGenerators:
                    nspin_supp[dt] = value(m.NonSpinningReserveDispatched[g,
                                                                          mt])
                    production_cost_dict[dt] += value(
                        m.NonSpinningReserveCostGeneration[g, mt])
                else:
                    nspin_supp[dt] = 0.
            if supp:
                supp_supp[dt] = value(m.SupplementalReserveDispatched[g, mt])
                production_cost_dict[dt] += value(
                    m.SupplementalReserveCostGeneration[g, mt])
            if flex:
                flex_up_supp[dt] = value(m.FlexUpProvided[g, mt])
                flex_dn_supp[dt] = value(m.FlexDnProvided[g, mt])
            if gfs:
                fuel_consumed[dt] = value(m.FuelConsumed[g, mt])

            ## pyomo doesn't add skipped constraints to the index set, so we also
            ## need to check here whether the index exists.
            slack_list = []
            for constr in ramp_up_avail_constrs:
                if (g, mt) in constr:
                    slack_list.append(constr[g, mt].slack())

            ramp_up_avail_dict[dt] = min(slack_list)

        g_dict['pg'] = _time_series_dict(pg_dict)
        if reserve_requirement:
            g_dict['rg'] = _time_series_dict(rg_dict)
        g_dict['commitment'] = _time_series_dict(commitment_dict)
        g_dict['commitment_cost'] = _time_series_dict(commitment_cost_dict)
        g_dict['production_cost'] = _time_series_dict(production_cost_dict)
        if regulation:
            g_dict['reg_provider'] = _time_series_dict(reg_prov)
            g_dict['reg_up_supplied'] = _time_series_dict(reg_up_supp)
            g_dict['reg_down_supplied'] = _time_series_dict(reg_dn_supp)
        if spin:
            g_dict['spinning_supplied'] = _time_series_dict(spin_supp)
        if nspin:
            g_dict['non_spinning_supplied'] = _time_series_dict(nspin_supp)
        if supp:
            g_dict['supplemental_supplied'] = _time_series_dict(supp_supp)
        if flex:
            g_dict['flex_up_supplied'] = _time_series_dict(flex_up_supp)
            g_dict['flex_down_supplied'] = _time_series_dict(flex_dn_supp)
        if gfs:
            g_dict['fuel_consumed'] = _time_series_dict(fuel_consumed)
        g_dict['headroom'] = _time_series_dict(ramp_up_avail_dict)

    for g, g_dict in renewable_gens.items():
        pg_dict = {}
        for dt, mt in zip(data_time_periods, m.TimePeriods):
            pg_dict[dt] = value(m.NondispatchablePowerUsed[g, mt])
        g_dict['pg'] = _time_series_dict(pg_dict)

    for s, s_dict in storage.items():
        state_of_charge_dict = {}
        p_discharge_dict = {}
        p_charge_dict = {}
        operational_cost_dict = {}
        for dt, mt in zip(data_time_periods, m.TimePeriods):
            p_discharge_dict[dt] = value(m.PowerOutputStorage[s, mt])
            p_charge_dict[dt] = value(m.PowerInputStorage[s, mt])
            operational_cost_dict[dt] = value(m.StorageCost[s, mt])
            state_of_charge_dict[dt] = value(m.SocStorage[s, mt])

        s_dict['p_discharge'] = _time_series_dict(p_discharge_dict)
        s_dict['p_charge'] = _time_series_dict(p_charge_dict)
        s_dict['operational_cost'] = _time_series_dict(operational_cost_dict)
        s_dict['state_of_charge'] = _time_series_dict(state_of_charge_dict)

    ## NOTE: UC model currently has no notion of separate loads

    if m.power_balance == 'btheta_power_flow':
        for l, l_dict in branches.items():
            pf_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                pf_dict[dt] = value(m.TransmissionBlock[mt].pf[l])
            l_dict['pf'] = _time_series_dict(pf_dict)

        for b, b_dict in buses.items():
            va_dict = {}
            p_balance_violation_dict = {}
            pl_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                va_dict[dt] = value(m.TransmissionBlock[mt].va[b])
                p_balance_violation_dict[dt] = value(
                    m.LoadGenerateMismatch[b, mt])
                pl_dict[dt] = value(m.TransmissionBlock[mt].pl[b])
            b_dict['va'] = _time_series_dict(va_dict)
            b_dict['p_balance_violation'] = _time_series_dict(
                p_balance_violation_dict)
            b_dict['pl'] = _time_series_dict(pl_dict)
            if relaxed:
                lmp_dict = {}
                for dt, mt in zip(data_time_periods, m.TimePeriods):
                    lmp_dict[dt] = value(
                        m.dual[m.TransmissionBlock[mt].eq_p_balance[b]])
                b_dict['lmp'] = _time_series_dict(lmp_dict)

    elif m.power_balance == 'power_balance_constraints':
        for l, l_dict in branches.items():
            pf_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                pf_dict[dt] = value(m.LinePower[l, mt])
            l_dict['pf'] = _time_series_dict(pf_dict)

        for b, b_dict in buses.items():
            va_dict = {}
            p_balance_violation_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                va_dict[dt] = value(m.Angle[b, mt])
                p_balance_violation_dict[dt] = value(
                    m.LoadGenerateMismatch[b, mt])
            b_dict['va'] = _time_series_dict(va_dict)
            b_dict['p_balance_violation'] = _time_series_dict(
                p_balance_violation_dict)
            if relaxed:
                lmp_dict = {}
                for dt, mt in zip(data_time_periods, m.TimePeriods):
                    lmp_dict[dt] = value(m.dual[m.PowerBalance[b, mt]])
                b_dict['lmp'] = _time_series_dict(lmp_dict)
    else:
        raise Exception("Unrecongized network type " + m.power_balance)

    if reserve_requirement:
        ## populate the system attributes
        sys_dict = md.data['system']
        sr_s_dict = {}
        for dt, mt in zip(data_time_periods, m.TimePeriods):
            sr_s_dict[dt] = value(m.ReserveShortfall[mt])
        sys_dict['reserve_shortfall'] = _time_series_dict(sr_s_dict)
        if relaxed:
            sr_p_dict = {}
            for dt, mt in zip(data_time_periods, m.TimePeriods):
                ## TODO: if the 'relaxed' flag is set, we should automatically
                ##       pick a formulation which uses the MLR reserve constraints
                sr_p_dict[dt] = value(m.dual[m.EnforceReserveRequirements[mt]])
            sys_dict['reserve_price'] = _time_series_dict(sr_p_dict)

    ## TODO: Can the code above this be re-factored in a similar way?
    ## as we add more zonal reserve products, they can be added here
    _zonal_reserve_map = dict()
    _system_reserve_map = dict()
    if spin:
        _zonal_reserve_map['spinning_reserve_requirement'] = {
            'shortfall': 'spinning_reserve_shortfall',
            'price': 'spinning_reserve_price',
            'shortfall_m': m.ZonalSpinningReserveShortfall,
            'balance_m': m.EnforceZonalSpinningReserveRequirement,
        }
        _system_reserve_map['spinning_reserve_requirement'] = {
            'shortfall': 'spinning_reserve_shortfall',
            'price': 'spinning_reserve_price',
            'shortfall_m': m.SystemSpinningReserveShortfall,
            'balance_m': m.EnforceSystemSpinningReserveRequirement,
        }
    if nspin:
        _zonal_reserve_map['non_spinning_reserve_requirement'] = {
            'shortfall': 'non_spinning_reserve_shortfall',
            'price': 'non_spinning_reserve_price',
            'shortfall_m': m.ZonalNonSpinningReserveShortfall,
            'balance_m': m.EnforceNonSpinningZonalReserveRequirement,
        }
        _system_reserve_map['non_spinning_reserve_requirement'] = {
            'shortfall': 'non_spinning_reserve_shortfall',
            'price': 'non_spinning_reserve_price',
            'shortfall_m': m.SystemNonSpinningReserveShortfall,
            'balance_m': m.EnforceSystemNonSpinningReserveRequirement,
        }
    if regulation:
        _zonal_reserve_map['regulation_up_requirement'] = {
            'shortfall': 'regulation_up_shortfall',
            'price': 'regulation_up_price',
            'shortfall_m': m.ZonalRegulationUpShortfall,
            'balance_m': m.EnforceZonalRegulationUpRequirements,
        }
        _system_reserve_map['regulation_up_requirement'] = {
            'shortfall': 'regulation_up_shortfall',
            'price': 'regulation_up_price',
            'shortfall_m': m.SystemRegulationUpShortfall,
            'balance_m': m.EnforceSystemRegulationUpRequirement,
        }
        _zonal_reserve_map['regulation_down_requirement'] = {
            'shortfall': 'regulation_down_shortfall',
            'price': 'regulation_down_price',
            'shortfall_m': m.ZonalRegulationDnShortfall,
            'balance_m': m.EnforceZonalRegulationDnRequirements,
        }
        _system_reserve_map['regulation_down_requirement'] = {
            'shortfall': 'regulation_down_shortfall',
            'price': 'regulation_down_price',
            'shortfall_m': m.SystemRegulationDnShortfall,
            'balance_m': m.EnforceSystemRegulationDnRequirement,
        }
    if flex:
        _zonal_reserve_map['flexible_ramp_up_requirement'] = {
            'shortfall': 'flexible_ramp_up_shortfall',
            'price': 'flexible_ramp_up_price',
            'shortfall_m': m.ZonalFlexUpShortfall,
            'balance_m': m.ZonalFlexUpRequirementConstr,
        }
        _system_reserve_map['flexible_ramp_up_requirement'] = {
            'shortfall': 'flexible_ramp_up_shortfall',
            'price': 'flexible_ramp_up_price',
            'shortfall_m': m.SystemFlexUpShortfall,
            'balance_m': m.SystemFlexUpRequirementConstr,
        }
        _zonal_reserve_map['flexible_ramp_down_requirement'] = {
            'shortfall': 'flexible_ramp_down_shortfall',
            'price': 'flexible_ramp_down_price',
            'shortfall_m': m.ZonalFlexDnShortfall,
            'balance_m': m.ZonalFlexDnRequirementConstr,
        }
        _system_reserve_map['flexible_ramp_down_requirement'] = {
            'shortfall': 'flexible_ramp_down_shortfall',
            'price': 'flexible_ramp_down_price',
            'shortfall_m': m.SystemFlexDnShortfall,
            'balance_m': m.SystemFlexDnRequirementConstr,
        }
    if supp:
        _zonal_reserve_map['supplemental_reserve_requirement'] = {
            'shortfall': 'supplemental_shortfall',
            'price': 'supplemental_price',
            'shortfall_m': m.ZonalSupplementalReserveShortfall,
            'balance_m': m.EnforceZonalSupplementalReserveRequirement,
        }

        _system_reserve_map['supplemental_reserve_requirement'] = {
            'shortfall': 'supplemental_shortfall',
            'price': 'supplemental_price',
            'shortfall_m': m.SystemSupplementalReserveShortfall,
            'balance_m': m.EnforceSystemSupplementalReserveRequirement,
        }

    def _populate_zonal_reserves(elements_dict, string_handle):
        for e, e_dict in elements_dict.items():
            me = string_handle + e
            for req, req_dict in _zonal_reserve_map.items():
                if req in e_dict:
                    req_shortfall_dict = {}
                    for dt, mt in zip(data_time_periods, m.TimePeriods):
                        req_shortfall_dict[dt] = value(
                            req_dict['shortfall_m'][me, mt])
                    e_dict[req_dict['shortfall']] = _time_series_dict(
                        req_shortfall_dict)
                    if relaxed:
                        req_price_dict = {}
                        for dt, mt in zip(data_time_periods, m.TimePeriods):
                            req_price_dict[dt] = value(
                                m.dual[req_dict['balance_m'][me, mt]])
                        e_dict[req_dict['price']] = _time_series_dict(
                            req_price_dict)

    def _populate_system_reserves(sys_dict):
        for req, req_dict in _system_reserve_map.items():
            if req in sys_dict:
                req_shortfall_dict = {}
                for dt, mt in zip(data_time_periods, m.TimePeriods):
                    req_shortfall_dict[dt] = value(req_dict['shortfall_m'][mt])
                sys_dict[req_dict['shortfall']] = _time_series_dict(
                    req_shortfall_dict)
                if relaxed:
                    req_price_dict = {}
                    for dt, mt in zip(data_time_periods, m.TimePeriods):
                        req_price_dict[dt] = value(
                            m.dual[req_dict['balance_m'][mt]])
                    sys_dict[req_dict['price']] = _time_series_dict(
                        req_price_dict)

    _populate_zonal_reserves(areas, 'area_')
    _populate_zonal_reserves(zones, 'zone_')

    _populate_system_reserves(md.data['system'])

    if fs:
        fuel_supplies = dict(md.elements(element_type='fuel_supply'))
        for f, f_dict in fuel_supplies.items():
            fuel_consumed = {}
            fuel_supply_type = f_dict['fuel_supply_type']
            if fuel_supply_type == 'instantaneous':
                for dt, mt in zip(data_time_periods, m.TimePeriods):
                    fuel_consumed[dt] = value(
                        m.TotalFuelConsumedAtInstFuelSupply[f, mt])
            else:
                print(
                    'WARNING: unrecognized fuel_supply_type {} for fuel_supply {}'
                    .format(fuel_supply_type, f))
            f_dict['fuel_consumed'] = _time_series_dict(fuel_consumed)

    md.data['system']['total_cost'] = value(m.TotalCostObjective)

    unscale_ModelData_to_pu(md, inplace=True)

    if return_model:
        return md, m
    return md
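
A usage sketch for solve_unit_commitment; relaxed=True populates the dual-based prices seen above. The case file and solver name are placeholders.

from egret.data.model_data import ModelData
from egret.models.unit_commitment import solve_unit_commitment

md_out = solve_unit_commitment(ModelData.read('my_scuc.json'),   # hypothetical case file
                               'cbc', mipgap=0.01, relaxed=True)

# per-generator dispatch is stored as a time-series dict with a 'values' entry
for gen, g_dict in md_out.elements(element_type='generator', generator_type='thermal'):
    print(gen, g_dict['pg']['values'])

# present only when a reserve requirement exists and the model is relaxed
print(md_out.data['system'].get('reserve_price'))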
Code Example #7
def solve_bilevel_physical_nk(model_data,
                              solver,
                              solver_tee=True,
                              return_model=False,
                              return_results=False,
                              **kwargs):
    '''
    Create and solve a new worst-case attacker-defender model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    solver_tee : bool (optional)
        Display solver log. Default is True.
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''

    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    md = model_data.clone_in_service()
    scale_ModelData_to_pu(md, inplace=True)

    ### pop from kwargs the number k for N-k contingency of relay IPs
    attack_budget_k = kwargs.pop('attack_budget_k', 1)

    ### create upper-level of the bilevel problem
    m, md = create_master(md, attack_budget_k)
    ### create lower-level of the bilevel problem
    m, md = create_explicit_subproblem(m, md, include_bigm=False)

    ### use PAO (Pyomo-extension) to do the following:
    ### 1. Transform the lower-level primal problem into its corresponding dual problem
    ### 2. Apply Pyomo.GDP transformations to handle bilinear terms (Big-M)
    ### 3. Solve formulation (upper-level primal with lower-level dual) as a single level MILP
    ### 4. Take optimal solution from MILP, fix upper-level variables that appear in the
    ### lower-level problem, and resolve to determine primal variable solution for the lower-level
    opt = pe.SolverFactory('pao.bilevel.ld', solver=solver)
    ## need to fine-tune bigM and mipgap -- make sure that both the solve and resolve result in the same
    ## best objective
    opt.options.setdefault('bigM', 100)
    opt.options.setdefault('mipgap', 0.001)
    results = opt.solve(m, tee=solver_tee)

    ### save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    md.data['system']['total_cost'] = value(m.obj)

    m = m.subproblem
    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])

    for k, k_dict in branches.items():
        k_dict['pf'] = value(m.pf[k])

    for b, b_dict in buses.items():
        b_dict['pl'] = value(m.pl[b])
        b_dict['va'] = value(m.va[b])

    unscale_ModelData_to_pu(md, inplace=True)

    ### return model_data (md), model (m), and/or results (results) objects
    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
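
A usage sketch for the attacker-defender solve; the module path here is a guess, and the case file, solver, and attack budget are placeholders.

from egret.data.model_data import ModelData
from egret.models.bilevel_nk import solve_bilevel_physical_nk    # hypothetical module path

md_out, results = solve_bilevel_physical_nk(
    ModelData.read('my_grid.json'),    # hypothetical case file
    'glpk',                            # MILP solver handed to the pao.bilevel.ld meta-solver
    attack_budget_k=2,                 # interdict at most two relays
    return_results=True)

print(md_out.data['system']['total_cost'])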
Code Example #8
def solve_stochastic_bilevel_nk(model_data,
                                solver,
                                solver_tee=True,
                                return_model=False,
                                return_results=False,
                                **kwargs):
    '''
    Create and solve a new worst-case attacker-defender model as a stochastic bilevel interdiction problem.

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    solver_tee : bool (optional)
        Display solver log. Default is True.
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''
    import random
    import math
    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    seed = 23
    random.seed(seed)  # repeatable

    md = model_data.clone_in_service()
    scale_ModelData_to_pu(md, inplace=True)

    ### pop from kwargs the number k for N-k contingency of relay IPs
    attack_budget_k = kwargs.pop('attack_budget_k', 1)
    omega = kwargs.pop('omega', None)

    if not omega:
        raise Exception(
            'User must specify a dictionary of scenario name <key>, probability <value> pairs.'
        )

    ### create upper-level of the bilevel problem
    m, md = create_master(md, omega, attack_budget_k)
    m.OmegaSet = pe.Set(initialize=omega.keys())
    m.Scenarios = pe.Block(m.OmegaSet)
    for p in m.OmegaSet:
        _md_uncertain = md.clone()
        per_l, per_u = omega[p]['percentage_bounds']
        loads = dict(_md_uncertain.elements(element_type='load'))
        for _, load_dict in loads.items():
            _variation_fraction = random.uniform(per_l, per_u)
            load_dict['p_load'] = _variation_fraction * load_dict['p_load']

        ### declare lower-level as a PAO (Pyomo-extension) submodel;
        ### be explicit in specifying upper-level variables that appear in this model
        subproblem = bi.SubModel(fixed=(m.u, m.v, m.w))
        ### create lower-level of the bilevel problem
        m.Scenarios[p].sub = subproblem

        m, _ = create_explicit_subproblem(m,
                                          subproblem,
                                          _md_uncertain,
                                          p,
                                          include_bigm=False)

    ### use PAO (Pyomo-extension) to do the following:
    ### 1. Transform the lower-level primal problem into its corresponding dual problem
    ### 2. Apply Pyomo.GDP transformations to handle bilinear terms (Big-M)
    ### 3. Solve formulation (upper-level primal with lower-level dual) as a single level MILP
    ### 4. Take optimal solution from MILP, fix upper-level variables that appear in the
    ### lower-level problem, and resolve to determine primal variable solution for the lower-level
    weights = dict()
    for p in m.OmegaSet:
        name = m.Scenarios[p].name + '.sub'
        weights[name] = omega[p]['probability']
    kwargs = {'subproblem_objective_weights': weights}
    opt = pe.SolverFactory('pao.bilevel.stochastic_ld', solver=solver)
    ## need to fine-tune bigM and mipgap -- make sure that both the solve and resolve result in the same
    ## best objective
    opt.options.setdefault('bigM', 100)
    opt.options.setdefault('mipgap', 0.001)
    results = opt.solve(m, **kwargs, tee=solver_tee)

    objective = md.data['system']['baseMVA'] * value(m.obj)

    print('~~~~~~~~~~ solution stats ~~~~~~~~~~~')
    print('objective: {} MW expected load shed'.format(objective))
    _relay_list = ''
    for name, val in m.delta.items():
        if val == 1:
            _relay_list += name + " "
    print(' relay(s) compromised: {}'.format(_relay_list))

    unscale_ModelData_to_pu(md, inplace=True)

    ### return model_data (md), model (m), and/or results (results) objects
    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
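
The stochastic variant needs the omega dictionary described above: one entry per scenario carrying a probability and the load-variation percentage_bounds. A sketch of building it and calling the solver; the module path, case file, and solver remain placeholders.

from egret.data.model_data import ModelData
from egret.models.bilevel_nk import solve_stochastic_bilevel_nk  # hypothetical module path

omega = {
    'low':  {'probability': 0.3, 'percentage_bounds': (0.90, 0.95)},
    'base': {'probability': 0.4, 'percentage_bounds': (0.98, 1.02)},
    'high': {'probability': 0.3, 'percentage_bounds': (1.05, 1.10)},
}

md_out = solve_stochastic_bilevel_nk(ModelData.read('my_grid.json'),   # hypothetical case file
                                     'glpk', omega=omega, attack_budget_k=1)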
Code Example #9
def solve_lpac(model_data,
               solver,
               ac_solver=None,
               timelimit=None,
               solver_tee=True,
               symbolic_solver_labels=False,
               options=None,
               ac_options=None,
               lpac_model_generator=create_cold_start_lpac_model,
               return_model=False,
               return_results=False,
               kwargs={},
               kwargs_for_lpac={}):
    '''
    Create and solve a new lpac model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    ac_solver : str or pyomo.opt.base.solvers.OptSolver (optional)
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver.
        Default is None for the cold start lpac model.
    timelimit : float (optional)
        Time limit for lpac run. Default of None results in no time
        limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the (LPAC) solver. Default is dict().
    ac_options : dict (optional)
        Other options to pass into the ACOPF solver. Default is dict().
    lpac_model_generator : function (optional)
        Function for generating the lpac model. Default is
        the cold start lpac model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model. 
    kwargs_for_lpac : dictionary (optional)
        Additional arguments for building lpac model (not used in ACOPF model)
    '''

    import pyomo.environ as pe
    import pyomo.opt as po
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
      scale_ModelData_to_pu, unscale_ModelData_to_pu

    if lpac_model_generator in (create_hot_start_lpac_model,
                                create_warm_start_lpac_model):
        if ac_solver is not None:
            ac_md, ac_m, ac_results = solve_acopf(
                model_data,
                ac_solver,
                options=ac_options,
                acopf_model_generator=create_psv_acopf_model,
                return_model=True,
                return_results=True,
                **kwargs)
        else:
            ac_md, ac_m, ac_results = solve_acopf(
                model_data,
                solver,
                options=options,
                acopf_model_generator=create_psv_acopf_model,
                return_model=True,
                return_results=True,
                **kwargs)
        voltages = dict({})
        for bus in ac_md.elements(element_type="bus"):
            voltages[bus[0]] = bus[1]['vm']
        #print(voltages)
        m, md = lpac_model_generator(model_data, voltages, **kwargs,
                                     **kwargs_for_lpac)
    else:
        m, md = lpac_model_generator(model_data, **kwargs, **kwargs_for_lpac)


    m, results, solver = _solve_model(m, solver, timelimit=timelimit,
                                      solver_tee=solver_tee,
                                      symbolic_solver_labels=symbolic_solver_labels,
                                      solver_options=options, return_solver=True)

    # save results data to ModelData object

    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])
        g_dict['qg'] = value(m.qg[g])

    for b, b_dict in buses.items():
        #b_dict['lmp'] = value(m.dual[m.eq_p_balance[b]])
        #b_dict['qlmp'] = value(m.dual[m.eq_q_balance[b]])
        b_dict['pl'] = value(m.pl[b])
        #if hasattr(m, 'vj'):
        #b_dict['vm'] = tx_calc.calculate_vm_from_vj_vr(value(m.vj[b]), value(m.vr[b]))
        #b_dict['va'] = tx_calc.calculate_va_from_vj_vr(value(m.vj[b]), value(m.vr[b]))
        #else:
        #b_dict['vm'] = value(m.vm[b])
        #b_dict['va'] = value(m.va[b])

    for k, k_dict in branches.items():
        if hasattr(m, 'pf'):
            k_dict['pf'] = value(m.pf[k])
            k_dict['pt'] = value(m.pt[k])
            k_dict['qf'] = value(m.qf[k])
            k_dict['qt'] = value(m.qt[k])
        if hasattr(m, 'irf'):
            b = k_dict['from_bus']
            k_dict['pf'] = value(
                tx_calc.calculate_p(value(m.ifr[k]), value(m.ifj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            k_dict['qf'] = value(
                tx_calc.calculate_q(value(m.ifr[k]), value(m.ifj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            b = k_dict['to_bus']
            k_dict['pt'] = value(
                tx_calc.calculate_p(value(m.itr[k]), value(m.itj[k]),
                                    value(m.vr[b]), value(m.vj[b])))
            k_dict['qt'] = value(
                tx_calc.calculate_q(value(m.itr[k]), value(m.itj[k]),
                                    value(m.vr[b]), value(m.vj[b])))

    unscale_ModelData_to_pu(md, inplace=True)

    #print(buses)
    #print(gens)
    # print(branches)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
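
A usage sketch for the warm-start LPAC path, which first solves an ACOPF (with ac_solver) to seed bus voltages. The module path for the lpac models is assumed, and the file and solver names are placeholders.

from egret.data.model_data import ModelData
from egret.models.lpac import solve_lpac, create_warm_start_lpac_model   # module path assumed

md_out = solve_lpac(ModelData.read('my_grid.json'),      # hypothetical case file
                    'gurobi',                            # LP/MILP solver for the LPAC model
                    ac_solver='ipopt',                   # NLP solver for the seeding ACOPF
                    lpac_model_generator=create_warm_start_lpac_model)

print(md_out.data['system']['total_cost'])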
Code Example #10
def solve_copperplate_dispatch(
        model_data,
        solver,
        timelimit=None,
        solver_tee=True,
        symbolic_solver_labels=False,
        options=None,
        copperplate_dispatch_model_generator=create_copperplate_dispatch_approx_model,
        return_model=False,
        return_results=False,
        **kwargs):
    '''
    Create and solve a new copperplate dispatch model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    timelimit : float (optional)
        Time limit for copperplate dispatch run. Default of None results in no time
        limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    copperplate_dispatch_model_generator : function (optional)
        Function for generating the copperplate dispatch model. Default is
        egret.models.copperplate_dispatch.create_copperplate_dispatch_approx_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''

    import pyomo.environ as pe
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    m, md = copperplate_dispatch_model_generator(model_data, **kwargs)

    m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    m, results = _solve_model(m,
                              solver,
                              timelimit=timelimit,
                              solver_tee=solver_tee,
                              symbolic_solver_labels=symbolic_solver_labels,
                              solver_options=options)

    md = model_data.clone_in_service()

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))

    md.data['system']['total_cost'] = value(m.obj)
    if hasattr(m, 'p_load_shed'):
        md.data['system']['p_balance_violation'] = value(
            m.p_load_shed) - value(m.p_over_generation)

    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])

    for b, b_dict in buses.items():
        b_dict['pl'] = value(m.pl[b])
        b_dict['lmp'] = value(m.dual[m.eq_p_balance])

    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
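
A usage sketch for the copperplate dispatch; since there is no network model, every bus carries the same system-wide LMP as written above. The file and solver names are placeholders.

from egret.data.model_data import ModelData
from egret.models.copperplate_dispatch import solve_copperplate_dispatch

md_out = solve_copperplate_dispatch(ModelData.read('my_grid.json'),   # hypothetical case file
                                    'cbc')

for bus, bus_dict in md_out.elements(element_type='bus'):
    print(bus, bus_dict['lmp'], bus_dict['pl'])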
Code Example #11
File: dcopf.py  Project: wlinz3/Egret
def solve_dcopf(model_data,
                solver,
                timelimit=None,
                solver_tee=True,
                symbolic_solver_labels=False,
                options=None,
                dcopf_model_generator=create_btheta_dcopf_model,
                return_model=False,
                return_results=False,
                **kwargs):
    '''
    Create and solve a new dcopf model

    Parameters
    ----------
    model_data : egret.data.ModelData
        An egret ModelData object with the appropriate data loaded.
    solver : str or pyomo.opt.base.solvers.OptSolver
        Either a string specifying a pyomo solver name, or an instantiated pyomo solver
    timelimit : float (optional)
        Time limit for dcopf run. Default of None results in no time
        limit being set.
    solver_tee : bool (optional)
        Display solver log. Default is True.
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels. Useful for debugging; default is False.
    options : dict (optional)
        Other options to pass into the solver. Default is dict().
    dcopf_model_generator : function (optional)
        Function for generating the dcopf model. Default is
        egret.models.dcopf.create_btheta_dcopf_model
    return_model : bool (optional)
        If True, returns the pyomo model object
    return_results : bool (optional)
        If True, returns the pyomo results object
    kwargs : dictionary (optional)
        Additional arguments for building model
    '''

    import pyomo.environ as pe
    import pyomo.opt as po
    from pyomo.environ import value
    from egret.common.solver_interface import _solve_model
    from egret.model_library.transmission.tx_utils import \
        scale_ModelData_to_pu, unscale_ModelData_to_pu

    m, md = dcopf_model_generator(model_data, **kwargs)

    m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)

    m, results, solver = _solve_model(
        m,
        solver,
        timelimit=timelimit,
        solver_tee=solver_tee,
        symbolic_solver_labels=symbolic_solver_labels,
        solver_options=options,
        return_solver=True)

    if dcopf_model_generator == create_ptdf_dcopf_model and m._ptdf_options[
            'lazy']:
        iter_limit = m._ptdf_options['iteration_limit']
        term_cond = _lazy_ptdf_dcopf_model_solve_loop(
            m,
            md,
            solver,
            solver_tee=solver_tee,
            symbolic_solver_labels=symbolic_solver_labels,
            iteration_limit=iter_limit)

    # save results data to ModelData object
    gens = dict(md.elements(element_type='generator'))
    buses = dict(md.elements(element_type='bus'))
    branches = dict(md.elements(element_type='branch'))

    md.data['system']['total_cost'] = value(m.obj)

    for g, g_dict in gens.items():
        g_dict['pg'] = value(m.pg[g])

    ## calculate the power flows from our PTDF matrix for maximum precision
    ## calculate the LMPC (LMP congestion) using numpy
    if dcopf_model_generator == create_ptdf_dcopf_model:
        PTDF = m._PTDF
        PTDFM = PTDF.PTDFM
        branches_idx = PTDF.branches_keys

        NWV = np.array([pe.value(m.p_nw[b]) for b in PTDF.bus_iterator()])
        NWV += PTDF.phi_adjust_array

        PFV = PTDFM.dot(NWV)
        PFV += PTDF.phase_shift_array

        PFD = np.zeros(len(branches_idx))
        for i, bn in enumerate(branches_idx):
            branches[bn]['pf'] = PFV[i]
            if bn in m.ineq_pf_branch_thermal_bounds:
                PFD[i] += value(m.dual[m.ineq_pf_branch_thermal_bounds[bn]])
        ## TODO: PFD is likely to be sparse, implying we just need a few
        ##       rows of the PTDF matrix (or columns in its transpose).
        LMPC = -PTDFM.T.dot(PFD)
    else:
        for k, k_dict in branches.items():
            k_dict['pf'] = value(m.pf[k])

    if dcopf_model_generator == create_ptdf_dcopf_model:
        buses_idx = PTDF.buses_keys
        LMPE = value(m.dual[m.eq_p_balance])
        for i, b in enumerate(buses_idx):
            b_dict = buses[b]
            b_dict['lmp'] = LMPE + LMPC[i]
            b_dict['pl'] = value(m.pl[b])
    else:
        for b, b_dict in buses.items():
            b_dict['pl'] = value(m.pl[b])
            if dcopf_model_generator == create_btheta_dcopf_model:
                b_dict['lmp'] = value(m.dual[m.eq_p_balance[b]])
                b_dict['va'] = value(m.va[b])
            else:
                raise Exception("Unrecognized dcopf_mode_generator {}".format(
                    dcopf_model_generator))

    unscale_ModelData_to_pu(md, inplace=True)

    if return_model and return_results:
        return md, m, results
    elif return_model:
        return md, m
    elif return_results:
        return md, results
    return md
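
A usage sketch for the PTDF formulation of solve_dcopf, where each bus LMP is the energy price plus the PTDF-weighted congestion component computed above; the file and solver names are placeholders.

from egret.data.model_data import ModelData
from egret.models.dcopf import solve_dcopf, create_ptdf_dcopf_model

md_out, results = solve_dcopf(ModelData.read('my_grid.json'),   # hypothetical case file
                              'gurobi',
                              dcopf_model_generator=create_ptdf_dcopf_model,
                              return_results=True)

for bus, bus_dict in md_out.elements(element_type='bus'):
    print(bus, bus_dict['lmp'])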
Code Example #12
def test_scaling_spot_check():
    md = ModelData.read(scuc_fn)

    baseMVA = md.data['system']['baseMVA']

    md_scaled = scale_ModelData_to_pu(md, inplace=False)

    md_scaled_unscaled = unscale_ModelData_to_pu(md_scaled, inplace=False)

    ## commitment should be unchanged
    assert md.data['elements']['generator']['101_STEAM_3_t']['commitment']['values'][10] == \
        md_scaled.data['elements']['generator']['101_STEAM_3_t']['commitment']['values'][10] == \
        md_scaled_unscaled.data['elements']['generator']['101_STEAM_3_t']['commitment']['values'][10]

    ## as should production cost
    assert md.data['elements']['generator']['101_STEAM_3_t']['production_cost']['values'][10] == \
        md_scaled.data['elements']['generator']['101_STEAM_3_t']['production_cost']['values'][10] == \
        md_scaled_unscaled.data['elements']['generator']['101_STEAM_3_t']['production_cost']['values'][10]

    ## as should voltage angle
    assert md.data['elements']['bus']['Alber']['va']['values'][10] == \
        md_scaled.data['elements']['bus']['Alber']['va']['values'][10] == \
        md_scaled_unscaled.data['elements']['bus']['Alber']['va']['values'][10]

    ## pg should be scaled
    assert md.data['elements']['generator']['101_STEAM_3_t']['pg']['values'][10] == \
        md_scaled.data['elements']['generator']['101_STEAM_3_t']['pg']['values'][10]/baseMVA == \
        md_scaled_unscaled.data['elements']['generator']['101_STEAM_3_t']['pg']['values'][10]

    ## load should be scaled
    assert md.data['elements']['bus']['Alber']['pl']['values'][10] == \
        md_scaled.data['elements']['bus']['Alber']['pl']['values'][10]/baseMVA == \
        md_scaled_unscaled.data['elements']['bus']['Alber']['pl']['values'][10]

    ## load should be scaled
    assert md.data['elements']['load']['Alber']['p_load']['values'][10] == \
        md_scaled.data['elements']['load']['Alber']['p_load']['values'][10]/baseMVA == \
        md_scaled_unscaled.data['elements']['load']['Alber']['p_load']['values'][10]

    ## flows should be scaled
    assert md.data['elements']['branch']['A22']['pf']['values'][20] == \
        md_scaled.data['elements']['branch']['A22']['pf']['values'][20]/baseMVA == \
        md_scaled_unscaled.data['elements']['branch']['A22']['pf']['values'][20]

    ## contingency flows should also be scaled
    assert md.data['elements']['contingency']['A1']['monitored_branches']['values'][10]['A11']['pf'] == \
        md_scaled.data['elements']['contingency']['A1']['monitored_branches']['values'][10]['A11']['pf']/baseMVA == \
        md_scaled_unscaled.data['elements']['contingency']['A1']['monitored_branches']['values'][10]['A11']['pf']

    ## lmp should be inversely scaled
    assert md.data['elements']['bus']['Alber']['lmp']['values'][10] == \
        md_scaled.data['elements']['bus']['Alber']['lmp']['values'][10]*baseMVA == \
        md_scaled_unscaled.data['elements']['bus']['Alber']['lmp']['values'][10]

    ## reserve prices should be inversely scaled
    assert md.data['system']['reserve_price']['values'][18] == \
        md_scaled.data['system']['reserve_price']['values'][18]*baseMVA == \
        md_scaled_unscaled.data['system']['reserve_price']['values'][18]

    ## shortfall price should be inversely scaled
    assert md.data['system']['reserve_shortfall_cost'] == \
        md_scaled.data['system']['reserve_shortfall_cost']*baseMVA == \
        md_scaled_unscaled.data['system']['reserve_shortfall_cost']
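
A small helper, sketched here to complement the spot checks above, that walks the nested elements dictionary and reports which attributes scale_ModelData_to_pu actually changed; the case file name is a placeholder.

from egret.data.model_data import ModelData
from egret.model_library.transmission.tx_utils import scale_ModelData_to_pu

def changed_attributes(md, md_scaled):
    """Return (element_type, element_name, attribute) triples whose values differ."""
    diffs = []
    for etype, elements in md.data['elements'].items():
        for name, attrs in elements.items():
            scaled_attrs = md_scaled.data['elements'][etype][name]
            for key, val in attrs.items():
                if scaled_attrs.get(key) != val:
                    diffs.append((etype, name, key))
    return diffs

md = ModelData.read('my_scuc.json')                      # hypothetical case file
md_scaled = scale_ModelData_to_pu(md, inplace=False)
for etype, name, key in changed_attributes(md, md_scaled):
    print(etype, name, key)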