Example No. 1
    def _get_filtered_lines(self, ptdf_options):
        if ptdf_options['branch_kv_threshold'] is None:
            ## Nothing to do
            self.branch_mask = np.arange(len(self.branch_limits_array))
            self.branches_keys_masked = self.branches_keys
            self.branchname_to_index_masked_map = self._branchname_to_index_map
            self.B_dA_masked = self.B_dA
            self.phase_shift_flow_adjuster_array_masked = self.phase_shift_flow_adjuster_array
            self.branch_limits_array_masked = self.branch_limits_array
            self.contingency_limits_array_masked = self.contingency_limits_array
            return

        branches = self._branches
        buses = self._buses
        branch_mask = list()
        one = (ptdf_options['kv_threshold_type'] == 'one')
        kv_limit = ptdf_options['branch_kv_threshold']

        for i, bn in enumerate(self.branches_keys):
            branch = branches[bn]
            fb = buses[branch['from_bus']]

            fbt = True
            ## NOTE: by checking just the from_bus, this warning is printed at most once per branch
            if 'base_kv' not in fb:
                logger.warning(
                    "WARNING: did not find 'base_kv' for bus {}, considering it large for the purposes of filtering"
                    .format(branch['from_bus']))
            elif fb['base_kv'] < kv_limit:
                fbt = False

            if fbt and one:
                branch_mask.append(i)
                continue

            tb = buses[branch['to_bus']]
            tbt = False
            if ('base_kv' not in tb) or tb['base_kv'] >= kv_limit:
                tbt = True

            if fbt and tbt:
                branch_mask.append(i)
            elif one and tbt:
                branch_mask.append(i)

        self.branch_mask = np.array(branch_mask)
        self.branches_keys_masked = tuple(self.branches_keys[i]
                                          for i in self.branch_mask)
        self.branchname_to_index_masked_map = {
            bn: i
            for i, bn in enumerate(self.branches_keys_masked)
        }
        self.B_dA_masked = self.B_dA[branch_mask]
        self.phase_shift_flow_adjuster_array_masked = self.phase_shift_flow_adjuster_array[
            branch_mask]
        self.branch_limits_array_masked = self.branch_limits_array[branch_mask]
        self.contingency_limits_array_masked = self.contingency_limits_array[
            branch_mask]
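A minimal sketch of the same filter in isolation, under 'both' semantics and with made-up bus and branch data (not real Egret structures); as above, a bus with no 'base_kv' entry counts as large:

import numpy as np

# Hypothetical toy network: 'B' is the only low-voltage bus
buses = {'A': {'base_kv': 345.}, 'B': {'base_kv': 138.}, 'C': {'base_kv': 345.}}
branches = {'L1': {'from_bus': 'A', 'to_bus': 'B'},
            'L2': {'from_bus': 'A', 'to_bus': 'C'}}
kv_limit = 230.

def passes(bus):
    # a missing 'base_kv' is treated as large, as in the code above
    return ('base_kv' not in bus) or bus['base_kv'] >= kv_limit

# 'both' semantics: keep a branch only if both ends clear the threshold
branch_mask = np.array([i for i, br in enumerate(branches.values())
                        if passes(buses[br['from_bus']])
                        and passes(buses[br['to_bus']])])
print(branch_mask)  # [1] -- only 'L2' (345 kV at both ends) survives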
Example No. 2
def _iter_over_initial_set(branches, branches_in_service, PTDF):
    for bn in branches_in_service:
        branch = branches[bn]
        if 'lazy' in branch and not branch['lazy']:
            if bn in PTDF.branchname_to_index_masked_map:
                i = PTDF.branchname_to_index_masked_map[bn]
                yield i, bn
            else:
                logger.warning(
                    "Branch {0} has flag 'lazy' set to False but is excluded from monitored set based on kV limits"
                    .format(bn))
Example No. 3
def _get_scalar_reserve_data(base_dir: str, metadata_df: df,
                             model_dict: dict) -> ScalarReserveData:
    # Store scalar reserve values as stored in the input
    #
    # Scalar reserve values that apply to both simulation types are stored in the
    # passed in model dict. Scalar values that vary depending on model type are stored
    # in the returned ScalarReserveData.

    da_scalar_reserves, rt_scalar_reserves = _identify_allowed_scalar_reserve_types(
        metadata_df)
    shared_reserves = da_scalar_reserves.intersection(rt_scalar_reserves)

    # Collect constant scalar reserves
    da_scalars = []
    rt_scalars = []
    reserve_df = pd.read_csv(os.path.join(base_dir, 'reserves.csv'))
    system = model_dict['system']
    areas = model_dict['elements']['area']
    for idx, row in reserve_df.iterrows():
        res_name = row['Reserve Product']
        req = float(row['Requirement (MW)'])

        if res_name in reserve_name_map:
            target_dict = system
            area_name = None
        else:
            # reserve name must be <type>_R<area>.
            # split into type and area
            res_name, area_name = res_name.split("_R", 1)
            if res_name not in reserve_name_map:
                logger.warning(
                    f"Skipping reserve for unrecognized reserve type '{res_name}'"
                )
                continue
            if area_name not in areas:
                logger.warning(
                    f"Skipping reserve for unrecognized area '{area_name}'")
                continue
            target_dict = areas[area_name]

        if res_name in shared_reserves:
            # If it applies to both types, save it in the skeleton
            target_dict[reserve_name_map[res_name]] = req
        elif res_name in da_scalar_reserves:
            # If it applies to just day-ahead, save to DA cache
            da_scalars.append(ScalarReserveValue(res_name, area_name, req))
        elif res_name in rt_scalar_reserves:
            # If it applies to just real-time, save to RT cache
            rt_scalars.append(ScalarReserveValue(res_name, area_name, req))

    return ScalarReserveData(da_scalars, rt_scalars)
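The '<type>_R<area>' split in the loop above can be exercised on its own; the one-entry reserve_name_map here is invented for the sketch:

# Hypothetical map entry; the real reserve_name_map lives in the parser.
reserve_name_map = {'Spin_Up': 'spinning_reserve_requirement'}

def split_reserve_name(res_name):
    """Return (type, area); area is None for a system-wide reserve."""
    if res_name in reserve_name_map:
        return res_name, None
    # area-specific products are named '<type>_R<area>'
    rtype, area = res_name.split("_R", 1)
    return rtype, area

print(split_reserve_name('Spin_Up'))     # ('Spin_Up', None)
print(split_reserve_name('Spin_Up_R1'))  # ('Spin_Up', '1')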
Example No. 4
def _check_and_generate_flow_viol_warnings(mb, md, PTDF, PFV, PFV_I, prepend_str,
                                           lt_viol, gt_viol, min_viol_int,
                                           max_viol_int, time):

    ## get the lines we're monitoring
    idx_monitored = mb._idx_monitored
    interfaces_monitored = mb._interfaces_monitored

    gt_viol_in_mb = gt_viol.intersection(idx_monitored)
    lt_viol_in_mb = lt_viol.intersection(idx_monitored)

    max_viol_int_in_mb = max_viol_int.intersection(interfaces_monitored)
    min_viol_int_in_mb = min_viol_int.intersection(interfaces_monitored)

    ## print a warning for these lines
    ## check if the found violations are in the model and print warning
    baseMVA = md.data['system']['baseMVA']
    for i in lt_viol_in_mb:
        bn = PTDF.branches_keys_masked[i]
        thermal_limit = PTDF.branch_limits_array_masked[i]
        logger.warning(prepend_str + _generate_flow_viol_warning(
            mb.pf, 'branch', bn, PFV[i], -thermal_limit, baseMVA, time))

    for i in gt_viol_in_mb:
        bn = PTDF.branches_keys_masked[i]
        thermal_limit = PTDF.branch_limits_array_masked[i]
        logger.warning(prepend_str + _generate_flow_viol_warning(
            mb.pf, 'branch', bn, PFV[i], thermal_limit, baseMVA, time))

    ## break here if no interfaces
    if 'interface' not in md.data['elements']:
        return len(gt_viol_in_mb) + len(lt_viol_in_mb), 0
    ## print a warning for these interfaces if they don't have slack
    ## check if the found violations are in the model and print warning
    interfaces = md.data['elements']['interface']
    interface_hard_violations = 0
    interface_soft_violations = 0
    for i in min_viol_int_in_mb:
        i_n = PTDF.interface_keys[i]
        if 'violation_penalty' in interfaces[i_n] \
                and interfaces[i_n]['violation_penalty'] is not None:
            interface_soft_violations += 1
            continue
        limit = PTDF.interface_min_limits[i]
        logger.warning(prepend_str + _generate_flow_viol_warning(
            mb.pfi, 'interface', i_n, PFV_I[i], limit, baseMVA, time))
        interface_hard_violations += 1

    for i in max_viol_int_in_mb:
        i_n = PTDF.interface_keys[i]
        if 'violation_penalty' in interfaces[i_n] \
                and interfaces[i_n]['violation_penalty'] is not None:
            interface_soft_violations += 1
            continue
        limit = PTDF.interface_max_limits[i]
        logger.warning(prepend_str + _generate_flow_viol_warning(
            mb.pfi, 'interface', i_n, PFV_I[i], limit, baseMVA, time))
        interface_hard_violations += 1

    return len(gt_viol_in_mb) + len(
        lt_viol_in_mb) + interface_hard_violations, interface_soft_violations
Example No. 5
def check_and_scale_ptdf_options(ptdf_options, baseMVA):
    ## scale to base MVA
    ptdf_options['abs_ptdf_tol'] /= baseMVA
    ptdf_options['abs_flow_tol'] /= baseMVA

    rel_flow_tol = ptdf_options['rel_flow_tol']
    abs_flow_tol = ptdf_options['abs_flow_tol']

    rel_ptdf_tol = ptdf_options['rel_ptdf_tol']
    abs_ptdf_tol = ptdf_options['abs_ptdf_tol']

    max_violations_per_iteration = ptdf_options['max_violations_per_iteration']

    if max_violations_per_iteration < 1 or (not isinstance(
            max_violations_per_iteration, int)):
        raise Exception(
            "max_violations_per_iteration must be an integer least 1, max_violations_per_iteration={}"
            .format(max_violations_per_iteration))

    if abs_flow_tol < 1e-6:
        logger.warning(
            "WARNING: abs_flow_tol={0}, which is below the numeric threshold of most solvers."
            .format(abs_flow_tol * baseMVA))
    if abs_flow_tol < rel_ptdf_tol * 10:
        logger.warning(
            "WARNING: abs_flow_tol={0}, rel_ptdf_tol={1}, which will likely result in violations. Consider raising abs_flow_tol or lowering rel_ptdf_tol."
            .format(abs_flow_tol * baseMVA, rel_ptdf_tol))
    if rel_ptdf_tol < 1e-6:
        logger.warning(
            "WARNING: rel_ptdf_tol={0}, which is low enough it may cause numerical issues in the solver. Consider rasing rel_ptdf_tol."
            .format(rel_ptdf_tol))
    if abs_ptdf_tol < 1e-12:
        logger.warning(
            "WARNING: abs_ptdf_tol={0}, which is low enough it may cause numerical issues in the solver. Consider rasing abs_ptdf_tol."
            .format(abs_ptdf_tol * baseMVA))
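A usage sketch with made-up option values (Egret supplies its own defaults); the function needs a module-level logger, and after the call the absolute tolerances are in per-unit:

import logging
logger = logging.getLogger(__name__)

ptdf_options = {'abs_ptdf_tol': 1e-10, 'abs_flow_tol': 1e-3,
                'rel_flow_tol': 1e-5, 'rel_ptdf_tol': 1e-6,
                'max_violations_per_iteration': 5}
check_and_scale_ptdf_options(ptdf_options, baseMVA=100.)
print(ptdf_options['abs_flow_tol'])  # 1e-05 -- now in per-unit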
Example No. 6
def add_initial_monitored_constraints(mb,
                                      md,
                                      branches_in_service,
                                      ptdf_options,
                                      PTDF,
                                      time=None):

    viol_not_lazy = set()
    for bn in branches_in_service:
        branch = md.data['elements']['branch'][bn]
        if 'lazy' in branch and not branch['lazy']:
            if bn in PTDF.branchname_to_index_masked_map:
                viol_not_lazy.add(PTDF.branchname_to_index_masked_map[bn])
            else:
                logger.warning(
                    "Branch {0} has flag 'lazy' set to False but is excluded from monitored set based on kV limits"
                    .format(bn))

    int_viol_not_lazy = set()
    if 'interface' in md.data['elements']:
        for i_n, interface in md.data['elements']['interface'].items():
            if 'lazy' in interface and not interface['lazy']:
                int_viol_not_lazy.add(PTDF.interfacename_to_index_map[i_n])

    # not easy to support in the current
    # set-up, as 'lazy' would need to be
    # set on a branch-by-branch basis
    cont_viol_not_lazy = set()

    #blank flows
    flows = _CalculatedFlows()
    lazy_violations = _LazyViolations(
        branch_lazy_violations=viol_not_lazy,
        interface_lazy_violations=int_viol_not_lazy,
        contingency_lazy_violations=cont_viol_not_lazy)

    add_violations(lazy_violations,
                   flows,
                   mb,
                   md,
                   None,
                   ptdf_options,
                   PTDF,
                   time=time,
                   prepend_str="[Initial Set] ",
                   obj_multi=None)
Example No. 7
def check_and_scale_ptdf_options(ptdf_options, baseMVA):
    ## scale to base MVA
    ptdf_options['abs_ptdf_tol'] /= baseMVA
    ptdf_options['abs_flow_tol'] /= baseMVA
    ptdf_options['active_flow_tol'] /= baseMVA

    ## lowercase keyword options
    ptdf_options['kv_threshold_type'] = ptdf_options[
        'kv_threshold_type'].lower()

    rel_flow_tol = ptdf_options['rel_flow_tol']
    abs_flow_tol = ptdf_options['abs_flow_tol']

    rel_ptdf_tol = ptdf_options['rel_ptdf_tol']
    abs_ptdf_tol = ptdf_options['abs_ptdf_tol']

    lazy_rel_flow_tol = ptdf_options['lazy_rel_flow_tol']

    max_violations_per_iteration = ptdf_options['max_violations_per_iteration']

    if max_violations_per_iteration < 1 or (not isinstance(
            max_violations_per_iteration, int)):
        raise Exception(
            "max_violations_per_iteration must be an integer least 1, max_violations_per_iteration={}"
            .format(max_violations_per_iteration))

    if abs_flow_tol < lazy_rel_flow_tol:
        raise Exception(
            "abs_flow_tol (when scaled by baseMVA) cannot be less than lazy_flow_tol"
            " abs_flow_tol={0}, lazy_rel_flow_tol={1}, baseMVA={2}".format(
                abs_flow_tol * baseMVA, lazy_rel_flow_tol, baseMVA))

    if ptdf_options['kv_threshold_type'] not in ['one', 'both']:
        raise Exception(
            "kv_threshold_type must be either 'one' (for at least one end of the line"
            " above branch_kv_threshold) or 'both' (for both ends of the line above"
            " branch_kv_threshold), kv_threshold_type={}".format(
                ptdf_options['kv_threshold_type']))

    if abs_flow_tol < 1e-6:
        logger.warning(
            "WARNING: abs_flow_tol={0}, which is below the numeric threshold of most solvers."
            .format(abs_flow_tol * baseMVA))
    if abs_flow_tol < rel_ptdf_tol * 10:
        logger.warning(
            "WARNING: abs_flow_tol={0}, rel_ptdf_tol={1}, which will likely result in violations. Consider raising abs_flow_tol or lowering rel_ptdf_tol."
            .format(abs_flow_tol * baseMVA, rel_ptdf_tol))
    if rel_ptdf_tol < 1e-6:
        logger.warning(
            "WARNING: rel_ptdf_tol={0}, which is low enough it may cause numerical issues in the solver. Consider rasing rel_ptdf_tol."
            .format(rel_ptdf_tol))
    if abs_ptdf_tol < 1e-12:
        logger.warning(
            "WARNING: abs_ptdf_tol={0}, which is low enough it may cause numerical issues in the solver. Consider rasing abs_ptdf_tol."
            .format(abs_ptdf_tol * baseMVA))
Example No. 8
def check_network_connection(graph, index_set_bus):
    """
    Checks for the connectivity of the network and prints some helpful information to the
    logger if the network is disconnected

    Parameters
    ----------
    graph : output from construct_connection_graph
    index_set_bus : list mapping bus indices to bus names (only used to generate warnings)
    """
    
    n_components, labels = sp.csgraph.connected_components(csgraph=graph, directed=False, return_labels=True)

    if n_components > 1:
        logger.warning("Network is disconnected. Number of components: {}".format(n_components))
        ### get the counts to eliminate the largest connected component
        unique, counts = np.unique(labels, return_counts=True)

        largest_component_label = unique[counts.argmax()]

        ## These are the indices of the small connected components
        small_connected_components = np.nonzero(labels != largest_component_label)[0]

        components = { comp: [] for comp in unique if comp != largest_component_label }

        for idx, comp in zip(small_connected_components, labels[small_connected_components]):
            components[comp].append(index_set_bus[idx])

        logger.warning("Buses not in largest component:")
        for comp, buses in components.items():
            logger.warning("{} : {}".format(comp, buses))

    return (n_components == 1)
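A self-contained run on a toy three-bus graph with one isolated bus, built the same way Example No. 9 below builds its connection matrix:

import numpy as np
import scipy.sparse as sp
import scipy.sparse.csgraph  # makes sp.csgraph available

# Toy graph: one branch joins buses 0 and 1; bus 2 is isolated.
graph = sp.coo_matrix((np.ones(1, dtype=int), ([0], [1])),
                      shape=(3, 3)).tocsr()
n_components, labels = sp.csgraph.connected_components(
    csgraph=graph, directed=False, return_labels=True)
print(n_components, labels)  # 2 [0 0 1] -- bus 2 is its own component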
Example No. 9
def check_network_connection(branches, index_set_branch, index_set_bus,
                             mapping_bus_to_idx):
    """
    Checks for the connectivity of the network and prints some helpful information to the
    logger if the network is disconnected
    """
    _len_bus = len(index_set_bus)

    row = []
    col = []

    for branch in branches.values():
        from_bus = branch['from_bus']
        to_bus = branch['to_bus']

        row.append(mapping_bus_to_idx[from_bus])
        col.append(mapping_bus_to_idx[to_bus])

    data = np.ones((len(branches), ), dtype=int)

    graph = sp.coo_matrix((data, (row, col)),
                          shape=(_len_bus, _len_bus)).tocsr()

    n_components, labels = sp.csgraph.connected_components(csgraph=graph,
                                                           directed=False,
                                                           return_labels=True)

    if n_components > 1:
        logger.warning(
            "Network is disconnected. Number of components: {}".format(
                n_components))
        ### get the counts to eliminate the largest connected component
        unique, counts = np.unique(labels, return_counts=True)

        largest_component_label = unique[counts.argmax()]

        ## These are the indices of the small connected components
        small_connected_components = np.nonzero(
            labels != largest_component_label)[0]

        components = {
            comp: []
            for comp in unique if comp != largest_component_label
        }

        for idx, comp in zip(small_connected_components,
                             labels[small_connected_components]):
            components[comp].append(index_set_bus[idx])

        logger.warning("Buses not in largest component:")
        for comp, buses in components.items():
            logger.warning("{} : {}".format(comp, buses))

    return (n_components == 1)
Example No. 10
def set_t0_data(md: dict, base_dir: str = "", t0_state: Optional[dict] = None):
    """ Put t0 information into the passed in model dict

    Only t0 data for thermal generators is populated.

    Data comes from:
    * t0_state, if provided
    * otherwise, a file called initial_status.csv, if present
    * otherwise, t0 data is left blank

    If t0_state is provided, it should be organized as t0_state[name][value],
    where `name` is the name of a generator, and `value` is 'initial_status',
    'initial_p_output', and 'initial_q_output'.  For any generator included in
    t0_state, all three values must be present.

    If initial_status.csv is used, it must have a header row and may have 
    from 1 to 3 data rows.  Row 1 is 'initial_status'.  Row 2 is 
    'initial_p_output'.  Row 3 is 'initial_q_output'.  Column headers are 
    the generator names. Default values are used for any missing rows.

    Any generators not mentioned in the data source are left untouched.
    """
    if t0_state is not None:
        for name, gen in md['elements']['generator'].items():
            if gen['generator_type'] == 'thermal' and name in t0_state:
                gen['initial_status'] = t0_state[name]['initial_status']
                gen['initial_p_output'] = t0_state[name]['initial_p_output']
                gen['initial_q_output'] = t0_state[name]['initial_q_output']
        return

    state_fname = os.path.join(base_dir, 'initial_status.csv')
    if os.path.exists(state_fname):
        import csv
        with open(state_fname, 'r') as f:
            reader = csv.DictReader(f)
            rows = list(reader)

        # We now have a list of rows, from 1 to 3 rows long.
        # Row 1 is 'initial_status', row 2 is 'initial_p_output', and row 3 is 'initial_q_output'.
        # Any missing row uses defaults
        row_count = len(rows)
        for name, gen in md['elements']['generator'].items():
            if gen['generator_type'] != 'thermal':
                continue
            if name not in reader.fieldnames:
                continue
            gen['initial_status'] = float(rows[0][name])
            if gen['initial_status'] < 0:
                gen['initial_p_output'] = 0.0
                gen['initial_q_output'] = 0.0
            else:
                if row_count >= 2:
                    gen['initial_p_output'] = float(rows[1][name])
                else:
                    gen["initial_p_output"] = gen["p_min"]
                if row_count >= 3:
                    gen['initial_q_output'] = float(rows[2][name])
                else:
                    gen["initial_q_output"] = max(0., gen["q_min"])
    else:
        logger.warning("Setting default t0 state in RTS-GMLC parser")
        for name, gen in md['elements']['generator'].items():
            if gen['generator_type'] == 'thermal':
                gen['initial_status'] = gen['min_up_time'] + 1
                gen['initial_p_output'] = gen['p_min']
                gen['initial_q_output'] = 0.
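For reference, a hypothetical initial_status.csv matching the layout the docstring describes (generator names invented; row 1 is initial_status, row 2 initial_p_output, row 3 initial_q_output):

import csv, io

# A positive initial_status means hours on; negative means hours off.
text = "GEN1,GEN2\n8,-4\n55.0,0.0\n12.0,0.0\n"
rows = list(csv.DictReader(io.StringIO(text)))
print(rows[0]['GEN1'], rows[0]['GEN2'])  # 8 -4
print(rows[1]['GEN1'])                   # 55.0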
Example No. 11
def calculate_ptdf(branches,buses,index_set_branch,index_set_bus,reference_bus,base_point=BasePointType.FLATSTART,sparse_index_set_branch=None,mapping_bus_to_idx=None):
    """
    Calculates the sensitivity of the voltage angle to real power injections
    Parameters
    ----------
    branches: dict{}
        The dictionary of branches for the test case
    buses: dict{}
        The dictionary of buses for the test case
    index_set_branch: list
        The list of keys for branches for the test case
    index_set_bus: list
        The list of keys for buses for the test case
    reference_bus: key value
        The reference bus key value
    base_point: egret.model_library_defn.BasePointType
        The base-point type for calculating the PTDF matrix
    sparse_index_set_branch: list
        The list of keys for branches needed to compute a sparse PTDF matrix
        If this is None, a dense PTDF matrix is returned
    mapping_bus_to_idx: dict
        A map from bus names to indices for matrix construction. If None,
        will be inferred from index_set_bus.
    """
    _len_bus = len(index_set_bus)

    if mapping_bus_to_idx is None:
        mapping_bus_to_idx = {bus_n: i for i, bus_n in enumerate(index_set_bus)}

    _len_branch = len(index_set_branch)

    _ref_bus_idx = mapping_bus_to_idx[reference_bus]

    ## check if the network is connected
    graph = construct_connection_graph(branches, mapping_bus_to_idx)
    connected = check_network_connection(graph, index_set_bus)

    J = _calculate_J11(branches,buses,index_set_branch,index_set_bus,mapping_bus_to_idx,base_point,approximation_type=ApproximationType.PTDF)
    A = calculate_adjacency_matrix_transpose(branches,index_set_branch,index_set_bus,mapping_bus_to_idx)
    M = A@J

    if sparse_index_set_branch is None or len(sparse_index_set_branch) == _len_branch:
        ## the resulting matrix after inversion will be fairly dense,
        ## the scipy documentation recommends using dense for the inversion
        ## as well

        ref_bus_mask = np.ones(_len_bus, dtype=bool)
        ref_bus_mask[_ref_bus_idx] = False

        # M is now (A^T B_d A) with
        # row and column of reference
        # bus removed
        J0 = M[ref_bus_mask,:][:,ref_bus_mask]

        # (B_d A) with reference bus column removed
        B_dA = J[:,ref_bus_mask].A

        if connected:
            try:
                PTDF = np.linalg.solve(J0.T.A, B_dA.T).T
            except np.linalg.LinAlgError:
                logger.warning("Matrix not invertible. Calculating pseudo-inverse instead.")
                SENSI = np.linalg.pinv(J0.A,rcond=1e-7)
                PTDF = np.matmul(B_dA,SENSI)
        else:
            logger.warning("Using pseudo-inverse method as network is disconnected")
            SENSI = np.linalg.pinv(J0.A,rcond=1e-7)
            PTDF = np.matmul(B_dA,SENSI)

        # insert 0 column for reference bus
        PTDF = np.insert(PTDF, _ref_bus_idx, np.zeros(_len_branch), axis=1)

    elif len(sparse_index_set_branch) < _len_branch:
        ref_bus_row = sp.coo_matrix(([1],([0],[_ref_bus_idx])), shape=(1,_len_bus))
        ref_bus_col = sp.coo_matrix(([1],([_ref_bus_idx],[0])), shape=(_len_bus,1))
 
        J0 = sp.bmat([[M,ref_bus_col],[ref_bus_row,0]], format='coo')

        B = np.array([], dtype=np.int64).reshape(_len_bus + 1,0)
        _sparse_mapping_branch = {i: branch_n for i, branch_n in enumerate(index_set_branch) if branch_n in sparse_index_set_branch}

        ## TODO: Maybe just keep the sparse PTDFs as a dict of ndarrays?
        ## Right now the return type depends on the options 
        ## passed in
        for idx, branch_name in _sparse_mapping_branch.items():
            b = np.zeros((_len_branch,1))
            b[idx] = 1
            _tmp = J.transpose()@b
            _tmp = np.vstack([_tmp,0])
            B = np.concatenate((B,_tmp), axis=1)
        row_idx = list(_sparse_mapping_branch.keys())
        PTDF = sp.lil_matrix((_len_branch,_len_bus))
        _ptdf = sp.linalg.spsolve(J0.transpose().tocsr(), B).T
        PTDF[row_idx] = _ptdf[:,:-1]

    return PTDF
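The dense path above, hand-rolled for a three-bus triangle with equal (made-up) susceptances: PTDF = (B_d A)(A^T B_d A)^{-1}, solved with the reference bus row/column removed and a zero column re-inserted for it:

import numpy as np

A = np.array([[ 1., -1.,  0.],   # branch 0: bus0 -> bus1
              [ 0.,  1., -1.],   # branch 1: bus1 -> bus2
              [ 1.,  0., -1.]])  # branch 2: bus0 -> bus2
b = np.array([10., 10., 10.])    # made-up per-unit susceptances
B_dA = b[:, None] * A            # B_d @ A
M = A.T @ B_dA                   # A^T B_d A

ref = 0                          # reference bus index
mask = np.ones(3, dtype=bool)
mask[ref] = False
PTDF = np.linalg.solve(M[np.ix_(mask, mask)].T, B_dA[:, mask].T).T
PTDF = np.insert(PTDF, ref, 0., axis=1)
print(PTDF.round(3))
# PTDF[0, 1] = -0.667: two thirds of an injection at bus 1 returns to the
# reference over branch 0 (negative per the bus0 -> bus1 orientation).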
Example No. 12
def precompute_contingency_matricies(graph, MLU_MP, A, Bd,
                                     mapping_bus_to_idx, mapping_branch_to_idx,
                                     ref_bus_mask,
                                     branches, contingencies):

    contingencies_monitored = {}
    for c, cdict in contingencies.items():
        if 'branch_contingency' not in cdict:
            logger.warning(f"Contingency {c} does not have a branch specified; ignoring")
            continue
        branches_out = cdict['branch_contingency'] 
        if isinstance( branches_out, list ):
            if len(branches_out) == 0:
                logger.warning(f"Contingency {c} does not have a branch specified; ignoring")
                continue
            if len(branches_out) > 1:
                raise RuntimeError(f"Contingency {c} has multiple branches. This is not currently supported")
            branch_out = branches_out[0]
            if branch_out not in branches:
                raise RuntimeError(f"Contingency {c} is already out!")
        elif branches_out in mapping_branch_to_idx:
            branch_out = branches_out
        else:
            raise RuntimeError(f"Contingencies must be specified as a list of branches or single branch")

        contingencies_monitored[c] = branch_out

    _check_contingencies_not_disconnecting(graph, branches, mapping_bus_to_idx, contingencies_monitored.values()) 
    
    ## things for every possible modification
    _bus_len = A.shape[1]
    Pr = sp.csc_matrix((np.ones(_bus_len), (MLU_MP.perm_r, np.arange(_bus_len))))
    Pc = sp.csc_matrix((np.ones(_bus_len), (np.arange(_bus_len), MLU_MP.perm_c)))

    ## shouldn't need to re-order
    splu_options = {
                     "Equil":False,
                     "ColPerm":"NATURAL",
                     #"DiagPivotThresh":0.0,
                   }
    L_factor = sp.linalg.splu(MLU_MP.L,options=splu_options)
    U_factor = sp.linalg.splu(MLU_MP.U,options=splu_options)

    buff = np.zeros((_bus_len,1))

    compensators = {}

    for cn, branch_out in contingencies_monitored.items():
        branch_out_idx = mapping_branch_to_idx[branch_out]

        M = A[branch_out_idx].T
        dely = -Bd[branch_out_idx, branch_out_idx]

        # NOTE: The conversions involved here are a bottleneck. 
        #       Egret should probably implement its own sparse 
        #       triangular solver. Batching (collecting Pr@M,
        #       Pc.T@M for every branch_out) could also be tried.
        W = sp.csc_matrix( L_factor.solve((Pr@M).toarray(out=buff)) )
        Wbar = sp.csc_matrix( U_factor.solve((Pc.T@M).toarray(out=buff), 'T') )

        # NOTE: With a single change, these are simple inverses.
        #       If we go to multiple contingencies, this needs to 
        #       use matrix inverses and the code should be re-visited.
        z = (Wbar.T@W)[0,0]
        c = 1./((1./dely) + z)

        # Compute phi_compensator
        branch = branches[branch_out]

        if branch['branch_type'] == 'transformer' and branch['transformer_phase_shift'] != 0.:
            shift = math.radians(branch['transformer_phase_shift'])

            neg_b = shift*dely

            row = [mapping_bus_to_idx[branch['from_bus']], mapping_bus_to_idx[branch['to_bus']]]
            col = [0, 0]
            data = [neg_b, -neg_b]

            phi_comp = sp.coo_matrix((data,(row,col)), shape=(_bus_len+1,1)).tocsc()[ref_bus_mask]
            VA_comp = MLU_MP.solve(phi_comp.toarray(out=buff).T[0])

        else:
            phi_comp = sp.coo_matrix(([],([],[])), shape=(_bus_len,1)).tocsc()
            VA_comp = None

        comp = _ContingencyCompensator(M=M, c=c, W=W, Wbar=Wbar, phi_compensator=phi_comp,\
                                        VA_compensator=VA_comp, branch_out=branch_out)

        compensators[cn] = comp

    contingency_compensators = _ContingencyCompensators(compensators=compensators, L=L_factor, U=U_factor, Pr=Pr, Pc=Pc)

    return contingency_compensators
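The compensator c = 1/((1/dely) + z) above is the Sherman-Morrison rank-1 update in disguise; a dense sanity check on made-up data, where w plays the role of W:

import numpy as np

rng = np.random.default_rng(0)
K = np.diag(rng.uniform(1., 2., 4)) + 0.1  # small SPD stand-in for the B matrix
m = rng.standard_normal(4)                 # incidence column of the outaged branch
x = rng.standard_normal(4)
dely = 0.5                                 # made-up susceptance change

w = np.linalg.solve(K, m)
z = m @ w
c = 1. / (1. / dely + z)

# (K + dely*m m^T)^{-1} x  ==  K^{-1} x - c * w * (m^T K^{-1} x)
compensated = np.linalg.solve(K, x) - c * w * (m @ np.linalg.solve(K, x))
direct = np.linalg.solve(K + dely * np.outer(m, m), x)
print(np.allclose(compensated, direct))    # True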
Example No. 13
def calculate_ptdf_ldf(branches,buses,index_set_branch,index_set_bus,reference_bus,base_point=BasePointType.SOLUTION,sparse_index_set_branch=None,mapping_bus_to_idx=None):
    """
    Calculates the sensitivity of the voltage angle to real power injections and losses on the lines. Includes the
    calculation of the constant term for the quadratic losses on the lines.
    Parameters
    ----------
    branches: dict{}
        The dictionary of branches for the test case
    buses: dict{}
        The dictionary of buses for the test case
    index_set_branch: list
        The list of keys for branches for the test case
    index_set_bus: list
        The list of keys for buses for the test case
    reference_bus: key value
        The reference bus key value
    base_point: egret.model_library_defn.BasePointType
        The base-point type for calculating the PTDF and LDF matrix
    sparse_index_set_branch: list
        The list of keys for branches needed to compute a sparse PTDF matrix
    mapping_bus_to_idx: dict
        A map from bus names to indices for matrix construction. If None,
        will be inferred from index_set_bus.
    """
    _len_bus = len(index_set_bus)

    if mapping_bus_to_idx is None:
        mapping_bus_to_idx = {bus_n: i for i, bus_n in enumerate(index_set_bus)}

    _len_branch = len(index_set_branch)

    _ref_bus_idx = mapping_bus_to_idx[reference_bus]

    J = _calculate_J11(branches,buses,index_set_branch,index_set_bus,mapping_bus_to_idx,base_point,approximation_type=ApproximationType.PTDF_LOSSES)
    L = _calculate_L11(branches,buses,index_set_branch,index_set_bus,mapping_bus_to_idx,base_point)
    Jc = _calculate_pf_constant(branches,buses,index_set_branch,base_point)
    Lc = _calculate_pfl_constant(branches,buses,index_set_branch,base_point)

    if np.all(Jc == 0) and np.all(Lc == 0):
        return np.zeros((_len_branch, _len_bus)), np.zeros((_len_branch, _len_bus)), np.zeros((1,_len_branch))

    ## check if the network is connected
    graph = construct_connection_graph(branches, mapping_bus_to_idx)
    connected = check_network_connection(graph, index_set_bus)

    A = calculate_adjacency_matrix_transpose(branches,index_set_branch,index_set_bus, mapping_bus_to_idx)
    AA = calculate_absolute_adjacency_matrix(A)
    M1 = A@J
    M2 = AA@L
    M = M1 + 0.5 * M2

    ref_bus_row = sp.coo_matrix(([1],([0],[_ref_bus_idx])), shape=(1,_len_bus))
    ref_bus_col = sp.coo_matrix(([1],([_ref_bus_idx],[0])), shape=(_len_bus,1))

    J0 = sp.bmat([[M,ref_bus_col],[ref_bus_row,0]], format='coo')

    if sparse_index_set_branch is None or len(sparse_index_set_branch) == _len_branch:
        ## the resulting matrix after inversion will be fairly dense,
        ## the scipy documentation recommends using dense for the inversion
        ## as well
        if connected:
            try:
                SENSI = np.linalg.inv(J0.A)
            except np.linalg.LinAlgError:
                logger.warning("Matrix not invertible. Calculating pseudo-inverse instead.")
                SENSI = np.linalg.pinv(J0.A,rcond=1e-7)
        else:
            logger.warning("Using pseudo-inverse method as network is disconnected")
            SENSI = np.linalg.pinv(J0.A,rcond=1e-7)
        SENSI = SENSI[:-1,:-1]

        PTDF = np.matmul(J.A, SENSI)
        LDF = np.matmul(L.A, SENSI)
    elif len(sparse_index_set_branch) < _len_branch:
        B_J = np.array([], dtype=np.int64).reshape(_len_bus + 1, 0)
        B_L = np.array([], dtype=np.int64).reshape(_len_bus + 1, 0)
        _sparse_mapping_branch = {i: branch_n for i, branch_n in enumerate(index_set_branch) if branch_n in sparse_index_set_branch}

        for idx, branch_name in _sparse_mapping_branch.items():
            b = np.zeros((_len_branch, 1))
            b[idx] = 1

            _tmp_J = np.matmul(J.transpose(), b)
            _tmp_J = np.vstack([_tmp_J, 0])
            B_J = np.concatenate((B_J, _tmp_J), axis=1)

            _tmp_L = np.matmul(L.transpose(), b)
            _tmp_L = np.vstack([_tmp_L, 0])
            B_L = np.concatenate((B_L, _tmp_L), axis=1)

        row_idx = list(_sparse_mapping_branch.keys())
        PTDF = sp.lil_matrix((_len_branch, _len_bus))
        _ptdf = sp.linalg.spsolve(J0.transpose().tocsr(), B_J).T
        PTDF[row_idx] = _ptdf[:, :-1]

        LDF = sp.lil_matrix((_len_branch, _len_bus))
        _ldf = sp.linalg.spsolve(J0.transpose().tocsr(), B_L).T
        LDF[row_idx] = _ldf[:, :-1]

    M1 = A@Jc
    M2 = AA@Lc
    M = M1 + 0.5 * M2
    LDF_constant = -LDF@M + Lc

    return PTDF, LDF, LDF_constant
Example No. 14
def _lazy_ptdf_dcopf_model_solve_loop(m,
                                      md,
                                      solver,
                                      solver_tee=True,
                                      symbolic_solver_labels=False,
                                      iteration_limit=100000):
    '''
    The lazy PTDF DCOPF solver loop. This function iteratively
    adds violated transmission constraints until either the result is
    transmission feasible or we're tracking every violated constraint
    in the model

    Parameters
    ----------
    m : pyomo.environ.ConcreteModel
        An egret DCOPF model with no transmission constraints
    md : egret.data.ModelData
        An egret ModelData object
    solver : pyomo.opt.solver
        A pyomo solver object
    solver_tee : bool (optional)
        For displaying the solver log (default is True)
    symbolic_solver_labels : bool (optional)
        Use symbolic solver labels when writing to the solver (default is False)
    iteration_limit : int (optional)
        Number of iterations before a hard termination (default is 100000)

    Returns
    -------
    egret.common.lazy_ptdf_utils.LazyPTDFTerminationCondition : the termination status
    pyomo.opt.results.SolverResults : The results object from the pyomo solver
    int : The number of iterations before termination

    '''
    from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver

    PTDF = m._PTDF

    ptdf_options = m._ptdf_options

    persistent_solver = isinstance(solver, PersistentSolver)

    for i in range(iteration_limit):

        PFV, PFV_I, viol_num, mon_viol_num, viol_lazy, int_viol_lazy \
                = lpu.check_violations(m, md, PTDF, ptdf_options['max_violations_per_iteration'])

        iter_status_str = "iteration {0}, found {1} violation(s)".format(
            i, viol_num)
        if mon_viol_num:
            iter_status_str += ", {} of which are already monitored".format(
                mon_viol_num)

        logger.info(iter_status_str)

        if viol_num <= 0:
            ## in this case, there are no violations!
            ## load the duals now too, if we're using a persistent solver
            if persistent_solver:
                solver.load_duals()
            return lpu.LazyPTDFTerminationCondition.NORMAL

        elif viol_num == mon_viol_num:
            logger.warning(
                'WARNING: Terminating with monitored violations! Result is not transmission feasible.'
            )
            if persistent_solver:
                solver.load_duals()
            return lpu.LazyPTDFTerminationCondition.FLOW_VIOLATION

        lpu.add_violations(viol_lazy, int_viol_lazy, PFV, PFV_I, m, md, solver,
                           ptdf_options, PTDF)
        total_flow_constr_added = len(viol_lazy) + len(int_viol_lazy)
        logger.info("iteration {0}, added {1} flow constraint(s)".format(
            i, total_flow_constr_added))

        if persistent_solver:
            solver.solve(m,
                         tee=solver_tee,
                         load_solutions=False,
                         save_results=False)
            solver.load_vars()
        else:
            solver.solve(m,
                         tee=solver_tee,
                         symbolic_solver_labels=symbolic_solver_labels)

    else:  # we hit the iteration limit
        logger.warning(
            'WARNING: Exiting on maximum iterations for lazy PTDF model. Result is not transmission feasible.'
        )
        if persistent_solver:
            solver.load_duals()
        return lpu.LazyPTDFTerminationCondition.ITERATION_LIMIT
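Stripped of pyomo and Egret specifics, the loop above is the generic lazy-constraint pattern: solve, check for violations, add the violated constraints, repeat until clean or the iteration limit is hit. A toy sketch with stand-in callables:

def lazy_loop(solve, check_violations, add_violations, iteration_limit=100000):
    for i in range(iteration_limit):
        solution = solve()
        violations = check_violations(solution)
        if not violations:
            return 'NORMAL', i
        add_violations(violations)
    return 'ITERATION_LIMIT', iteration_limit

# Toy run: one "violated line" is added to the monitored set per iteration.
monitored, candidates = set(), {'l1', 'l2', 'l3'}
status, iters = lazy_loop(
    solve=lambda: None,
    check_violations=lambda _: candidates - monitored,
    add_violations=lambda v: monitored.add(next(iter(v))))
print(status, iters)  # NORMAL 3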
Example No. 15
def check_violations(mb, md, PTDF, max_viol_add, time=None, prepend_str=""):

    PFV = calculate_PFV(mb, PTDF)

    ## calculate the lazy violations
    gt_viol_lazy_array = PFV - PTDF.lazy_branch_limits
    lt_viol_lazy_array = -PFV - PTDF.lazy_branch_limits

    ## *_viol_lazy has the indices of the violations at
    ## the lazy limit
    gt_viol_lazy = np.nonzero(gt_viol_lazy_array > 0)[0]
    lt_viol_lazy = np.nonzero(lt_viol_lazy_array > 0)[0]

    ## calculate the violations
    ## these will be just a subset
    gt_viol_array = PFV[gt_viol_lazy] - PTDF.enforced_branch_limits[
        gt_viol_lazy]
    lt_viol_array = -PFV[lt_viol_lazy] - PTDF.enforced_branch_limits[
        lt_viol_lazy]

    ## *_viol will be indexed by *_viol_lazy
    gt_viol = np.nonzero(gt_viol_array > 0)[0]
    lt_viol = np.nonzero(lt_viol_array > 0)[0]

    ## these will hold the violations
    ## we found this iteration
    gt_viol = frozenset(gt_viol_lazy[gt_viol])
    lt_viol = frozenset(lt_viol_lazy[lt_viol])

    ## get the lines we're monitoring
    gt_idx_monitored = mb._gt_idx_monitored
    lt_idx_monitored = mb._lt_idx_monitored

    ## get the lines for which we've found a violation that's
    ## in the model
    gt_viol_in_mb = gt_viol.intersection(gt_idx_monitored)
    lt_viol_in_mb = lt_viol.intersection(lt_idx_monitored)

    ## print a warning for these lines
    ## check if the found violations are in the model and print warning
    baseMVA = md.data['system']['baseMVA']
    for i in lt_viol_in_mb:
        bn = PTDF.branches_keys_masked[i]
        thermal_limit = PTDF.branch_limits_array_masked[i]
        logger.warning(prepend_str + _generate_flow_viol_warning(
            'LB', mb, bn, PFV[i], -thermal_limit, baseMVA, time))

    for i in gt_viol_in_mb:
        bn = PTDF.branches_keys_masked[i]
        thermal_limit = PTDF.branch_limits_array_masked[i]
        logger.warning(prepend_str + _generate_flow_viol_warning(
            'UB', mb, bn, PFV[i], thermal_limit, baseMVA, time))

    ## *t_viol_lazy will hold the lines we're adding
    ## this iteration -- don't want to add lines
    ## that are already in the monitored set

    # eliminate lines in the monitored set
    gt_viol_lazy = set(gt_viol_lazy).difference(gt_idx_monitored)
    lt_viol_lazy = set(lt_viol_lazy).difference(lt_idx_monitored)

    ## limit the number of lines we add in one iteration
    ## if we have too many violations, just take those largest
    ## in absolute value in either direction
    if len(gt_viol_lazy) + len(lt_viol_lazy) > max_viol_add:

        tracking_gt_viol_lazy = list(gt_viol_lazy)
        tracking_lt_viol_lazy = list(lt_viol_lazy)

        gt_viol_lazy = list()
        lt_viol_lazy = list()

        ## one of the tracking_*t_viol_lazy could be empty

        if not tracking_gt_viol_lazy:
            idx = np.argmax(lt_viol_lazy_array[tracking_lt_viol_lazy])
            ptdf_idx = tracking_lt_viol_lazy.pop(idx)
            lt_viol_lazy.append(ptdf_idx)

        elif not tracking_lt_viol_lazy:
            idx = np.argmax(gt_viol_lazy_array[tracking_gt_viol_lazy])
            ptdf_idx = tracking_gt_viol_lazy.pop(idx)
            gt_viol_lazy.append(ptdf_idx)

        else:  ## get the worst of both
            gt_idx = np.argmax(gt_viol_lazy_array[tracking_gt_viol_lazy])
            lt_idx = np.argmax(lt_viol_lazy_array[tracking_lt_viol_lazy])
            gt_branch_idx = tracking_gt_viol_lazy[gt_idx]
            lt_branch_idx = tracking_lt_viol_lazy[lt_idx]

            if gt_viol_lazy_array[gt_branch_idx] > lt_viol_lazy_array[
                    lt_branch_idx]:
                ptdf_idx = gt_branch_idx
                gt_viol_lazy.append(ptdf_idx)
                del tracking_gt_viol_lazy[gt_idx]
            else:
                ptdf_idx = lt_branch_idx
                lt_viol_lazy.append(ptdf_idx)
                del tracking_lt_viol_lazy[lt_idx]

        if max_viol_add > 1:
            ptdf_lin = np.zeros(len(PTDF.buses_keys))

        ## greedily add the remaining violations, preferring PTDF rows
        ## closest to orthogonal to those already selected, with larger
        ## violations lowering the score (and so being preferred)
        for _ in range(max_viol_add - 1):

            ptdf_lin += PTDF.PTDFM_masked[ptdf_idx]

            all_other_violations = list(tracking_gt_viol_lazy +
                                        tracking_lt_viol_lazy)

            other_gt_viols = gt_viol_lazy_array[all_other_violations]
            other_lt_viols = lt_viol_lazy_array[all_other_violations]

            other_viols = np.maximum(other_gt_viols, other_lt_viols)

            ## put this in baseMVA
            other_viols *= baseMVA

            other_viol_rows = PTDF.PTDFM_masked[all_other_violations]

            orthogonality = np.absolute(np.dot(other_viol_rows, ptdf_lin))

            ## divide by transmission limits to give higher
            ## priority to those lines with larger violations

            ## larger values emphasize violation
            ## smaller emphasize orthogonality
            ## TODO: try weighting by number of nonzeros
            orthogonality /= other_viols

            ## this is the index into the orthogonality matrix,
            ## which is indexed by all_other_violations
            all_other_idx = np.argmin(orthogonality)

            ptdf_idx = all_other_violations[all_other_idx]

            if ptdf_idx in tracking_gt_viol_lazy:
                tracking_gt_viol_lazy.remove(ptdf_idx)
                gt_viol_lazy.append(ptdf_idx)
            elif ptdf_idx in tracking_lt_viol_lazy:
                tracking_lt_viol_lazy.remove(ptdf_idx)
                lt_viol_lazy.append(ptdf_idx)
            else:
                raise Exception("Unexpected case")

    viol_num = len(gt_viol) + len(lt_viol)
    monitored_viol_num = len(lt_viol_in_mb) + len(gt_viol_in_mb)

    return PFV, viol_num, monitored_viol_num, gt_viol_lazy, lt_viol_lazy
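The orthogonality-weighted pick in the loop above, on a tiny made-up PTDF matrix: among remaining candidates, argmin favors the row least parallel to what has been selected, with larger violations lowering (improving) the score:

import numpy as np

PTDFM = np.array([[0.60, -0.30,  0.10],
                  [0.59, -0.29,  0.10],   # nearly parallel to row 0
                  [0.05,  0.50, -0.55]])  # nearly orthogonal to row 0
ptdf_lin = PTDFM[0]                       # row 0 already selected

others = [1, 2]
other_viols = np.array([2.0, 1.0])        # made-up violation sizes
orthogonality = np.abs(PTDFM[others] @ ptdf_lin) / other_viols
print(others[np.argmin(orthogonality)])   # 2 -- near-orthogonality wins out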
Example No. 16
def check_violations(mb, md, PTDF, max_viol_add, time=None):

    NWV = np.fromiter((pe.value(mb.p_nw[b]) for b in PTDF.bus_iterator()),
                      float,
                      count=len(PTDF.buses_keys))
    NWV += PTDF.phi_adjust_array

    PFV = PTDF.PTDFM.dot(NWV)
    PFV += PTDF.phase_shift_array

    ## calculate the negative of the violations (for easy sorting)
    gt_viol_array = PTDF.enforced_branch_limits - PFV
    lt_viol_array = PFV + PTDF.enforced_branch_limits

    gt_viol = np.nonzero(gt_viol_array < 0)[0]
    lt_viol = np.nonzero(lt_viol_array < 0)[0]

    ## these will hold the violations
    ## we found this iteration
    gt_viol = frozenset(gt_viol)
    lt_viol = frozenset(lt_viol)

    ## get the lines we're monitoring
    gt_idx_monitored = mb._gt_idx_monitored
    lt_idx_monitored = mb._lt_idx_monitored

    ## get the lines for which we've found a violation that's
    ## in the model
    gt_viol_in_mb = gt_viol.intersection(gt_idx_monitored)
    lt_viol_in_mb = lt_viol.intersection(lt_idx_monitored)

    ## print a warning for these lines
    ## check if the found violations are in the model and print warning
    baseMVA = md.data['system']['baseMVA']
    for i in lt_viol_in_mb:
        bn = PTDF.branches_keys[i]
        thermal_limit = PTDF.branch_limits_array[i]
        logger.warning(
            _generate_flow_viol_warning('LB', mb, bn, PFV[i], -thermal_limit,
                                        baseMVA, time))

    for i in gt_viol_in_mb:
        bn = PTDF.branches_keys[i]
        thermal_limit = PTDF.branch_limits_array[i]
        logger.warning(
            _generate_flow_viol_warning('UB', mb, bn, PFV[i], thermal_limit,
                                        baseMVA, time))

    ## *t_viol_lazy will hold the lines we're adding
    ## this iteration -- don't want to add lines
    ## that are already in the monitored set
    gt_viol_lazy = gt_viol.difference(gt_idx_monitored)
    lt_viol_lazy = lt_viol.difference(lt_idx_monitored)

    ## limit the number of lines we add in one iteration
    ## if we have too many violations, just take those largest
    ## in absolute value in either direction
    if len(gt_viol_lazy) + len(lt_viol_lazy) > max_viol_add:

        ## for those in the monitored set, assume they're feasible for
        ## the purposes of sorting the worst violations, which means
        ## resetting the values for these lines as computed above

        ## use what most solvers consider +infty
        LARGE_CONST = 1e+100
        gt_viol_array[gt_idx_monitored] = LARGE_CONST
        lt_viol_array[lt_idx_monitored] = LARGE_CONST

        ## give the order of the first max_viol_add violations
        measured_gt_viol = np.argpartition(gt_viol_array, range(max_viol_add))
        measured_lt_viol = np.argpartition(lt_viol_array, range(max_viol_add))

        measured_gt_viol_pos = 0
        measured_lt_viol_pos = 0
        gt_viol_lazy = set()
        lt_viol_lazy = set()
        for _ in range(max_viol_add):
            gt_v = gt_viol_array[measured_gt_viol[measured_gt_viol_pos]]
            lt_v = lt_viol_array[measured_lt_viol[measured_lt_viol_pos]]

            ## values were negated for sorting, so if both are
            ## positive there are no actual violations left
            if gt_v > 0 and lt_v > 0:
                break
            ## more negative means worse; take whichever
            ## side has the larger overall violation
            elif gt_v < lt_v:
                gt_viol_lazy.add(measured_gt_viol[measured_gt_viol_pos])
                measured_gt_viol_pos += 1
            else:
                lt_viol_lazy.add(measured_lt_viol[measured_lt_viol_pos])
                measured_lt_viol_pos += 1

    viol_num = len(gt_viol) + len(lt_viol)
    monitored_viol_num = len(lt_viol_in_mb) + len(gt_viol_in_mb)

    return PFV, viol_num, monitored_viol_num, gt_viol_lazy, lt_viol_lazy
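The argpartition trick above, on a small made-up array: passing range(k) as kth guarantees the first k entries of the result index the k smallest values in sorted order:

import numpy as np

viol_array = np.array([5., -3., 7., -8., 1.])  # negated, so < 0 means violated
k = 2
order = np.argpartition(viol_array, range(k))
print(order[:k])              # [3 1] -- the two worst violations, worst first
print(viol_array[order[:k]])  # [-8. -3.]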