Example #1
0
def build_group_table(ssa, group, columns, mdl_name=None):
    """
    Build the table for devices in a group in an ANDES System.

    Parameters
    ----------
    ssa : andes.system.System
        The ANDES system to build the table from.
    group : string
        The ANDES group name.
    columns : list of string
        The common columns of a group to be included in the table.
    mdl_name : list of string, optional
        The list of models to be included in the table. Defaults to all
        models registered under the group.

    Returns
    -------
    DataFrame
        The output DataFrame containing the requested columns from every
        device of the selected models, stacked row-wise.
    """
    group_df = pd.DataFrame(columns=columns)
    group = getattr(ssa, group)
    # A mutable list literal as the default argument would be shared across
    # calls; ``None`` is the safe sentinel and behaves identically here.
    if not mdl_name:
        # Default to every model registered under the group.
        mdl_name = list(getattr(group, 'models').keys())
    for key in mdl_name:
        mdl = getattr(ssa, key)
        group_df = pd.concat([group_df, mdl.as_df()[columns]], axis=0)
    return group_df
Example #2
0
    def unpack_df(self, attr):
        """
        Construct pandas DataFrames for the stored simulation data.

        Parameters
        ----------
        attr : str or None
            Which frame(s) to rebuild ('x', 'y', 'z', 'df_xy', 'df_xyz');
            ``None`` rebuilds everything.
        """
        # Read the column names up front (mirrors the original access order).
        x_cols = self.dae.x_name_output
        y_cols = self.dae.y_name_output
        z_cols = self.dae.z_name

        build = pd.DataFrame.from_dict

        if attr is None or 'x' in attr:
            self.df_x = build(self._xs, orient='index', columns=x_cols)
        if attr is None or 'y' in attr:
            self.df_y = build(self._ys, orient='index', columns=y_cols)
        if attr is None or 'z' in attr:
            self.df_z = build(self._zs, orient='index', columns=z_cols)

        # Combined frames; these rely on df_x/df_y/df_z already existing.
        if attr is None or attr == 'df_xy':
            self.df_xy = pd.concat((self.df_x, self.df_y), axis=1)
        if attr is None or attr == 'df_xyz':
            self.df_xyz = pd.concat((self.df_x, self.df_y, self.df_z), axis=1)

        return True
Example #3
0
def _sumPF_ppn(ppn):
    """Summarize PF results of a pandapower net"""
    rg = pd.concat([ppn.res_gen[['p_mw']], ppn.gen[['bus']]],
                   axis=1).rename(columns={'p_mw': 'gen'})
    rd = pd.concat([ppn.res_load[['p_mw']], ppn.load[['bus']]],
                   axis=1).rename(columns={'p_mw': 'demand'})
    rp = pd.DataFrame()
    rp['bus'] = ppn.bus.index
    rp = rp.merge(rg, on='bus', how='left')
    rp = rp.merge(rd, on='bus', how='left')
    rp.fillna(0, inplace=True)
    rp['ngen'] = rp['gen'] - rp['demand']
    rp = rp.groupby('bus').sum().reset_index(drop=True)
    rp['bus'] = rp.index
    return rp
Example #4
0
    def unpack_df(self):
        """
        Construct pandas DataFrames for states, algebraic variables,
        discrete flags, and their combined xy / xyz frames.
        """
        from_dict = pd.DataFrame.from_dict

        self.df_x = from_dict(self._xs, orient='index',
                              columns=self.dae.x_name)
        self.df_y = from_dict(self._ys, orient='index',
                              columns=self.dae.y_name)
        self.df_z = from_dict(self._zs, orient='index',
                              columns=self.dae.z_name)

        # Combined frames are built from the per-kind frames above.
        self.df_xy = pd.concat((self.df_x, self.df_y), axis=1)
        self.df_xyz = pd.concat((self.df_xy, self.df_z), axis=1)

        return True
Example #5
0
def runopp_map(ssp, link_table, **kwargs):
    """
    Run OPF in pandapower using ``pp.runopp()`` and map results back to ANDES
    based on the link table.

    Parameters
    ----------
    ssp : pandapower network
        The pandapower network.

    link_table : DataFrame
        The link table of the ANDES system.

    Returns
    -------
    DataFrame

        The DataFrame contains the OPF results with columns ``p_mw``,
        ``q_mvar``, ``vm_pu`` in p.u., and the corresponding ``idx`` of
        ``StaticGen``, ``Exciter``, ``TurbineGov`` in ANDES.

    Notes
    -----
      - The pandapower net and the ANDES system must have same base MVA.
      - Multiple ``DG`` connected to the same ``StaticGen`` will be converted to one generator.
        The power is dispatched to each ``DG`` by the power ratio ``gammap``
    """

    # AC OPF first; fall back to DC OPF on any solver failure.
    try:
        pp.runopp(ssp, **kwargs)
    except Exception:
        pp.rundcopp(ssp, **kwargs)
        logger.warning("ACOPF failed, DCOPF is used instead.")

    # Dispatch results from pandapower, keyed by the StaticGen idx stored in
    # the generator name column.
    gen = ssp.gen.rename(columns={'name': 'stg_idx'})
    res = pd.concat(
        [gen[['stg_idx']], ssp.res_gen[['p_mw', 'q_mvar', 'vm_pu']]], axis=1)

    # Attach the controllable flag and the ANDES link information.
    res = res.merge(gen[['stg_idx', 'controllable']], how='left',
                    on='stg_idx')
    res = res.merge(link_table, how='left', on='stg_idx')

    # Convert MW/Mvar to p.u. and split by the per-device power ratios.
    res['p'] = res['p_mw'] * res['gammap'] / ssp.sn_mva
    res['q'] = res['q_mvar'] * res['gammaq'] / ssp.sn_mva

    out_cols = [
        'stg_idx', 'p', 'q', 'vm_pu', 'bus_idx', 'controllable', 'dg_idx',
        'rg_idx', 'syg_idx', 'gov_idx', 'exc_idx'
    ]
    return res[out_cols]
Example #6
0
def _verifyGSF(ppn, gsf, tol=1e-4):
    """
    Verify the generation shift factor (GSF) matrix against DC power flow.

    Parameters
    ----------
    ppn : pandapower network
        A solved pandapower net with line flow results.
    gsf : array-like
        GSF matrix, shape (n_line, n_bus).
    tol : float
        Maximum allowed absolute gap between GSF-predicted and DCPF flows.
    """
    # --- DCPF results ---
    rl = pd.concat(
        [ppn.res_line['p_from_mw'], ppn.line[['from_bus', 'to_bus']]], axis=1)
    rp = _sumPF_ppn(ppn)
    # np.matrix is deprecated; use plain ndarrays with the @ operator.
    rl_c = np.asarray(gsf) @ np.asarray(rp.ngen)
    res_gap = rl.p_from_mw.values - rl_c.flatten()
    if np.abs(res_gap).max() <= tol:
        logger.info("GSF is consistent.")
    else:
        logger.warning("Warning: GSF is inconsistent. Please check!")
Example #7
0
def parse_raw(in_file_name):
    """
    Parse a RAW file and return the captured sections.

    Parameters
    ----------
    in_file_name : str
        Path of the RAW file to parse.

    Returns
    -------
    dict
        Maps each mode key (from the module-level ``modes`` spec) to a dict
        holding the captured ``lines`` and, when parsing succeeded, a ``df``
        (and possibly ``dfs``) with the tabular data.
    """

    # Initialize output containers from the module-level ``modes`` spec
    output = {}
    for item in modes:
        key = item['key']
        output[key] = {"name": item['name'], "key": key, "lines": []}
        # Set up column structure; the CSV header is seeded as the first line
        if 'columns' in item:
            output[key]['columns'] = item['columns']
            header = ",".join(c['name'] for c in item['columns']).replace(
                ' ', '') + '\n'
            output[key]['header'] = header
            output[key]['lines'].append(header)
        if 'records' in item:
            output[key]['records'] = item['records']
            for record in output[key]['records']:
                header = ",".join(c['name'] for c in
                                  record['columns']).replace(' ', '') + '\n'
                record['header'] = header
        # Get parsing info
        output[key]['parse'] = item.get('parse', {})

    # Initialize header mode
    current_mode = {'key': 'header'}

    # Read the file line by line, routing each line to the active mode
    with open(in_file_name, 'r') as in_file:
        line = in_file.readline()
        line_num = 0
        while line:
            line_num += 1

            START = None
            STOP = None

            # Process start/stop signals emitted for this line
            signals = get_signals(line_num, line, current_mode)
            for signal, mode in signals:
                logger.debug("Signal: {} {} {}".format(line_num,
                                                       signal['command'],
                                                       mode['key']))
                if signal['command'] == 'start':
                    START = mode
                if signal['command'] == 'stop':
                    STOP = mode

            # A new mode may not start while the current one is still open
            if current_mode and START and not STOP:
                raise ValueError('Current mode was never stopped')

            # Store the line under the active mode; a stopping mode keeps the
            # line only when it declares 'keep_tail'
            if current_mode and (not STOP or 'keep_tail' in STOP):
                key = current_mode['key']
                output[key]['lines'].append(line)

            if STOP and current_mode and current_mode['key'] != STOP['key']:
                raise ValueError('Attempting to stop a different mode',
                                 current_mode['key'], STOP['key'])
            elif START:
                current_mode = START
            elif STOP:
                current_mode = None

            line = in_file.readline()

    logger.debug("Captured Lines")
    for i in output:
        logger.debug('Item: {}, length: {}'.format(i, len(output[i]['lines'])))

    # Convert the captured lines of each section into DataFrames
    for i in output:
        if 'lines' not in output[i]:
            logger.debug('no lines {}'.format(i))
            continue
        lines = output[i]['lines']
        if len(lines) == 1:
            logger.debug('only header {}'.format(i))
            continue

        if 'read_table' in output[i]['parse']:
            text = StringIO(''.join(lines))
            # pd.read_csv replaces the long-deprecated pd.read_table
            output[i]['df'] = pd.read_csv(text, sep=',')

        if 'read_transformer' in output[i]['parse']:
            output[i]['dfs'] = read_transformer(lines, output[i]['records'])
            df2 = pd.concat(output[i]['dfs'][2], axis=1, sort=False)
            df3 = pd.concat(output[i]['dfs'][3], axis=1, sort=False)
            # DataFrame.append was removed in pandas 2.0; concat row-wise
            output[i]['df'] = pd.concat([df3, df2], sort=False)

        if 'read_twodc' in output[i]['parse']:
            output[i]['dfs'] = read_twodc(lines, output[i]['records'])
            output[i]['df'] = pd.concat(output[i]['dfs'], axis=1, sort=False)

        logger.debug('{} {} {}'.format(i, len(lines), 'df' in output[i]))
        if 'df' in output[i]:
            logger.info('Parsed {} {}'.format(len(output[i]['df']), i))
            if len(lines) > 0:
                logger.debug("{}".format(lines[0]))

    return output
Example #8
0
def make_link_table(ssa):
    """
    Build the link table for generators and generator controllers in an ANDES
    System, including ``SynGen`` and ``DG`` for now.

    Parameters
    ----------
    ssa : andes.system.System
        The ANDES system to link

    Returns
    -------
    DataFrame

        Each column in the output Dataframe contains the ``idx`` of linked
        ``StaticGen``, ``Bus``, ``DG``, ``SynGen``, ``Exciter``, and ``TurbineGov``,
        ``gammap``, ``gammaq``.
    """
    # Per-group device tables with the columns needed for linking.
    stg = build_group_table(ssa, 'StaticGen', ['u', 'name', 'idx', 'bus'])
    gov = build_group_table(ssa, 'TurbineGov', ['idx', 'syn'])
    exc = build_group_table(ssa, 'Exciter', ['idx', 'syn'])
    syg = build_group_table(ssa, 'SynGen',
                            ['idx', 'bus', 'gen', 'gammap', 'gammaq'],
                            ['GENCLS', 'GENROU'])
    dg = build_group_table(ssa, 'DG',
                           ['idx', 'bus', 'gen', 'gammap', 'gammaq'])

    # StaticGen joined with its bus name -> the base key table.
    bus = ssa.Bus.as_df()[['name', 'idx']]
    key = stg.rename(columns={'name': 'stg_name',
                              'idx': 'stg_idx',
                              'bus': 'bus_idx',
                              'u': 'stg_u'})
    key = key.merge(bus.rename(columns={'name': 'bus_name',
                                        'idx': 'bus_idx'}),
                    how='left', on='bus_idx')

    # Right-joins keep exactly one row per dynamic device.
    syg = key.merge(syg.rename(columns={'idx': 'syg_idx',
                                        'gen': 'stg_idx'}),
                    how='right', on='stg_idx')
    dg = key.merge(dg.rename(columns={'idx': 'dg_idx',
                                      'gen': 'stg_idx'}),
                   how='right', on='stg_idx')

    # TODO: Add RenGen
    key = pd.concat([syg, dg], axis=0)

    # Attach exciter and governor idx through the synchronous generator.
    key = key.merge(exc.rename(columns={'idx': 'exc_idx',
                                        'syn': 'syg_idx'}),
                    how='left', on='syg_idx')
    key = key.merge(gov.rename(columns={'idx': 'gov_idx',
                                        'syn': 'syg_idx'}),
                    how='left', on='syg_idx')

    out_cols = [
        'stg_name', 'stg_u', 'stg_idx', 'bus_idx', 'dg_idx', 'syg_idx',
        'exc_idx', 'gov_idx', 'bus_name', 'gammap', 'gammaq'
    ]
    return key[out_cols]
Example #9
0
def make_link_table(ssa):
    """
    Build the link table for generators and generator controllers in an ANDES
    System, including ``SynGen`` and ``DG`` for now.

    Parameters
    ----------
    ssa : andes.system.System
        The ANDES system to link

    Returns
    -------
    DataFrame

        Each column in the output Dataframe contains the ``idx`` of linked
        ``StaticGen``, ``Bus``, ``DG``, ``RenGen``, ``RenExciter``, ``SynGen``,
        ``Exciter``, and ``TurbineGov``, ``gammap``, ``gammaq``.
    """
    # build StaticGen df
    ssa_stg = build_group_table(ssa, 'StaticGen', ['u', 'name', 'idx', 'bus'])
    # build TurbineGov df
    ssa_gov = build_group_table(ssa, 'TurbineGov', ['idx', 'syn'])
    # build Exciter df
    ssa_exc = build_group_table(ssa, 'Exciter', ['idx', 'syn'])
    # build SynGen df
    ssa_syg = build_group_table(ssa, 'SynGen',
                                ['idx', 'bus', 'gen', 'gammap', 'gammaq'],
                                ['GENCLS', 'GENROU'])
    # build DG df
    ssa_dg = build_group_table(ssa, 'DG',
                               ['idx', 'bus', 'gen', 'gammap', 'gammaq'])
    # build RenGen df
    ssa_rg = build_group_table(ssa, 'RenGen',
                               ['idx', 'bus', 'gen', 'gammap', 'gammaq'])
    # build RenExciter df
    ssa_rexc = build_group_table(ssa, 'RenExciter', ['idx', 'reg'])

    # output
    # Base key table: StaticGen rows joined with their bus names.
    ssa_bus = ssa.Bus.as_df()[['name', 'idx']]
    ssa_key = pd.merge(
        left=ssa_stg.rename(columns={
            'name': 'stg_name',
            'idx': 'stg_idx',
            'bus': 'bus_idx',
            'u': 'stg_u'
        }),
        right=ssa_bus.rename(columns={
            'name': 'bus_name',
            'idx': 'bus_idx'
        }),
        how='left',
        on='bus_idx')
    # Right-joins keep one row per dynamic device (SynGen / DG / RenGen).
    ssa_syg = pd.merge(left=ssa_key,
                       how='right',
                       on='stg_idx',
                       right=ssa_syg.rename(columns={
                           'idx': 'syg_idx',
                           'gen': 'stg_idx'
                       }))
    ssa_dg = pd.merge(left=ssa_key,
                      how='right',
                      on='stg_idx',
                      right=ssa_dg.rename(columns={
                          'idx': 'dg_idx',
                          'gen': 'stg_idx'
                      }))
    ssa_rg = pd.merge(left=ssa_key,
                      how='right',
                      on='stg_idx',
                      right=ssa_rg.rename(columns={
                          'idx': 'rg_idx',
                          'gen': 'stg_idx'
                      }))
    # Left-join the dynamic-device idx columns back onto every StaticGen row
    # so we can detect which StaticGen have NO dynamic model attached.
    ssa_key0 = pd.merge(left=ssa_key,
                        how='left',
                        on='stg_idx',
                        right=ssa_syg[['stg_idx', 'syg_idx']])
    ssa_key0 = pd.merge(left=ssa_key0,
                        how='left',
                        on='stg_idx',
                        right=ssa_dg[['stg_idx', 'dg_idx']])
    ssa_key0 = pd.merge(left=ssa_key0,
                        how='left',
                        on='stg_idx',
                        right=ssa_rg[['stg_idx', 'rg_idx']])
    # Unmatched idx become False so astype(bool) maps "no device" to False.
    ssa_key0.fillna(False, inplace=True)
    # 'dyr' is True when NO dynamic device is linked: bool + bool acts as a
    # logical OR elementwise, then 1 - int inverts it.
    ssa_key0['dyr'] = ssa_key0['syg_idx'].astype(bool) + ssa_key0[
        'dg_idx'].astype(bool) + ssa_key0['rg_idx'].astype(bool)
    ssa_key0['dyr'] = 1 - ssa_key0['dyr'].astype(int)
    ssa_key0['dyr'] = ssa_key0['dyr'].astype(bool)
    # StaticGen without any dynamic model keep full power ratios of 1.
    ssa_dyr0 = ssa_key0[ssa_key0.dyr].drop(['dyr'], axis=1)
    ssa_dyr0['gammap'] = 1
    ssa_dyr0['gammaq'] = 1
    # Stack dynamic-device rows with the dynamics-free StaticGen rows.
    ssa_key = pd.concat([ssa_syg, ssa_dg, ssa_rg, ssa_dyr0], axis=0)
    # Attach exciter and governor idx through the synchronous generator.
    ssa_key = pd.merge(left=ssa_key,
                       right=ssa_exc.rename(columns={
                           'idx': 'exc_idx',
                           'syn': 'syg_idx'
                       }),
                       how='left',
                       on='syg_idx')
    ssa_key = pd.merge(left=ssa_key,
                       right=ssa_gov.rename(columns={
                           'idx': 'gov_idx',
                           'syn': 'syg_idx'
                       }),
                       how='left',
                       on='syg_idx')
    # Attach renewable exciter idx through the renewable generator.
    ssa_key = pd.merge(left=ssa_key,
                       how='left',
                       on='rg_idx',
                       right=ssa_rexc.rename(columns={
                           'idx': 'rexc_idx',
                           'reg': 'rg_idx'
                       }))

    cols = [
        'stg_name', 'stg_u', 'stg_idx', 'bus_idx', 'dg_idx', 'rg_idx',
        'rexc_idx', 'syg_idx', 'exc_idx', 'gov_idx', 'bus_name', 'gammap',
        'gammaq'
    ]
    return ssa_key[cols].reset_index(drop=True)