Code Example #1
File: tests.py Project: mhdella/ding0
def init_files_for_tests(mv_grid_districts=[3545],
                         filename='ding0_tests_grids_1.pkl'):
    '''Runs ding0 over the districts selected in mv_grid_districts and writes the result to filename.
    
    Parameters
    ----------
    mv_grid_districts: :obj:`list` of :obj:`int`
        District IDs. Defaults to [3545]
    filename: :obj:`str`
        Defaults to 'ding0_tests_grids_1.pkl'
    
    '''
    print('\n########################################')
    print('  Running ding0 for district', mv_grid_districts)

    # database connection/ session
    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    # instantiate new ding0 network object
    nd = NetworkDing0(name='network')

    # run DING0 on selected MV Grid District
    nd.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)

    # export grid to file (pickle)
    print('\n########################################')
    print('  Saving result in ', filename)
    save_nd_to_pickle(nd, filename=filename)
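
A minimal usage sketch (hypothetical call; assumes the ding0 imports used by the function are in scope):

# regenerate the reference pickle for the default district
init_files_for_tests(mv_grid_districts=[3545],
                     filename='ding0_tests_grids_1.pkl')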
Code Example #2
File: edisgo_integration.py Project: mjohnson518/eGo
    def get_bus_id_from_mv_grid(self, subst_id):
        """
        Queries the eTraGo bus ID for given MV grid (ding0) ID

        Parameters
        ----------
        subst_id : int
            MV grid (ding0) ID

        Returns
        -------
        int
            eTraGo bus ID

        """

        conn = db.connection(section=self._db_section)
        session_factory = sessionmaker(bind=conn)
        Session = scoped_session(session_factory)
        session = Session()

        bus_id = self._get_bus_id_from_mv_grid(session, subst_id)

        Session.remove()

        return bus_id
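
A usage sketch (hypothetical; `edisgo_integration` stands in for an instance of the surrounding integration class):

# look up the eTraGo bus belonging to ding0 MV grid 3545
bus_id = edisgo_integration.get_bus_id_from_mv_grid(3545)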
Code Example #3
File: tests.py Project: mhdella/ding0
def manual_ding0_test(mv_grid_districts=[3545],
                      filename='ding0_tests_grids_1.pkl'):
    ''' Compares a new run of ding0 over districts and an old one saved in
    filename.
    
    Parameters
    ----------
    mv_grid_districts: :obj:`list` of :obj:`int`
        District IDs. Defaults to [3545]
    filename: :obj:`str`
        Defaults to 'ding0_tests_grids_1.pkl'
    '''
    print('\n########################################')
    print('Test ding0 vs File')
    print('\n########################################')
    print('  Loading file', filename, '...')
    nw_1 = load_nd_from_pickle(filename=filename)

    print('\n########################################')
    print('  Running ding0 for district', mv_grid_districts, '...')

    # database connection/ session
    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    nw_2 = NetworkDing0(name='network')
    nw_2.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)

    # test equality
    print('\n########################################')
    print('  Testing equality...')
    passed, msg = dataframe_equal(nw_1, nw_2)
    print('    ...' + msg)
Code Example #4
File: tests.py Project: mhdella/ding0
    def test_ding0(self):
        print('\n########################################')
        print('Test ding0 vs ding0')
        # database connection/ session
        engine = db.connection(readonly=True)
        session = sessionmaker(bind=engine)()

        mv_grid_districts = [3545]

        print('\n########################################')
        print('  Running ding0 once...')
        nw_1 = NetworkDing0(name='network')
        nw_1.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)

        print('\n########################################')
        print('  Running ding0 twice...')
        nw_2 = NetworkDing0(name='network')
        nw_2.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)

        # test equality
        print('\n########################################')
        print('  Testing equality...')
        passed, msg = dataframe_equal(nw_1, nw_2)
        print('    ...' + msg)

        self.assertTrue(passed, msg=msg)
Code Example #5
File: test_core.py Project: mhdella/ding0
    def oedb_session(self):
        """
        Returns an ego.io oedb session and closes it after the test finishes
        """
        engine = db.connection(readonly=True)
        session = sessionmaker(bind=engine)()
        yield session
        print("closing session")
        session.close()
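
As a generator-based fixture, it would typically be consumed like this (a sketch, assuming the method is decorated with @pytest.fixture in the test class):

    def test_run_ding0(self, oedb_session):
        # the fixture yields an open session and closes it after the test
        nd = NetworkDing0(name='network')
        nd.run_ding0(session=oedb_session, mv_grid_districts_no=[3545])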
Code Example #6
File: utilities.py Project: mjohnson518/eGo
def open_oedb_session(ego):
    """
    """
    _db_section = ego.json_file["eTraGo"]["db"]
    conn = db.connection(section=_db_section)
    session_factory = sessionmaker(bind=conn)
    Session = scoped_session(session_factory)
    session = Session()

    return session
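
A usage sketch (assumes `ego` carries the scenario settings loaded from the json file, as in the other eGo examples):

session = open_oedb_session(ego)
# ... run queries ...
session.close()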
Code Example #7
File: retrieve_data.py Project: gplssm/ding0_viz
def generate_ding0_data(grid_id, save_path):
    """Runs ding0 for a single MV grid district and exports the grid to CSV."""

    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    nd = NetworkDing0(name='network')

    # run DING0 on selected MV Grid District
    nd.run_ding0(session=session, mv_grid_districts_no=[grid_id])

    nd.to_csv(save_path)
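
A usage sketch (hypothetical grid ID and output directory):

# build MV grid district 460 and export it as CSV files
generate_ding0_data(460, './ding0_csv')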
Code Example #8
def create_test_grids_with_stats(path):
    '''
    If changes in electrical data have been made, run this function to update
    the saved test data in the given folder. Tests are run on MV grid
    district 460.
    :param path: directory where the test data is stored.
    :return: tuple of (mvgd_stats, mvgd_voltage_nodes, mvgd_current_branches,
        lvgd_stats, lvgd_voltage_nodes, lvgd_current_branches)
    '''

    # database connection/ session
    engine = db.connection(section='oedb')
    session = sessionmaker(bind=engine)()

    # instantiate new ding0 network object
    nd = NetworkDing0(name='network')

    # choose MV Grid Districts to import
    mv_grid_districts = [460]

    # run DING0 on selected MV Grid District
    nd.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)

    # save network
    if not os.path.exists(path):
        os.makedirs(path)
    save_nd_to_pickle(nd, path=path, filename=None)

    mvgd_stats = calculate_mvgd_stats(nd)
    mvgd_stats.to_csv(os.path.join(path, 'mvgd_stats.csv'))
    mvgd_voltage_current_stats = calculate_mvgd_voltage_current_stats(nd)
    mvgd_current_branches = mvgd_voltage_current_stats[1]
    mvgd_current_branches.to_csv(
        os.path.join(path, 'mvgd_current_branches.csv'))
    mvgd_voltage_nodes = mvgd_voltage_current_stats[0]
    mvgd_voltage_nodes.to_csv(os.path.join(path, 'mvgd_voltage_nodes.csv'))

    lvgd_stats = calculate_lvgd_stats(nd)
    lvgd_stats.to_csv(os.path.join(path, 'lvgd_stats.csv'))
    lvgd_voltage_current_stats = calculate_lvgd_voltage_current_stats(nd)
    lvgd_current_branches = lvgd_voltage_current_stats[1]
    lvgd_current_branches.to_csv(
        os.path.join(path, 'lvgd_current_branches.csv'))
    lvgd_voltage_nodes = lvgd_voltage_current_stats[0]
    lvgd_voltage_nodes.to_csv(os.path.join(path, 'lvgd_voltage_nodes.csv'))

    return mvgd_stats, mvgd_voltage_nodes, mvgd_current_branches, lvgd_stats, lvgd_voltage_nodes, lvgd_current_branches
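
A usage sketch (hypothetical output directory):

# refresh the stored test data after changes to the electrical data
mvgd_stats, *other_stats = create_test_grids_with_stats('./testdata')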
Code Example #9
def db_session(db_section):
    """Create DB session using egoio

    Parameters
    ----------
    db_section : :obj:`str`
      Database section in ego.io db config (usually ~/.egoio/config.ini) which
      holds connection details. Note: keyring entry must exist for the section
      to load the credentials.

    Returns
    -------
    :class:`sqlalchemy.orm.session.Session`
        SQLAlchemy session
    """
    conn = connection(section=db_section)
    Session = sessionmaker(bind=conn)

    return Session()
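
A usage sketch (assumes a keyring entry exists for the chosen config section):

session = db_session('oedb')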
Code Example #10
    def __init__(self, *args, **kwargs):
        """
        """

        logger.info("Using scenario setting: {}".format(self.jsonpath))

        self.json_file = None
        self.session = None
        self.scn_name = None

        self.json_file = get_scenario_setting(jsonpath=self.jsonpath)

        # Database connection from json_file
        try:
            conn = db.connection(section=self.json_file['eTraGo']['db'])
            Session = sessionmaker(bind=conn)
            self.session = Session()
            logger.info('Connected to Database')
        except Exception:
            logger.error('Failed connection to Database', exc_info=True)

        # get scn_name
        self.scn_name = self.json_file['eTraGo']['scn_name']
Code Example #11
File: edisgo_integration.py Project: mjohnson518/eGo
    def _identify_extended_storages(self):

        conn = db.connection(section=self._db_section)
        session_factory = sessionmaker(bind=conn)
        Session = scoped_session(session_factory)
        session = Session()

        all_mv_grids = self._check_available_mv_grids()

        storages = pd.DataFrame(index=all_mv_grids, columns=['storage_p_nom'])

        logger.info('Identifying extended storage')
        for mv_grid in all_mv_grids:
            bus_id = self._get_bus_id_from_mv_grid(session, mv_grid)

            min_extended = 0.3
            stor_p_nom = self._etrago_network.storage_units.loc[
                (self._etrago_network.storage_units['bus'] == str(bus_id))
                & (self._etrago_network.storage_units['p_nom_extendable'])
                & (self._etrago_network.storage_units['p_nom_opt'] >
                   min_extended)
                & (self._etrago_network.storage_units['max_hours'] <=
                   20.)]['p_nom_opt']

            if len(stor_p_nom) == 1:
                stor_p_nom = stor_p_nom.values[0]
            elif len(stor_p_nom) == 0:
                stor_p_nom = 0.
            else:
                raise IndexError(
                    "Multiple extendable storages at bus {}".format(bus_id))

            storages.at[mv_grid, 'storage_p_nom'] = stor_p_nom

        Session.remove()

        return storages
Code Example #12
File: tests.py Project: mhdella/ding0
    def test_ding0_file(self):
        print('\n########################################')
        print('Test ding0 vs File')
        print('\n########################################')
        print('  Loading data...')
        nw_1 = load_nd_from_pickle(filename='ding0_tests_grids_1.pkl')

        print('\n########################################')
        print('  Running ding0 for the same configuration...')

        # database connection/ session
        engine = db.connection(readonly=True)
        session = sessionmaker(bind=engine)()
        mv_grid_districts = [3545]

        nw_2 = NetworkDing0(name='network')
        nw_2.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)

        # test equality
        print('  Testing equality...')
        passed, msg = dataframe_equal(nw_1, nw_2)
        print('    ...' + msg)

        self.assertTrue(passed, msg=msg)
Code Example #13
def run_multiple_grid_districts(mv_grid_districts,
                                run_id,
                                failsafe=False,
                                base_path=None):
    """
    Perform ding0 run on given grid districts

    Parameters
    ----------
    mv_grid_districts : :obj:`list`
        Integers describing grid districts
    run_id: :obj:`str`
        Identifier for a run of Ding0. For example, it is used to create a
        subdirectory of os.path.join(`base_path`, 'results')
    failsafe : bool
        Setting to True enables failsafe mode where corrupt grid districts
        (mostly due to data issues) are reported and skipped. The report is
        found in the log dir under :code:`~/.ding0`. Default is False.
    base_path : :obj:`str`
        Base path for ding0 data (input, results and logs).
        Default is `None` which sets it to :code:`~/.ding0` (may deviate on
        Windows systems).
        Specify your own, but keep in mind that it requires a particular
        structure of subdirectories.

    Returns
    -------
    msg : :obj:`str`
        Traceback of error computing corrupt MV grid district
        .. TODO: this is only true if the try-except environment is moved
            into this function and traceback return is implemented

    Note
    -----
    Consider that a large number of MV grid districts may take hours or even
    days to compute. A computational run for a single grid district takes
    around 30 seconds.
    """
    start = time.time()

    # define base path
    if base_path is None:
        base_path = BASEPATH

    # database connection/ session
    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    corrupt_grid_districts = pd.DataFrame(columns=['id', 'message'])

    for mvgd in mv_grid_districts:
        # instantiate ding0  network object
        nd = NetworkDing0(name='network', run_id=run_id)

        if not os.path.exists(os.path.join(base_path, "grids")):
            os.mkdir(os.path.join(base_path, "grids"))

        if not failsafe:
            # run DING0 on selected MV Grid District
            msg = nd.run_ding0(session=session, mv_grid_districts_no=[mvgd])

            # save results
            results.save_nd_to_pickle(nd, os.path.join(base_path, "grids"))
        else:
            # try to perform ding0 run on grid district
            try:
                msg = nd.run_ding0(session=session,
                                   mv_grid_districts_no=[mvgd])
                # if not successful, put grid district to report
                if msg:
                    corrupt_grid_districts = corrupt_grid_districts.append(
                        pd.Series({
                            'id': mvgd,
                            'message': msg[0]
                        }),
                        ignore_index=True)
                # if successful, save results
                else:
                    results.save_nd_to_pickle(nd,
                                              os.path.join(base_path, "grids"))
            except Exception as e:
                corrupt_grid_districts = corrupt_grid_districts.append(
                    pd.Series({
                        'id': mvgd,
                        'message': e
                    }), ignore_index=True)

                continue

        # Merge metadata of multiple runs
        if 'metadata' not in locals():
            metadata = nd.metadata

        else:
            if isinstance(mvgd, list):
                metadata['mv_grid_districts'].extend(mvgd)
            else:
                metadata['mv_grid_districts'].append(mvgd)

    # Save metadata to disk
    with open(os.path.join(base_path, "grids", 'Ding0_{}.meta'.format(run_id)),
              'w') as f:
        json.dump(metadata, f)

    # report on unsuccessful runs
    corrupt_grid_districts.to_csv(os.path.join(
        base_path, "grids", 'corrupt_mv_grid_districts.txt'),
                                  index=False,
                                  float_format='%.0f')

    print('Elapsed time for', str(len(mv_grid_districts)),
          'MV grid districts (seconds): {}'.format(time.time() - start))

    return msg
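
A usage sketch (hypothetical run ID; results land under the default base path ~/.ding0):

run_multiple_grid_districts([3545, 3546],
                            run_id='20180101120000',
                            failsafe=True)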
Code Example #14
def etrago(args):
    """The etrago function works with following arguments:
    
    
    Parameters
    ----------
           
    db (str): 
    	'oedb', 
        Name of Database session setting stored in config.ini of oemof.db
        
    gridversion (str):
        'v0.2.11', 
        Name of the data version number of oedb: state 'None' for 
        model_draft (sand-box) or an explicit version number 
        (e.g. 'v0.2.10') for the grid schema.
         
    method (str):
        'lopf', 
        Choose between a non-linear power flow ('pf') or
        a linear optimal power flow ('lopf').
        
    pf_post_lopf (bool): 
        False, 
        Option to run a non-linear power flow (pf) directly after the 
        linear optimal power flow (and thus the dispatch) has finished.
                
    start_snapshot (int):
    	1, 
        Start hour of the scenario year to be calculated.
        
    end_snapshot (int) : 
    	2,
        End hour of the scenario year to be calculated.
        
    scn_name (str): 
    	'Status Quo',
        Choose your scenario. Currently, there are three different
        scenarios: 'Status Quo', 'NEP 2035', 'eGo100'. If you do not
        want to use the full German dataset, you can use the excerpt of
        Schleswig-Holstein by adding the acronym SH to the scenario
        name (e.g. 'SH Status Quo').
        
    solver (str): 
        'glpk', 
        Choose your preferred solver. Current options: 'glpk' (open-source),
        'cplex' or 'gurobi'.
                
    lpfile (obj): 
        False, 
        State if and where you want to save pyomo's lp file. Options:
        False or '/path/tofolder'.
        
    results (obj): 
        False, 
        State if and where you want to save results as csv files. Options:
        False or '/path/tofolder'.
        
    export (bool): 
        False, 
        State if you want to export the results of your calculation 
        back to the database.
        
    storage_extendable (bool):
        True,
        Choose if you want to allow to install extendable storages 
        (unlimited in size) at each grid node in order to meet the flexibility demand. 
        
    generator_noise (bool):
        True,
        Choose if you want to apply a small random noise to the marginal 
        costs of each generator in order to prevent an optima plateau.
        
    reproduce_noise (obj): 
        False, 
        State if you want to use a predefined set of random noise for 
        the given scenario. If so, provide path to the csv file,
        e.g. 'noise_values.csv'.
        
    minimize_loading (bool):
        False,
        
    k_mean_clustering (bool): 
        False,
        State if you want to apply a clustering of all network buses down to 
        only 'k' buses. The weighting takes place considering generation and load
        at each node. If so, state the number of k you want to apply. Otherwise 
        put False. This function doesn't work together with
        'line_grouping = True' or 'network_clustering = True'.
    
    network_clustering (bool):
        False, 
        Choose if you want to cluster the full HV/EHV dataset down to only the EHV 
        buses. In that case, all HV buses are assigned to their closest EHV sub-station, 
        taking into account the shortest distance on power lines.
        
    parallelisation (bool):
        False,
        Choose if you want to calculate a certain number of snapshots in parallel. If
        yes, define the respective amount in the if-clause execution below. Otherwise 
        state False here.
        
    line_grouping (bool): 
        True,
        State if you want to group lines that connect the same two buses into one system.
   
    branch_capacity_factor (numeric): 
        1, 
        Add a factor here if you want to globally change line capacities
        (e.g. to "consider" an (n-1) criterion or for debugging purposes).
           
    load_shedding (bool):
        False,
        State here if you want to make use of the load shedding function which is helpful when
        debugging: a very expensive generator is set to each bus and meets the demand when regular
        generators cannot do so.
        
    comments (str): 
        None
        
    Returns
    -------
    network : PyPSA network container
        eTraGo result network

    """
    conn = db.connection(section=args['db'])
    Session = sessionmaker(bind=conn)
    session = Session()

    # additional arguments cfgpath, version, prefix
    if args['gridversion'] is None:
        args['ormcls_prefix'] = 'EgoGridPfHv'
    else:
        args['ormcls_prefix'] = 'EgoPfHv'
        
    scenario = NetworkScenario(session,
                               version=args['gridversion'],
                               prefix=args['ormcls_prefix'],
                               method=args['method'],
                               start_snapshot=args['start_snapshot'],
                               end_snapshot=args['end_snapshot'],
                               scn_name=args['scn_name'])

    network = scenario.build_network()

    # add coordinates
    network = add_coordinates(network)

    # TEMPORARY vague adjustment due to transformer bug in data processing     
    if args['gridversion'] == 'v0.2.11':
        network.transformers.x=network.transformers.x*0.0001

    if args['branch_capacity_factor']:
        network.lines.s_nom = network.lines.s_nom*args['branch_capacity_factor']
        network.transformers.s_nom = network.transformers.s_nom*args['branch_capacity_factor']

    if args['generator_noise']:
        # create or reproduce generator noise
        if args['reproduce_noise'] is not False:
            noise_values = genfromtxt('noise_values.csv', delimiter=',')
            # add random noise to all generators
            network.generators.marginal_cost = noise_values
        else:
            noise_values = network.generators.marginal_cost + abs(
                np.random.normal(0, 0.001,
                                 len(network.generators.marginal_cost)))
            np.savetxt("noise_values.csv", noise_values, delimiter=",")
            noise_values = genfromtxt('noise_values.csv', delimiter=',')
            # add random noise to all generators
            network.generators.marginal_cost = noise_values
      
      
    if args['storage_extendable']:
        # set virtual storages to be extendable
        if (network.storage_units.carrier == 'extendable_storage').any():
            network.storage_units.loc[
                network.storage_units.carrier == 'extendable_storage',
                'p_nom_extendable'] = True
            # set virtual storage costs with regard to snapshot length
            network.storage_units.capital_cost = (
                network.storage_units.capital_cost /
                (8760 // (args['end_snapshot'] - args['start_snapshot'] + 1)))

    # for SH scenario run, do data preparation:
    if args['scn_name'] == 'SH Status Quo' or args['scn_name'] == 'SH NEP 2035':
        data_manipulation_sh(network)
        
    # grouping of parallel lines
    if args['line_grouping']:
        group_parallel_lines(network)

    # load shedding in order to hunt infeasibilities
    if args['load_shedding']:
        load_shedding(network)

    # network clustering
    if args['network_clustering']:
        network.generators.control="PV"
        busmap = busmap_from_psql(network, session, scn_name=args['scn_name'])
        network = cluster_on_extra_high_voltage(network, busmap, with_time=True)
    
    # k-mean clustering
    if args['k_mean_clustering'] is not False:
        network = kmean_clustering(network, n_clusters=args['k_mean_clustering'])
        
    # Branch loading minimization
    if args['minimize_loading']:
        extra_functionality = loading_minimization
    else:
        extra_functionality=None
    
    if args['skip_snapshots']:
        network.snapshots = network.snapshots[::args['skip_snapshots']]
        network.snapshot_weightings = network.snapshot_weightings[
            ::args['skip_snapshots']] * args['skip_snapshots']
        
    # parallelisation
    if args['parallelisation']:
        parallelisation(network, start_snapshot=args['start_snapshot'],
                        end_snapshot=args['end_snapshot'], group_size=1,
                        solver_name=args['solver'],
                        extra_functionality=extra_functionality)
    # start linear optimal powerflow calculations
    elif args['method'] == 'lopf':
        x = time.time()
        network.lopf(network.snapshots, solver_name=args['solver'], extra_functionality=extra_functionality)
        y = time.time()
        z = (y - x) / 60 # z is time for lopf in minutes
    # start non-linear powerflow simulation
    elif args['method'] == 'pf':
        network.pf(scenario.timeindex)
        # calc_line_losses(network)
        
    if args['pf_post_lopf']:
        pf_post_lopf(network, scenario)
        calc_line_losses(network)
    
    # provide storage installation costs
    if sum(network.storage_units.p_nom_opt) != 0:
        installed_storages = network.storage_units[
            network.storage_units.p_nom_opt != 0]
        storage_costs = sum(installed_storages.capital_cost *
                            installed_storages.p_nom_opt)
        print("Investment costs for all storages in selected snapshots "
              "[EUR]:", round(storage_costs, 2))
        
    # write lpfile to path
    if args['lpfile'] is not False:
        network.model.write(
            args['lpfile'], io_options={'symbolic_solver_labels': True})
    # write PyPSA results back to database
    if args['export']:
        username = str(conn.url).split('//')[1].split(':')[0]
        args['user_name'] = username
        # default is False. If set to True, the result set will be saved
        # to the versioned grid schema eventually, apart from being saved
        # to the model_draft.
        # ONLY set to True if you know what you are doing.
        safe_results = False
        results_to_oedb(session, network, args, grid='hv',
                        safe_results=safe_results)
        
    # write PyPSA results to csv to path
    if args['results'] is not False:
        results_to_csv(network, args['results'])

    # close session
    session.close()

    return network
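
A minimal call sketch with hypothetical argument values, mirroring the parameters documented in the docstring above (note that the function body also reads an undocumented 'skip_snapshots' key):

args = {
    'db': 'oedb',
    'gridversion': None,
    'method': 'lopf',
    'pf_post_lopf': False,
    'start_snapshot': 1,
    'end_snapshot': 2,
    'scn_name': 'SH Status Quo',
    'solver': 'glpk',
    'lpfile': False,
    'results': False,
    'export': False,
    'storage_extendable': True,
    'generator_noise': True,
    'reproduce_noise': False,
    'minimize_loading': False,
    'k_mean_clustering': False,
    'network_clustering': False,
    'parallelisation': False,
    'line_grouping': True,
    'branch_capacity_factor': 1,
    'load_shedding': False,
    'skip_snapshots': False,
    'comments': None,
}
network = etrago(args)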
Code Example #15
from ding0.tools.logger import setup_logger
import logging
from test_skripts.tools import compare_networks_by_line_type_lengths
#endregion
logger = logging.getLogger('debug')

#region SETTINGS
nr_test_runs = 2
test_path = 'C:/Users/Anya.Heider/open_BEA/ding0/testdata'
# choose MV Grid Districts to import
mv_grid_districts = [460]
#endregion

#region CREATE NW
# database connection/ session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()

# instantiate new ding0 network object
nw1 = NetworkDing0(name='network')
nw2 = NetworkDing0(name='network')
compare_networks_by_line_type_lengths(nw1, nw2, 0)
logger.debug("########## New Networks initiated #############")

# STEP 1: Import MV Grid Districts and subjacent objects
nw1.import_mv_grid_districts(session, mv_grid_districts_no=mv_grid_districts)
nw2.import_mv_grid_districts(session, mv_grid_districts_no=mv_grid_districts)
compare_networks_by_line_type_lengths(nw1, nw2, 1)
logger.debug("########## Step 1 finished: grids imported #############")

# STEP 2: Import generators
Code Example #16
File: edisgo_integration.py Project: mjohnson518/eGo
    def _run_edisgo(self, mv_grid_id):
        """
        Performs a single eDisGo run

        Parameters
        ----------
        mv_grid_id : int
            MV grid ID of the ding0 grid

        Returns
        -------
        :class:`edisgo.grid.network.EDisGo`
            Returns the complete eDisGo container, also including results
        """
        self._status_update(mv_grid_id, 'start', show=False)

        storage_integration = self._storage_distribution
        apply_curtailment = self._apply_curtailment

        logger.info(
            'MV grid {}: Calculating interface values'.format(mv_grid_id))

        conn = db.connection(section=self._db_section)
        session_factory = sessionmaker(bind=conn)
        Session = scoped_session(session_factory)
        session = Session()

        # Query bus ID for this MV grid
        bus_id = self._get_bus_id_from_mv_grid(session, mv_grid_id)

        # Calculate Interface values for this MV grid
        specs = get_etragospecs_direct(session, bus_id, self._etrago_network,
                                       self._scn_name, self._grid_version,
                                       self._pf_post_lopf,
                                       self._max_cos_phi_renewable)
        Session.remove()

        # Get ding0 (MV grid) form folder
        ding0_filepath = (self._ding0_files + '/ding0_grids__' +
                          str(mv_grid_id) + '.pkl')

        if not os.path.isfile(ding0_filepath):
            msg = 'No MV grid file for MV grid {}'.format(mv_grid_id)
            logger.error(msg)
            raise Exception(msg)

        # Initialize eDisGo with this MV grid
        logger.info(("MV grid {}: Initialize MV grid").format(mv_grid_id))

        edisgo_grid = EDisGo(ding0_grid=ding0_filepath,
                             worst_case_analysis='worst-case')

        logger.info(("MV grid {}: Changing eDisGo's voltage configurations " +
                     "for initial reinforcement").format(mv_grid_id))

        edisgo_grid.network.config[
            'grid_expansion_allowed_voltage_deviations'] = {
                'hv_mv_trafo_offset': 0.04,
                'hv_mv_trafo_control_deviation': 0.0,
                'mv_load_case_max_v_deviation': 0.055,
                'mv_feedin_case_max_v_deviation': 0.02,
                'lv_load_case_max_v_deviation': 0.065,
                'lv_feedin_case_max_v_deviation': 0.03,
                'mv_lv_station_load_case_max_v_deviation': 0.02,
                'mv_lv_station_feedin_case_max_v_deviation': 0.01
            }

        # Initial grid reinforcements
        logger.info(("MV grid {}: Initial MV grid reinforcement " +
                     "(worst-case analysis)").format(mv_grid_id))

        edisgo_grid.reinforce()

        # Get costs for initial reinforcement
        # TODO: Implement a separate cost function
        costs_grouped = \
            edisgo_grid.network.results.grid_expansion_costs.groupby(
                ['type']).sum()
        costs = pd.DataFrame(
            costs_grouped.values,
            columns=costs_grouped.columns,
            index=[[edisgo_grid.network.id] * len(costs_grouped),
                   costs_grouped.index]).reset_index()
        costs.rename(columns={'level_0': 'grid'}, inplace=True)

        costs_before = costs

        total_costs_before_EUR = costs_before['total_costs'].sum() * 1000
        logger.info(("MV grid {}: Costs for initial " +
                     "reinforcement: EUR {}").format(
                         mv_grid_id,
                         "{0:,.2f}".format(total_costs_before_EUR)))

        logger.info(("MV grid {}: Resetting grid after initial reinforcement"
                     ).format(mv_grid_id))
        edisgo_grid.network.results = Results(edisgo_grid.network)
        # Reload the (original) eDisGo configs
        edisgo_grid.network.config = None

        # eTraGo case begins here
        logger.info("MV grid {}: eTraGo feed-in case".format(mv_grid_id))

        # Update eDisGo settings (from config files) with scenario settings
        logger.info(
            "MV grid {}: Updating eDisgo configuration".format(mv_grid_id))
        # Update configs with eGo's scenario settings
        self._update_edisgo_configs(edisgo_grid)

        # Generator import for NEP 2035 and eGo 100 scenarios
        if self._generator_scn:
            logger.info('Importing generators for scenario {}'.format(
                self._scn_name))
            edisgo_grid.import_generators(
                generator_scenario=self._generator_scn)
        else:
            logger.info('No generators imported for scenario {}'.format(
                self._scn_name))
            edisgo_grid.network.pypsa = None

        # Time Series from eTraGo
        logger.info('Updating eDisGo timeseries with eTraGo values')
        if self._pf_post_lopf:
            logger.info('(Including reactive power)')
            edisgo_grid.network.timeseries = TimeSeriesControl(
                network=edisgo_grid.network,
                timeseries_generation_fluctuating=specs['ren_potential'],
                timeseries_generation_dispatchable=specs['conv_dispatch'],
                timeseries_generation_reactive_power=specs['reactive_power'],
                timeseries_load='demandlib',
                timeindex=specs['conv_dispatch'].index).timeseries
        else:
            logger.info('(Only active power)')
            edisgo_grid.network.timeseries = TimeSeriesControl(
                network=edisgo_grid.network,
                timeseries_generation_fluctuating=specs['ren_potential'],
                timeseries_generation_dispatchable=specs['conv_dispatch'],
                timeseries_load='demandlib',
                timeindex=specs['conv_dispatch'].index).timeseries

        # Curtailment
        if apply_curtailment:
            logger.info('Including Curtailment')

            gens_df = tools.get_gen_info(edisgo_grid.network)
            solar_wind_capacities = gens_df.groupby(
                by=['type', 'weather_cell_id'])['nominal_capacity'].sum()

            curt_cols = [
                i for i in specs['ren_curtailment'].columns
                if i in solar_wind_capacities.index
            ]

            if not curt_cols:
                raise ImportError(
                    ("MV grid {}: Data doesn't match").format(mv_grid_id))

            curt_abs = pd.DataFrame(
                columns=pd.MultiIndex.from_tuples(curt_cols))

            for col in curt_abs:
                curt_abs[col] = (specs['ren_curtailment'][col] *
                                 solar_wind_capacities[col])

            edisgo_grid.curtail(
                curtailment_timeseries=curt_abs,
                methodology='voltage-based',
                solver=self._solver,
                voltage_threshold=self._curtailment_voltage_threshold)
        else:
            logger.info('No curtailment applied')

        # Storage Integration
        costs_without_storage = None
        if storage_integration:
            if self._ext_storage:
                if specs['battery_p_series'] is not None:
                    logger.info('Integrating storages in MV grid')
                    edisgo_grid.integrate_storage(
                        timeseries=specs['battery_p_series'],
                        position='distribute_storages_mv',
                        timeseries_reactive_power=specs['battery_q_series']
                    )  # None if no pf_post_lopf
                    costs_without_storage = (
                        edisgo_grid.network.results.storages_costs_reduction[
                            'grid_expansion_costs_initial'].values[0])
        else:
            logger.info('No storage integration')

        logger.info("MV grid {}: eDisGo grid analysis".format(mv_grid_id))

        edisgo_grid.reinforce(timesteps_pfa=self._timesteps_pfa)

        if costs_without_storage is not None:
            costs_with_storage = (edisgo_grid.network.results.
                                  grid_expansion_costs['total_costs'].sum())
            if costs_with_storage >= costs_without_storage:
                logger.warning(
                    "Storage did not benefit MV grid {}".format(mv_grid_id))
                st = edisgo_grid.network.mv_grid.graph.nodes_by_attribute(
                    'storage')
                for storage in st:
                    tools.disconnect_storage(edisgo_grid.network, storage)

        self._status_update(mv_grid_id, 'end')

        path = os.path.join(self._results, str(mv_grid_id))
        edisgo_grid.network.results.save(path)

        return {edisgo_grid.network.id: path}
Code Example #17
File: validation.py Project: mhdella/ding0
                    }
                trafo_count = 0
                for trafo in lv_district.lv_grid._station._transformers:
                    lv_transformer_dict[str(lv_district.lv_grid._station.id_db)
                                        + "_" + str(trafo_count)] = {
                                            'power': trafo.s_max_a,
                                            'resistance': trafo.r_pu,
                                            'reactance': trafo.x_pu
                                        }
    return mv_branches_dict, lv_branches_dict, lv_transformer_dict


########################################################
if __name__ == "__main__":
    # database connection/ session
    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    nw = load_nd_from_pickle(filename='ding0_tests_grids_1.pkl')

    compare_by_level, compare_by_type = validate_generation(session, nw)
    print('\nCompare Generation by Level')
    print(compare_by_level)
    print('\nCompare Generation by Type/Subtype')
    print(compare_by_type)

    compare_by_la, compare_la_ids = validate_load_areas(session, nw)
    print('\nCompare Load by Load Areas')
    print(compare_by_la)
    #print(compare_la_ids)
Code Example #18
File: appl.py Project: Svosw/eTraGo
def etrago(args):
    """The etrago function works with following arguments:


    Parameters
    ----------

    db : str
        ``'oedb'``,
        Name of Database session setting stored in *config.ini* of *.egoio*

    gridversion : NoneType or str
        ``'v0.2.11'``,
        Name of the data version number of oedb: state ``'None'`` for
        model_draft (sand-box) or an explicit version number
        (e.g. 'v0.2.10') for the grid schema.

    method : str
        ``'lopf'``,
        Choose between a non-linear power flow ('pf') or
        a linear optimal power flow ('lopf').

    pf_post_lopf : bool
        False,
        Option to run a non-linear power flow (pf) directly after the
        linear optimal power flow (and thus the dispatch) has finished.

    start_snapshot : int
        1,
        Start hour of the scenario year to be calculated.

    end_snapshot : int
        2,
        End hour of the scenario year to be calculated.

    solver : str
        'glpk',
        Choose your preferred solver. Current options: 'glpk' (open-source),
        'cplex' or 'gurobi'.

    scn_name : str
        'Status Quo',
        Choose your scenario. Currently, there are three different
        scenarios: 'Status Quo', 'NEP 2035', 'eGo100'. If you do not
        want to use the full German dataset, you can use the excerpt of
        Schleswig-Holstein by adding the acronym SH to the scenario
        name (e.g. 'SH Status Quo').

    scn_extension : NoneType or list
        None,
        Choose extension-scenarios which will be added to the existing
        network container. Data of the extension scenarios are located in
        extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus)
        with the prefix 'extension_'.
        Currently there are three overlay networks:
            'nep2035_confirmed' includes all planned new lines confirmed by
            the Bundesnetzagentur
            'nep2035_b2' includes all new lines planned by the
            Netzentwicklungsplan 2025 in scenario 2035 B2
            'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and
            adds BE and NO as electrical neighbours

    scn_decommissioning : str
        None,
        Choose an extra scenario which includes lines you want to decommission
        from the existing network. Data of the decommissioning scenarios are
        located in extension-tables
        (e.g. model_draft.ego_grid_pf_hv_extension_bus) with the prefix
        'decommissioning_'.
        Currently, there are two decommissioning_scenarios which are linked to
        extension-scenarios:
            'nep2035_confirmed' includes all lines that will be replaced in
            confirmed projects
            'nep2035_b2' includes all lines that will be replaced in
            NEP-scenario 2035 B2

    lpfile : obj
        False,
        State if and where you want to save pyomo's lp file. Options:
        False or '/path/tofolder'.

    csv_export : obj
        False,
        State if and where you want to save results as csv files. Options:
        False or '/path/tofolder'.

    db_export : bool
        False,
        State if you want to export the results of your calculation
        back to the database.

    extendable : list
        ['network', 'storages'],
        Choose components you want to optimize.
        Settings can be added in /tools/extendable.py.
        The most important possibilities:
            'network': set all lines, links and transformers extendable
            'german_network': set lines and transformers in German grid
                            extendable
            'foreign_network': set foreign lines and transformers extendable
            'transformers': set all transformers extendable
            'overlay_network': set all components of the 'scn_extension'
                               extendable
            'storages': allow to install extendable storages
                        (unlimited in size) at each grid node in order to meet
                        the flexibility demand.
            'network_preselection': set only preselected lines extendable,
                                    method is chosen in function call


    generator_noise : bool or int
        State if you want to apply a small random noise to the marginal costs
        of each generator in order to prevent an optima plateau. To reproduce
        a noise, choose the same integer (seed number).

    minimize_loading : bool
        False,
        ...

    ramp_limits : bool
        False,
        State if you want to consider ramp limits of generators.
        Increases time for solving significantly.
        Only works when calculating at least 30 snapshots.

    extra_functionality : str or None
        None,
        Choose name of extra functionality described in etrago/utilities.py
        "min_renewable_share" to set a minimal share of renewable energy or
        "max_line_ext" to set an overall maximum of line expansion.
        When activating snapshot_clustering or minimize_loading these
        extra_funtionalities are overwritten and therefore neglected.

    network_clustering_kmeans : bool or int
        False,
        State if you want to apply a clustering of all network buses down to
        only ``'k'`` buses. The weighting takes place considering generation
        and load
        at each node. If so, state the number of k you want to apply. Otherwise
        put False. This function doesn't work together with
        ``'line_grouping = True'``.

    load_cluster : bool or obj
        state if you want to load cluster coordinates from a previous run:
        False or /path/tofile (filename similar to ./cluster_coord_k_n_result).

    network_clustering_ehv : bool
        False,
        Choose if you want to cluster the full HV/EHV dataset down to only the
        EHV buses. In that case, all HV buses are assigned to their closest EHV
        sub-station, taking into account the shortest distance on power lines.

    snapshot_clustering : bool or int
        False,
        State if you want to cluster the snapshots and run the optimization
        only on a subset of snapshot periods. The int value defines the number
        of periods (i.e. days) which will be clustered to.
        Move to PyPSA branch:features/snapshot_clustering

    parallelisation : bool
        False,
        Choose if you want to calculate a certain number of snapshots in
        parallel. If yes, define the respective amount in the if-clause
        execution below. Otherwise state False here.

    line_grouping : bool
        True,
        State if you want to group lines that connect the same two buses
        into one system.

    branch_capacity_factor : dict
        {'HV': 0.5, 'eHV' : 0.7},
        Add a factor here if you want to globally change line capacities
        (e.g. to "consider" an (n-1) criterion or for debugging purposes).

    load_shedding : bool
        False,
        State here if you want to make use of the load shedding function which
        is helpful when debugging: a very expensive generator is set to each
        bus and meets the demand when regular
        generators cannot do so.

    foreign_lines : dict
        {'carrier': 'AC', 'capacity': 'osmTGmod'},
        Choose transmission technology and capacity of foreign lines:
            'carrier': 'AC' or 'DC'
            'capacity': 'osmTGmod', 'ntc_acer' or 'thermal_acer'

    comments : str
        None

    Returns
    -------
    network : :class:`pypsa.Network`
        eTraGo result network based on `PyPSA network
        <https://www.pypsa.org/doc/components.html#network>`_
    disaggregated_network : :class:`pypsa.Network` or None
        Spatially disaggregated result network if a disaggregation method
        was chosen, otherwise None.
    """
    conn = db.connection(section=args['db'])
    Session = sessionmaker(bind=conn)
    session = Session()

    # additional arguments cfgpath, version, prefix
    if args['gridversion'] is None:
        args['ormcls_prefix'] = 'EgoGridPfHv'
    else:
        args['ormcls_prefix'] = 'EgoPfHv'

    scenario = NetworkScenario(session,
                               version=args['gridversion'],
                               prefix=args['ormcls_prefix'],
                               method=args['method'],
                               start_snapshot=args['start_snapshot'],
                               end_snapshot=args['end_snapshot'],
                               scn_name=args['scn_name'])

    network = scenario.build_network()

    # add coordinates
    network = add_coordinates(network)

    # Set countrytags of buses, lines, links and transformers
    network = geolocation_buses(network, session)

    # Set q_sets of foreign loads
    network = set_q_foreign_loads(network, cos_phi=1)

    # Change transmission technology and/or capacity of foreign lines
    if args['foreign_lines']['carrier'] == 'DC':
        foreign_links(network)
        network = geolocation_buses(network, session)

    if args['foreign_lines']['capacity'] != 'osmTGmod':
        crossborder_capacity(network, args['foreign_lines']['capacity'],
                             args['branch_capacity_factor'])

    # TEMPORARY vague adjustment due to transformer bug in data processing
    if args['gridversion'] == 'v0.2.11':
        network.transformers.x = network.transformers.x * 0.0001

    # set SOC at the beginning and end of the period to equal values
    network.storage_units.cyclic_state_of_charge = True

    # set extra_functionality
    if args['extra_functionality'] is not None:
        extra_functionality = eval(args['extra_functionality'])
    elif args['extra_functionality'] is None:
        extra_functionality = args['extra_functionality']

    # set disaggregated_network to default
    disaggregated_network = None

    # set clustering to default
    clustering = None

    if args['generator_noise'] is not False:
        # add random noise to all generators
        s = np.random.RandomState(args['generator_noise'])
        network.generators.marginal_cost[network.generators.bus.isin(
                network.buses.index[network.buses.country_code == 'DE'])] += \
            abs(s.normal(0, 0.1, len(network.generators.marginal_cost[
                    network.generators.bus.isin(network.buses.index[
                            network.buses.country_code == 'DE'])])))
    # for SH scenario run do data preperation:
    if (args['scn_name'] == 'SH Status Quo'
            or args['scn_name'] == 'SH NEP 2035'):
        data_manipulation_sh(network)

    # grouping of parallel lines
    if args['line_grouping']:
        group_parallel_lines(network)

    # Branch loading minimization
    if args['minimize_loading']:
        extra_functionality = loading_minimization

    # scenario extensions
    if args['scn_extension'] is not None:
        for i in range(len(args['scn_extension'])):
            network = extension(network,
                                session,
                                version=args['gridversion'],
                                scn_extension=args['scn_extension'][i],
                                start_snapshot=args['start_snapshot'],
                                end_snapshot=args['end_snapshot'])
        network = geolocation_buses(network, session)

    # set Branch capacity factor for lines and transformer
    if args['branch_capacity_factor']:
        set_branch_capacity(network, args)

    # scenario decommissioning
    if args['scn_decommissioning'] is not None:
        network = decommissioning(network, session, args)

    # Add missing lines in Munich and Stuttgart
    network = add_missing_components(network)

    # investive optimization strategies
    if args['extendable'] != []:
        network = extendable(network, args, line_max=4)
        network = convert_capital_costs(network, args['start_snapshot'],
                                        args['end_snapshot'])

    # skip snapshots
    if args['skip_snapshots']:
        network.snapshots = network.snapshots[::args['skip_snapshots']]
        network.snapshot_weightings = network.snapshot_weightings[::args[
            'skip_snapshots']] * args['skip_snapshots']

    # snapshot clustering
    if args['snapshot_clustering'] is not False:
        network = snapshot_clustering(network,
                                      how='daily',
                                      clusters=args['snapshot_clustering'])
        extra_functionality = daily_bounds  # daily_bounds or other constraint

    # load shedding in order to hunt infeasibilities
    if args['load_shedding']:
        load_shedding(network)

    # ehv network clustering
    if args['network_clustering_ehv']:
        network.generators.control = "PV"
        busmap = busmap_from_psql(network, session, scn_name=args['scn_name'])
        network = cluster_on_extra_high_voltage(network,
                                                busmap,
                                                with_time=True)

    # k-mean clustering
    if args['network_clustering_kmeans'] is not False:
        clustering = kmean_clustering(
            network,
            n_clusters=args['network_clustering_kmeans'],
            load_cluster=args['load_cluster'],
            line_length_factor=1,
            remove_stubs=False,
            use_reduced_coordinates=False,
            bus_weight_tocsv=None,
            bus_weight_fromcsv=None,
            n_init=10,
            max_iter=100,
            tol=1e-6,
            n_jobs=-1)
        disaggregated_network = (network.copy()
                                 if args.get('disaggregation') else None)
        network = clustering.network.copy()

    if args['ramp_limits']:
        ramp_limits(network)

    # preselection of extendable lines
    if 'network_preselection' in args['extendable']:
        extension_preselection(network, args, 'snapshot_clustering', 2)

    # parallisation
    if args['parallelisation']:
        parallelisation(network,
                        start_snapshot=args['start_snapshot'],
                        end_snapshot=args['end_snapshot'],
                        group_size=1,
                        solver_name=args['solver'],
                        solver_options=args['solver_options'],
                        extra_functionality=extra_functionality)

    # start linear optimal powerflow calculations
    elif args['method'] == 'lopf':
        x = time.time()
        network.lopf(network.snapshots,
                     solver_name=args['solver'],
                     solver_options=args['solver_options'],
                     extra_functionality=extra_functionality,
                     formulation="angles")
        y = time.time()
        z = (y - x) / 60
        # z is time for lopf in minutes
        print("Time for LOPF [min]:", round(z, 2))

    # start non-linear powerflow simulation
    elif args['method'] == 'pf':
        network.pf(scenario.timeindex)
        # calc_line_losses(network)

    if args['pf_post_lopf']:
        x = time.time()
        pf_solution = pf_post_lopf(network,
                                   args,
                                   extra_functionality,
                                   add_foreign_lopf=True)
        y = time.time()
        z = (y - x) / 60
        print("Time for PF [min]:", round(z, 2))
        calc_line_losses(network)
        network = distribute_q(network, allocation='p_nom')

    if args['extendable'] != []:
        print_expansion_costs(network, args)

    if clustering:
        disagg = args.get('disaggregation')
        skip = () if args['pf_post_lopf'] else ('q', )
        t = time.time()
        if disagg:
            if disagg == 'mini':
                disaggregation = MiniSolverDisaggregation(
                    disaggregated_network, network, clustering, skip=skip)
            elif disagg == 'uniform':
                disaggregation = UniformDisaggregation(disaggregated_network,
                                                       network,
                                                       clustering,
                                                       skip=skip)

            else:
                raise Exception('Invalid disaggregation command: ' + disagg)

            disaggregation.execute(scenario, solver=args['solver'])
            # temporary bug fix for solar generators which are NaN instead
            # of 0 during night time
            disaggregated_network.generators_t.p.fillna(0, inplace=True)
            disaggregated_network.generators_t.q.fillna(0, inplace=True)

            disaggregated_network.results = network.results
            print("Time for overall desaggregation [min]: {:.2}".format(
                (time.time() - t) / 60))

    # write lpfile to path
    if args['lpfile'] is not False:
        network.model.write(args['lpfile'],
                            io_options={'symbolic_solver_labels': True})

    # write PyPSA results back to database
    if args['db_export']:
        username = str(conn.url).split('//')[1].split(':')[0]
        args['user_name'] = username

        results_to_oedb(session,
                        network,
                        dict([("disaggregated_results", False)] +
                             list(args.items())),
                        grid='hv',
                        safe_results=False)

        if disaggregated_network:
            results_to_oedb(session,
                            disaggregated_network,
                            dict([("disaggregated_results", True)] +
                                 list(args.items())),
                            grid='hv',
                            safe_results=False)

    # write PyPSA results to csv to path
    if args['csv_export'] is not False:
        if not args['pf_post_lopf']:
            results_to_csv(network, args)
        else:
            results_to_csv(network, args, pf_solution=pf_solution)

        if disaggregated_network:
            results_to_csv(
                disaggregated_network, {
                    k: os.path.join(v, 'disaggregated')
                    if k == 'csv_export' else v
                    for k, v in args.items()
                })

    # close session
    # session.close()

    return network, disaggregated_network
Code Example #19
from egoio.tools.db import connection
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager

Session = sessionmaker(bind=connection(readonly=True))


@contextmanager
def session_scope():
    """Function to ensure that sessions are closed properly."""
    session = Session()
    try:
        yield session
    except:
        session.rollback()
        raise
    finally:
        session.close()
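
A usage sketch of the context manager defined above:

with session_scope() as session:
    # run queries here; the session is rolled back on error
    # and closed in any case
    ...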

# class for ego.io
# (the declarative imports below are assumed; they are not part of the
# original excerpt)
from sqlalchemy import Column, Float, Integer, Text
from sqlalchemy.ext.declarative import declarative_base
import pandas as pd

Base = declarative_base()


class EgoGridLineExpansionCosts(Base):
    __tablename__ = 'ego_grid_line_expansion_costs'
    __table_args__ = {'schema': 'model_draft'}

    cost_id = Column(Integer, primary_key=True)
    voltage_level = Column(Text)
    measure = Column(Text)
    component = Column(Text)
    investment_cost = Column(Float(53))
    unit = Column(Text)
    comment = Column(Text)
    source = Column(Text)

# DB connection
conn = connection(section='oedb')
Session = sessionmaker(bind=conn)
session = Session()

# get data
df = pd.read_csv('https://raw.githubusercontent.com/openego/eGo/dev/ego'\
                            '/data/investment_costs_of_grid_%20measures.csv',
                            sep=',',
                            thousands='.',
                            decimal=',',
                            header=0)

df.rename(index=str, columns={"id": "cost_id", "Spannungsebene": "voltage_level",
                              "Anlage/Anlagenteil": "component",
                              "Maßnahme": "measure",
                              "\nInvestionskosten ": "investment_cost",
Code Example #21
fh = logging.FileHandler('ego.log', mode='w')
fh.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)

logger.addHandler(fh)
ego_logger.addHandler(fh)

if __name__ == '__main__':
    # import scenario settings **args of eTraGo
    args = get_scenario_setting(json_file='scenario_setting.json')
    print(args)

    try:
        conn = db.connection(section=args['global']['db'])
        Session = sessionmaker(bind=conn)
        session = Session()
    except OperationalError:
        logger.error('Failed connection to Database', exc_info=True)

    # start calculations of eTraGo if true
    if args['global']['eTraGo']:
        # start eTraGo calculation
        eTraGo = etrago(args['eTraGo'])

        eGo = eGo(eTraGo=eTraGo, scn_name='Status Quo')

        # add country code to bus and geometry (shapely)
        # eTraGo.buses = eTraGo.buses.drop(['country_code','geometry'], axis=1)
        #test = geolocation_buses(network = eTraGo, session)
Code example #22
0
def etrago(args):
    """The etrago function works with following arguments:


    Parameters
    ----------

    db : str
        ``'oedb'``,
        Name of Database session setting stored in *config.ini* of *.egoio*

    gridversion : NoneType or str
        ``'v0.2.11'``,
        Name of the data version number of oedb: state ``'None'`` for
        model_draft (sand-box) or an explicit version number
        (e.g. 'v0.2.10') for the grid schema.

    method : str
        ``'lopf'``,
        Choose between a non-linear power flow ('pf') or
        a linear optimal power flow ('lopf').

    pf_post_lopf : bool
        False,
        Option to run a non-linear power flow (pf) directly after the
        linear optimal power flow (and thus the dispatch) has finished.

    start_snapshot : int
        1,
        Start hour of the scenario year to be calculated.

    end_snapshot : int
        2,
        End hour of the scenario year to be calculated.

    solver : str
        'glpk',
        Choose your preferred solver. Current options: 'glpk' (open-source),
        'cplex' or 'gurobi'.

    scn_name : str
        'Status Quo',
        Choose your scenario. Currently, there are three different
        scenarios: 'Status Quo', 'NEP 2035', 'eGo100'. If you do not
        want to use the full German dataset, you can use the excerpt of
        Schleswig-Holstein by adding the acronym SH to the scenario
        name (e.g. 'SH Status Quo').

    scn_extension : str
        None,
        Choose an extension scenario which will be added to the existing
        network container. Data of the extension scenarios are located in
        extension tables (e.g. model_draft.ego_grid_pf_hv_extension_bus)
        with the prefix 'extension_'.
        Currently there are two overlay networks:
            'nep2035_confirmed' includes all planned new lines confirmed by
            the Bundesnetzagentur
            'nep2035_b2' includes all new lines planned by the
            Netzentwicklungsplan 2025 in scenario 2035 B2

    scn_decommissioning : str
        None,
        Choose an extra scenario which includes lines you want to decommission
        from the existing network. Data of the decommissioning scenarios are
        located in extension-tables
        (e.g. model_draft.ego_grid_pf_hv_extension_bus) with the prefix
        'decommissioning_'.
        Currently, there are two decommissioning_scenarios which are linked to
        extension-scenarios:
            'nep2035_confirmed' includes all lines that will be replaced in
            confirmed projects
            'nep2035_b2' includes all lines that will be replaced in
            NEP-scenario 2035 B2

    add_Belgium_Norway : bool
        False,
        State if you want to add Belgium and Norway as electrical neighbours.
        Currently, generation and load always refer to scenario 'NEP 2035'.

    lpfile : obj
        False,
        State if and where you want to save pyomo's lp file. Options:
        False or '/path/tofolder'.

    results : obj
        False,
        State if and where you want to save results as csv files. Options:
        False or '/path/tofolder'.

    export : bool
        False,
        State if you want to export the results of your calculation
        back to the database.

    extendable : NoneType or list
        ['network', 'storages'],
        Choose None or which components you want to optimize.
        Settings can be added in /tools/extendable.py.
        The most important possibilities:
            'network': set all lines, links and transformers extendable
            'transformers': set all transformers extendable
            'overlay_network': set all components of the 'scn_extension'
                               extendable
            'storages': allow to install extendable storages
                        (unlimited in size) at each grid node in order to meet
                        the flexibility demand.


    generator_noise : bool or int
        State if you want to apply a small random noise to the marginal costs
        of each generator in order to prevent an optimum plateau. To reproduce
        a noise, choose the same integer (seed number).

    minimize_loading : bool
        False,
        ...

    network_clustering_kmeans : bool or int
        False,
        State if you want to apply a clustering of all network buses down to
        only ``'k'`` buses. The weighting takes place considering generation
        and load
        at each node. If so, state the number of k you want to apply. Otherwise
        put False. This function doesn't work together with
        ``'line_grouping = True'``.

    load_cluster : bool or obj
        State if you want to load cluster coordinates from a previous run:
        False or /path/tofile (filename similar to ./cluster_coord_k_n_result).

    network_clustering_ehv : bool
        False,
        Choose if you want to cluster the full HV/EHV dataset down to only the
        EHV buses. In that case, all HV buses are assigned to their closest EHV
        sub-station, taking into account the shortest distance on power lines.

    snapshot_clustering : bool or int
        False,
        State if you want to cluster the snapshots and run the optimization
        only on a subset of snapshot periods. The int value defines the number
        of periods (i.e. days) to which the snapshots will be clustered.
        Requires the PyPSA branch 'features/snapshot_clustering'.

    parallelisation : bool
        False,
        Choose if you want to calculate a certain number of snapshots in
        parallel. If yes, define the respective amount in the if-clause
        execution below. Otherwise state False here.

    line_grouping : bool
        True,
        State if you want to group lines that connect the same two buses
        into one system.

    branch_capacity_factor : numeric
        1,
        Add a factor here if you want to globally change line capacities
        (e.g. to "consider" an (n-1) criterion or for debugging purposes).

    load_shedding : bool
        False,
        State here if you want to make use of the load shedding function which
        is helpful when debugging: a very expensive generator is set to each
        bus and meets the demand when regular
        generators cannot do so.

    comments : str
        None

    Returns
    -------
    network : pypsa.Network
        eTraGo result network based on the `PyPSA network
        <https://www.pypsa.org/doc/components.html#network>`_


    """
    conn = db.connection(section=args['db'])
    Session = sessionmaker(bind=conn)
    session = Session()

    # additional arguments cfgpath, version, prefix
    if args['gridversion'] is None:
        args['ormcls_prefix'] = 'EgoGridPfHv'
    else:
        args['ormcls_prefix'] = 'EgoPfHv'

    scenario = NetworkScenario(session,
                               version=args['gridversion'],
                               prefix=args['ormcls_prefix'],
                               method=args['method'],
                               start_snapshot=args['start_snapshot'],
                               end_snapshot=args['end_snapshot'],
                               scn_name=args['scn_name'])

    network = scenario.build_network()

    # add coordinates
    network = add_coordinates(network)

    # TEMPORARY vague adjustment due to transformer bug in data processing
    if args['gridversion'] == 'v0.2.11':
        network.transformers.x = network.transformers.x * 0.0001

    # set SOC at the beginning and end of the period to equal values
    network.storage_units.cyclic_state_of_charge = True

    # set extra_functionality to default
    extra_functionality = None

    if args['generator_noise'] is not False:
        # add random noise to all generators
        s = np.random.RandomState(args['generator_noise'])
        network.generators.marginal_cost += \
            abs(s.normal(0, 0.001, len(network.generators.marginal_cost)))

    # for an SH scenario run, do data preparation:
    if (args['scn_name'] == 'SH Status Quo'
            or args['scn_name'] == 'SH NEP 2035'):
        data_manipulation_sh(network)

    # grouping of parallel lines
    if args['line_grouping']:
        group_parallel_lines(network)

    # network clustering
    if args['network_clustering_ehv']:
        network.generators.control = "PV"
        busmap = busmap_from_psql(network, session, scn_name=args['scn_name'])
        network = cluster_on_extra_high_voltage(network,
                                                busmap,
                                                with_time=True)

    # k-mean clustering
    if args['network_clustering_kmeans'] is not False:
        network = kmean_clustering(
            network,
            n_clusters=args['network_clustering_kmeans'],
            load_cluster=args['load_cluster'],
            line_length_factor=1,
            remove_stubs=False,
            use_reduced_coordinates=False,
            bus_weight_tocsv=None,
            bus_weight_fromcsv=None)

    # Branch loading minimization
    if args['minimize_loading']:
        extra_functionality = loading_minimization

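    # thin out the snapshot set and scale the remaining weightings so the
    # total snapshot weight stays unchanged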
    if args['skip_snapshots']:
        network.snapshots = network.snapshots[::args['skip_snapshots']]
        network.snapshot_weightings = network.snapshot_weightings[::args[
            'skip_snapshots']] * args['skip_snapshots']

    if args['scn_extension'] is not None:
        network = extension(
            network,
            session,
            scn_extension=args['scn_extension'],
            start_snapshot=args['start_snapshot'],
            end_snapshot=args['end_snapshot'],
            k_mean_clustering=args['network_clustering_kmeans'])

    if args['scn_decommissioning'] is not None:
        network = decommissioning(
            network,
            session,
            scn_decommissioning=args['scn_decommissioning'],
            k_mean_clustering=args['network_clustering_kmeans'])

    if args['add_Belgium_Norway']:
        network = extension(
            network,
            session,
            scn_extension='BE_NO_NEP 2035',
            start_snapshot=args['start_snapshot'],
            end_snapshot=args['end_snapshot'],
            k_mean_clustering=args['network_clustering_kmeans'])

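    # mark the chosen components as extendable and scale their capital costs
    # to the considered time span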
    if args['extendable'] is not None:
        network = extendable(network, args['extendable'],
                             args['scn_extension'])
        network = convert_capital_costs(network, args['start_snapshot'],
                                        args['end_snapshot'])

    if args['branch_capacity_factor']:
        network.lines.s_nom = network.lines.s_nom * \
            args['branch_capacity_factor']
        network.transformers.s_nom = network.transformers.s_nom * \
            args['branch_capacity_factor']

    # load shedding in order to hunt infeasibilities
    if args['load_shedding']:
        load_shedding(network)

    # snapshot clustering
    if args['snapshot_clustering'] is not False:
        network = snapshot_clustering(network,
                                      how='daily',
                                      clusters=args['snapshot_clustering'])
        extra_functionality = daily_bounds  # daily_bounds or other constraint

    # parallelisation
    if args['parallelisation']:
        parallelisation(network,
                        start_snapshot=args['start_snapshot'],
                        end_snapshot=args['end_snapshot'],
                        group_size=1,
                        solver_name=args['solver'],
                        solver_options=args['solver_options'],
                        extra_functionality=extra_functionality)
    # start linear optimal powerflow calculations
    elif args['method'] == 'lopf':
        x = time.time()
        network.lopf(network.snapshots,
                     solver_name=args['solver'],
                     solver_options=args['solver_options'],
                     extra_functionality=extra_functionality)
        y = time.time()
        z = (y - x) / 60
        # z is time for lopf in minutes
        print("Time for LOPF [min]:", round(z, 2))

    # start non-linear powerflow simulation
    elif args['method'] == 'pf':
        network.pf(scenario.timeindex)
        # calc_line_losses(network)

    if args['pf_post_lopf']:
        pf_post_lopf(network, scenario)
        calc_line_losses(network)

    # provide storage installation costs
    if sum(network.storage_units.p_nom_opt) != 0:
        installed_storages = \
            network.storage_units[network.storage_units.p_nom_opt != 0]
        storage_costs = sum(installed_storages.capital_cost *
                            installed_storages.p_nom_opt)
        print("Investment costs for all storages in selected snapshots [EUR]:",
              round(storage_costs, 2))

    # write lpfile to path
    if args['lpfile'] is not False:
        network.model.write(args['lpfile'],
                            io_options={'symbolic_solver_labels': True})

    # write PyPSA results back to database
    if args['export']:
        username = str(conn.url).split('//')[1].split(':')[0]
        args['user_name'] = username
        safe_results = False  # default is False.
        # If set to True, the result set will also be saved to the versioned
        # grid schema in addition to model_draft.
        # ONLY set to True if you know what you are doing.
        results_to_oedb(session,
                        network,
                        args,
                        grid='hv',
                        safe_results=safe_results)

    # write PyPSA results as csv files to the given path
    if args['results'] is not False:
        results_to_csv(network, args)

    # close session
    # session.close()

    return network
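

# For orientation, a minimal sketch of an args dictionary matching the
# docstring above (values are illustrative assumptions, not recommended
# settings):
example_args = {
    'db': 'oedb',
    'gridversion': None,              # use model_draft (sand-box) tables
    'method': 'lopf',
    'pf_post_lopf': False,
    'start_snapshot': 1,
    'end_snapshot': 2,
    'solver': 'glpk',
    'solver_options': {},
    'scn_name': 'SH Status Quo',      # Schleswig-Holstein excerpt
    'scn_extension': None,
    'scn_decommissioning': None,
    'add_Belgium_Norway': False,
    'lpfile': False,
    'results': False,
    'export': False,
    'extendable': ['storages'],
    'generator_noise': 42,            # integer seed for reproducible noise
    'minimize_loading': False,
    'network_clustering_kmeans': False,
    'load_cluster': False,
    'network_clustering_ehv': False,
    'snapshot_clustering': False,
    'skip_snapshots': False,
    'parallelisation': False,
    'line_grouping': True,
    'branch_capacity_factor': 1,
    'load_shedding': False,
    'comments': None,
}
# network = etrago(example_args)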
Code example #23
0
def process_runs(mv_districts, n_of_districts, output_info, run_id, base_path):
    '''Runs a process organized by parallel_run()

    The function takes all districts in mv_districts and divides them into
    clusters of n_of_districts each. For each cluster, ding0 is run and the
    resulting network is saved as a pickle.

    Parameters
    ----------
    mv_districts: :obj:`list` of :obj:`int`
        List with all districts to be run.
    n_of_districts: :obj:`int`
        Number of districts in a cluster
    output_info:
        Queue (e.g. :obj:`multiprocessing.Queue`) used to return info about
        how each run went.
    run_id: :obj:`str`
        Identifier for a run of Ding0. For example, it is used to create a
        subdirectory of os.path.join(`base_path`, 'results')
    base_path : :obj:`str`
        Base path for ding0 data (input, results and logs).
        Default is `None` which sets it to :code:`~/.ding0` (may deviate on
        Windows systems).
        Specify your own, but keep in mind that it requires a particular
        structure of subdirectories.

    See Also
    --------
    parallel_run

    '''
    #######################################################################
    # database connection/ session
    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    #############################
    clusters = [
        mv_districts[x:x + n_of_districts]
        for x in range(0, len(mv_districts), n_of_districts)
    ]
    output_clusters = []

    for cl in clusters:
        print('\n########################################')
        print('  Running ding0 for district', cl)
        print('########################################')

        nw_name = 'ding0_grids_' + str(cl[0])
        if cl[0] != cl[-1]:
            nw_name = nw_name + '_to_' + str(cl[-1])
        nw = NetworkDing0(name=nw_name)
        try:
            msg = nw.run_ding0(session=session, mv_grid_districts_no=cl)
            if msg:
                status = 'run error'
            else:
                msg = ''
                status = 'OK'
                results.save_nd_to_pickle(nw, os.path.join(base_path, run_id))
            output_clusters.append((nw_name, status, msg, nw.metadata))
        except Exception as e:
            output_clusters.append((nw_name, 'corrupt dist', e, nw.metadata))
            continue

    output_info.put(output_clusters)
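

# A hedged sketch of how process_runs might be launched from parallel_run()
# (district IDs, cluster size and base path are illustrative assumptions):
if __name__ == '__main__':
    from multiprocessing import Process, Queue

    output_info = Queue()
    p = Process(target=process_runs,
                args=([3545, 3546], 1, output_info,
                      'run_1', os.path.expanduser('~/.ding0')))
    p.start()
    print(output_info.get())   # list of (name, status, message, metadata)
    p.join()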