def init_files_for_tests(mv_grid_districts=None,
                         filename='ding0_tests_grids_1.pkl'):
    '''Runs ding0 over the districts selected in mv_grid_districts and
    writes the result in filename.

    Parameters
    ----------
    mv_grid_districts: :obj:`list` of :obj:`int`
        Districts IDs: Defaults to [3545]
    filename: :obj:`str`
        Defaults to 'ding0_tests_grids_1.pkl'
    '''
    # Use a None sentinel instead of a mutable list default: a literal
    # default list would be shared across all calls of this function.
    if mv_grid_districts is None:
        mv_grid_districts = [3545]

    print('\n########################################')
    print(' Running ding0 for district', mv_grid_districts)

    # database connection/ session
    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    # instantiate new ding0 network object
    nd = NetworkDing0(name='network')

    # run DING0 on selected MV Grid District
    nd.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)

    # export grid to file (pickle)
    print('\n########################################')
    print(' Saving result in ', filename)
    save_nd_to_pickle(nd, filename=filename)
def create_test_grids_with_stats(path):
    '''Recreate the saved test grids and statistics files in `path`.

    If changes in electrical data have been made, run this function to
    update the saved test data in folder. Tests are run on
    mv_grid_district 460.

    :param path: directory where testdata is stored (created if missing).
    :return: mvgd_stats, mvgd_voltage_nodes, mvgd_current_branches,
        lvgd_stats, lvgd_voltage_nodes, lvgd_current_branches
    '''
    # database connection/ session
    engine = db.connection(section='oedb')
    session = sessionmaker(bind=engine)()

    # instantiate new ding0 network object
    nd = NetworkDing0(name='network')

    # choose MV Grid Districts to import
    mv_grid_districts = [460]

    # run DING0 on selected MV Grid District
    nd.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)

    # save network; exist_ok avoids the check-then-create race of a
    # separate os.path.exists() guard
    os.makedirs(path, exist_ok=True)
    save_nd_to_pickle(nd, path=path, filename=None)

    # MV grid district statistics
    mvgd_stats = calculate_mvgd_stats(nd)
    mvgd_stats.to_csv(os.path.join(path, 'mvgd_stats.csv'))

    # index 0 holds node voltages, index 1 branch currents
    mvgd_voltage_current_stats = calculate_mvgd_voltage_current_stats(nd)
    mvgd_current_branches = mvgd_voltage_current_stats[1]
    mvgd_current_branches.to_csv(
        os.path.join(path, 'mvgd_current_branches.csv'))
    mvgd_voltage_nodes = mvgd_voltage_current_stats[0]
    mvgd_voltage_nodes.to_csv(os.path.join(path, 'mvgd_voltage_nodes.csv'))

    # LV grid district statistics
    lvgd_stats = calculate_lvgd_stats(nd)
    lvgd_stats.to_csv(os.path.join(path, 'lvgd_stats.csv'))

    lvgd_voltage_current_stats = calculate_lvgd_voltage_current_stats(nd)
    lvgd_current_branches = lvgd_voltage_current_stats[1]
    lvgd_current_branches.to_csv(
        os.path.join(path, 'lvgd_current_branches.csv'))
    lvgd_voltage_nodes = lvgd_voltage_current_stats[0]
    lvgd_voltage_nodes.to_csv(os.path.join(path, 'lvgd_voltage_nodes.csv'))

    return (mvgd_stats, mvgd_voltage_nodes, mvgd_current_branches,
            lvgd_stats, lvgd_voltage_nodes, lvgd_current_branches)
def process_runs(mv_districts, n_of_districts, output_info, run_id, base_path):
    '''Runs a process organized by parallel_run()

    All districts in mv_districts are divided into clusters of
    n_of_districts each. For each cluster, ding0 is run and the resulting
    network is saved as a pickle.

    Parameters
    ----------
    mv_districts: :obj:`list` of int
        List with all districts to be run.
    n_of_districts: :obj:`int`
        Number of districts in a cluster
    output_info:
        Info about how the run went
    run_id: :obj:`str`
        Identifier for a run of Ding0. For example it is used to create a
        subdirectory of os.path.join(`base_path`, 'results')
    base_path : :obj:`str`
        Base path for ding0 data (input, results and logs).
        Default is `None` which sets it to :code:`~/.ding0` (may deviate on
        windows systems).
        Specify your own but keep in mind that it a required a particular
        structure of subdirectories.

    See Also
    --------
    parallel_run
    '''
    #######################################################################
    # database connection/ session
    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    #############################
    # chunk the full district list into slices of n_of_districts entries
    step = n_of_districts
    clusters = [mv_districts[start:start + step]
                for start in range(0, len(mv_districts), step)]

    output_clusters = []

    for cluster in clusters:
        print('\n########################################')
        print(' Running ding0 for district', cluster)
        print('########################################')

        # network name encodes the covered district range
        first, last = cluster[0], cluster[-1]
        nw_name = 'ding0_grids_' + str(first)
        if first != last:
            nw_name += '_to_' + str(last)

        nw = NetworkDing0(name=nw_name)
        try:
            msg = nw.run_ding0(session=session, mv_grid_districts_no=cluster)

            if msg:
                status = 'run error'
            else:
                # successful run: pickle the network under the run_id folder
                msg = ''
                status = 'OK'
                results.save_nd_to_pickle(nw, os.path.join(base_path, run_id))
            output_clusters.append((nw_name, status, msg, nw.metadata))
        except Exception as err:
            # corrupt district: report the exception and keep going
            output_clusters.append((nw_name, 'corrupt dist', err, nw.metadata))
            continue

    output_info.put(output_clusters)
def run_multiple_grid_districts(mv_grid_districts, run_id, failsafe=False,
                                base_path=None):
    """
    Perform ding0 run on given grid districts

    Parameters
    ----------
    mv_grid_districts : :obj:`list`
        Integers describing grid districts
    run_id: :obj:`str`
        Identifier for a run of Ding0. For example it is used to create a
        subdirectory of os.path.join(`base_path`, 'results')
    failsafe : bool
        Setting to True enables failsafe mode where corrupt grid districts
        (mostly due to data issues) are reported and skipped. Report is to
        be found in the log dir under :code:`~/.ding0` . Default is False.
    base_path : :obj:`str`
        Base path for ding0 data (input, results and logs).
        Default is `None` which sets it to :code:`~/.ding0` (may deviate on
        windows systems).
        Specify your own but keep in mind that it a required a particular
        structure of subdirectories.

    Returns
    -------
    msg : :obj:`str`
        Traceback of error computing corrupt MV grid district
        .. TODO: this is only true if try-except environment is moved into
           this function and traceback return is implemented

    Note
    -----
    Consider that a large amount of MV grid districts may take hours or up
    to days to compute. A computational run for a single grid district may
    consume around 30 secs.
    """
    start = time.time()

    # define base path
    if base_path is None:
        base_path = BASEPATH

    # database connection/ session
    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    corrupt_grid_districts = pd.DataFrame(columns=['id', 'message'])

    # explicit initialization instead of the fragile `'metadata' in locals()`
    # test; also guarantees `msg` is defined when mv_grid_districts is empty
    metadata = None
    msg = None

    # create target dir once; exist_ok avoids a per-iteration exists check
    grids_dir = os.path.join(base_path, "grids")
    os.makedirs(grids_dir, exist_ok=True)

    for mvgd in mv_grid_districts:
        # instantiate ding0  network object
        nd = NetworkDing0(name='network', run_id=run_id)

        if not failsafe:
            # run DING0 on selected MV Grid District
            msg = nd.run_ding0(session=session,
                               mv_grid_districts_no=[mvgd])

            # save results
            results.save_nd_to_pickle(nd, grids_dir)
        else:
            # try to perform ding0 run on grid district
            try:
                msg = nd.run_ding0(session=session,
                                   mv_grid_districts_no=[mvgd])
                # if not successful, put grid district to report
                if msg:
                    # DataFrame.append was removed in pandas 2.0;
                    # pd.concat is the supported equivalent
                    corrupt_grid_districts = pd.concat(
                        [corrupt_grid_districts,
                         pd.DataFrame([{'id': mvgd, 'message': msg[0]}])],
                        ignore_index=True)
                # if successful, save results
                else:
                    results.save_nd_to_pickle(nd, grids_dir)
            except Exception as e:
                corrupt_grid_districts = pd.concat(
                    [corrupt_grid_districts,
                     pd.DataFrame([{'id': mvgd, 'message': e}])],
                    ignore_index=True)
                continue

        # Merge metadata of multiple runs
        if metadata is None:
            metadata = nd.metadata
        else:
            if isinstance(mvgd, list):
                metadata['mv_grid_districts'].extend(mvgd)
            else:
                metadata['mv_grid_districts'].append(mvgd)

    # Save metadata to disk
    with open(os.path.join(base_path, "grids",
                           'Ding0_{}.meta'.format(run_id)), 'w') as f:
        json.dump(metadata, f)

    # report on unsuccessful runs
    corrupt_grid_districts.to_csv(
        os.path.join(base_path, "grids", 'corrupt_mv_grid_districts.txt'),
        index=False,
        float_format='%.0f')

    print('Elapsed time for', str(len(mv_grid_districts)),
          'MV grid districts (seconds): {}'.format(time.time() - start))

    return msg
from ding0.core import NetworkDing0
from ding0.tools.logger import setup_logger
from ding0.tools.results import save_nd_to_pickle
from sqlalchemy.orm import sessionmaker
import oedialect

# define logger
logger = setup_logger()

# ===== MAIN =====

# database connection/ session
# NOTE(review): `db` is not imported in the block above — presumably it is
# brought into scope elsewhere in this file (e.g. an egoio/ding0 tools
# module); confirm before running this script standalone.
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()

# instantiate new ding0 network object
nd = NetworkDing0(name='network')

# choose MV Grid Districts to import
mv_grid_districts = [3040]

# run DING0 on selected MV Grid District
nd.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts)

# export grids to database
# nd.export_mv_grid(conn, mv_grid_districts)
# nd.export_mv_grid_new(conn, mv_grid_districts)

# export grid to file (pickle)
save_nd_to_pickle(nd, filename='ding0_grids_example.pkl')
#region CREATE GRIDS if is_create_grids == True: # database connection/ session engine = db.connection(section='oedb') session = sessionmaker(bind=engine)() for i in range(nr_test_runs): # instantiate new ding0 network object nd = NetworkDing0(name='network') # run DING0 on selected MV Grid District nd.run_ding0(session=session, mv_grid_districts_no=mv_grid_districts) # export grid to file (pickle) save_nd_to_pickle(nd, path=test_path, filename='ding0_grids_example{}.pkl'.format(i)) #endregion #region EXTRACT DATAFRAMES components_for_comparison = {} lines_for_comparison = {} for i in range(nr_test_runs): try: if not is_load_from_csv: nw = load_nd_from_pickle( path=test_path, filename='ding0_grids_example{}.pkl'.format(i)) if not is_alternative_lv_line_naming: # extract component dataframes from network components = create_grid_component_dataframes_from_network(nw)