def main():
    """Plot a single figure or all figures from the command line.

    Command-line arguments
    ----------------------
    sys.argv[1] : str
        A figure number parseable as ``float``, or the literal ``'all'``.
    sys.argv[2] : str, optional
        Output directory; defaults to the configured 'paths/figures'.

    Raises
    ------
    ValueError
        If the first argument is neither a float nor 'all'.
    """
    import sys

    logger.define_logging(screen_level=logging.INFO)
    cfg.init(
        paths=[
            os.path.dirname(berlin_hp.__file__),
            os.path.dirname(deflex.__file__),
            os.path.dirname(__file__),
        ]
    )
    msg = "Unknown parameter: >>{0}<<. Only floats or 'all' are allowed."
    arg1 = sys.argv[1]
    path = sys.argv[2] if len(sys.argv) > 2 else cfg.get("paths", "figures")
    try:
        arg1 = float(arg1)
    except ValueError:
        pass  # keep the raw string; it is validated against 'all' below
    os.makedirs(path, exist_ok=True)
    if isinstance(arg1, float):
        plot_figure(str(arg1), save=True, show=True, path=path)
    elif arg1 == "all":
        plot_all(path=path)
    else:
        raise ValueError(msg.format(arg1))
def test_init_own_file_list():
    """Initialising from an explicit file list loads only that file."""
    ini_file = os.path.join(
        os.path.dirname(__file__), "data", "config_test.ini"
    )
    config.init(files=[ini_file])
    basenames = sorted(path.split(os.sep)[-1] for path in config.FILES)
    eq_(basenames, ["config_test.ini"])
    eq_(config.get("tester", "my_test"), "my_value")
def test_check_functions():
    """has_section/has_option report presence correctly."""
    ini_file = os.path.join(
        os.path.dirname(__file__), "data", "config_test.ini"
    )
    config.init(files=[ini_file])
    ok_(config.has_section("tester"))
    ok_(not config.has_section("teste"))
    ok_(config.has_option("tester", "my_test"))
def test_missing_value():
    """Missing options and sections raise the configparser errors."""
    ini_file = os.path.join(
        os.path.dirname(__file__), "data", "config_test.ini"
    )
    config.init(files=[ini_file])
    with assert_raises_regexp(
        NoOptionError, "No option 'blubb' in section: 'type_tester'"
    ):
        config.get("type_tester", "blubb")
    with assert_raises_regexp(NoSectionError, "No section: 'typetester'"):
        config.get("typetester", "blubb")
def test_set_temp_value():
    """tmp_set stores a raw string that get() converts on read."""
    ini_file = os.path.join(
        os.path.dirname(__file__), "data", "config_test.ini"
    )
    config.init(files=[ini_file])
    # The option does not exist before tmp_set is called.
    with assert_raises_regexp(
        NoOptionError, "No option 'blubb' in section: 'type_tester'"
    ):
        config.get("type_tester", "blubb")
    config.tmp_set("type_tester", "blubb", "None")
    eq_(config.get("type_tester", "blubb"), None)
    config.tmp_set("type_tester", "blubb", "5.5")
    eq_(config.get("type_tester", "blubb"), 5.5)
def test_init_basic():
    """Default init picks up exactly the packaged ini files."""
    config.init()
    basenames = sorted(f.split(os.sep)[-1] for f in config.FILES)
    expected = [
        "dictionary.ini",
        "mobility.ini",
        "reegis.ini",
        "solar.ini",
        "wind.ini",
    ]
    eq_(basenames, expected)
def test_get_function():
    """Read config file."""
    ini_file = os.path.join(
        os.path.dirname(__file__), "data", "config_test.ini"
    )
    config.init(files=[ini_file])
    # get() converts values to bool/int/float/None; lists stay strings.
    ok_(config.get("type_tester", "my_bool"))
    ok_(isinstance(config.get("type_tester", "my_int"), int))
    ok_(isinstance(config.get("type_tester", "my_float"), float))
    ok_(isinstance(config.get("type_tester", "my_string"), str))
    ok_(isinstance(config.get("type_tester", "my_None"), type(None)))
    ok_(isinstance(config.get("type_tester", "my_list"), str))
    # get_list() splits the comma-separated value into items.
    eq_(int(config.get_list("type_tester", "my_list")[2]), 7)
def test_init_additional_path():
    """An extra search path adds its ini files to the defaults."""
    extra_paths = [os.path.join(os.path.dirname(__file__), "data")]
    config.init(paths=extra_paths)
    basenames = sorted(f.split(os.sep)[-1] for f in config.FILES)
    eq_(
        basenames,
        [
            "config_test.ini",
            "dictionary.ini",
            "mobility.ini",
            "reegis.ini",
            "solar.ini",
            "wind.ini",
        ],
    )
def test_dicts():
    """Test dictionaries in config file."""
    ini_file = os.path.join(
        os.path.dirname(__file__), "data", "config_test.ini"
    )
    config.init(files=[ini_file])
    # get_dict: raw option strings.
    section = config.get_dict("type_tester")
    eq_(section["my_list"], "4,6,7,9")
    # get_dict_list: every value becomes a list with converted types.
    section = config.get_dict_list("type_tester")
    eq_(section["my_list"][1], "6")
    eq_(section["my_None"][0], None)
    eq_(section["my_int"][0], 5)
    # string=True keeps the raw strings instead of converted types.
    section = config.get_dict_list("type_tester", string=True)
    eq_(section["my_list"][1], "6")
    eq_(section["my_None"][0], "None")
    eq_(section["my_int"][0], "5")
def main():
    """Reproduce the scenario results from the command line.

    Command-line arguments
    ----------------------
    sys.argv[1] : str
        Fraction of the cpu capacity to use (parsed as float).
    sys.argv[2] : str, optional
        Output path; defaults to the configured 'paths/phd'.
    """
    import sys

    logger.define_logging(screen_level=logging.INFO)
    cfg.init(
        paths=[
            os.path.dirname(berlin_hp.__file__),
            os.path.dirname(deflex.__file__),
            os.path.dirname(__file__),
        ]
    )
    cpu_fraction = float(sys.argv[1])
    target = sys.argv[2] if len(sys.argv) > 2 else cfg.get("paths", "phd")
    os.makedirs(target, exist_ok=True)
    reproduce_scenario_results(target, cpu_fraction)
arrowprops=dict(facecolor='black', arrowstyle="-"), horizontalalignment='left', verticalalignment='bottom', bbox=bbox_props) az = (np.array([az_lower, az_lower, az_upper, az_upper, az_lower]) / 180 * np.pi) t = np.array([t_lower, t_upper, t_upper, t_lower, t_lower]) ax.plot(az, t) ax.set_rmax(50) ax.set_rmin(20) ax.set_thetamin(90) ax.set_thetamax(270) # Adjust margins plt.subplots_adjust(right=0.94, left=0, bottom=-0.2, top=1.2) if __name__ == "__main__": logger.define_logging() cfg.init(paths=[os.path.dirname(deflex.__file__)]) # combine_large_orientation_files() # optimal_pv_orientation() # pv_yield_by_orientation() # collect_orientation_files() # collect_single_orientation_files() # plt.show() # scatter() # analyse_multi_files() polar_plot()
def setup_class(cls):
    """Build the de22 heat/transmission test scenario once for the class.

    Reads the reference csv scenario, pins the creator options, replaces
    the expensive reegis data collectors with mocks returning the reference
    tables, and runs ``scenario_creator.create_scenario`` once; the tests
    then inspect ``cls.input_data``.
    """
    path = os.path.join(TEST_PATH, "de22_heat_transmission_csv")
    sc = st.DeflexScenario()
    sc.read_csv(path)
    # Reference tables used both as mock return values and for comparison.
    cls.tables = sc.input_data
    tmp_tables = {}
    # Creator options for the scenario under test (de22 map, with heat).
    parameter = {
        "costs_source": "ewi",
        "downtime_bioenergy": 0.1,
        "limited_transformer": "bioenergy",
        "local_fuels": "district heating",
        "map": "de22",
        "mobility_other": "petrol",
        "round": 1,
        "separate_heat_regions": "de22",
        "copperplate": False,
        "default_transmission_efficiency": 0.9,
        "group_transformer": False,
        "heat": True,
        "use_CO2_costs": True,
        "use_downtime_factor": True,
        "use_variable_costs": False,
        "year": 2014,
    }
    config.init(paths=[os.path.dirname(dfile)])
    for option, value in parameter.items():
        # NOTE(review): values are set on both `cfg` and `config` —
        # presumably two handles on the same configuration; confirm
        # whether both calls are really needed.
        cfg.tmp_set("creator", option, str(value))
        config.tmp_set("creator", option, str(value))
    # Load reference csv tables.
    # NOTE(review): tmp_tables is filled but not used further in this
    # method — presumably consumed by the test methods; verify.
    name = "heat_demand_deflex"
    fn = os.path.join(os.path.dirname(__file__), "data", name + ".csv")
    tmp_tables[name] = pd.read_csv(fn, index_col=[0], header=[0, 1])
    name = "transformer_balance"
    fn = os.path.join(os.path.dirname(__file__), "data", name + ".csv")
    tmp_tables[name] = pd.read_csv(fn, index_col=[0, 1, 2], header=[0])
    # Mock the data collectors so no external data is fetched.
    powerplants.scenario_powerplants = MagicMock(
        return_value={
            "volatile plants": cls.tables["volatile plants"],
            "power plants": cls.tables["power plants"],
        })
    powerplants.scenario_chp = MagicMock(
        return_value={
            "heat-chp plants": cls.tables["heat-chp plants"],
            "power plants": cls.tables["power plants"],
        })
    feedin.scenario_feedin = MagicMock(
        return_value=cls.tables["volatile series"])
    demand_table = {
        "electricity demand series":
            cls.tables["electricity demand series"],
        "heat demand series": cls.tables["heat demand series"],
    }
    demand.scenario_demand = MagicMock(return_value=demand_table)
    # Build the scenario with the mocked collectors in place.
    name = "deflex_2014_de22_heat_transmission"
    polygons = deflex_regions(rmap=parameter["map"], rtype="polygons")
    lines = deflex_power_lines(parameter["map"]).index
    cls.input_data = scenario_creator.create_scenario(
        polygons, 2014, name, lines)
from my_reegis import alternative_scenarios import deflex import pandas as pd import numpy as np from reegis import config as cfg from my_reegis import results from my_reegis import reegis_plot from my_reegis import upstream_analysis from my_reegis import plots_de21 from matplotlib import pyplot as plt import oemof_visio as oev #cfg.init(paths=[os.path.dirname(deflex.__file__), # os.path.dirname(my_reegis.__file__)]) cfg.init() exit(0) # # Schritt 1a: Erstelle ein Szenario mit Reegis aus Daten aus dem Internet year = 2014 # Zur Wahl stehen 2014, 2013, 2012 geom = 'de21' # Geometrien sind de02, de17, de21, de22 #p = deflex.basic_scenario.create_basic_scenario(year, rmap=geom ) for geom in ['de02']: for inc_fac in [1.55]: # Schritt 1b: Alternativ: Lade vorhandenes Szenario name = '{0}_{1}_{2}'.format('deflex', year, geom) path = os.path.join(cfg.get('paths', 'scenario'), 'deflex', str(year)) csv_dir = name + '_csv' csv_path = os.path.join(path, csv_dir) meta = {
def create_basic_reegis_scenario( name, regions, parameter, lines=None, csv_path=None, excel_path=None, ): """ Create a basic scenario for a given year and region-set. Parameters ---------- name : str Name of the scenario regions : geopandas.geoDataFrame Set of region polygons. lines : geopandas.geoDataFrame Set of transmission lines. parameter : dict Parameter set for the creation process. Some parameters will have a default value. For the default values see below. csv_path : str A directory to store the scenario as csv collection. If None no csv collection will be created. Either csv_path or excel_path must not be 'None'. excel_path : str A file to store the scenario as an excel map. If None no excel file will be created. Both suffixes 'xls' or 'xlsx' are possible. The excel format can be used in most spreadsheet programs such as LibreOffice or Gnumeric. Either csv_path or excel_path must not be 'None'. Returns ------- namedtuple : Path Notes ----- List of default values: * copperplate: True * default_transmission_efficiency: 0.9 * costs_source: "ewi" * downtime_bioenergy: 0.1 * group_transformer: False * heat: False * limited_transformer: "bioenergy", * local_fuels: "district heating", * map: "de02", * mobility_other: "petrol", * round: 1, * separate_heat_regions: "de22", * use_CO2_costs: False, * use_downtime_factor: True, * use_variable_costs: False, * year: 2014 Examples -------- >>> from oemof.tools import logger >>> from deflex.geometries import deflex_power_lines >>> from deflex.geometries import deflex_regions >>> >>> logger.define_logging(screen_level=logging.DEBUG) # doctest: +SKIP >>> >>> my_parameter = { ... "year": 2014, ... "map": "de02", ... "copperplate": True, ... "heat": True, ... } >>> >>> my_name = "deflex" >>> for k, v in my_parameter.items(): ... 
my_name += "_" + str(k) + "-" + str(v) >>> >>> polygons = deflex_regions(rmap=my_parameter["map"], rtype="polygons") >>> my_lines = deflex_power_lines(my_parameter["map"]).index >>> path = "/my/path/creator/{0}{1}".format(my_name, "{0}") >>> >>> create_basic_reegis_scenario( ... name=my_name, ... regions=polygons, ... lines=my_lines, ... parameter=my_parameter, ... excel_path=path.format(".xlsx"), ... csv_path=path.format("_csv"), ... ) # doctest: +SKIP """ # The default parameter can be found in "creator.ini". config.init(paths=[os.path.dirname(dfile)]) for option, value in parameter.items(): cfg.tmp_set("creator", option, str(value)) config.tmp_set("creator", option, str(value)) year = cfg.get("creator", "year") configuration = json.dumps(cfg.get_dict("creator"), indent=4, sort_keys=True) logging.info( "The following configuration is used to build the scenario:" " %s", configuration, ) paths = namedtuple("paths", "xls, csv") table_collection = create_scenario(regions, year, name, lines) table_collection = clean_time_series(table_collection) name = table_collection["general"].get("name") sce = scenario.Scenario(input_data=table_collection, name=name, year=year) if csv_path is not None: os.makedirs(csv_path, exist_ok=True) sce.to_csv(csv_path) if excel_path is not None: os.makedirs(os.path.dirname(excel_path), exist_ok=True) sce.to_xlsx(excel_path) return paths(xls=excel_path, csv=csv_path)
import os

import my_reegis
import deflex
import pandas as pd
import matplotlib
from reegis import config as cfg
from my_reegis import results
from my_reegis import reegis_plot
from my_reegis import upstream_analysis
from my_reegis import plots_de21
from matplotlib import pyplot as plt

# import PyQt5
# matplotlib.use('Qt5Agg')

# Read the ini files of deflex/reegis here, because this script lives
# outside those repositories.
# Fix: `os` was used below but never imported in this script.
cfg.init(paths=[
    os.path.dirname(deflex.__file__),
    os.path.dirname(my_reegis.__file__)
])

# Load the solved result energy systems.
de21_2014 = results.load_es(
    '/home/dbeier/reegis/scenarios/deflex/2014/results_cbc/deflex_2014_de21.esys'
)
de02_2014 = results.load_es(
    '/home/dbeier/reegis/scenarios/deflex/2014/results_cbc/deflex_2014_de02.esys'
)
# de17_2014 = results.load_es('/home/dbeier/reegis/scenarios/deflex/2014/results_cbc/deflex_2014_de17.esys')
# de02_2013 = results.load_es('/home/dbeier/reegis/scenarios/deflex/2013/results_cbc/deflex_2013_de02.esys')

results_obj = de21_2014.results['main']  # main results of the de21 run
"de21_2014": "deflex_2014_de21_csv", "de22_2014": "deflex_2014_de21_csv", "de21_without_berlin_2014": "deflex_2014_de21_without_berlin_csv", } SPLITTER = { "berlin": ["berlin_hp", "berlin_single"], "base_var": ["base_var", "upstream"], "extend": ["extend", "alt"], "deflex": ["deflex"], "modellhagen": ["modellhagen", "friedrichshagen"], } cfg.init(paths=[ os.path.dirname(berlin_hp.__file__), os.path.dirname(deflex.__file__), os.path.dirname(my_regis_file), ]) def split_scenarios(sc): splitted = {} for g, kws in SPLITTER.items(): splitted[g] = [] for keyword in kws: for s in sc: if keyword in s.split(os.sep)[-1]: splitted[g].append(s) return splitted