def ninja_pv_profiles(
    buses, weather_year, scenario_year, datapackage_dir, raw_data_path
):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    weather_year: integer or string
        Year to select from raw data source
    scenario_year: integer or string
        Year to use for timeindex in tabular resource
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file `ninja_pv_europe_v1.1_merra2.csv` is located
    """
    filepath = building.download_data(
        "https://www.renewables.ninja/static/downloads/ninja_europe_pv_v1.1.zip",
        unzip_file="ninja_pv_europe_v1.1_merra2.csv",
        directory=raw_data_path,
    )
    year = str(weather_year)

    countries = buses

    raw_data = pd.read_csv(filepath, index_col=[0], parse_dates=True)
    # drop February 29 so leap years match the 8760-hour scenario time index
    raw_data = raw_data[
        ~((raw_data.index.month == 2) & (raw_data.index.day == 29))
    ]

    df = raw_data.loc[year]

    sequences_df = pd.DataFrame(index=df.index)

    for c in countries:
        sequence_name = c + "-pv-profile"
        sequences_df[sequence_name] = raw_data.loc[year][c].values

    sequences_df.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "volatile_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )
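# A minimal usage sketch for `ninja_pv_profiles` (hypothetical values; the
# actual bus list, years and paths come from the scenario configuration,
# which is not part of this module). It assumes `pd` (pandas), `os`,
# `datetime` and the datapackage `building` helpers (e.g. from oemof.tabular)
# are imported at module level:
#
#     ninja_pv_profiles(
#         buses=["DE", "FR", "PL"],
#         weather_year=2012,
#         scenario_year=2030,
#         datapackage_dir="datapackages/my-scenario",
#         raw_data_path="raw-data",
#     )
#
# The other profile functions in this module follow the same calling pattern.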
def ninja_offshore_wind_profiles(
    buses, weather_year, scenario_year, datapackage_dir, raw_data_path
):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    weather_year: integer or string
        Year to select from raw data source
    scenario_year: integer or string
        Year to use for timeindex in tabular resource
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file
        `ninja_wind_europe_v1.1_future_nearterm_on-offshore.csv` is located
    """
    onoff_filepath = building.download_data(
        "https://www.renewables.ninja/static/downloads/ninja_europe_wind_v1.1.zip",
        unzip_file="ninja_wind_europe_v1.1_future_nearterm_on-offshore.csv",
        directory=raw_data_path,
    )

    year = str(weather_year)

    on_off_data = pd.read_csv(onoff_filepath, index_col=[0], parse_dates=True)
    # drop February 29 so leap years match the 8760-hour scenario time index
    on_off_data = on_off_data[
        ~((on_off_data.index.month == 2) & (on_off_data.index.day == 29))
    ]

    sequences_df = pd.DataFrame(index=on_off_data.loc[year].index)

    for c in buses:
        if c + "_OFF" in on_off_data.columns:
            sequences_df[c + "-offshore-profile"] = on_off_data[c + "_OFF"]
        elif c == "PL":
            # PL offshore is not part of the ninja dataset; use the Swedish
            # offshore profile as a proxy
            sequences_df[c + "-offshore-profile"] = on_off_data["SE_OFF"]

    sequences_df.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "volatile_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )
def emhires_pv_profiles(
    buses, weather_year, scenario_year, datapackage_dir, raw_data_path
):
    """
    Gonzalez Aparicio, Iratxe (2017): Solar hourly generation time series at
    country, NUTS 1, NUTS 2 level and bidding zones. European Commission,
    Joint Research Centre (JRC) [Dataset]
    PID: http://data.europa.eu/89h/jrc-emhires-solar-generation-time-series

    EU Commission, DG ENER, Unit A4 - ENERGY STATISTICS,
    https://ec.europa.eu/energy/sites/ener/files/documents/countrydatasheets_june2018.xlsx
    """
    year = str(weather_year)

    countries = buses

    date_parser = lambda y: datetime.strptime(y, "%Y %m %d %H")
    date_columns = ["Year", "Month", "Day", "Hour"]

    df = (
        pd.read_excel(
            building.download_data(
                "https://setis.ec.europa.eu/sites/default/files/EMHIRES_DATA/Solar/EMHIRESPV_country_level.zip",
                unzip_file="EMHIRESPV_TSh_CF_Country_19862015.xlsx",
                directory=raw_data_path,
            ),
            parse_dates={"i": date_columns},
            date_parser=date_parser,
            index_col="i",
        )
        .reindex(columns=countries)
        .dropna(axis=1)
        .loc[year, countries]
    )

    renames = {c: c + "-pv-profile" for c in countries}
    df.rename(columns=renames, inplace=True)

    df = df[~((df.index.month == 2) & (df.index.day == 29))]
    df.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "volatile_profile.csv",
        df,
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )
    filepath = building.download_data(
        "https://www.renewables.ninja/static/downloads/ninja_europe_pv_v1.1.zip",
        unzip_file="ninja_pv_europe_v1.1_merra2.csv")

    raw_data = pd.read_csv(filepath, index_col=[0], parse_dates=True)

    df = raw_data.loc[year]

    sequences_df = pd.DataFrame(index=df.index)

    for c in countries:
        sequence_name = c + "-pv-profile"
        sequences_df[sequence_name] = raw_data.loc[year][c].values

    sequences_df.index = building.timeindex(year)
    building.write_sequences("volatile_profile.csv", sequences_df)

    filepath = building.download_data(
        "https://www.renewables.ninja/static/downloads/ninja_europe_wind_v1.1.zip",
        unzip_file="ninja_wind_europe_v1.1_current_on-offshore.csv")

    raw_data = pd.read_csv(filepath, index_col=[0], parse_dates=True)

    # these zones are not in the ninja dataset (newer market zones);
    # fall back to the German onshore factor, and to the Swedish offshore
    # factor for PL
    raw_data['LU_ON'] = raw_data['DE_ON']
    raw_data['AT_ON'] = raw_data['DE_ON']
    raw_data['CH_ON'] = raw_data['DE_ON']
    raw_data['CZ_ON'] = raw_data['DE_ON']
    raw_data['PL_OFF'] = raw_data['SE_OFF']
                'capacity': capacity,
                'profile': country + '-ror-profile',
                'efficiency': eta
            }

    building.write_elements(
        'ror.csv', pd.DataFrame.from_dict(elements, orient='index'))

    sequences = (inflows * ror_shares * 1000) / capacities['ror_power']
    sequences = sequences[countries].copy()
    sequences.dropna(axis=1, inplace=True)
    sequences.columns = sequences.columns.astype(str) + '-ror-profile'

    building.write_sequences(
        'ror_profile.csv',
        sequences.set_index(building.timeindex(str(year))))

    # reservoir
    elements = {}
    for country in countries:
        name = country + '-reservoir'

        capacity = capacities.loc[country, 'rsv_power']
        eta = technologies.loc[(year, 'hydro', 'reservoir', 'efficiency'),
                               'value']

        if capacity > 0:
            elements[name] = {
                'type': 'reservoir',
                'tech': 'reservoir',
def generation(config, scenario_year, datapackage_dir, raw_data_path):
    """Create hydro generation elements (run-of-river, reservoir, pumped
    hydro storage) and their inflow profiles and write them to the
    datapackage.
    """
    countries, scenario_year = (
        config["buses"]["electricity"],
        config["scenario"]["year"],
    )

    building.download_data(
        "https://zenodo.org/record/804244/files/Hydro_Inflow.zip?download=1",
        directory=raw_data_path,
        unzip_file="Hydro_Inflow/",
    )

    technologies = pd.DataFrame(
        Package(
            "https://raw.githubusercontent.com/ZNES-datapackages/"
            "angus-input-data/master/technology/datapackage.json"
        )
        .get_resource("technology")
        .read(keyed=True)
    ).set_index(["year", "parameter", "carrier", "tech"])

    hydro_data = pd.DataFrame(
        Package(
            "https://raw.githubusercontent.com/ZNES-datapackages/"
            "angus-input-data/master/hydro/datapackage.json"
        )
        .get_resource("hydro")
        .read(keyed=True)
    ).set_index(["year", "country"])

    hydro_data.rename(index={"UK": "GB"}, inplace=True)  # use ISO country code

    inflows = _get_hydro_inflow(
        inflow_dir=os.path.join(raw_data_path, "Hydro_Inflow")
    )

    inflows = inflows.loc[
        inflows.index.year == config["scenario"]["weather_year"], :
    ]

    # set DK inflow to zero and use the Belgian inflow as a proxy for LU
    inflows["DK"], inflows["LU"] = 0, inflows["BE"]

    for c in hydro_data.columns:
        if c != "source":
            hydro_data[c] = hydro_data[c].astype(float)

    capacities = hydro_data.loc[scenario_year].loc[countries][
        ["ror", "rsv", "phs"]
    ]
    ror_shares = hydro_data.loc[scenario_year].loc[countries]["ror-share"]
    max_hours = hydro_data.loc[scenario_year].loc[countries][
        ["rsv-max-hours", "phs-max-hours"]
    ]
    rsv_factor = hydro_data.loc[scenario_year].loc[countries]["rsv-factor"]

    # run-of-river
    elements = {}
    for country in countries:
        name = country + "-hydro-ror"

        capacity = capacities.loc[country, "ror"]

        # eta = technologies.loc[
        #     (scenario_year, "efficiency", "hydro", "ror"), "value"
        # ]

        if capacity > 0:
            elements[name] = {
                "type": "volatile",
                "tech": "ror",
                "carrier": "hydro",
                "bus": country + "-electricity",
                "capacity": capacity,
                "profile": country + "-ror-profile",
                "efficiency": 1,  # already included in the inflow profile
            }

    building.write_elements(
        "ror.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )

    sequences = (inflows[countries] * ror_shares * 1000) / capacities["ror"]
    col = list(set(countries) - set(["NO", "SE"]))
    sequences[col] = sequences[col] * 1.5  # correction factor except NO, SE
    sequences = sequences[countries].copy()
    sequences.dropna(axis=1, inplace=True)
    sequences.clip(upper=1, inplace=True)
    sequences.columns = sequences.columns.astype(str) + "-ror-profile"

    building.write_sequences(
        "ror_profile.csv",
        sequences.set_index(building.timeindex(str(scenario_year))),
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )

    # reservoir
    elements = {}
    for country in countries:
        name = country + "-hydro-reservoir"

        capacity = capacities.loc[country, "rsv"]
        rsv_max_hours = max_hours.loc[country, "rsv-max-hours"]

        # eta = technologies.loc[
        #     (scenario_year, "efficiency", "hydro", "rsv"), "value"
        # ]

        if capacity > 0:
            elements[name] = {
                "type": "reservoir",
                "tech": "rsv",
                "carrier": "hydro",
                "bus": country + "-electricity",
                "capacity": capacity,
                "storage_capacity": capacity * rsv_max_hours,
                "profile": country + "-reservoir-profile",
                "efficiency": 1,  # already included in the inflow profile
                "marginal_cost": 0.0000001,
            }

    building.write_elements(
        "reservoir.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )

    sequences = inflows[countries] * (1 - ror_shares) * 1000
    sequences[["NO", "SE"]] = sequences[["NO", "SE"]] * 1.6  # correction factor
    sequences = sequences[countries].copy()
    sequences.dropna(axis=1, inplace=True)
    sequences.columns = sequences.columns.astype(str) + "-reservoir-profile"

    building.write_sequences(
        "reservoir_profile.csv",
        sequences.set_index(building.timeindex(str(scenario_year))),
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )

    # pumped hydro storage (phs)
    elements = {}
    for country in countries:
        name = country + "-hydro-phs"

        capacity = capacities.loc[country, "phs"]
        phs_max_hours = max_hours.loc[country, "phs-max-hours"]

        eta = technologies.loc[
            (scenario_year, "efficiency", "hydro", "phs"), "value"
        ]

        if capacity > 0:
            elements[name] = {
                "type": "storage",
                "tech": "phs",
                "carrier": "hydro",
                "bus": country + "-electricity",
                "capacity": capacity,
                "loss": 0,
                "marginal_cost": 1,
                "storage_capacity": capacity * phs_max_hours,
                "storage_capacity_initial": 0.5,
                "efficiency": float(eta) ** 0.5,  # roundtrip to input/output
            }

    building.write_elements(
        "phs.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )
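# Note on the profile scaling in `generation` (a sketch, assuming the hydro
# inflow data is given in GWh per hour): multiplying by 1000 converts the
# inflow to MWh, and dividing by the installed capacity in MW yields a
# per-unit profile. For example, an inflow of 1.2 GWh/h with 2000 MW of
# run-of-river capacity and a ror share of 0.5 gives
# (1.2 * 0.5 * 1000) / 2000 = 0.3, which is then scaled by the 1.5 correction
# factor (for countries other than NO and SE) and clipped at 1.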
def german_heat_system(
    heat_buses,
    weather_year,
    scenario,
    scenario_year,
    wacc,
    decentral_heat_flex_share,
    sensitivities,
    datapackage_dir,
    raw_data_path,
):
    """Create the German heat system (heat loads, heat pumps and thermal
    storages) with the corresponding load and COP profiles and write them
    to the datapackage.
    """
    technologies = pd.DataFrame(
        # Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
        Package(
            "https://raw.githubusercontent.com/ZNES-datapackages/"
            "angus-input-data/master/technology/datapackage.json"
        )
        .get_resource("heat")
        .read(keyed=True)
    ).set_index(["year", "parameter", "carrier", "tech"])

    data = (
        pd.DataFrame(
            Package(
                "https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/capacities/datapackage.json"
            )
            .get_resource("german-heat-system")
            .read(keyed=True)
        )
        .set_index(["scenario", "year", "carrier", "tech"])
        .loc[(scenario, scenario_year)]
    )

    filepath = building.download_data(
        "https://data.open-power-system-data.org/when2heat/"
        "opsd-when2heat-2019-08-06.zip",
        directory=raw_data_path,
        unzip_file="opsd-when2heat-2019-08-06/",
    )

    df = pd.read_csv(
        os.path.join(filepath, "opsd-when2heat-2019-08-06", "when2heat.csv"),
        index_col=[0],
        parse_dates=True,
        sep=";",
    )

    cop = pd.read_csv(
        os.path.join(filepath, "opsd-when2heat-2019-08-06", "when2heat.csv"),
        decimal=",",
        index_col=[0],
        parse_dates=True,
        sep=";",
    )

    # drop February 29 so leap years match the 8760-hour scenario time index
    df = df[~((df.index.month == 2) & (df.index.day == 29))]
    cop = cop[~((cop.index.month == 2) & (cop.index.day == 29))]

    data["country"] = "DE"
    data.set_index("country", append=True, inplace=True)

    if sensitivities is not None:
        for k, v in sensitivities.items():
            k = k.split("-")
            data.at[(k[1], k[2], k[0]), "value"] = v

    elements = []
    sequences = {}

    weather_year = str(weather_year)

    gshp_cop = cop.loc[
        weather_year,
        ["DE_COP_GSHP_floor", "DE_COP_GSHP_radiator", "DE_COP_GSHP_water"],
    ].mean(axis=1)

    ashp_cop = cop.loc[
        weather_year,
        ["DE_COP_ASHP_floor", "DE_COP_ASHP_radiator", "DE_COP_ASHP_water"],
    ].mean(axis=1)

    el_buses = building.read_elements(
        "bus.csv", directory=os.path.join(datapackage_dir, "data/elements")
    )

    heat_demand_total = (
        float(data.loc[("decentral_heat", "load"), "value"]) * 1000
    )  # MWh

    for bustype, buses in heat_buses.items():
        carrier = bustype + "_heat"

        for b in buses:
            heat_bus = "-".join([b, carrier, "bus"])

            flex_peak_demand_heat = (
                df.loc[weather_year][b + "_heat_demand_total"]
                / df.loc[weather_year][b + "_heat_demand_total"].sum()
                * heat_demand_total
            ).max() * decentral_heat_flex_share  # MW

            peak_demand_heat = (
                df.loc[weather_year][b + "_heat_demand_total"]
                / df.loc[weather_year][b + "_heat_demand_total"].sum()
                * heat_demand_total
            ).max() * (1 - decentral_heat_flex_share)  # MW

            el_buses.loc[heat_bus] = [True, "heat", None, "bus"]

            profile_name = "-".join([b, carrier, "load", "profile"])

            if "flex" in bustype:
                elements.append({
                    "name": "-".join([b, carrier, "load"]),
                    "type": "load",
                    "bus": heat_bus,
                    "amount": heat_demand_total * decentral_heat_flex_share,
                    "profile": profile_name,
                    "carrier": carrier,
                })

                elements.append({
                    "name": "-".join([b, carrier, "gshp"]),
                    "type": "conversion",
                    "to_bus": heat_bus,
                    "capacity_cost": (
                        float(
                            technologies.loc[
                                (2050, "fom", "decentral_heat", "gshp"),
                                "value",
                            ]
                        )
                        + annuity(
                            float(
                                technologies.loc[
                                    (2050, "capex", "decentral_heat", "gshp"),
                                    "value",
                                ]
                            ),
                            float(
                                technologies.loc[
                                    (2050, "lifetime", "decentral_heat", "gshp"),
                                    "value",
                                ]
                            ),
                            wacc,
                        )
                        * 1000,  # €/kW -> €/MW
                    )[0],
                    "from_bus": "DE-electricity",
                    "expandable": True,
                    "capacity": flex_peak_demand_heat,
                    "efficiency": "DE-gshp-profile",
                    "carrier": carrier,
                    "tech": "gshp",
                })

                name = "-".join([b, carrier, "tes"])

                if sensitivities is not None:
                    if name in sensitivities.keys():
                        capacity = sensitivities[name]
                    else:
                        capacity = flex_peak_demand_heat
                else:
                    capacity = flex_peak_demand_heat

                carrier = carrier.replace("flex-", "")

                elements.append({
                    "name": name,
                    "type": "storage",
                    "bus": heat_bus,
                    # "capacity": capacity,
                    "capacity_cost": 0,
                    "storage_capacity_cost": (
                        float(
                            technologies.loc[
                                (2050, "fom", "decentral_heat", "tes"),
                                "value",
                            ]
                        )
                        * 1000
                    )
                    + (
                        annuity(
                            float(
                                technologies.loc[
                                    (2050, "capex_energy", "decentral_heat", "tes"),
                                    "value",
                                ]
                            ),
                            float(
                                technologies.loc[
                                    (2050, "lifetime", "decentral_heat", "tes"),
                                    "value",
                                ]
                            ),
                            wacc,
                        )
                        * 1000,  # €/kWh -> €/MWh
                    )[0],
                    "expandable": True,
                    # "storage_capacity": capacity * float(technologies.loc[
                    #     (2050, "max_hours", carrier, "tes"), "value"
                    # ]),
                    "efficiency": float(
                        technologies.loc[
                            (2050, "efficiency", carrier, "tes"), "value"
                        ]
                    )
                    ** 0.5,  # roundtrip conversion
                    "loss": technologies.loc[
                        (2050, "loss", carrier, "tes"), "value"
                    ],
                    "marginal_cost": 0.001,
                    "carrier": carrier,
                    "tech": "tes",
                })
            else:
                elements.append({
                    "name": "-".join([b, carrier, "load"]),
                    "type": "load",
                    "bus": heat_bus,
                    "amount": heat_demand_total * (1 - decentral_heat_flex_share),
                    "profile": profile_name,
                    "carrier": carrier,
                })

                elements.append({
                    "name": "-".join([b, carrier, "gshp"]),
                    "type": "conversion",
                    "to_bus": heat_bus,
                    "capacity_cost": 0,
                    "expandable": False,
                    "from_bus": "DE-electricity",
                    "capacity": peak_demand_heat,
                    "efficiency": "DE-gshp-profile",
                    "carrier": carrier,
                    "tech": "gshp",
                })

            sequences[profile_name] = (
                df.loc[weather_year][b + "_heat_demand_total"]
                / df.loc[weather_year][b + "_heat_demand_total"].sum()
            )

    sequences_df = pd.DataFrame(sequences)
    sequences_df.index.name = "timeindex"
    sequences_df.index = building.timeindex(year=str(scenario_year))

    sequences_cop = pd.concat([gshp_cop, ashp_cop], axis=1)
    sequences_cop.columns = ["DE-gshp-profile", "DE-ashp-profile"]
    sequences_cop.index.name = "timeindex"
    sequences_cop.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "efficiency_profile.csv",
        sequences_cop,
        directory=os.path.join(datapackage_dir, "data/sequences"),
    )

    if "NEPC" in scenario:
        # uses the heat demand of the last bus `b` from the loop above
        must_run_sequences = {}
        must_run_sequences["DE-must-run-profile"] = (
            df.loc[weather_year][b + "_heat_demand_total"]
            / df.loc[weather_year][b + "_heat_demand_total"].max()
        )

        must_run_sequences_df = pd.DataFrame(must_run_sequences)
        must_run_sequences_df = (
            (must_run_sequences_df * 3 * 8300).clip(upper=8300) / 8300
        )  # calibrate for 2030NEPC
        must_run_sequences_df.index.name = "timeindex"
        must_run_sequences_df.index = building.timeindex(
            year=str(scenario_year)
        )

        building.write_sequences(
            "volatile_profile.csv",
            must_run_sequences_df,
            directory=os.path.join(datapackage_dir, "data/sequences"),
        )

    building.write_elements(
        "heat_load.csv",
        pd.DataFrame(
            [i for i in elements if i["type"] == "load"]
        ).set_index("name"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )

    building.write_elements(
        "heatpump.csv",
        pd.DataFrame(
            [i for i in elements if i["type"] == "conversion"]
        ).set_index("name"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )

    building.write_elements(
        "heat_storage.csv",
        pd.DataFrame(
            [i for i in elements if i["type"] == "storage"]
        ).set_index("name"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )

    building.write_elements(
        "bus.csv",
        el_buses,
        directory=os.path.join(datapackage_dir, "data/elements"),
        replace=True,
    )

    building.write_sequences(
        "heat_load_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data/sequences"),
    )
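# The capacity costs above combine fixed O&M with an annualised investment
# cost via the imported `annuity` helper. A sketch of the usual annuity
# formula (an assumption -- the actual helper is defined elsewhere and may
# differ, e.g. in how it treats wacc = 0):
#
#     def annuity_sketch(capex, lifetime, wacc):
#         """Annual payment equivalent to `capex` over `lifetime` years at
#         interest rate `wacc` (annuity factor times capex)."""
#         return capex * (wacc * (1 + wacc) ** lifetime) / (
#             (1 + wacc) ** lifetime - 1
#         )
#
#     # e.g. capex of 800 €/kW, 20 years, 5 % wacc:
#     # annuity_sketch(800, 20, 0.05) ≈ 64.2 €/kW/a, times 1000 -> €/MW/a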
    # scale the German load time series to a total of 596.3e6
    # (596.3 TWh if the data is given in MWh)
    timeseries['DE_load_old'] = timeseries['DE_load_old'] * (
        596.3e6 / timeseries['DE_load_old'].sum())

    load_total = timeseries.sum()

    load_profile = timeseries / load_total

    elements = {}
    sequences = pd.DataFrame(index=load_profile.index)

    for c in countries:
        element_name = c + '-load'
        sequence_name = element_name + '-profile'

        sequences[sequence_name] = load_profile[c + suffix].values

        element = {
            'bus': c + '-electricity',
            'amount': load_total[c + suffix],
            'profile': sequence_name,
            'tech': 'load',
            'type': 'load'
        }

        elements[element_name] = element

    building.write_elements(
        'load.csv', pd.DataFrame.from_dict(elements, orient='index'))

    sequences.index = building.timeindex(year)

    building.write_sequences('load_profile.csv', sequences)
def opsd_profile(
    buses, demand_year, scenario_year, datapackage_dir, raw_data_path
):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    demand_year: integer or string
        Demand year to select
    scenario_year: integer or string
        Year of scenario to use for timeindex to resource
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file is located
    """
    filepath = building.download_data(
        "https://data.open-power-system-data.org/time_series/2018-06-30/"
        "time_series_60min_singleindex.csv",
        directory=raw_data_path,
    )

    if os.path.exists(filepath):
        raw_data = pd.read_csv(filepath, index_col=[0], parse_dates=True)
    else:
        raise FileNotFoundError(
            "File for OPSD loads does not exist. Did you download data?"
        )

    suffix = "_load_entsoe_power_statistics"

    countries = buses

    columns = [c + suffix for c in countries]

    timeseries = raw_data[str(demand_year)][columns]

    if timeseries.isnull().values.any():
        raise ValueError(
            "Timeseries for load has NaN values. Select "
            "another demand year or use another data source."
        )

    load_total = timeseries.sum()

    load_profile = timeseries / load_total

    sequences_df = pd.DataFrame(index=load_profile.index)

    elements = building.read_elements(
        "load.csv",
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )

    for c in countries:
        # get the sequence name from the existing load elements
        # (a regex could also be used to match 'elec' or similar bus names)
        sequence_name = elements.at[
            elements.index[elements.index.str.contains(c)][0], "profile"
        ]
        sequences_df[sequence_name] = load_profile[c + suffix].values

    if sequences_df.index.is_leap_year[0]:
        # drop February 29 so the profile matches the 8760-hour time index
        sequences_df = sequences_df.loc[
            ~((sequences_df.index.month == 2) & (sequences_df.index.day == 29))
        ]

    sequences_df.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "load_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data/sequences"),
    )
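# Usage sketch for `opsd_profile` (hypothetical values; note that the load
# elements written by an earlier step must already exist in
# <datapackage_dir>/data/elements/load.csv, since the profile names are
# looked up there):
#
#     opsd_profile(
#         buses=["DE", "FR"],
#         demand_year=2015,
#         scenario_year=2030,
#         datapackage_dir="datapackages/my-scenario",
#         raw_data_path="raw-data",
#     )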
def emhires_wind_profiles(
    buses, weather_year, scenario_year, datapackage_dir, raw_data_path
):
    """
    Gonzalez Aparicio, Iratxe; Zucker, Andreas; Careri, Francesco;
    Monforti Ferrario, Fabio; Huld, Thomas; Badger, Jake (2016): Wind hourly
    generation time series at country, NUTS 1, NUTS 2 level and bidding
    zones. European Commission, Joint Research Centre (JRC) [Dataset]
    PID: http://data.europa.eu/89h/jrc-emhires-wind-generation-time-series
    """
    year = str(weather_year)

    countries = buses

    date_parser = lambda y: datetime.strptime(y, "%Y %m %d %H")
    date_columns = ["Year", "Month", "Day", "Hour"]

    urls = [
        "http://setis.ec.europa.eu/sites/default/files/EMHIRES_DATA/EMHIRES_WIND_COUNTRY_June2019.zip",
        "http://setis.ec.europa.eu/sites/default/files/EMHIRES_DATA/TS_CF_OFFSHORE_30yr_date.zip",
    ]
    filenames = [
        "EMHIRES_WIND_COUNTRY_June2019.xlsx",
        "TS.CF.OFFSHORE.30yr.date.txt",
    ]
    technologies = ["onshore", "offshore"]

    for url, fname, tech in zip(urls, filenames, technologies):
        if fname.endswith(".xlsx"):
            df = (
                pd.read_excel(
                    building.download_data(
                        url, unzip_file=fname, directory=raw_data_path
                    ),
                    parse_dates={"i": date_columns},
                    date_parser=date_parser,
                    index_col="i",
                )
                .reindex(columns=countries)
                .dropna(axis=1)
                .loc[year, :]
            )
        else:
            df = (
                pd.read_csv(
                    building.download_data(
                        url, unzip_file=fname, directory=raw_data_path
                    ),
                    sep="\t",
                    parse_dates={"i": date_columns},
                    date_parser=date_parser,
                    index_col="i",
                )
                .reindex(columns=countries)
                .dropna(axis=1)
                .loc[year, :]
            )

        renames = {c: c + "-" + tech + "-profile" for c in countries}
        df.rename(columns=renames, inplace=True)

        df = df[~((df.index.month == 2) & (df.index.day == 29))]
        df.index = building.timeindex(year=str(scenario_year))

        building.write_sequences(
            "volatile_profile.csv",
            df,
            directory=os.path.join(datapackage_dir, "data", "sequences"),
        )
def eGo_offshore_wind_profiles(
    buses,
    weather_year,
    scenario_year,
    datapackage_dir,
    raw_data_path,
    correction_factor=0.8,
):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    weather_year: integer or string
        Year to select from raw data source
    scenario_year: integer or string
        Year to use for timeindex in tabular resource
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file is located
    correction_factor: float
        Factor applied to scale the offshore profiles (default 0.8)
    """
    filepath = building.download_data(
        "https://github.com/znes/FlEnS/archive/master.zip",
        unzip_file="FlEnS-master/open_eGo/NEP_2035/nep_2035_seq.csv",
        directory=raw_data_path,
    )
    wind = pd.read_csv(
        filepath, parse_dates=True, index_col=0, header=[0, 1, 2, 3, 4]
    )
    wind.columns = wind.columns.droplevel([0, 2, 3, 4])
    wind.reset_index(inplace=True)

    sequences_df = pd.DataFrame()

    # use vernetzen data for the 2050 profiles
    filepath_2050 = building.download_data(
        "https://github.com/znes/FlEnS/archive/master.zip",
        unzip_file="FlEnS-master/Socio-ecologic/2050_seq.csv",
        directory=raw_data_path,
    )
    wind_2050 = pd.read_csv(
        filepath_2050, parse_dates=True, index_col=0, header=[0, 1, 2, 3, 4]
    )
    wind_2050.columns = wind_2050.columns.droplevel([0, 2, 3, 4])
    wind_2050["DE_wind_offshore"] = (
        wind_2050["DEdr19_wind_offshore"] * 0.2
        + wind_2050["DEdr20_wind_offshore"] * 0.4
        + wind_2050["DEdr21_wind_offshore"] * 0.4
    )
    wind_2050.reset_index(inplace=True)
    wind_2050["DE_wind_onshore"] = wind["DE_wind_onshore"]
    wind = wind_2050

    for c in buses:
        if c + "_wind_offshore" in wind.columns:
            sequences_df[c + "-offshore-profile"] = (
                wind[c + "_wind_offshore"] * correction_factor
            )

    sequences_df.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "volatile_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )
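# Usage sketch for `eGo_offshore_wind_profiles` (hypothetical values); the
# optional `correction_factor` simply scales the offshore feed-in profiles,
# e.g. 0.8 reduces every hourly value by 20 %:
#
#     eGo_offshore_wind_profiles(
#         buses=["DE"],
#         weather_year=2011,
#         scenario_year=2030,
#         datapackage_dir="datapackages/my-scenario",
#         raw_data_path="raw-data",
#         correction_factor=0.8,
#     )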