Code example #1
File: make_summary.py  Project: pz-max/pypsa-eur-sec
        for output in outputs:
            df[output] = globals()["calculate_" + output](n, label, df[output])

    return df


def to_csv(df):
    for key in df:
        df[key].to_csv(snakemake.output[key])


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('make_summary')

    networks_dict = {
        (cluster, lv, opt+sector_opt, planning_horizon) :
        snakemake.config['results_dir'] + snakemake.config['run'] + f'/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc' \
        for simpl in snakemake.config['scenario']['simpl'] \
        for cluster in snakemake.config['scenario']['clusters'] \
        for opt in snakemake.config['scenario']['opts'] \
        for sector_opt in snakemake.config['scenario']['sector_opts'] \
        for lv in snakemake.config['scenario']['lv'] \
        for planning_horizon in snakemake.config['scenario']['planning_horizons']
    }

    print(networks_dict)

    Nyears = 1
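
Note: the dictionary comprehension above maps every scenario combination to the path of the corresponding post-network file. A minimal illustration of one key/value pair, using toy wildcard values rather than the project's actual config.yaml:

# toy wildcard values, for illustration only
simpl, cluster, lv, opt, sector_opt, planning_horizon = '', '37', '1.0', '', 'Co2L0-168H-T-H-B-I', 2030
key = (cluster, lv, opt + sector_opt, planning_horizon)
path = 'results/my_run' + f'/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc'
print(key, '->', path)
# ('37', '1.0', 'Co2L0-168H-T-H-B-I', 2030) -> results/my_run/postnetworks/elec_s_37_lv1.0__Co2L0-168H-T-H-B-I_2030.nc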
Code example #2
    lng["type"] = "lng"
    entry["type"] = "pipeline"
    prod["type"] = "production"

    sel = ["geometry", "p_nom", "type"]

    return pd.concat([prod[sel], entry[sel], lng[sel]], ignore_index=True)


if __name__ == "__main__":

    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_gas_import_locations',
            simpl='',
            clusters='37',
        )

    logging.basicConfig(level=snakemake.config['logging_level'])

    regions = load_bus_regions(snakemake.input.regions_onshore,
                               snakemake.input.regions_offshore)

    # add a buffer to eastern countries because some
    # entry points are still in Russian or Ukrainian territory.
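    # (the buffer is applied after reprojecting to EPSG:3035, a metre-based CRS;
    # buffering directly in EPSG:4326 would interpret the distance in degrees)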
    buffer = 9000  # meters
    eastern_countries = ['FI', 'EE', 'LT', 'LV', 'PL', 'SK', 'HU', 'RO']
    add_buffer_b = regions.index.str[:2].isin(eastern_countries)
    regions.loc[add_buffer_b] = regions[add_buffer_b].to_crs(3035).buffer(
        buffer).to_crs(4326)
Code example #3
    https://doi.org/10.1039/C2EE22653G.
    """
    if source == 'air':
        return 6.81 - 0.121 * delta_T + 0.000630 * delta_T**2
    elif source == 'soil':
        return 8.77 - 0.150 * delta_T + 0.000734 * delta_T**2
    else:
        raise NotImplementedError("'source' must be one of ['air', 'soil']")


if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_cop_profiles',
            simpl='',
            clusters=48,
        )

    for area in ["total", "urban", "rural"]:

        for source in ["air", "soil"]:

            source_T = xr.open_dataarray(
                snakemake.input[f"temp_{source}_{area}"])

            delta_T = snakemake.config['sector']['heat_pump_sink_T'] - source_T

            cop = coefficient_of_performance(delta_T, source)

            cop.to_netcdf(snakemake.output[f"cop_{source}_{area}"])
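
Note: a quick sanity check of the air-source regression above, with illustrative temperatures (not taken from the project's config):

delta_T = 45  # e.g. heat sink at 55 °C, ambient air at 10 °C
cop_air = 6.81 - 0.121 * delta_T + 0.000630 * delta_T**2
print(round(cop_air, 2))  # -> 2.64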
Code example #4
    # EE, HR and LT got negative demand through subtraction - poor data
    demand['Basic chemicals'].clip(lower=0., inplace=True)

    # assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals
    distribution_key = demand["Basic chemicals"] / demand["Basic chemicals"].sum()
    demand["HVC"] = config["HVC_production_today"] * 1e3 * distribution_key
    demand["Chlorine"] = config["chlorine_production_today"] * 1e3 * distribution_key
    demand["Methanol"] = config["methanol_production_today"] * 1e3 * distribution_key

    demand.drop(columns=["Basic chemicals"], inplace=True)

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_industrial_production_per_country')

    countries = non_EU + eu28

    year = snakemake.config['industry']['reference_year']

    config = snakemake.config["industry"]

    jrc_dir = snakemake.input.jrc
    eurostat_dir = snakemake.input.eurostat

    demand = industry_production(countries)

    separate_basic_chemicals(demand)

    fn = snakemake.output.industrial_production_per_country
Code example #5
    nodal_production = pd.DataFrame(index=keys.index,
                                    columns=industrial_production.columns,
                                    dtype=float)

    countries = keys.country.unique()
    sectors = industrial_production.columns

    for country, sector in product(countries, sectors):

        buses = keys.index[keys.country == country]
        mapping = sector_mapping.get(sector, "population")

        key = keys.loc[buses, mapping]
        nodal_production.loc[buses,
                             sector] = industrial_production.at[country,
                                                                sector] * key

    nodal_production.to_csv(snakemake.output.industrial_production_per_node)


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_industrial_production_per_node',
            simpl='',
            clusters=48,
        )

    build_nodal_industrial_production()
Code example #6
        else:
            new_pipes = n.links.carrier.isin(pipe_carrier) & (n.links.build_year==year)
            n.links.loc[new_pipes, "p_nom"] = 0.
            n.links.loc[new_pipes, "p_nom_min"] = 0.



#%%
if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'add_brownfield',
            simpl='',
            clusters="37",
            opts="",
            lv=1.0,
            sector_opts='168H-T-H-B-I-solar+p3-dist1',
            planning_horizons=2030,
        )

    print(snakemake.input.network_p)
    logging.basicConfig(level=snakemake.config['logging_level'])

    year = int(snakemake.wildcards.planning_horizons)

    overrides = override_component_attrs(snakemake.input.overrides)
    n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)

    add_build_year_to_new_assets(n, year)
Code example #7
    ambitious_dE_cost.columns = pd.MultiIndex.from_product([ambitious_dE_cost.columns,
                                                           ["ambitious"]])

    cost_dE_new = pd.concat([moderate_dE_cost, ambitious_dE_cost], axis=1)

    return cost_dE_new, area_tot


#%% --- MAIN --------------------------------------------------------------
if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_retro_cost',
            simpl='',
            clusters=48,
            lv=1.0,
            sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1'
        )

#  ********  config  *********************************************************

    retro_opts =  snakemake.config["sector"]["retrofitting"]
    interest_rate = retro_opts["interest_rate"]
    annualise_cost = retro_opts["annualise_cost"]  # annualise the investment costs
    tax_weighting = retro_opts["tax_weighting"]   # weight costs depending on taxes in countries
    construction_index = retro_opts["construction_index"]   # weight costs depending on labour/material costs per ct

    # mapping missing countries by neighbours
    map_for_missings = {
        "AL": ["BG", "RO", "GR"],
Code example #8
    s_out = idees["out"][3:4]
    assert "Physical output" in str(s_out.index)

    # MWh/t material
    sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"]
    df.loc[sources, sector] = (df.loc[sources, sector] * toe_to_MWh /
                               s_out["Physical output (index)"])

    return df


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_industry_sector_ratios')

    # TODO make config option
    year = 2015

    config = snakemake.config["industry"]

    df = pd.concat(
        [
            iron_and_steel(),
            chemicals_industry(),
            nonmetalic_mineral_products(),
            pulp_paper_printing(),
            food_beverages_tobacco(),
            non_ferrous_metals(),
            transport_equipment(),
Code example #9
    missing = transport_data.index[
        transport_data["average fuel efficiency"].isna()]
    print(
        f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data."
    )

    fill_values = transport_data["average fuel efficiency"].mean()
    transport_data.loc[missing, "average fuel efficiency"] = fill_values

    return transport_data


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_energy_totals')

    config = snakemake.config["energy"]

    nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index")
    population = nuts3["pop"].groupby(nuts3.country).sum()

    countries = population.index
    idees_countries = countries.intersection(eu28)

    data_year = config["energy_totals_year"]
    eurostat = build_eurostat(countries, data_year)
    swiss = build_swiss(data_year)
    idees = build_idees(idees_countries, data_year)

    energy = build_energy_totals(countries, eurostat, swiss, idees)
Code example #10
    adjust_cols = overlay.columns.difference(
        {"name", "area_nuts2", "geometry", "share"})
    overlay[adjust_cols] = overlay[adjust_cols].multiply(overlay["share"],
                                                         axis=0)
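    # (potentials scaled by the overlap share here; aggregated back to clustered regions below)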

    bio_regions = overlay.groupby("name").sum()

    bio_regions.drop(["area_nuts2", "share"], axis=1, inplace=True)

    return bio_regions


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_biomass_potentials')

    config = snakemake.config['biomass']
    year = config["year"]
    scenario = config["scenario"]

    enspreso = enspreso_biomass_potentials(year, scenario)

    enspreso = disaggregate_nuts0(enspreso)

    nuts2 = build_nuts2_shapes()

    df_nuts2 = gpd.GeoDataFrame(nuts2.geometry).join(enspreso)

    regions = gpd.read_file(snakemake.input.regions_onshore)
Code example #11
    overlay = gpd.overlay(regions.reset_index(), caverns, keep_geom_type=True)

    # calculate share of cavern area inside region
    overlay["share"] = area(overlay) / overlay["area_caverns"]

    overlay["e_nom"] = overlay.eval(
        "capacity_per_area * share * area_caverns / 1000")  # TWh

    caverns_regions = overlay.groupby(['name', "storage_type"
                                       ]).e_nom.sum().unstack("storage_type")

    return caverns_regions


if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_salt_cavern_potentials',
                                   simpl='',
                                   clusters='37')

    fn_onshore = snakemake.input.regions_onshore
    fn_offshore = snakemake.input.regions_offshore

    regions = load_bus_regions(fn_onshore, fn_offshore)

    caverns = gpd.read_file(snakemake.input.salt_caverns)  # GWh/sqkm

    caverns_regions = salt_cavern_potential_by_region(caverns, regions)

    caverns_regions.to_csv(snakemake.output.h2_cavern_potential)
Code example #12
    "Lithuania": "LT",
    "Netherlands": "NL",
    "Norwaye": "NO",
    "Poland": "PL",
    "Romania": "RO",
    "Serbia": "RS",
    "Slovakia": "SK",
    "Spain": "ES",
    "Switzerland": "CH",
    "United Kingdom": "GB",
}

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_ammonia_production')

    ammonia = pd.read_excel(snakemake.input.usgs,
                            sheet_name="T12",
                            skiprows=5,
                            header=0,
                            index_col=0,
                            skipfooter=19)

    ammonia.rename(country_to_alpha2, inplace=True)

    years = [str(i) for i in range(2013, 2018)]
    countries = country_to_alpha2.values()
    ammonia = ammonia.loc[countries, years].astype(float)

    # convert from ktonN to ktonNH3
Code example #13
"""Build heat demand time series."""

import geopandas as gpd
import atlite
import pandas as pd
import xarray as xr
import numpy as np

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_heat_demands',
            simpl='',
            clusters=48,
        )
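    # note: the fallback below is effectively dead code, since `snakemake` is
    # guaranteed to be in globals() after the block above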

    if 'snakemake' not in globals():
        from vresutils import Dict
        import yaml
        snakemake = Dict()
        with open('config.yaml') as f:
            snakemake.config = yaml.safe_load(f)
        snakemake.input = Dict()
        snakemake.output = Dict()

    time = pd.date_range(freq='h', **snakemake.config['snapshots'])
    cutout_config = snakemake.config['atlite']['cutout']
    cutout = atlite.Cutout(cutout_config).sel(time=time)

    clustered_regions = gpd.read_file(
Code example #14
Retrieve gas infrastructure data from https://zenodo.org/record/4767098/files/IGGIELGN.zip
"""

import logging
from helper import progress_retrieve

import zipfile
from pathlib import Path

logger = logging.getLogger(__name__)


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('retrieve_gas_network_data')
        rootpath = '..'
    else:
        rootpath = '.'

    url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip"

    # Save locations
    zip_fn = Path(f"{rootpath}/IGGIELGN.zip")
    to_fn = Path(f"{rootpath}/data/gas_network/scigrid-gas")

    logger.info(f"Downloading databundle from '{url}'.")
    progress_retrieve(url, zip_fn)

    logger.info(f"Extracting databundle.")
    zipfile.ZipFile(zip_fn).extractall(to_fn)
Code example #15
File: copy_config.py  Project: nworbmot/pypsa-eur-sec
from shutil import copy

files = [
    "config.yaml", "Snakefile", "scripts/solve_network.py",
    "scripts/prepare_sector_network.py"
]

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('copy_config')

    for f in files:
        copy(
            f, snakemake.config['summary_dir'] + '/' +
            snakemake.config['run'] + '/configs/')
Code example #16
"""Build clustered population layouts."""

import geopandas as gpd
import xarray as xr
import pandas as pd
import atlite

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_clustered_population_layouts',
            simpl='',
            clusters=48,
        )

    cutout = atlite.Cutout(snakemake.config['atlite']['cutout'])

    clustered_regions = gpd.read_file(
        snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()

    I = cutout.indicatormatrix(clustered_regions)

    pop = {}
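    # aggregate the gridded population layouts onto the clustered regions via the indicator matrix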
    for item in ["total", "urban", "rural"]:
        pop_layout = xr.open_dataarray(snakemake.input[f'pop_layout_{item}'])
        pop[item] = I.dot(pop_layout.stack(spatial=('y', 'x')))

    pop = pd.DataFrame(pop, index=clustered_regions.index)

    pop["ct"] = pop.index.str[:2]
Code example #17
    tqdm_kwargs = dict(ascii=False,
                       unit=' country',
                       total=len(countries),
                       desc="Build industrial energy demand")
    with mp.Pool(processes=nprocesses) as pool:
        demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))

    demand = pd.concat(demand_l, keys=countries)

    return demand


if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_industrial_energy_demand_per_country_today')

    config = snakemake.config['industry']
    year = config.get('reference_year', 2015)

    demand = industrial_energy_demand(eu28)

    demand = add_ammonia_energy_demand(demand)

    demand = add_non_eu28_industrial_energy_demand(demand)

    # for format compatibility
    demand = demand.stack(dropna=False).unstack(level=[0, 2])

    # style and annotation
    demand.index.name = 'TWh/a'
Code example #18
                'threshold_capacity']
            n.mremove("Link", [
                index for index in n.links.index.to_list()
                if str(grouping_year) in index
                and n.links.p_nom[index] < threshold
            ])


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'add_existing_baseyear',
            simpl='',
            clusters=45,
            lv=1.0,
            opts='',
            sector_opts='Co2L0-168H-T-H-B-I-solar+p3-dist1',
            planning_horizons=2020,
        )

    logging.basicConfig(level=snakemake.config['logging_level'])

    options = snakemake.config["sector"]
    opts = snakemake.wildcards.sector_opts.split('-')

    baseyear = snakemake.config['scenario']["planning_horizons"][0]

    overrides = override_component_attrs(snakemake.input.overrides)
    n = pypsa.Network(snakemake.input.network,
                      override_component_attrs=overrides)
Code example #19
    for country, sector in product(countries, sectors):

        buses = keys.index[keys.country == country]
        mapping = sector_mapping.get(sector, 'population')

        key = keys.loc[buses, mapping]
        demand = industrial_demand[country, sector]
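        # np.outer below spreads the country/sector demand vector over the buses
        # in proportion to each bus's distribution key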

        outer = pd.DataFrame(np.outer(key, demand),
                             index=key.index,
                             columns=demand.index)

        nodal_demand.loc[buses] += outer

    nodal_demand.index.name = "TWh/a"

    nodal_demand.to_csv(
        snakemake.output.industrial_energy_demand_per_node_today)


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_industrial_energy_demand_per_node_today',
            simpl='',
            clusters=48,
        )

    build_nodal_industrial_energy_demand()
Code example #20
    ax.set_ylabel("Power [GW]")
    fig.tight_layout()

    fig.savefig("{}{}/maps/series-{}-{}-{}-{}-{}.pdf".format(
        snakemake.config['results_dir'], snakemake.config['run'],
        snakemake.wildcards["lv"], carrier, start, stop, name),
                transparent=True)


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'plot_network',
            simpl='',
            clusters=48,
            lv=1.0,
            sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
            planning_horizons=2050,
        )

    overrides = override_component_attrs(snakemake.input.overrides)
    n = pypsa.Network(snakemake.input.network,
                      override_component_attrs=overrides)

    map_opts = snakemake.config['plotting']['map']

    plot_map(n,
             components=["generators", "links", "stores", "storage_units"],
             bus_size_factor=1.5e10,
             transmission=False)
Code example #21
        n.import_components_from_dataframe(c.df, c.name)

        # copy time-dependent
        selection = (n.component_attrs[c.name].type.str.contains("series")
                     & n.component_attrs[c.name].status.str.contains("Input"))
        for tattr in n.component_attrs[c.name].index[selection]:
            n.import_series_from_dataframe(c.pnl[tattr], c.name, tattr)


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'add_brownfield',
            simpl='',
            clusters=48,
            lv=1.0,
            sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
            planning_horizons=2030,
        )

    print(snakemake.input.network_p)
    logging.basicConfig(level=snakemake.config['logging_level'])

    year = int(snakemake.wildcards.planning_horizons)

    overrides = override_component_attrs(snakemake.input.overrides)
    n = pypsa.Network(snakemake.input.network,
                      override_component_attrs=overrides)

    add_build_year_to_new_assets(n, year)
Code example #22
            
    ax1.plot([2050], [0.125 * emissions[1990]], 'ro',
             marker='*', markersize=12, markerfacecolor='black',
             markeredgecolor='black', label='EU committed target')

    ax1.legend(fancybox=True, fontsize=18, loc=(0.01, 0.01),
               facecolor='white', frameon=True)

    path_cb_plot = snakemake.config['results_dir'] + snakemake.config['run'] + '/graphs/'
    plt.savefig(path_cb_plot + 'carbon_budget_plot.pdf', dpi=300)


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('plot_summary')
        
    n_header = 4

    plot_costs()

    plot_energy()

    plot_balances()
    
    for sector_opts in snakemake.config['scenario']['sector_opts']:
        opts = sector_opts.split('-')
        for o in opts:
            if "cb" in o:
                plot_carbon_budget_distribution()
Code example #23
              min_iterations=min_iterations,
              max_iterations=max_iterations,
              extra_functionality=extra_functionality,
              keep_shadowprices=keep_shadowprices,
              **kwargs)
    return n


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'solve_network',
            simpl='',
            opts="",
            clusters="37",
            lv=1.0,
            sector_opts='168H-T-H-B-I-A-solar+p3-dist1',
            planning_horizons="2030",
        )

    logging.basicConfig(filename=snakemake.log.python,
                        level=snakemake.config['logging_level'])

    tmpdir = snakemake.config['solving'].get('tmpdir')
    if tmpdir is not None:
        from pathlib import Path
        Path(tmpdir).mkdir(parents=True, exist_ok=True)
    opts = snakemake.wildcards.opts.split('-')
    solve_opts = snakemake.config['solving']['options']
Code example #24
        "max_pressure_bar": "mean",
        "build_year": "mean",
        "diameter_mm": "mean",
        "length": 'mean',
        'name': ' '.join,
        "p_min_pu": 'min',
    }
    return df.groupby(df.index).agg(strategies)


if __name__ == "__main__":

    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('cluster_gas_network',
                                   simpl='',
                                   clusters='37')

    logging.basicConfig(level=snakemake.config['logging_level'])

    fn = snakemake.input.cleaned_gas_network
    df = pd.read_csv(fn, index_col=0)
    for col in ["point0", "point1"]:
        df[col] = df[col].apply(wkt.loads)

    bus_regions = load_bus_regions(snakemake.input.regions_onshore,
                                   snakemake.input.regions_offshore)

    gas_network = build_clustered_gas_network(df, bus_regions)

    reindex_pipes(gas_network)
Code example #25
"""Build temperature profiles."""

import geopandas as gpd
import atlite
import pandas as pd
import xarray as xr
import numpy as np

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_temperature_profiles',
            simpl='',
            clusters=48,
        )

    time = pd.date_range(freq='h', **snakemake.config['snapshots'])
    cutout_config = snakemake.config['atlite']['cutout']
    cutout = atlite.Cutout(cutout_config).sel(time=time)

    clustered_regions = gpd.read_file(
        snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()

    I = cutout.indicatormatrix(clustered_regions)

    for area in ["total", "rural", "urban"]:

        pop_layout = xr.open_dataarray(snakemake.input[f'pop_layout_{area}'])

        stacked_pop = pop_layout.stack(spatial=('y', 'x'))
Code example #26
"""Build solar thermal collector time series."""

import geopandas as gpd
import atlite
import pandas as pd
import xarray as xr
import numpy as np

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_solar_thermal_profiles',
            simpl='',
            clusters=48,
        )
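    # note: the fallback below is effectively dead code, since `snakemake` is
    # guaranteed to be in globals() after the block above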

    if 'snakemake' not in globals():
        from vresutils import Dict
        import yaml
        snakemake = Dict()
        with open('config.yaml') as f:
            snakemake.config = yaml.safe_load(f)
        snakemake.input = Dict()
        snakemake.output = Dict()

    config = snakemake.config['solar_thermal']

    time = pd.date_range(freq='h', **snakemake.config['snapshots'])
    cutout_config = snakemake.config['atlite']['cutout']
    cutout = atlite.Cutout(cutout_config).sel(time=time)
Code example #27
            else:
                #BEWARE: this is a strong assumption
                emissions = emissions.fillna(emissions.mean())
                key = emissions / emissions.sum()
            key = key.groupby(facilities.bus).sum().reindex(regions_ct,
                                                            fill_value=0.)
        else:
            key = keys.loc[regions_ct, 'population']

        keys.loc[regions_ct, sector] = key

    return keys


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_industrial_distribution_key',
            simpl='',
            clusters=48,
        )

    regions = gpd.read_file(snakemake.input.regions_onshore).set_index('name')

    hotmaps = prepare_hotmaps_database(regions)

    keys = build_nodal_distribution_key(hotmaps, regions)

    keys.to_csv(snakemake.output.industrial_distribution_key)
Code example #28
"""Build mapping between grid cells and population (total, urban, rural)"""

import multiprocessing as mp
import atlite
import numpy as np
import pandas as pd
import xarray as xr
import geopandas as gpd

from vresutils import shapes as vshapes

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_population_layouts')

    cutout = atlite.Cutout(snakemake.config['atlite']['cutout'])

    grid_cells = cutout.grid_cells()

    # nuts3 has columns country, gdp, pop, geometry
    # population is given in thousands (1e3)
    nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')

    # Indicator matrix NUTS3 -> grid cells
    I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)

    # Indicator matrix grid_cells -> NUTS3; in principle Iinv*I is the identity,
    # but numerical imprecision means it is not exactly so
    Iinv = cutout.indicatormatrix(nuts3.geometry)
Code example #29
        df.p_nom_diameter.where((df.p_nom <= 500) | (
            (ratio > correction_threshold_p_nom) & not_nordstream) | (
                (ratio < 1 / correction_threshold_p_nom) & not_nordstream)))

    # lines which have way too discrepant line lengths
    # get assigned haversine length * length factor
    df["length_haversine"] = df.apply(lambda p: length_factor * haversine_pts(
        [p.point0.x, p.point0.y], [p.point1.x, p.point1.y]),
                                      axis=1)
    ratio = df.eval("length / length_haversine")
    df["length"].update(
        df.length_haversine.where((df["length"] < 20)
                                  | (ratio > correction_threshold_length)
                                  | (ratio < 1 / correction_threshold_length)))

    return df


if __name__ == "__main__":

    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake('build_gas_network')

    logging.basicConfig(level=snakemake.config['logging_level'])

    gas_network = load_dataset(snakemake.input.gas_network)

    gas_network = prepare_dataset(gas_network)

    gas_network.to_csv(snakemake.output.cleaned_gas_network)