Example #1
def read_original_timeseries_file(overwrite=False):
    """Read timeseries file if it exists. Otherwise download it from opsd.
    """

    orig_csv_file = os.path.join(cfg.get('paths', 'entsoe'),
                                 cfg.get('entsoe', 'original_file'))
    readme = os.path.join(cfg.get('paths', 'entsoe'),
                          cfg.get('entsoe', 'readme_file'))
    json_file = os.path.join(cfg.get('paths', 'entsoe'),
                             cfg.get('entsoe', 'json_file'))

    if not os.path.isfile(orig_csv_file) or overwrite:
        req = requests.get(cfg.get('entsoe', 'timeseries_data'))
        if not overwrite:
            logging.warning("File not found. Trying to download it from the"
                            " server.")
        else:
            logging.warning("Will download the file from the server and"
                            " overwrite existing ones.")
        logging.warning("Check the URL if the download does not work.")
        with open(orig_csv_file, 'wb') as fout:
            fout.write(req.content)
        logging.warning("Downloaded from {0} and copied to '{1}'.".format(
            cfg.get('entsoe', 'timeseries_data'), orig_csv_file))
        req = requests.get(cfg.get('entsoe', 'timeseries_readme'))
        with open(readme, 'wb') as fout:
            fout.write(req.content)
        req = requests.get(cfg.get('entsoe', 'timeseries_json'))
        with open(json_file, 'wb') as fout:
            fout.write(req.content)

    orig = pd.read_csv(orig_csv_file, index_col=[0], parse_dates=True)
    orig = orig.tz_localize('UTC').tz_convert('Europe/Berlin')
    return orig
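
A minimal usage sketch, assuming the function lives in reegis_tools.entsoe
(as the later examples suggest) and the reegis config paths are initialized.
The download is cached, so only the first call hits the server:

import reegis_tools.entsoe

ts = reegis_tools.entsoe.read_original_timeseries_file()
print(ts.index.tz)  # Europe/Berlin
ts = reegis_tools.entsoe.read_original_timeseries_file(overwrite=True)
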
Example #2
def de21_profile_from_entsoe(year, share, annual_demand=None, overwrite=False):
    load_file = os.path.join(cfg.get('paths', 'entsoe'),
                             cfg.get('entsoe', 'load_file'))

    if not os.path.isfile(load_file) or overwrite:
        reegis_tools.entsoe.split_timeseries_file(overwrite)

    # start = datetime.datetime(year, 1, 1, 0, 0)
    # end = datetime.datetime(year, 12, 31, 23, 0)

    entsoe = reegis_tools.entsoe.get_entsoe_load(year)
    # entsoe = entsoe.tz_localize('UTC').tz_convert('Europe/Berlin')
    de_load_profile = entsoe.DE_load_

    load_profile = pd.DataFrame(index=de_load_profile.index)
    for i in range(21):
        region = 'DE{:02.0f}'.format(i + 1)
        if region not in share:
            share[region] = 0
        load_profile[region] = de_load_profile.multiply(float(share[region]))

    if annual_demand is not None:
        load_profile = load_profile.div(
            load_profile.sum().sum()).multiply(annual_demand)
    return load_profile
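
A usage sketch with a hypothetical share mapping; the region keys DE01..DE21
are generated inside the function and any region missing from share defaults
to zero:

share = {'DE01': 0.6, 'DE02': 0.4}  # hypothetical regional shares
profile = de21_profile_from_entsoe(2014, share, annual_demand=500e6)
print(profile.sum().sum())  # 500e6, scaled to annual_demand
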
Example #3
def guess_coordinates_by_postcode_opsd(df):
    # *** Use postcode ***
    if 'postcode' in df:
        df_pstc = df.loc[(df.lon.isnull() & df.postcode.notnull())]
        if len(df_pstc) > 0:
            pstc = pd.read_csv(os.path.join(
                cfg.get('paths', 'geometry'),
                cfg.get('geometry', 'postcode_polygon')),
                               index_col='zip_code')
        for idx, val in df_pstc.iterrows():
            try:
                # If the postcode is not a number, the integer conversion
                # will raise a ValueError. Some postcodes look like '123XX'.
                # It would be possible to add the major regions to the
                # postcode map in order to search by the first two or three
                # digits.
                postcode = int(val.postcode)
                if postcode in pstc.index:
                    df.loc[df.id == val.id, 'lon'] = wkt_loads(
                        pstc.loc[postcode].values[0]).centroid.x
                    df.loc[df.id == val.id, 'lat'] = wkt_loads(
                        pstc.loc[postcode].values[0]).centroid.y
                # Round the postcode to the nearest ten and try again.
                elif round(postcode / 10) * 10 in pstc.index:
                    postcode = round(postcode / 10) * 10
                    df.loc[df.id == val.id, 'lon'] = wkt_loads(
                        pstc.loc[postcode].values[0]).centroid.x
                    df.loc[df.id == val.id, 'lat'] = wkt_loads(
                        pstc.loc[postcode].values[0]).centroid.y
                else:
                    logging.debug("Cannot find postcode {0}.".format(postcode))
            except ValueError:
                logging.debug("Cannot find postcode {0}.".format(val.postcode))
    return df
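
A quick check of the fallback branch in plain Python. Note that round() snaps
to the nearest ten rather than truncating, and Python 3 rounds ties to the
even digit:

print(round(13357 / 10) * 10)  # 13360, not 13350
print(round(13355 / 10) * 10)  # 13360 (ties round to even)
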
Example #4
def guess_coordinates_by_spatial_names_opsd(df, fs_column, cap_col, total_cap,
                                            stat):
    # *** Use municipal_code and federal_state to define coordinates ***
    if fs_column in df:
        if 'municipality_code' in df:
            if df.municipality_code.dtype == str:
                df.loc[df.municipality_code == 'AWZ', fs_column] = 'AWZ_NS'
        if 'postcode' in df:
            df.loc[df.postcode == '000XX', fs_column] = 'AWZ'
        states = df.loc[df.lon.isnull()].groupby(fs_column).sum()[cap_col]
        logging.debug("Fraction of undefined capacity by federal state " +
                      "(percentage):")
        for (state, capacity) in states.items():
            logging.debug("{0}: {1:.4f}".format(state,
                                                capacity / total_cap * 100))
            stat.loc[state, 'undefined_capacity'] = capacity

        # A simple table with the centroid of each federal state.
        f2c = pd.read_csv(os.path.join(
            cfg.get('paths', 'geometry'),
            cfg.get('geometry', 'federalstates_centroid')),
                          index_col='name')

        # Use the centroid of each federal state if the federal state is given.
        # This is not very precise and should not be used for a high fraction
        # of plants.
        f2c = f2c.applymap(wkt_loads).centroid
        for idx in df.loc[(df.lon.isnull() & df[fs_column].notnull())].index:
            if df.loc[idx, fs_column] in f2c.index:
                df.loc[idx, 'lon'] = f2c[df.loc[idx, fs_column]].x
                df.loc[idx, 'lat'] = f2c[df.loc[idx, fs_column]].y
    return df
Example #5
def prepare_ego_demand(overwrite=False):
    egofile_de21 = os.path.join(cfg.get('paths', 'demand'),
                                cfg.get('demand', 'ego_file_de21'))

    if os.path.isfile(egofile_de21) and not overwrite:
        ego_demand_de21 = pd.read_hdf(egofile_de21, 'demand')
    else:
        ego_demand_df = reegis_tools.openego.get_ego_demand(overwrite=False)
        # Create GeoDataFrame from ego demand file.
        ego_demand = reegis_tools.geometries.Geometry(name='ego demand',
                                                      df=ego_demand_df)

        ego_demand.create_geo_df()

        # Load region polygons
        de21_regions = de21.geometries.de21_regions()

        # Add column with region id
        ego_demand.gdf = reegis_tools.geometries.spatial_join_with_buffer(
            ego_demand, de21_regions)

        # Keep only the plain DataFrame of the Geometry object, because
        # the wrapper is not needed anymore.
        ego_demand_de21 = pd.DataFrame(ego_demand.gdf)

        # Delete the geometry column, because spatial grouping will be done
        # only with the region column.
        del ego_demand_de21['geometry']

        # Write out file (hdf-format).
        ego_demand_de21.to_hdf(egofile_de21, 'demand')

    return ego_demand_de21.groupby('de21_region').sum()
Example #6
def de21_regions(suffix='vg'):
    name = os.path.join(
        cfg.get('paths', 'geo_de21'),
        cfg.get('geometry', 'de21_polygon').format(suffix=suffix))
    regions = geo.Geometry(name='de21_region')
    regions.load(fullname=name)
    return regions
Example #7
def prepare_ego_demand(egofile):
    ego_demand = geometries.Geometry(name='ego demand')
    ego_demand.load_csv(cfg.get('paths', 'static_sources'),
                        cfg.get('open_ego', 'ego_input_file'))
    ego_demand.create_geo_df(wkt_column='st_astext')

    # Add column with name of the federal state (Bayern, Berlin,...)
    federal_states = geometries.Geometry('federal states')
    federal_states.load(cfg.get('paths', 'geometry'),
                        cfg.get('geometry', 'federalstates_polygon'))

    # Add column with federal_states
    ego_demand.gdf = geometries.spatial_join_with_buffer(
        ego_demand, federal_states)

    # Overwrite the Geometry object with its plain DataFrame, because the
    # wrapper is not needed anymore.
    ego_demand = pd.DataFrame(ego_demand.gdf)

    ego_demand['geometry'] = ego_demand['geometry'].astype(str)

    # Write out file (hdf-format).
    ego_demand.to_hdf(egofile, 'demand')

    return ego_demand
Example #8
def aggregate_by_region(year, regions):
    # Create the path for the output files.
    feedin_berlin_path = cfg.get('paths_pattern',
                                 'berlin_feedin').format(year=year)
    os.makedirs(feedin_berlin_path, exist_ok=True)

    # Create pattern for the name of the resulting files.
    feedin_berlin_outfile_name = os.path.join(
        feedin_berlin_path,
        cfg.get('feedin', 'feedin_berlin_pattern').format(year=year,
                                                          type='{type}'))

    # Filter the capacity of the powerplants for the given year.
    pp = get_grouped_power_plants(year)

    # Loop over the weather-dependent feed-in categories:
    # WIND and PV
    for cat in ['Wind', 'Solar']:
        outfile_name = feedin_berlin_outfile_name.format(type=cat.lower())
        if not os.path.isfile(outfile_name):
            reegis_tools.coastdat.aggregate_by_region_coastdat_feedin(
                pp, regions, year, cat, outfile_name)

    # HYDRO
    outfile_name = feedin_berlin_outfile_name.format(type='hydro')
    if not os.path.isfile(outfile_name):
        reegis_tools.coastdat.aggregate_by_region_hydro(
            pp, regions, year, outfile_name)

    # GEOTHERMAL
    outfile_name = feedin_berlin_outfile_name.format(type='geothermal')
    if not os.path.isfile(outfile_name):
        reegis_tools.coastdat.aggregate_by_region_geothermal(
            regions, year, outfile_name)
Example #9
def share_houses_flats(key=None):
    """

    Parameters
    ----------
    key : str
        Valid keys are: 'total_area', 'avg_area', 'share_area',
        'total_number', 'share_number'.

    Returns
    -------
    dict or pd.DataFrame
    """
    size = pd.Series([1, 25, 50, 70, 90, 110, 130, 150, 170, 190, 210])
    infile = os.path.join(cfg.get('paths', 'data_de21'),
                          cfg.get('general_sources', 'zensus_flats'))
    whg = pd.read_csv(infile,
                      delimiter=';',
                      index_col=[0],
                      header=[0, 1],
                      skiprows=5)
    whg = whg.loc[whg['Insgesamt', 'Insgesamt'].notnull()]
    new_index = []
    states = cfg.get_dict('STATES')
    for i in whg.index:
        new_index.append(states[i[3:-13]])
    whg.index = new_index

    flat = {
        'total_area': pd.DataFrame(),
        'total_number': pd.DataFrame(),
    }
    for f in whg.columns.get_level_values(0).unique():
        df = pd.DataFrame(whg[f].values * size.values,
                          columns=whg[f].columns,
                          index=whg.index)
        flat['total_area'][f] = df.sum(1) - df['Insgesamt']
        flat['total_number'][f] = df['Insgesamt']
    flat['total_area']['1 + 2 Wohnungen'] = (flat['total_area']['1 Wohnung'] +
                                             flat['total_area']['2 Wohnungen'])
    flat['total_number']['1 + 2 Wohnungen'] = (
        flat['total_number']['1 Wohnung'] +
        flat['total_number']['2 Wohnungen'])

    flat['avg_area'] = flat['total_area'].div(flat['total_number'])
    flat['share_area'] = (flat['total_area'].transpose().div(
        flat['total_area']['Insgesamt'])).transpose().round(3)
    flat['share_number'] = (flat['total_number'].transpose().div(
        flat['total_number']['Insgesamt'])).transpose().round(3)

    if key is None:
        return flat
    elif key in flat:
        return flat[key].sort_index()
    else:
        logging.warning(
            "'{0}' is an invalid key for function 'share_houses_flats'".format(
                key))
    return None
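
A usage sketch; the returned tables are indexed by federal state (via the
STATES mapping of the config) with the Zensus building-size classes as
columns:

flat = share_houses_flats()  # dict with all five tables
print(sorted(flat.keys()))
# ['avg_area', 'share_area', 'share_number', 'total_area', 'total_number']

share = share_houses_flats('share_area')  # one table, sorted by state
print(share['1 + 2 Wohnungen'].head())
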
Example #10
def get_ego_demand(overwrite=False):
    egofile = os.path.join(cfg.get('paths', 'demand'),
                           cfg.get('open_ego', 'ego_file'))

    if os.path.isfile(egofile) and not overwrite:
        return pd.read_hdf(egofile, 'demand')
    else:
        return prepare_ego_demand(egofile)
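
This read-or-build pattern (return the processed file if it exists, build it
first otherwise) recurs in most of these examples. A usage sketch:

ego = get_ego_demand()  # builds the hdf5 file on the first call
ego = get_ego_demand(overwrite=True)  # forces a rebuild from the sources
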
Example #11
def get_alkis_with_additional_data():
    filename_alkis = os.path.join(cfg.get('paths', 'fis_broker'),
                                  cfg.get('fis_broker', 'alkis_joined_hdf'))

    if not os.path.isfile(filename_alkis):
        filename_alkis = convert_shp2table()['hdf']

    return pd.read_hdf(filename_alkis, 'alkis')
Example #12
def federal_state_average_weather(year, parameter):
    federal_states = geometries.Geometry(name='federal_states')
    federal_states.load(cfg.get('paths', 'geometry'),
                        cfg.get('geometry', 'federalstates_polygon'))
    filename = os.path.join(
        cfg.get('paths', 'coastdat'),
        'average_{0}_BB_TH_{1}.csv'.format(parameter, year))
    if not os.path.isfile(filename):
        spatial_average_weather(year, federal_states, parameter,
                                outfile=filename)
    return pd.read_csv(filename, index_col=[0], parse_dates=True)
Example #13
def process_alkis_buildings(shapefile_out, table, remove_non_heated=True):
    """

    Parameters
    ----------
    shapefile_out
    table
    remove_non_heated

    Returns
    -------

    """
    path = os.path.join(cfg.get('paths', 'fis_broker'), table, 'shp')
    shapefile_in = os.path.join(path, table + '.shp')

    # Download shp_file if it does not exist
    if not os.path.isfile(shapefile_in):
        shapefile_in = download.download_maps(single='alkis')

    geo_table = gpd.read_file(shapefile_in)

    # Removing parts of the Alkis table:
    # Bauart_sch == 0 : Data sets with Bauart_sch > 0 are building parts
    # LageZurErd != 1200 : Remove underground buildings
    logging.info("Length of data set before removing parts: {0}".format(
        len(geo_table)))
    geo_table = geo_table[geo_table['Bauart_sch'] == 0]
    geo_table = geo_table[geo_table['LageZurErd'] != 1200]

    # Remove all data sets that are marked as non-heated in the alkis heat
    # factor table if remove_non_heated is set to True.
    if remove_non_heated is True:
        filename_heat_factor = os.path.join(
            cfg.get('paths', 'data_berlin'),
            cfg.get('oeq', 'alkis_heat_factor_table'))
        heat_factor = pd.read_csv(filename_heat_factor, index_col=[0])
        non_heated = list(heat_factor.loc[heat_factor.heat_factor == 0].index)
        geo_table = geo_table[~geo_table['Gebaeudefu'].isin(non_heated)]

    logging.info("Length of data set after removing parts: {0}".format(
        len(geo_table)))

    # Calculate the perimeter and area of the polygons and add it as columns
    logging.info("Calculate perimeter and area of each polygon...")
    geo_table = geo_table.to_crs({'init': 'epsg:3035'})
    geo_table['area'] = geo_table['geometry'].area
    geo_table['perimeter'] = geo_table['geometry'].length
    geo_table = geo_table.to_crs({'init': 'epsg:4326'})

    # Dump table as new shape_file
    logging.info("Dump new table to shp-file.")
    geo_table.to_file(shapefile_out)
    return shapefile_out
Example #14
def prepare_de_file(overwrite=False):
    """Convert demand file. CET index and Germany's load only."""
    de_file = os.path.join(cfg.get('paths', 'entsoe'),
                           cfg.get('entsoe', 'de_file'))
    if not os.path.isfile(de_file) or overwrite:
        ts = read_original_timeseries_file(overwrite)
        for col in ts.columns:
            if 'DE' not in col:
                ts.drop(col, axis=1, inplace=True)

        ts.to_csv(de_file)
Example #15
def load_opsd_file(category, overwrite, prepared=True):
    if prepared:
        prepared_file_name = os.path.join(
            cfg.get('paths', 'opsd'),
            cfg.get('opsd', 'cleaned_csv_file_pattern').format(cat=category))
        if not os.path.isfile(prepared_file_name) or overwrite:
            df = prepare_opsd_file(category, prepared_file_name, overwrite)
        else:
            df = pd.read_csv(prepared_file_name, index_col=[0])
    else:
        df = load_original_opsd_file(category, overwrite)
    return df
Example #16
def create_pvlib_sets():
    """Create pvlib parameter sets from the solar.ini file.

    Returns
    -------
    dict
    """
    # get module and inverter parameter from sandia database
    sandia_modules = pvlib.pvsystem.retrieve_sam('sandiamod')
    sapm_inverters = pvlib.pvsystem.retrieve_sam('sandiainverter')

    pvlib_sets = cfg.get_list('solar', 'set_list')

    pvsets = {}
    for pvlib_set in pvlib_sets:
        set_name = cfg.get(pvlib_set, 'pv_set_name')
        module_name = cfg.get(pvlib_set, 'module_name')
        module_key = cfg.get(pvlib_set, 'module_key')
        inverter = cfg.get(pvlib_set, 'inverter_name')
        azimuth_angles = cfg.get_list(pvlib_set, 'surface_azimuth')
        tilt_angles = cfg.get_list(pvlib_set, 'surface_tilt')
        albedo_values = cfg.get_list(pvlib_set, 'albedo')

        set_idx = 0
        pvsets[set_name] = {}
        for t in tilt_angles:
            if t == '0':
                az_angles = (0, )
            else:
                az_angles = azimuth_angles
            for a in az_angles:
                for alb in albedo_values:
                    set_idx += 1
                    pvsets[set_name][set_idx] = {
                        'module_parameters': sandia_modules[module_name],
                        'inverter_parameters': sapm_inverters[inverter],
                        'surface_azimuth': float(a),
                        'surface_tilt': t,
                        'albedo': float(alb)
                    }
                    pvsets[set_name][set_idx]['p_peak'] = (
                        pvsets[set_name][set_idx]['module_parameters'].Impo *
                        pvsets[set_name][set_idx]['module_parameters'].Vmpo)
                    pvsets[set_name][set_idx]['name'] = "_".join([
                        module_key, inverter[:3],
                        "tlt{}".format(t[:3].rjust(3, '0')),
                        "az{}".format(str(a).rjust(3, '0')),
                        "alb{}".format(str(alb).replace('.', ''))
                    ])
                    logging.debug("PV set: {}".format(
                        pvsets[set_name][set_idx]['name']))

    return pvsets
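
A usage sketch; the set names, module keys and angle lists all come from
solar.ini, so the printed values depend entirely on that file:

pvsets = create_pvlib_sets()
for set_name, subsets in pvsets.items():
    print(set_name, '->', len(subsets), 'parameter sets')
    first = subsets[1]  # sub-sets are numbered from 1
    print(first['name'], first['p_peak'])
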
Example #17
def emissions_from_znes(src):

    znes = pd.read_csv(os.path.join(
        cfg.get('paths', 'static_sources'),
        cfg.get('static_sources', 'znes_flens_data')),
                       skiprows=1,
                       header=[0, 1],
                       index_col=[0])
    znes['emission', 'value'] /= 1.0e+3  # gCO2 / J
    for fuel in znes.index:
        src[fuel.lower(), 'emission'] = znes.loc[fuel, ('emission', 'value')]
    return src
Example #18
def get_conversion_balance(year):
    fn = os.path.join(
        cfg.get('paths', 'static_sources'),
        cfg.get('energy_balance', 'energy_balance_states_conversion'))

    eb = pd.read_csv(fn, index_col=[0, 1, 2, 3])
    eb.rename(columns=cfg.get_dict('COLUMN_TRANSLATION'), inplace=True)
    eb.sort_index(axis=0, inplace=True)
    eb = eb.apply(lambda x: pd.to_numeric(x, errors='coerce')).fillna(0)
    eb = eb.groupby(by=cfg.get_dict('FUEL_GROUPS'), axis=1).sum()
    eb = fix_states(year, eb)
    return eb
Example #19
def get_pp_by_year(year, capacity_in=False, overwrite_capacity=False):
    """

    Parameters
    ----------
    capacity_in : bool
        Set to True if a capacity_in column is present.
    year : int
    overwrite_capacity : bool
        By default (False) a new column "capacity_<year>" is created. If set to
        True the old capacity column will be overwritten.

    Returns
    -------

    """
    filename = os.path.join(cfg.get('paths', 'powerplants'),
                            cfg.get('powerplants', 'reegis_pp'))
    logging.info("Get reegis power plants for {0}.".format(year))
    if not os.path.isfile(filename):
        msg = "File '{0}' does not exist. Will create it from reegis file."
        logging.debug(msg.format(filename))
        filename = pp_opsd2reegis()
    pp = pd.read_hdf(filename, 'pp', mode='r')

    filter_columns = ['capacity_{0}']

    if capacity_in:
        filter_columns.append('capacity_in_{0}')

    # Get all powerplants for the given year.
    # If com_month exist the power plants will be considered month-wise.
    # Otherwise the commission/decommission within the given year is not
    # considered.
    for fcol in filter_columns:
        filter_column = fcol.format(year)
        orig_column = fcol[:-4]
        c1 = (pp['com_year'] < year) & (pp['decom_year'] > year)
        pp.loc[c1, filter_column] = pp.loc[c1, orig_column]

        c2 = pp['com_year'] == year
        pp.loc[c2, filter_column] = (pp.loc[c2, orig_column] *
                                     (12 - pp.loc[c2, 'com_month']) / 12)
        c3 = pp['decom_year'] == year
        pp.loc[c3, filter_column] = (pp.loc[c3, orig_column] *
                                     pp.loc[c3, 'com_month'] / 12)

        if overwrite_capacity:
            pp[orig_column] = 0
            pp[orig_column] = pp[filter_column]
            del pp[filter_column]

    return pp
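
A usage sketch; by default the year-adjusted values end up in a new
capacity_<year> column, with overwrite_capacity=True they replace the
capacity column itself:

pp = get_pp_by_year(2014)
print(pp['capacity_2014'].sum())  # capacity weighted by com/decom months

pp = get_pp_by_year(2014, overwrite_capacity=True)
print(pp['capacity'].sum())  # same values, original column name
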
Example #20
def get_entsoe_load(year):
    filename = os.path.join(cfg.get('paths', 'entsoe'),
                            cfg.get('entsoe', 'load_file'))
    if not os.path.isfile(filename):
        prepare_entsoe_timeseries()

    # Read entsoe time series for the given year
    f = pd.datetime(year, 1, 1, 0)
    t = pd.datetime(year, 12, 31, 23)
    logging.info("Read entsoe load series from {0} to {1}".format(f, t))
    df = pd.read_hdf(filename, 'entsoe')
    return df.loc[f:t]
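
A usage sketch; the returned frame holds one calendar year of hourly values:

load = get_entsoe_load(2014)
print(len(load))  # 8760 rows for a non-leap year
print(load['DE_load_'].max())  # peak load of the year
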
Example #21
def fetch_coastdat2_year_from_db(years=None, overwrite=False):
    """Fetch coastDat2 weather data sets from db and store it to hdf5 files.
    This files relies on the RLI-database structure and a valid access to the
    internal database of the Reiner Lemoine Institut. Contact the author for
    more information or use the hdf5 files of the reegis weather repository:
    https://github.com/...

    [email protected]

    Parameters
    ----------
    overwrite : boolean
        Skip existing files if set to False.
    years : list of integer
        Years to fetch.
    """
    weather = os.path.join(cfg.get('paths', 'weather'),
                           cfg.get('weather', 'file_pattern'))
    geometry = os.path.join(cfg.get('paths', 'geometry'),
                            cfg.get('geometry', 'germany_polygon'))

    polygon = wkt.loads(
        pd.read_csv(geometry, index_col='gid', squeeze=True)[0])

    if years is None:
        years = range(1980, 2020)

    try:
        conn = db.connection()
    except exc.OperationalError:
        conn = None
    for year in years:
        if not os.path.isfile(weather.format(year=str(year))) or overwrite:
            logging.info("Fetching weather data for {0}.".format(year))

            try:
                weather_sets = coastdat.get_weather(conn, polygon, year)
            except AttributeError:
                logging.warning("No database connection found.")
                weather_sets = list()
            if len(weather_sets) > 0:
                logging.info("Success. Store weather data to {0}.".format(
                    weather.format(year=str(year))))
                store = pd.HDFStore(weather.format(year=str(year)), mode='w')
                for weather_set in weather_sets:
                    logging.debug(weather_set.name)
                    store['A' + str(weather_set.name)] = weather_set.data
                store.close()
            else:
                logging.warning("No weather data found for {0}.".format(year))
        else:
            logging.info("Weather data for {0} exists. Skipping.".format(year))
Example #22
def split_timeseries_file(overwrite=False, csv=False):
    logging.info("Splitting time series.")
    path_pattern = os.path.join(cfg.get('paths', 'entsoe'), '{0}')
    de_file = path_pattern.format(cfg.get('entsoe', 'de_file'))

    if not os.path.isfile(de_file) or overwrite:
        prepare_de_file(overwrite)

    de_ts = pd.read_csv(de_file,
                        index_col='utc_timestamp',
                        parse_dates=True,
                        date_parser=dateutil.parser.parse)

    berlin = pytz.timezone('Europe/Berlin')
    end_date = berlin.localize(datetime.datetime(2015, 1, 1, 0, 0, 0))

    de_ts.loc[de_ts.index < end_date,
              'DE_load_'] = (de_ts.loc[de_ts.index < end_date, 'DE_load_old'])
    de_ts.loc[de_ts.index >= end_date,
              'DE_load_'] = (de_ts.loc[de_ts.index >= end_date, 'DE_load_new'])

    load = pd.DataFrame(de_ts[pd.notnull(de_ts['DE_load_'])]['DE_load_'],
                        columns=['DE_load_'])

    re_columns = [
        'DE_solar_capacity', 'DE_solar_generation', 'DE_solar_profile',
        'DE_wind_capacity', 'DE_wind_generation', 'DE_wind_profile',
        'DE_wind_offshore_capacity', 'DE_wind_offshore_generation',
        'DE_wind_offshore_profile', 'DE_wind_onshore_capacity',
        'DE_wind_onshore_generation', 'DE_wind_onshore_profile'
    ]
    re_subset = [
        'DE_solar_capacity', 'DE_solar_generation', 'DE_solar_profile',
        'DE_wind_capacity', 'DE_wind_generation', 'DE_wind_profile'
    ]

    renewables = de_ts.dropna(subset=re_subset, how='any')[re_columns]

    if csv:
        load_file = path_pattern.format(cfg.get('entsoe', 'load_file_csv'))
    else:
        load_file = path_pattern.format(cfg.get('entsoe', 'load_file'))

    if not os.path.isfile(load_file) or overwrite:
        if csv:
            load.to_csv(load_file)
        else:
            load.to_hdf(load_file, 'entsoe')

    re_file = path_pattern.format(cfg.get('entsoe', 'renewables_file'))
    if not os.path.isfile(re_file) or overwrite:
        renewables.to_csv(re_file)
Example #23
def prices_2014_from_znes(src, force_znes=False):
    znes = pd.read_csv(os.path.join(
        cfg.get('paths', 'static_sources'),
        cfg.get('static_sources', 'znes_flens_data')),
                       skiprows=1,
                       header=[0, 1],
                       index_col=[0])
    znes['fuel price', 'value'] /= 1.0e+9  # EUR / J
    for fuel in znes.index:
        if src.get((fuel.lower(), 'costs')) is None or force_znes:
            src.loc[2014, (fuel.lower(),
                           'costs')] = znes.loc[fuel, ('fuel price', 'value')]
    return src
Example #24
def convert_shp2table():
    filename = {
        'hdf':
        os.path.join(cfg.get('paths', 'fis_broker'),
                     cfg.get('fis_broker', 'alkis_joined_hdf')),
        'csv':
        os.path.join(cfg.get('paths', 'fis_broker'),
                     cfg.get('fis_broker', 'alkis_joined_csv')),
        'geo':
        os.path.join(cfg.get('paths', 'fis_broker'),
                     cfg.get('fis_broker', 'alkis_geometry_csv')),
        'shp':
        os.path.join(cfg.get('paths', 'fis_broker'),
                     cfg.get('fis_broker', 'alkis_joined_shp'))
    }

    if not os.path.isfile(filename['shp']):
        filename['shp'] = merge_maps()
    alkis = gpd.read_file(filename['shp'])

    alkis.to_csv(filename['csv'])
    data = pd.read_csv(filename['csv'], index_col=[0])
    data['gml_id'] = data['gml_id'].str.replace(
        's_wfs_alkis_gebaeudeflaechen.', '')
    data['SCHL5'] = data['SCHL5'].str.replace('s_ISU5_2015_UA.', '')
    data.set_index('gml_id', drop=True, inplace=True)
    data['geometry'].to_csv(filename['geo'])
    del data['geometry']

    data.to_csv(filename['csv'])
    data.to_hdf(filename['hdf'], 'alkis')
    return filename
Example #25
def get_heat_profiles_by_state(year, to_csv=False, divide_domestic=False):
    building_class = {}
    for (k, v) in cfg.get_dict('building_class').items():
        for s in v.split(', '):
            building_class[s] = int(k)

    demand_state = heat_demand(year).sort_index()

    if divide_domestic:
        house_flats = share_houses_flats('share_area')
        for state in demand_state.index.get_level_values(0).unique():
            dom = demand_state.loc[state, 'domestic']
            demand_state.loc[(state, 'domestic_efh'), ] = (
                dom * house_flats.loc[state, '1 + 2 Wohnungen'])
            demand_state.sort_index(axis=0, inplace=True)
            dom = demand_state.loc[state, 'domestic']
            demand_state.loc[(state, 'domestic_mfh'), ] = (
                dom * house_flats.loc[state, '3 und mehr Wohnungen'])
            demand_state.sort_index(axis=0, inplace=True)

        demand_state.sort_index(inplace=True)
        demand_state.drop('domestic', level=1, inplace=True)

    temperatures = reegis_tools.coastdat.federal_state_average_weather(
        year, 'temp_air')

    temperatures = temperatures.tz_localize('UTC').tz_convert('Europe/Berlin')

    my_columns = pd.MultiIndex(levels=[[], [], []], labels=[[], [], []])
    heat_profiles = pd.DataFrame(columns=my_columns)

    for region in demand_state.index.get_level_values(0).unique():
        logging.info("Creating heat profile for {}".format(region))
        tmp = demand_state.loc[region].groupby(level=0).sum()
        temperature = temperatures[region] - 273
        for fuel in tmp.columns:
            logging.debug("{0} - {1} ({2})".format(region, fuel,
                                                   building_class[region]))
            for sector in tmp.index:
                heat_profiles[(region, sector,
                               fuel)] = (get_heat_profile_from_demandlib(
                                   temperature, tmp.loc[sector, fuel], sector,
                                   year, building_class[region]))
    heat_profiles.sort_index(axis=1, inplace=True)
    if to_csv:
        heat_profiles.to_csv(
            os.path.join(
                cfg.get('paths', 'demand'),
                cfg.get('demand', 'heat_profile_state').format(year=year)))
    return heat_profiles
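
A usage sketch; the result carries a three-level column index (state, sector,
fuel), so one state can be selected directly. The state code 'BB' is an
assumption:

profiles = get_heat_profiles_by_state(2014, divide_domestic=True)
print(profiles.columns.nlevels)  # 3
print(profiles['BB'].sum())  # annual heat demand per sector and fuel
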
Example #26
def get_states_balance(year=None, grouped=False, overwrite=False):
    fname = os.path.join(cfg.get('paths', 'energy_balance'),
                         cfg.get('energy_balance', 'energy_balance_edited'))
    if not os.path.isfile(fname) or overwrite:
        edit_balance()
    eb = pd.read_csv(fname, index_col=[0, 1, 2])
    if grouped:
        eb = eb.groupby(by=cfg.get_dict('FUEL_GROUPS'), axis=1).sum()
    eb.index = eb.index.set_names(['year', 'state', 'sector'])

    if year is not None:
        eb = eb.loc[year]

    return eb
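
A usage sketch; the index levels are named year, state and sector, and
selecting one year drops the first level:

eb = get_states_balance(2014, grouped=True)
print(eb.index.names)  # ['state', 'sector']
print(eb.loc['BB'].head())  # balance of one state (code assumed)
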
Example #27
def get_de21_slp_profile(year, annual_demand=None, overwrite=False):
    outfile = os.path.join(
        cfg.get('paths', 'demand'),
        cfg.get('demand', 'ego_profile_pattern').format(year=year))
    if not os.path.isfile(outfile) or overwrite:
        create_de21_slp_profile(year, outfile)

    de21_profile = pd.read_csv(outfile, index_col=[0],
                               parse_dates=True).multiply(1000)

    if annual_demand is not None:
        de21_profile = de21_profile.div(
            de21_profile.sum().sum()).multiply(annual_demand)

    return de21_profile
Example #28
def main(year):
    stopwatch()

    sc = de21.Scenario(name='basic', year=year)
    csv_path = os.path.join(cfg.get('paths', 'scenario'), 'basic', '{year}',
                            'csv')

    logging.info("Read scenario from csv collection: {0}".format(stopwatch()))
    sc.load_csv(csv_path.format(year=str(year)))

    logging.info("Add nodes to the EnergySystem: {0}".format(stopwatch()))
    sc.add_nodes2solph()

    # Save energySystem to '.graphml' file.
    sc.plot_nodes(filename='/home/uwe/de21',
                  remove_nodes_with_substrings=['bus_cs'])

    logging.info("Create the concrete model: {0}".format(stopwatch()))
    sc.create_model()

    logging.info("Solve the optimisation model: {0}".format(stopwatch()))
    sc.solve()

    logging.info("Solved. Dump results: {0}".format(stopwatch()))
    sc.dump_results_to_es()

    logging.info("All done. de21 finished without errors: {0}".format(
        stopwatch()))
Example #29
def main(year):
    stopwatch()

    sc = berlin_hp.Scenario(name='berlin_basic', year=year)

    path = os.path.join(cfg.get('paths', 'scenario'), 'berlin_basic',
                        str(year))

    logging.info("Read scenario from excel-sheet: {0}".format(stopwatch()))
    excel_fn = os.path.join(path, '_'.join([sc.name, str(year)]) + '.xls')

    if not os.path.isfile(excel_fn):
        berlin_hp.basic_scenario.create_basic_scenario(year)

    sc.load_excel(excel_fn)
    sc.check_table('time_series')

    logging.info("Add nodes to the EnergySystem: {0}".format(stopwatch()))
    sc.add_nodes2solph()

    # Save energySystem to '.graphml' file.
    sc.plot_nodes(filename=os.path.join(path, 'berlin_hp'),
                  remove_nodes_with_substrings=['bus_cs'])

    logging.info("Create the concrete model: {0}".format(stopwatch()))
    sc.create_model()

    logging.info("Solve the optimisation model: {0}".format(stopwatch()))
    sc.solve()

    logging.info("Solved. Dump results: {0}".format(stopwatch()))
    sc.dump_es(os.path.join(path, 'berlin_hp.reegis'))

    logging.info("All done. de21 finished without errors: {0}".format(
        stopwatch()))
Example #30
def spatial_preparation_power_plants(pp):
    """Add spatial names to DataFrame. Three columns will be added to the
    power plant table:

    federal_states: The federal state of Germany
    model_region: The name of the model region defined by the user.
    coastdat: The id of the nearest coastdat weather data set.

    Parameters
    ----------
    pp : reegis_tools.Geometry
        An object containing Germany's power plants.

    Returns
    -------
    reegis_tools.Geometry

    """

    if pp.gdf is None:
        logging.info("Create GeoDataFrame from lat/lon.")
        pp.create_geo_df()

    logging.info("Remove invalid geometries")
    pp.remove_invalid_geometries()

    # Add column with name of the federal state (Bayern, Berlin,...)
    federal_states = geo.Geometry('federal states')
    federal_states.load(cfg.get('paths', 'geometry'),
                        cfg.get('geometry', 'federalstates_polygon'))
    pp.gdf = geo.spatial_join_with_buffer(pp, federal_states)

    # Add country code to federal state if country code is not 'DE'.
    if 'country_code' in pp.gdf.columns:
        country_codes = list(pp.gdf.country_code.unique())
        country_codes.remove('DE')
        for c_code in country_codes:
            pp.gdf.loc[pp.gdf.country_code == c_code,
                       'federal_states'] = c_code

    # Add column with coastdat id
    coastdat = geo.Geometry('coastdat2')
    coastdat.load(cfg.get('paths', 'geometry'),
                  cfg.get('coastdat', 'coastdatgrid_polygon'))
    pp.gdf = geo.spatial_join_with_buffer(pp, coastdat)

    return pp
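
A usage sketch of the whole chain, assuming geo refers to
reegis_tools.geometries and that a plain lat/lon table is enough for
create_geo_df():

import pandas as pd
import reegis_tools.geometries as geo

# Dummy table with two plants (Berlin and Munich coordinates).
df = pd.DataFrame({'lat': [52.52, 48.14], 'lon': [13.40, 11.58]})
pp = geo.Geometry(name='power plants', df=df)
pp = spatial_preparation_power_plants(pp)
print(pp.gdf['federal_states'].unique())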