Example #1
def ownership_eia860(start_date=None, end_date=None, testing=False):
    """
    Pull a useful set of fields from the ownership_eia860 table.

    Args:
        start_date (date): date of the earliest data to retrieve
        end_date (date): date of the latest data to retrieve
        testing (bool): True if we're connecting to the pudl_test DB, False
            if we're connecting to the live PUDL DB. False by default.
    Returns:
        out_df (pandas dataframe)
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    o_eia860_tbl = pt['ownership_eia860']
    o_eia860_select = sa.sql.select([o_eia860_tbl, ])
    o_df = pd.read_sql(o_eia860_select, pudl_engine)
    o_df = o_df.rename(columns={'plant_id': 'plant_id_eia'})

    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)
    pu_eia = pu_eia[['plant_id_eia', 'plant_id_pudl', 'util_id_pudl',
                     'report_date']]

    o_df['report_date'] = pd.to_datetime(o_df.report_date)
    out_df = pd.merge(o_df, pu_eia,
                      how='left', on=['report_date', 'plant_id_eia'])

    out_df = out_df.drop(['id'], axis=1)

    out_df = out_df.dropna(subset=[
        'plant_id_eia',
        'plant_id_pudl',
        'operator_id',
        'util_id_pudl',
        'generator_id',
        'ownership_id',
    ])

    first_cols = [
        'report_date',
        'plant_id_eia',
        'plant_id_pudl',
        'plant_name',
        'operator_id',
        'util_id_pudl',
        'operator_name',
        'generator_id',
        'ownership_id',
        'owner_name',
    ]

    # Re-arrange the columns for easier readability:
    out_df = organize_cols(out_df, first_cols)
    out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)
    out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)
    out_df = extend_annual(out_df, start_date=start_date, end_date=end_date)

    return out_df
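
A minimal usage sketch (assuming an initialized PUDL DB is available and this
module's imports -- pudl, pt, sa, pd -- are in scope; the date range shown is
illustrative):

own_df = ownership_eia860(start_date='2011-01-01',
                          end_date='2015-12-31',
                          testing=True)
# Each row relates one owner to one generator in one report year:
own_df[['report_date', 'plant_name', 'generator_id', 'owner_name']].head()
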
Example #2
def fuel_ferc1(testing=False):
    """
    Pull a useful dataframe related to FERC Form 1 fuel information.

    This function pulls the FERC Form 1 fuel data, and joins in the name of the
    reporting utility, as well as the PUDL IDs for that utility and the plant,
    allowing integration with other PUDL tables.

    Also calculates the total heat content consumed for each fuel, and the
    total cost for each fuel. Total cost is calculated in two different ways,
    on the basis of fuel units consumed (e.g. tons of coal, mcf of gas) and
    on the basis of heat content consumed. In theory these should give the
    same value for total cost, but this is not always the case.

    TODO: Check whether this includes all of the fuel_ferc1 fields...

    Args:
        testing (bool): True if we're using the pudl_test DB, False if we're
            using the live PUDL DB.  False by default.
    Returns:
        out_df: a pandas dataframe.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    fuel_ferc1_tbl = pt['fuel_ferc1']
    fuel_ferc1_select = sa.sql.select([fuel_ferc1_tbl, ])
    fuel_df = pd.read_sql(fuel_ferc1_select, pudl_engine)

    # We have two different ways of assessing the total cost of fuel given cost
    # per unit delivered and cost per mmbtu. They *should* be the same, but we
    # know they aren't always. Calculate both so we can compare them.
    fuel_df['fuel_consumed_total_mmbtu'] = \
        fuel_df['fuel_qty_burned'] * fuel_df['fuel_avg_mmbtu_per_unit']
    fuel_df['fuel_consumed_total_cost_mmbtu'] = \
        fuel_df['fuel_cost_per_mmbtu'] * fuel_df['fuel_consumed_total_mmbtu']
    fuel_df['fuel_consumed_total_cost_unit'] = \
        fuel_df['fuel_cost_per_unit_burned'] * fuel_df['fuel_qty_burned']

    pu_ferc = plants_utils_ferc1(testing=testing)

    out_df = pd.merge(fuel_df, pu_ferc, on=['respondent_id', 'plant_name'])
    out_df = out_df.drop('id', axis=1)

    first_cols = [
        'report_year',
        'respondent_id',
        'util_id_pudl',
        'respondent_name',
        'plant_id_pudl',
        'plant_name'
    ]

    out_df = organize_cols(out_df, first_cols)

    return out_df
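
As the docstring warns, the two total-cost estimates should agree but don't
always. A quick sketch for inspecting the discrepancy (assumes a populated
PUDL DB; the 5% threshold is arbitrary):

fuel_df = fuel_ferc1(testing=True)
rel_diff = (fuel_df.fuel_consumed_total_cost_mmbtu -
            fuel_df.fuel_consumed_total_cost_unit).abs() \
    / fuel_df.fuel_consumed_total_cost_unit
print((rel_diff > 0.05).mean())  # fraction of records differing by >5%
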
Example #3
def plants_eia860(start_date=None, end_date=None, testing=False):
    """Pull all fields from the EIA860 Plants table."""
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    plants_eia860_tbl = pt['plants_eia860']
    plants_eia860_select = sa.sql.select([plants_eia860_tbl])
    if start_date is not None:
        start_date = pd.to_datetime(start_date)
        plants_eia860_select = plants_eia860_select.where(
            plants_eia860_tbl.c.report_date >= start_date
        )
    if end_date is not None:
        end_date = pd.to_datetime(end_date)
        plants_eia860_select = plants_eia860_select.where(
            plants_eia860_tbl.c.report_date <= end_date
        )
    plants_eia860_df = pd.read_sql(plants_eia860_select, pudl_engine)
    plants_eia860_df = \
        plants_eia860_df.rename(columns={'plant_id': 'plant_id_eia'})

    plants_eia_tbl = pt['plants_eia']
    plants_eia_select = sa.sql.select([
        plants_eia_tbl.c.plant_id,
        plants_eia_tbl.c.plant_id_pudl,
    ])
    plants_eia_df = pd.read_sql(plants_eia_select, pudl_engine)
    plants_eia_df = plants_eia_df.rename(columns={'plant_id': 'plant_id_eia'})

    out_df = pd.merge(plants_eia860_df, plants_eia_df,
                      how='left', on=['plant_id_eia', ])

    utils_eia_tbl = pt['utilities_eia']
    utils_eia_select = sa.sql.select([
        utils_eia_tbl.c.operator_id,
        utils_eia_tbl.c.util_id_pudl,
    ])
    utils_eia_df = pd.read_sql(utils_eia_select, pudl_engine)

    out_df = pd.merge(out_df, utils_eia_df,
                      how='left', on=['operator_id', ])

    out_df = out_df.drop(['id'], axis=1)
    first_cols = [
        'report_date',
        'operator_id',
        'util_id_pudl',
        'operator_name',
        'plant_id_eia',
        'plant_id_pudl',
        'plant_name',
    ]

    out_df = organize_cols(out_df, first_cols)
    out_df = extend_annual(out_df, start_date=start_date, end_date=end_date)
    return out_df
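
A usage sketch (assumes a populated PUDL DB; the dates are illustrative and
inclusive):

plants = plants_eia860(start_date='2012-01-01', end_date='2014-12-31')
plants[['report_date', 'plant_id_eia', 'plant_id_pudl', 'plant_name']].head()
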
Example #4
def plants_utils_ferc1(testing=False):
    """Build a dataframe of useful FERC Plant & Utility information."""
    pudl_engine = pudl.db_connect_pudl(testing=testing)

    utils_ferc_tbl = pt['utilities_ferc']
    utils_ferc_select = sa.sql.select([utils_ferc_tbl, ])
    utils_ferc = pd.read_sql(utils_ferc_select, pudl_engine)

    plants_ferc_tbl = pt['plants_ferc']
    plants_ferc_select = sa.sql.select([plants_ferc_tbl, ])
    plants_ferc = pd.read_sql(plants_ferc_select, pudl_engine)

    out_df = pd.merge(plants_ferc, utils_ferc, on='respondent_id')
    return out_df
Example #5
def plants_utils_eia(start_date=None, end_date=None, testing=False):
    """
    Create a dataframe of plant and utility IDs and names from EIA.

    Returns a pandas dataframe with the following columns:
    - report_date (in which data was reported)
    - plant_name (from EIA860)
    - plant_id_eia (from EIA860)
    - plant_id_pudl
    - operator_id (from EIA860)
    - operator_name (from EIA860)
    - util_id_pudl

    Issues:
        - EIA 860 data has only been integrated for 2011-2015. Function needs
          to take start_date & end_date and synthesize the earlier and later
          years if need be.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    # Contains the one-to-one mapping of EIA plants to their operators, but
    # we only have the 860 data integrated for 2011 forward right now.
    plants_eia = plants_eia860(start_date=start_date,
                               end_date=end_date,
                               testing=testing)
    utils_eia = utilities_eia860(start_date=start_date,
                                 end_date=end_date,
                                 testing=testing)
    # to avoid duplicate columns on the merge...
    plants_eia = plants_eia.drop(['util_id_pudl', 'operator_name'], axis=1)
    out_df = pd.merge(plants_eia, utils_eia,
                      how='left', on=['report_date', 'operator_id'])

    cols_to_keep = ['report_date',
                    'plant_id_eia',
                    'plant_name',
                    'plant_id_pudl',
                    'operator_id',
                    'operator_name',
                    'util_id_pudl']

    out_df = out_df[cols_to_keep]
    out_df = out_df.dropna()
    out_df.plant_id_pudl = out_df.plant_id_pudl.astype(int)
    out_df.util_id_pudl = out_df.util_id_pudl.astype(int)
    return out_df
Example #6
def boiler_generator_assn_eia860(start_date=None, end_date=None,
                                 testing=False):
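    """Pull all records from the boiler_generator_assn_eia860 table.

    Optionally restrict the records to a given date range via start_date and
    end_date (inclusive), and extend the annual records to cover that range
    via extend_annual, as in the other EIA 860 pull functions.
    """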
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    bga_eia860_tbl = pt['boiler_generator_assn_eia860']
    bga_eia860_select = sa.sql.select([bga_eia860_tbl])

    if start_date is not None:
        start_date = pd.to_datetime(start_date)
        bga_eia860_select = bga_eia860_select.where(
            bga_eia860_tbl.c.report_date >= start_date
        )
    if end_date is not None:
        end_date = pd.to_datetime(end_date)
        bga_eia860_select = bga_eia860_select.where(
            bga_eia860_tbl.c.report_date <= end_date
        )
    bga_eia860_df = pd.read_sql(bga_eia860_select, pudl_engine)
    bga_eia860_df = bga_eia860_df.rename(columns={'plant_id': 'plant_id_eia'})
    out_df = extend_annual(bga_eia860_df,
                           start_date=start_date, end_date=end_date)
    return out_df
Example #7
def test_init_db():
    """Create a fresh PUDL DB and pull in some FERC1 & EIA923 data."""
    ferc1.init_db(ferc1_tables=constants.ferc1_default_tables,
                  refyear=2015,
                  years=range(2007, 2016),
                  def_db=True,
                  verbose=True,
                  testing=True)

    pudl.init_db(ferc1_tables=constants.ferc1_pudl_tables,
                 ferc1_years=range(2007, 2016),
                 eia923_tables=constants.eia923_pudl_tables,
                 eia923_years=range(2009, 2017),
                 verbose=True,
                 debug=False,
                 testing=True)

    ferc1_engine = ferc1.db_connect_ferc1(testing=True)
    ferc1.drop_tables_ferc1(ferc1_engine)

    pudl_engine = pudl.db_connect_pudl(testing=True)
    pudl.drop_tables_pudl(pudl_engine)
Example #8
def plants_steam_ferc1(testing=False):
    """
    Select and join some useful fields from the FERC Form 1 steam table.

    Select the FERC Form 1 steam plant table entries, and add in the name of
    the reporting utility and the PUDL IDs for the plant and utility, for
    readability and integration with other tables that have PUDL IDs.

    Args:
        testing (bool): True if we're using the pudl_test DB, False if we're
            using the live PUDL DB.  False by default.

    Returns:
        out_df: a pandas dataframe.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    steam_ferc1_tbl = pt['plants_steam_ferc1']
    steam_ferc1_select = sa.sql.select([steam_ferc1_tbl, ])
    steam_df = pd.read_sql(steam_ferc1_select, pudl_engine)

    pu_ferc = plants_utils_ferc1(testing=testing)

    out_df = pd.merge(steam_df, pu_ferc, on=['respondent_id', 'plant_name'])

    first_cols = [
        'report_year',
        'respondent_id',
        'util_id_pudl',
        'respondent_name',
        'plant_id_pudl',
        'plant_name'
    ]

    out_df = organize_cols(out_df, first_cols)

    return out_df
Example #9
def fercplants(plant_tables=('f1_steam', 'f1_gnrt_plant',
                             'f1_hydro', 'f1_pumped_storage'),
               years=constants.working_years['ferc1'],
               new=True,
               min_capacity=5.0):
    """
    Generate a list of FERC plants for matching with EIA plants.

    There are several kinds of FERC plants, with different information stored
    in different FERC database tables. FERC doesn't provide any kind of
    plant_id like EIA, so the unique identifier that we're using is a
    combination of the respondent_id (the utility) and plant_name.

    For each table in the FERC DB that contains per-plant information, we'll
    grab the respondent_id and plant_name, and join that with respondent_name
    so that the utility is more readily identifiable.  We'll also add a column
    indicating what table the plant came from, and return a DataFrame with
    those four columns in it, for use in the matching. That matching currently
    happens in an Excel spreadsheet, so you will likely want to output the
    resulting DataFrame as a CSV or XLSX file.

    The function can generate an exhaustive list of plants, or it can only grab
    plants from a particular range of years. It can also optionally grab only
    new plants i.e. those which do not appear in the existing PUDL database.
    This is useful for finding new plants when a new year of FERC data comes
    out.

    Args:
        plant_tables (iterable): The tables in the FERC Form 1 DB whose
            plants you want to get information about.  Can include any of:
            f1_steam, f1_gnrt_plant, f1_hydro, and f1_pumped_storage.
        years (list): The set of years for which you wish to obtain plant by
            plant information.
        new (boolean): If True (the default) then return only those plants
            which appear in the years of FERC data being specified by years,
            and NOT also in the currently initialized PUDL DB.
        min_capacity (float): The smallest size plant, in MW, that should be
            included in the output. This avoids most of the plants being tiny.

    Returns:
        DataFrame: with four columns: respondent_id, respondent_name,
            plant_name, and plant_table.
    """
    # Need to be able to use years outside the "valid" range if we're trying
    # to get new plant ID info...
    if not new:
        for yr in years:
            assert yr in constants.working_years['ferc1']

    okay_tbls = ['f1_steam', 'f1_gnrt_plant', 'f1_hydro', 'f1_pumped_storage']

    # Function only knows how to work with these tables.
    for tbl in plant_tables:
        assert tbl in okay_tbls

    f1_engine = ferc1.db_connect_ferc1()

    # Need to make sure we have a populated metadata object, which isn't
    # always the case, since folks often are not initializing the FERC DB.
    ferc1.define_db(max(constants.working_years['ferc1']),
                    constants.ferc1_working_tables, ferc1.ferc1_meta)
    f1_tbls = ferc1.ferc1_meta.tables

    # FERC doesn't use the same column names for the same values across all
    # of their tables... but all of these are capacity in MW.
    capacity_cols = {
        'f1_steam': 'tot_capacity',
        'f1_gnrt_plant': 'capacity_rating',
        'f1_hydro': 'tot_capacity',
        'f1_pumped_storage': 'tot_capacity'
    }

    rspndnt_tbl = f1_tbls['f1_respondent_id']
    ferc1_plants_all = pd.DataFrame()
    for tbl in plant_tables:
        plant_select = sa.sql.select([
            f1_tbls[tbl].c.respondent_id, f1_tbls[tbl].c.plant_name,
            rspndnt_tbl.c.respondent_name
        ]).distinct().where(
            sa.and_(
                f1_tbls[tbl].c.respondent_id == rspndnt_tbl.c.respondent_id,
                f1_tbls[tbl].c.plant_name != '',
                f1_tbls[tbl].columns[capacity_cols[tbl]] >= min_capacity,
                f1_tbls[tbl].c.report_year.in_(years)))
        # Add all the plants from the current table to our bigger list:
        new_plants = pd.read_sql(plant_select, f1_engine)
        new_plants.respondent_name = new_plants.respondent_name.str.strip()
        new_plants.respondent_name = new_plants.respondent_name.str.title()
        new_plants.plant_name = new_plants.plant_name.str.strip().str.title()
        new_plants['plant_table'] = tbl
        ferc1_plants_all = ferc1_plants_all.append(new_plants[[
            'respondent_id', 'respondent_name', 'plant_name', 'plant_table'
        ]])

    # If we're only trying to get the NEW plants, then we need to see which
    # ones we've already got in the PUDL DB, and look at what's different.
    if new:
        ferc1_plants_all = ferc1_plants_all.set_index(
            ['respondent_id', 'plant_name'])

        pudl_engine = pudl.db_connect_pudl()
        pudl_tbls = pudl.models.PUDLBase.metadata.tables

        ferc1_plants_tbl = pudl_tbls['plants_ferc']
        ferc1_plants_select = sa.sql.select(
            [ferc1_plants_tbl.c.respondent_id,
             ferc1_plants_tbl.c.plant_name]).distinct()
        ferc1_plants_old = pd.read_sql(ferc1_plants_select, pudl_engine)
        ferc1_plants_old = ferc1_plants_old.set_index(
            ['respondent_id', 'plant_name'])

        # Take the difference between the two table indexes -- I.e. get a
        # list of just the index values that appear in the FERC index, but
        # not in the PUDL index.
        new_index = ferc1_plants_all.index.difference(ferc1_plants_old.index)
        ferc1_plants = ferc1_plants_all.loc[new_index].reset_index()
    else:
        ferc1_plants = ferc1_plants_all

    return ferc1_plants
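
Since the docstring notes that the plant matching currently happens in a
spreadsheet, a typical invocation (a sketch; the output file name is
hypothetical) writes the result out as a CSV:

new_ferc_plants = fercplants(plant_tables=['f1_steam', 'f1_hydro'],
                             years=range(2014, 2016),
                             new=True,
                             min_capacity=5.0)
new_ferc_plants.to_csv('new_ferc1_plants.csv', index=False)
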
Example #10
def generation_eia923(freq=None, testing=False,
                      start_date=None, end_date=None):
    """
    Sum net generation by generator at the specified frequency.

    In addition, some human-readable plant and utility names, as well as some
    ID values for joining with other dataframes, are added back into the
    dataframe before it is returned.

    Args:
        freq (str): a pandas timeseries offset alias, used to specify the
            time grouping frequency.
        start_date & end_date: date-like objects, including strings of the
            form 'YYYY-MM-DD', which specify the date range of records to be
            pulled.  Dates are inclusive.
        testing (bool): True if we're using the pudl_test DB, False if we're
            using the live PUDL DB.  False by default.
    Returns:
        out_df: a pandas dataframe.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    g_eia923_tbl = pt['generation_eia923']
    g_eia923_select = sa.sql.select([g_eia923_tbl, ])
    if start_date is not None:
        g_eia923_select = g_eia923_select.where(
            g_eia923_tbl.c.report_date >= start_date
        )
    if end_date is not None:
        g_eia923_select = g_eia923_select.where(
            g_eia923_tbl.c.report_date <= end_date
        )
    g_df = pd.read_sql(g_eia923_select, pudl_engine)
    g_df = g_df.rename(columns={'plant_id': 'plant_id_eia'})

    # Index by date and aggregate net generation.
    # Create a date index for grouping based on freq
    by = ['plant_id_eia', 'generator_id']
    if freq is not None:
        g_df = g_df.set_index(pd.DatetimeIndex(g_df.report_date))
        by = by + [pd.Grouper(freq=freq)]
        g_gb = g_df.groupby(by=by)
        g_df = g_gb['net_generation_mwh'].sum().reset_index()

    # Grab EIA 860 plant and utility specific information:
    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)

    # Merge annual plant/utility data in with the more granular dataframe
    out_df = analysis.merge_on_date_year(g_df, pu_eia, on=['plant_id_eia'])

    if freq is None:
        out_df = out_df.drop(['id'], axis=1)

    # These ID fields are vital -- without them we don't have a complete record
    out_df = out_df.dropna(subset=[
        'plant_id_eia',
        'plant_id_pudl',
        'operator_id',
        'util_id_pudl',
        'generator_id',
    ])

    first_cols = [
        'report_date',
        'plant_id_eia',
        'plant_id_pudl',
        'plant_name',
        'operator_id',
        'util_id_pudl',
        'operator_name',
        'generator_id',
    ]

    # Re-arrange the columns for easier readability:
    out_df = organize_cols(out_df, first_cols)

    out_df['operator_id'] = out_df.operator_id.astype(int)
    out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)
    out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)

    return out_df
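
A usage sketch pulling annual net generation by generator (assumes a
populated PUDL DB; freq='AS' is the year-start offset alias also used
elsewhere in this module):

gen_annual = generation_eia923(freq='AS',
                               start_date='2011-01-01',
                               end_date='2015-12-31',
                               testing=True)
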
Example #11
def boiler_fuel_eia923(freq=None, testing=False,
                       start_date=None, end_date=None):
    """
    Pull records from the boiler_fuel_eia923 table, in a given date range.

    Optionally, aggregate the records over some timescale -- monthly, yearly,
    quarterly, etc. as well as by fuel type within a plant.

    If the records are not being aggregated, all of the database fields are
    available. If they're being aggregated, then we preserve the following
    fields. Per-unit values are re-calculated based on the aggregated totals.
    Totals are summed across whatever time range is being used, within a
    given plant and fuel type.
     - fuel_qty_consumed (sum)
     - fuel_mmbtu_per_unit (weighted average)
     - total_heat_content_mmbtu (sum)
     - sulfur_content_pct (weighted average)
     - ash_content_pct (weighted average)

    In addition, plant and utility names and IDs are pulled in from the EIA
    860 tables.

    Args:
        freq (str): a pandas timeseries offset alias. The original data is
            reported monthly, so the best time frequencies to use here are
            probably month start (freq='MS') and year start (freq='YS').
        start_date & end_date: date-like objects, including strings of the
            form 'YYYY-MM-DD' which will be used to specify the date range of
            records to be pulled.  Dates are inclusive.
        testing (bool): True if we're using the pudl_test DB, False if we're
            using the live PUDL DB.  False by default.

    Returns:
        out_df: a pandas dataframe.

    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    bf_eia923_tbl = pt['boiler_fuel_eia923']
    bf_eia923_select = sa.sql.select([bf_eia923_tbl, ])
    if start_date is not None:
        bf_eia923_select = bf_eia923_select.where(
            bf_eia923_tbl.c.report_date >= start_date
        )
    if end_date is not None:
        bf_eia923_select = bf_eia923_select.where(
            bf_eia923_tbl.c.report_date <= end_date
        )
    bf_df = pd.read_sql(bf_eia923_select, pudl_engine)
    bf_df = bf_df.rename(columns={'plant_id': 'plant_id_eia'})

    # The total heat content is also useful in its own right, and we'll keep it
    # around.  Also needed to calculate average heat content per unit of fuel.
    bf_df['total_heat_content_mmbtu'] = bf_df['fuel_qty_consumed'] * \
        bf_df['fuel_mmbtu_per_unit']

    # Create a date index for grouping based on freq
    by = ['plant_id_eia', 'boiler_id', 'fuel_type_pudl']
    if freq is not None:
        # In order to calculate the weighted average sulfur
        # content and ash content we need to calculate these totals.
        bf_df['total_sulfur_content'] = bf_df['fuel_qty_consumed'] * \
            bf_df['sulfur_content_pct']
        bf_df['total_ash_content'] = bf_df['fuel_qty_consumed'] * \
            bf_df['ash_content_pct']
        bf_df = bf_df.set_index(pd.DatetimeIndex(bf_df.report_date))
        by = by + [pd.Grouper(freq=freq)]
        bf_gb = bf_df.groupby(by=by)

        # Sum up these totals within each group, and recalculate the per-unit
        # values (weighted in this case by fuel_qty_consumed)
        bf_df = bf_gb.agg({'total_heat_content_mmbtu': np.sum,
                           'fuel_qty_consumed': np.sum,
                           'total_sulfur_content': np.sum,
                           'total_ash_content': np.sum})

        bf_df['fuel_mmbtu_per_unit'] = \
            bf_df['total_heat_content_mmbtu'] / bf_df['fuel_qty_consumed']
        bf_df['sulfur_content_pct'] = \
            bf_df['total_sulfur_content'] / bf_df['fuel_qty_consumed']
        bf_df['ash_content_pct'] = \
            bf_df['total_ash_content'] / bf_df['fuel_qty_consumed']
        bf_df = bf_df.reset_index()
        bf_df = bf_df.drop(['total_ash_content', 'total_sulfur_content'],
                           axis=1)

    # Grab some basic plant & utility information to add.
    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)
    out_df = analysis.merge_on_date_year(bf_df, pu_eia, on=['plant_id_eia'])
    if freq is None:
        out_df = out_df.drop(['id'], axis=1)

    out_df = out_df.dropna(subset=[
        'plant_id_eia',
        'plant_id_pudl',
        'operator_id',
        'util_id_pudl',
        'boiler_id',
    ])

    first_cols = [
        'report_date',
        'plant_id_eia',
        'plant_id_pudl',
        'plant_name',
        'operator_id',
        'util_id_pudl',
        'operator_name',
        'boiler_id',
    ]

    # Re-arrange the columns for easier readability:
    out_df = organize_cols(out_df, first_cols)

    out_df['operator_id'] = out_df.operator_id.astype(int)
    out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)
    out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)

    return out_df
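
To illustrate the quantity-weighted averaging described in the docstring with
made-up numbers: 100 units at 1.0% sulfur plus 300 units at 2.0% sulfur
aggregate to (100*1.0 + 300*2.0) / (100 + 300) = 1.75%, not the naive mean of
1.5%:

qty = pd.Series([100.0, 300.0])
sulfur_pct = pd.Series([1.0, 2.0])
weighted_pct = (qty * sulfur_pct).sum() / qty.sum()  # 1.75
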
Example #12
def fuel_receipts_costs_eia923(freq=None, testing=False,
                               start_date=None, end_date=None):
    """
    Pull records from fuel_receipts_costs_eia923 table, in a given date range.

    Optionally, aggregate the records at a monthly or longer timescale, as well
    as by fuel type within a plant, by setting freq to something other than
    the default None value.

    If the records are not being aggregated, then all of the fields found in
    the PUDL database are available.  If they are being aggregated, then the
    following fields are preserved, and appropriately summed or re-calculated
    based on the specified aggregation. In both cases, new total values are
    calculated, for total fuel heat content and total fuel cost.
     - plant_id_eia
     - report_date
     - fuel_type_pudl (formerly energy_source_simple)
     - fuel_quantity (sum)
     - fuel_cost_per_mmbtu (weighted average)
     - total_fuel_cost (sum)
     - total_heat_content_mmbtu (sum)
     - heat_content_mmbtu_per_unit (weighted average)
     - sulfur_content_pct (weighted average)
     - ash_content_pct (weighted average)
     - mercury_content_ppm (weighted average)

    In addition, plant and utility names and IDs are pulled in from the EIA
    860 tables.

    Args:
        freq (str): a pandas timeseries offset alias. The original data is
            reported monthly, so the best time frequencies to use here are
            probably month start (freq='MS') and year start (freq='YS').
        start_date & end_date: date-like objects, including strings of the
            form 'YYYY-MM-DD' which will be used to specify the date range of
            records to be pulled.  Dates are inclusive.
        testing (bool): True if we're using the pudl_test DB, False if we're
            using the live PUDL DB. False by default.

    Returns:
        out_df: a pandas dataframe.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    # Most of the fields we want come directly from Fuel Receipts & Costs
    frc_tbl = pt['fuel_receipts_costs_eia923']
    frc_select = sa.sql.select([frc_tbl, ])

    # Need to re-integrate the MSHA coalmine info:
    cmi_tbl = pt['coalmine_info_eia923']
    cmi_select = sa.sql.select([cmi_tbl, ])
    cmi_df = pd.read_sql(cmi_select, pudl_engine)

    if start_date is not None:
        frc_select = frc_select.where(
            frc_tbl.c.report_date >= start_date)
    if end_date is not None:
        frc_select = frc_select.where(
            frc_tbl.c.report_date <= end_date)

    frc_df = pd.read_sql(frc_select, pudl_engine)
    frc_df = frc_df.rename(columns={'plant_id': 'plant_id_eia'})

    frc_df = pd.merge(frc_df, cmi_df,
                      how='left',
                      left_on='coalmine_id',
                      right_on='id')

    cols_to_drop = ['fuel_receipt_id', 'coalmine_id', 'id']
    frc_df = frc_df.drop(cols_to_drop, axis=1)

    # Calculate a few totals that are commonly needed:
    frc_df['total_heat_content_mmbtu'] = \
        frc_df['heat_content_mmbtu_per_unit'] * frc_df['fuel_quantity']
    frc_df['total_fuel_cost'] = \
        frc_df['total_heat_content_mmbtu'] * frc_df['fuel_cost_per_mmbtu']

    by = ['plant_id_eia', 'fuel_type_pudl']
    if freq is not None:
        # Create a date index for temporal resampling:
        frc_df = frc_df.set_index(pd.DatetimeIndex(frc_df.report_date))
        by = by + [pd.Grouper(freq=freq)]
        # Sum up these values so we can calculate quantity weighted averages
        frc_df['total_ash_content'] = \
            frc_df['ash_content_pct'] * frc_df['fuel_quantity']
        frc_df['total_sulfur_content'] = \
            frc_df['sulfur_content_pct'] * frc_df['fuel_quantity']
        frc_df['total_mercury_content'] = \
            frc_df['mercury_content_ppm'] * frc_df['fuel_quantity']

        frc_gb = frc_df.groupby(by=by)
        frc_df = frc_gb.agg({
            'fuel_quantity': np.sum,
            'total_heat_content_mmbtu': np.sum,
            'total_fuel_cost': np.sum,
            'total_sulfur_content': np.sum,
            'total_ash_content': np.sum,
            'total_mercury_content': np.sum,
        })

        frc_df['fuel_cost_per_mmbtu'] = \
            frc_df['total_fuel_cost'] / frc_df['total_heat_content_mmbtu']
        frc_df['heat_content_mmbtu_per_unit'] = \
            frc_df['total_heat_content_mmbtu'] / frc_df['fuel_quantity']
        frc_df['sulfur_content_pct'] = \
            frc_df['total_sulfur_content'] / frc_df['fuel_quantity']
        frc_df['ash_content_pct'] = \
            frc_df['total_ash_content'] / frc_df['fuel_quantity']
        frc_df['mercury_content_ppm'] = \
            frc_df['total_mercury_content'] / frc_df['fuel_quantity']
        frc_df = frc_df.reset_index()
        frc_df = frc_df.drop(['total_ash_content',
                              'total_sulfur_content',
                              'total_mercury_content'], axis=1)

    # Bring in some generic plant & utility information:
    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)
    out_df = analysis.merge_on_date_year(frc_df, pu_eia, on=['plant_id_eia'])

    # Drop any records where we've failed to get the 860 data merged in...
    out_df = out_df.dropna(subset=['operator_id', 'operator_name'])

    if freq is None:
        # There are a couple of invalid records with no specified fuel.
        out_df = out_df.dropna(subset=['fuel_group'])

    first_cols = ['report_date',
                  'plant_id_eia',
                  'plant_id_pudl',
                  'plant_name',
                  'operator_id',
                  'util_id_pudl',
                  'operator_name', ]

    # Re-arrange the columns for easier readability:
    out_df = organize_cols(out_df, first_cols)

    # Clean up the types of a few columns...
    out_df['plant_id_eia'] = out_df.plant_id_eia.astype(int)
    out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)
    out_df['operator_id'] = out_df.operator_id.astype(int)
    out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)

    return out_df
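
A usage sketch of monthly aggregation (assumes a populated PUDL DB; treating
'coal' as a value of fuel_type_pudl is an assumption made for illustration):

frc_monthly = fuel_receipts_costs_eia923(freq='MS',
                                         start_date='2014-01-01',
                                         end_date='2014-12-31',
                                         testing=True)
coal_costs = frc_monthly[frc_monthly.fuel_type_pudl == 'coal']
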
Example #13
def generation_fuel_eia923(freq=None, testing=False,
                           start_date=None, end_date=None):
    """
    Pull records from the generation_fuel_eia923 table, in a given date range.

    Optionally, aggregate the records over some timescale -- monthly, yearly,
    quarterly, etc. as well as by fuel type within a plant.

    If the records are not being aggregated, all of the database fields are
    available. If they're being aggregated, then we preserve the following
    fields. Per-unit values are re-calculated based on the aggregated totals.
    Totals are summed across whatever time range is being used, within a
    given plant and fuel type.
     - plant_id_eia
     - report_date
     - fuel_type_pudl
     - fuel_consumed_total
     - fuel_consumed_for_electricity
     - fuel_mmbtu_per_unit
     - fuel_consumed_total_mmbtu
     - fuel_consumed_for_electricity_mmbtu
     - net_generation_mwh

    In addition, plant and utility names and IDs are pulled in from the EIA
    860 tables.

    Args:
        testing (bool): True if we are connecting to the pudl_test DB, False
            if we're using the live DB.  False by default.
        freq (str): a pandas timeseries offset alias. The original data is
            reported monthly, so the best time frequencies to use here are
            probably month start (freq='MS') and year start (freq='YS').
        start_date & end_date: date-like objects, including strings of the
            form 'YYYY-MM-DD' which will be used to specify the date range of
            records to be pulled.  Dates are inclusive.

    Returns:
        out_df: a pandas dataframe.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    gf_tbl = pt['generation_fuel_eia923']
    gf_select = sa.sql.select([gf_tbl, ])
    if start_date is not None:
        gf_select = gf_select.where(
            gf_tbl.c.report_date >= start_date)
    if end_date is not None:
        gf_select = gf_select.where(
            gf_tbl.c.report_date <= end_date)

    gf_df = pd.read_sql(gf_select, pudl_engine)
    gf_df = gf_df.rename(columns={'plant_id': 'plant_id_eia'})

    cols_to_drop = ['id']
    gf_df = gf_df.drop(cols_to_drop, axis=1)

    # fuel_type_pudl was formerly aer_fuel_category
    by = ['plant_id_eia', 'fuel_type_pudl']
    if freq is not None:
        # Create a date index for temporal resampling:
        gf_df = gf_df.set_index(pd.DatetimeIndex(gf_df.report_date))
        by = by + [pd.Grouper(freq=freq)]

        # Sum up these values so we can calculate quantity weighted averages
        gf_gb = gf_df.groupby(by=by)
        gf_df = gf_gb.agg({
            'fuel_consumed_total': np.sum,
            'fuel_consumed_for_electricity': np.sum,
            'fuel_consumed_total_mmbtu': np.sum,
            'fuel_consumed_for_electricity_mmbtu': np.sum,
            'net_generation_mwh': np.sum,
        })
        gf_df['fuel_mmbtu_per_unit'] = \
            gf_df['fuel_consumed_total_mmbtu'] / gf_df['fuel_consumed_total']

        gf_df = gf_df.reset_index()

    # Bring in some generic plant & utility information:
    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)
    out_df = analysis.merge_on_date_year(gf_df, pu_eia, on=['plant_id_eia'])
    # Drop any records where we've failed to get the 860 data merged in...
    out_df = out_df.dropna(subset=[
        'plant_id_eia',
        'plant_id_pudl',
        'plant_name',
        'operator_id',
        'util_id_pudl',
        'operator_name',
    ])

    first_cols = ['report_date',
                  'plant_id_eia',
                  'plant_id_pudl',
                  'plant_name',
                  'operator_id',
                  'util_id_pudl',
                  'operator_name', ]

    out_df = organize_cols(out_df, first_cols)

    # Clean up the types of a few columns...
    out_df['plant_id_eia'] = out_df.plant_id_eia.astype(int)
    out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)
    out_df['operator_id'] = out_df.operator_id.astype(int)
    out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)

    return out_df
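
Because the aggregated frame carries both total heat content and net
generation, an approximate heat rate is one line away (a sketch; records with
zero net generation will produce inf/NaN values here):

gf = generation_fuel_eia923(freq='AS', testing=True)
gf['heat_rate_mmbtu_per_mwh'] = \
    gf.fuel_consumed_total_mmbtu / gf.net_generation_mwh
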
Example #14
def generators_eia860(start_date=None, end_date=None, testing=False):
    """
    Pull all fields reported in the generators_eia860 table.

    Merge in other useful fields, including the latitude & longitude of the
    plant that the generators are part of, canonical plant & operator names,
    and the PUDL IDs of the plant and operator, for merging with other PUDL
    data sources.

    Fill in data for adjacent years if requested, but never fill in earlier
    than the earliest working year of data for EIA923, and never extend more
    than one year past the reported data (since there should be at most a
    one year lag between EIA923 and EIA860 reporting).

    Args:
        start_date (date): the earliest EIA 860 data to retrieve or synthesize
        end_date (date): the latest EIA 860 data to retrieve or synthesize
        testing (bool): Connect to the live PUDL DB or the testing DB?

    Returns:
        A pandas dataframe.

    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    # Almost all the info we need will come from here.
    gens_eia860_tbl = pt['generators_eia860']
    gens_eia860_select = sa.sql.select([gens_eia860_tbl, ])
    # To get the Lat/Lon coordinates
    plants_eia860_tbl = pt['plants_eia860']
    plants_eia860_select = sa.sql.select([
        plants_eia860_tbl.c.report_date,
        plants_eia860_tbl.c.plant_id,
        plants_eia860_tbl.c.latitude,
        plants_eia860_tbl.c.longitude,
    ])

    if start_date is not None:
        start_date = pd.to_datetime(start_date)
        # We don't want to get too crazy with the date extensions...
        # start_date shouldn't go back before the earliest working year of
        # EIA 923
        eia923_start_date = \
            pd.to_datetime('{}-01-01'.format(min(pc.working_years['eia923'])))
        assert start_date >= eia923_start_date
        gens_eia860_select = gens_eia860_select.where(
            gens_eia860_tbl.c.report_date >= start_date
        )
        plants_eia860_select = plants_eia860_select.where(
            plants_eia860_tbl.c.report_date >= start_date
        )

    if end_date is not None:
        end_date = pd.to_datetime(end_date)
        # end_date shouldn't be more than one year ahead of the most recent
        # year for which we have EIA 860 data:
        eia860_end_date = \
            pd.to_datetime('{}-12-31'.format(max(pc.working_years['eia860'])))
        assert end_date <= eia860_end_date + pd.DateOffset(years=1)
        gens_eia860_select = gens_eia860_select.where(
            gens_eia860_tbl.c.report_date <= end_date
        )
        plants_eia860_select = plants_eia860_select.where(
            plants_eia860_tbl.c.report_date <= end_date
        )

    gens_eia860 = pd.read_sql(gens_eia860_select, pudl_engine)
    gens_eia860 = gens_eia860.rename(columns={'plant_id': 'plant_id_eia'})
    # Canonical sources for these fields are elsewhere. We will merge them in.
    gens_eia860 = gens_eia860.drop(['operator_id',
                                    'operator_name',
                                    'plant_name'], axis=1)
    plants_eia860 = pd.read_sql(plants_eia860_select, pudl_engine)
    plants_eia860 = plants_eia860.rename(columns={'plant_id': 'plant_id_eia'})
    out_df = pd.merge(gens_eia860, plants_eia860,
                      how='left', on=['report_date', 'plant_id_eia'])
    out_df.report_date = pd.to_datetime(out_df.report_date)

    # Bring in some generic plant & utility information:
    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)
    out_df = pd.merge(out_df, pu_eia, on=['report_date', 'plant_id_eia'])

    # Drop a few extraneous fields...
    cols_to_drop = ['id', ]
    out_df = out_df.drop(cols_to_drop, axis=1)

    # In order to be able to differentiate between single- and multi-fuel
    # plants, we need to count how many different simple energy sources are
    # associated with a plant's generators. This allows us to lump together
    # an entire plant's fuel & generation if its primary fuels are
    # homogeneous, and to split out fuel & generation by fuel if they are
    # heterogeneous.
    ft_count = out_df[['plant_id_eia', 'fuel_type_pudl', 'report_date']].\
        drop_duplicates().groupby(['plant_id_eia', 'report_date']).count()
    ft_count = ft_count.reset_index()
    ft_count = ft_count.rename(
        columns={'fuel_type_pudl': 'fuel_type_count'})
    out_df = pd.merge(out_df, ft_count, how='left',
                      on=['plant_id_eia', 'report_date'])

    first_cols = [
        'report_date',
        'plant_id_eia',
        'plant_id_pudl',
        'plant_name',
        'operator_id',
        'util_id_pudl',
        'operator_name',
        'generator_id',
    ]

    # Re-arrange the columns for easier readability:
    out_df = organize_cols(out_df, first_cols)
    out_df = extend_annual(out_df, start_date=start_date, end_date=end_date)
    out_df = out_df.sort_values(['report_date',
                                 'plant_id_eia',
                                 'generator_id'])

    return out_df
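
The fuel_type_count column computed above makes it easy to separate
single-fuel plants from multi-fuel plants (a sketch; assumes a populated
PUDL DB):

gens = generators_eia860(start_date='2011-01-01', end_date='2015-12-31')
single_fuel = gens[gens.fuel_type_count == 1]
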
Example #15
def boiler_generator_association(bga_eia860,
                                 gens_eia860,
                                 gen_eia923,
                                 bf_eia923,
                                 start_date=None,
                                 end_date=None,
                                 testing=False):
    """
    Temporary function to create more complete boiler generator associations.

    This is a temporary function until it can be pulled into a datatable.
    It pulls in all of the generators and all of the boilers and uses them
    to create a relatively complete association list. First, the original
    bga table is used; then the remaining unmatched generators are matched
    to boilers with the same ID string (in the same plant and year); then
    the unit codes are used to connect all generators and boilers within
    each given unit. Incomplete or inaccurate records are tagged in boolean
    columns.

    Notes:
     - unit_code is coming out as a mix of None and NaN values. Should pick
       a single type for the column and stick to it (or enforce on output).
     - The bga_eia860 argument is modified in place (via drop_duplicates and
       drop with inplace=True), which callers may not expect.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    # compile and scrub all the parts
    bga_eia860.drop_duplicates(
        ['plant_id_eia', 'boiler_id', 'generator_id', 'report_date'],
        inplace=True)
    bga_eia860.drop(['id', 'operator_id'], axis=1, inplace=True)

    gen_eia923 = gen_eia923.set_index(pd.DatetimeIndex(gen_eia923.report_date))
    gen_eia923_gb = gen_eia923.groupby(
        [pd.Grouper(freq='AS'), 'plant_id_eia', 'generator_id'])
    gen_eia923 = gen_eia923_gb['net_generation_mwh'].sum().reset_index()
    gen_eia923['missing_from_923'] = False

    # Find the generator records that are missing from 860 but appear in 923.
    # The resulting `missing` frame is not yet used; issue no. 128 tracks
    # dealing with this at a later date.
    merged = pd.merge(gens_eia860,
                      gen_eia923,
                      on=['plant_id_eia', 'report_date', 'generator_id'],
                      indicator=True,
                      how='outer')
    missing = merged[merged['_merge'] == 'right_only']

    # compile all of the generators
    gens = pd.merge(gen_eia923,
                    gens_eia860,
                    on=['plant_id_eia', 'report_date', 'generator_id'],
                    how='outer')

    gens = gens[[
        'plant_id_eia', 'report_date', 'generator_id', 'unit_code',
        'net_generation_mwh', 'missing_from_923'
    ]].drop_duplicates()

    # create the beginning of a bga compilation w/ the generators as the
    # background
    bga_compiled_1 = pd.merge(
        gens,
        bga_eia860,
        on=['plant_id_eia', 'generator_id', 'report_date'],
        how='outer')

    # Side note: there are only 6 generators that appear in bga8 that don't
    # appear in gens9 or gens8 (requires un-commenting the og_tag creation
    # above):
    # bga_compiled_1[bga_compiled_1['og_tag'].isnull()]

    bf_eia923 = bf_eia923.set_index(pd.DatetimeIndex(bf_eia923.report_date))
    bf_eia923_gb = bf_eia923.groupby(
        [pd.Grouper(freq='AS'), 'plant_id_eia', 'boiler_id'])
    bf_eia923 = bf_eia923_gb['total_heat_content_mmbtu'].sum().reset_index()

    bf_eia923.drop_duplicates(
        subset=['plant_id_eia', 'report_date', 'boiler_id'], inplace=True)

    # Create a set of bga's that are linked, directly from bga8
    bga_assn = bga_compiled_1[bga_compiled_1['boiler_id'].notnull()].copy()
    bga_assn['bga_source'] = 'eia860_org'

    # Create a set of bga's that were not linked directly through bga8
    bga_unassn = bga_compiled_1[bga_compiled_1['boiler_id'].isnull()].copy()
    bga_unassn = bga_unassn.drop(['boiler_id'], axis=1)

    # Create a list of boilers that were not in bga8
    bf9_bga = bf_eia923.merge(bga_compiled_1,
                              on=['plant_id_eia', 'boiler_id', 'report_date'],
                              how='outer',
                              indicator=True)
    bf9_not_in_bga = bf9_bga[bf9_bga['_merge'] == 'left_only']
    bf9_not_in_bga = bf9_not_in_bga.drop(['_merge'], axis=1)

    # Match the unassociated generators with unassociated boilers.
    # This method assumes that some of the generator and boiler ID strings
    # are the same.
    bga_unassn = bga_unassn.merge(
        bf9_not_in_bga[['plant_id_eia', 'boiler_id', 'report_date']],
        how='left',
        left_on=['report_date', 'plant_id_eia', 'generator_id'],
        right_on=['report_date', 'plant_id_eia', 'boiler_id'])
    bga_unassn.sort_values(['report_date', 'plant_id_eia'], inplace=True)
    bga_unassn['bga_source'] = None
    bga_unassn.loc[bga_unassn.boiler_id.notnull(),
                   'bga_source'] = 'string_assn'

    bga_compiled_2 = bga_assn.append(bga_unassn)
    bga_compiled_2.sort_values(['plant_id_eia', 'report_date'], inplace=True)
    bga_compiled_2['missing_from_923'].fillna(value=True, inplace=True)

    # Connect the gens and boilers in units
    bga_compiled_units = bga_compiled_2.loc[
        bga_compiled_2['unit_code'].notnull()]
    bga_gen_units = bga_compiled_units.drop(['boiler_id'], axis=1)
    bga_boil_units = bga_compiled_units[[
        'plant_id_eia', 'report_date', 'boiler_id', 'unit_code'
    ]].copy()
    bga_boil_units.dropna(subset=['boiler_id'], inplace=True)

    # merge the units with the boilers
    bga_unit_compilation = bga_gen_units.merge(
        bga_boil_units,
        how='outer',
        on=['plant_id_eia', 'report_date', 'unit_code'],
        indicator=True)
    # label the bga_source
    bga_unit_compilation.loc[
        bga_unit_compilation['bga_source'].isnull(),
        'bga_source'] = 'unit_connection'
    bga_unit_compilation.drop(['_merge'], axis=1, inplace=True)
    bga_non_units = bga_compiled_2[bga_compiled_2['unit_code'].isnull()]

    # combine the unit compilation and the non units
    bga_compiled_3 = bga_non_units.append(bga_unit_compilation)

    # resort the records and the columns
    bga_compiled_3.sort_values(['plant_id_eia', 'report_date'], inplace=True)
    bga_compiled_3 = bga_compiled_3[[
        'plant_id_eia', 'report_date', 'generator_id', 'boiler_id',
        'unit_code', 'bga_source', 'net_generation_mwh', 'missing_from_923'
    ]]

    # Label plants that have 'bad' generator records (generators that have
    # MWh reported in gens9 but no connected boilers). Create a df with just
    # the bad plants by searching for the 'bad' generators.
    bad_plants = bga_compiled_3[(bga_compiled_3['boiler_id'].isnull()) &
                                (bga_compiled_3['net_generation_mwh'] > 0)].\
        drop_duplicates(subset=['plant_id_eia', 'report_date'])
    bad_plants = bad_plants[['plant_id_eia', 'report_date']]

    # merge the 'bad' plants back into the larger frame
    bga_compiled_3 = bga_compiled_3.merge(bad_plants,
                                          how='outer',
                                          on=['plant_id_eia', 'report_date'],
                                          indicator=True)

    # use the indicator to create labels
    bga_compiled_3['plant_w_bad_generator'] = \
        np.where(bga_compiled_3._merge == 'both', True, False)
    # Note: At least one gen has reported MWh in 923, but could not be
    # programmatically mapped to a boiler

    # we don't need this one anymore
    bga_compiled_3 = bga_compiled_3.drop(['_merge'], axis=1)

    # create a label for generators that are unmapped but in 923
    bga_compiled_3['unmapped_but_in_923'] = \
        np.where((bga_compiled_3.boiler_id.isnull()) &
                 ~bga_compiled_3.missing_from_923 &
                 (bga_compiled_3.net_generation_mwh == 0),
                 True,
                 False)

    # create a label for generators that are unmapped
    bga_compiled_3['unmapped'] = np.where(bga_compiled_3.boiler_id.isnull(),
                                          True, False)
    bga_compiled_3 = bga_compiled_3.drop('net_generation_mwh', axis=1)
    bga_compiled_3.loc[bga_compiled_3.unit_code.isnull(), 'unit_code'] = None
    return bga_compiled_3
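
A sketch of wiring this together with the pull functions defined above (all
run against the same populated DB; the date range is illustrative):

bga8 = boiler_generator_assn_eia860(start_date='2011-01-01',
                                    end_date='2015-12-31')
gens8 = generators_eia860(start_date='2011-01-01', end_date='2015-12-31')
gen9 = generation_eia923(start_date='2011-01-01', end_date='2015-12-31')
bf9 = boiler_fuel_eia923(start_date='2011-01-01', end_date='2015-12-31')
bga = boiler_generator_association(bga8, gens8, gen9, bf9,
                                   start_date='2011-01-01',
                                   end_date='2015-12-31')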