Example #1
def capacity_factor(gens_eia860, gen_eia923, min_cap_fact=0, max_cap_fact=1.5):
    """
    Calculate the capacity factor for each generator.

    Capacity Factor is calculated by using the net generation from eia923 and
    the nameplate capacity from eia860. The net gen and capacity are pulled
    into one dataframe, then the dates from that dataframe are pulled out to
    determine the hours in each period based on the frequency. The number of
    hours is used in calculating the capacity factor. Then unrealistic
    capacity factors (below min_cap_fact, or at or above max_cap_fact) are
    replaced with NaN.
    """
    # infer the natural frequency of our input dataset:
    freq = pd.infer_freq(
        pd.DatetimeIndex(gen_eia923.report_date.unique()).sort_values())
    # Only include columns to be used
    gens_eia860 = gens_eia860[[
        'plant_id_eia', 'report_date', 'generator_id', 'nameplate_capacity_mw'
    ]]
    gen_eia923 = gen_eia923[[
        'plant_id_eia', 'report_date', 'generator_id', 'net_generation_mwh'
    ]]

    # merge the generation and capacity to calculate capacity factor
    capacity_factor = analysis.merge_on_date_year(
        gen_eia923, gens_eia860, on=['plant_id_eia', 'generator_id'])

    # get a unique set of dates to generate the number of hours
    dates = capacity_factor['report_date'].drop_duplicates()
    dates_to_hours = pd.DataFrame(data={
        'report_date': dates,
        'hours': dates.apply(
            lambda d: (pd.date_range(d, periods=2, freq=freq)[1] -
                       pd.date_range(d, periods=2, freq=freq)[0]) /
            pd.Timedelta(hours=1))
    })

    # merge in the hours for the calculation
    capacity_factor = capacity_factor.merge(dates_to_hours, on=['report_date'])

    # actually calculate capacity factor wooo!
    capacity_factor['capacity_factor'] = \
        capacity_factor['net_generation_mwh'] / \
        (capacity_factor['nameplate_capacity_mw'] * capacity_factor['hours'])

    # Replace unrealistic capacity factors with NaN
    capacity_factor.loc[capacity_factor['capacity_factor'] < min_cap_fact,
                        'capacity_factor'] = np.nan
    capacity_factor.loc[capacity_factor['capacity_factor'] >= max_cap_fact,
                        'capacity_factor'] = np.nan

    # drop the hours column, since we don't need it anymore
    capacity_factor.drop(['hours'], axis=1, inplace=True)

    return capacity_factor
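
# The lambda above converts a period-start date and a frequency string into a
# number of hours. A minimal standalone sketch of that trick, assuming only
# pandas (the helper name is invented for illustration):
import pandas as pd

def hours_in_period(date, freq):
    """Hours in the period beginning at `date`, for a pandas freq alias."""
    starts = pd.date_range(date, periods=2, freq=freq)
    return (starts[1] - starts[0]) / pd.Timedelta(hours=1)

# e.g. hours_in_period('2017-02-01', 'MS') -> 672.0 (28 days * 24 hours),
# and hours_in_period('2016-01-01', 'YS') -> 8784.0 (2016 was a leap year).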
Example #2
def heat_rate_by_gen(pudl_out, verbose=False):
    """Convert by-unit heat rate to by-generator, adding fuel type & count."""
    # pudl_out must have a freq, otherwise capacity factor will fail and merges
    # between tables with different frequencies will fail
    assert pudl_out.freq is not None,\
        "pudl_out must include a frequency for mcoe"

    gens_simple = pudl_out.gens_eia860()[[
        'report_date', 'plant_id_eia', 'generator_id', 'fuel_type_pudl'
    ]]
    bga_gens = pudl_out.bga()[[
        'report_date', 'plant_id_eia', 'unit_id_pudl', 'generator_id'
    ]].drop_duplicates()
    gens_simple = pd.merge(gens_simple,
                           bga_gens,
                           on=['report_date', 'plant_id_eia', 'generator_id'],
                           validate='one_to_one')
    # Associate those heat rates with individual generators. This also means
    # losing the net generation and fuel consumption information for now.
    hr_by_gen = analysis.merge_on_date_year(
        pudl_out.heat_rate_by_unit()[[
            'report_date', 'plant_id_eia', 'unit_id_pudl',
            'heat_rate_mmbtu_mwh'
        ]],
        bga_gens,
        on=['plant_id_eia', 'unit_id_pudl'])
    hr_by_gen = hr_by_gen.drop('unit_id_pudl', axis=1)
    # Now bring information about generator fuel type & count
    hr_by_gen = analysis.merge_on_date_year(
        hr_by_gen,
        pudl_out.gens_eia860()[[
            'report_date', 'plant_id_eia', 'generator_id', 'fuel_type_pudl',
            'fuel_type_count'
        ]],
        on=['plant_id_eia', 'generator_id'])
    return hr_by_gen
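
# analysis.merge_on_date_year() is used throughout these examples but is not
# shown. A plausible sketch of its behavior, judging from how it is called:
# merge annually reported attributes onto higher-frequency records by matching
# on the year of report_date plus the other key columns. The real
# implementation may differ in its details.
import pandas as pd

def merge_on_date_year(left, right, on=(), how='inner'):
    left = left.copy()
    right = right.copy()
    left['report_year'] = pd.to_datetime(left.report_date).dt.year
    right['report_year'] = pd.to_datetime(right.report_date).dt.year
    out = pd.merge(left, right.drop('report_date', axis=1),
                   how=how, on=['report_year'] + list(on))
    return out.drop('report_year', axis=1)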
Example #3
def generation_eia923(freq=None, testing=False,
                      start_date=None, end_date=None):
    """
    Sum net generation by generator at the specified frequency.

    In addition, some human readable plant and utility names, as well as some
    ID values for joining with other dataframes, are added back into the
    dataframe before it is returned.

    Args:
        freq (str): a pandas timeseries offset alias, used to specify a time
            grouping frequency. If None, no aggregation is performed.
        start_date & end_date: date-like objects, including strings of the
            form 'YYYY-MM-DD', which specify the date range of records to be
            pulled. Dates are inclusive.
        testing (bool): True if we're using the pudl_test DB, False if we're
            using the live PUDL DB.  False by default.
    Returns:
        out_df: a pandas dataframe.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    g_eia923_tbl = pt['generation_eia923']
    g_eia923_select = sa.sql.select([g_eia923_tbl, ])
    if start_date is not None:
        g_eia923_select = g_eia923_select.where(
            g_eia923_tbl.c.report_date >= start_date
        )
    if end_date is not None:
        g_eia923_select = g_eia923_select.where(
            g_eia923_tbl.c.report_date <= end_date
        )
    g_df = pd.read_sql(g_eia923_select, pudl_engine)
    g_df = g_df.rename(columns={'plant_id': 'plant_id_eia'})

    # Index by date and aggregate net generation.
    # Create a date index for grouping based on freq
    by = ['plant_id_eia', 'generator_id']
    if freq is not None:
        g_df = g_df.set_index(pd.DatetimeIndex(g_df.report_date))
        by = by + [pd.Grouper(freq=freq)]
        g_gb = g_df.groupby(by=by)
        g_df = g_gb['net_generation_mwh'].sum().reset_index()

    # Grab EIA 860 plant and utility specific information:
    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)

    # Merge annual plant/utility data in with the more granular dataframe
    out_df = analysis.merge_on_date_year(g_df, pu_eia, on=['plant_id_eia'])

    if freq is None:
        out_df = out_df.drop(['id'], axis=1)

    # These ID fields are vital -- without them we don't have a complete record
    out_df = out_df.dropna(subset=[
        'plant_id_eia',
        'plant_id_pudl',
        'operator_id',
        'util_id_pudl',
        'generator_id',
    ])

    first_cols = [
        'report_date',
        'plant_id_eia',
        'plant_id_pudl',
        'plant_name',
        'operator_id',
        'util_id_pudl',
        'operator_name',
        'generator_id',
    ]

    # Re-arrange the columns for easier readability:
    out_df = organize_cols(out_df, first_cols)

    out_df['operator_id'] = out_df.operator_id.astype(int)
    out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)
    out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)

    return out_df
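
# A minimal, self-contained sketch of the set_index / pd.Grouper aggregation
# pattern used above. The toy data is invented for illustration only:
import pandas as pd

toy = pd.DataFrame({
    'report_date': pd.to_datetime(['2017-01-01', '2017-02-01', '2017-01-01']),
    'plant_id_eia': [1, 1, 2],
    'generator_id': ['a', 'a', 'b'],
    'net_generation_mwh': [100.0, 150.0, 80.0],
})
toy = toy.set_index(pd.DatetimeIndex(toy.report_date))
gb = toy.groupby(['plant_id_eia', 'generator_id', pd.Grouper(freq='YS')])
annual = gb['net_generation_mwh'].sum().reset_index()
# annual has one row per plant/generator/year; plant 1, generator 'a'
# sums to 250.0 MWh for 2017.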
Example #4
def boiler_fuel_eia923(freq=None, testing=False,
                       start_date=None, end_date=None):
    """
    Pull records from the boiler_fuel_eia923 table, in a given date range.

    Optionally, aggregate the records over some timescale -- monthly, yearly,
    quarterly, etc. as well as by fuel type within a plant.

    If the records are not being aggregated, all of the database fields are
    available. If they're being aggregated, then we preserve the following
    fields. Per-unit values are re-calculated based on the aggregated totals.
    Totals are summed across whatever time range is being used, within a
    given plant and fuel type.
     - fuel_qty_consumed (sum)
     - fuel_mmbtu_per_unit (weighted average)
     - total_heat_content_mmbtu (sum)
     - sulfur_content_pct (weighted average)
     - ash_content_pct (weighted average)

    In addition, plant and utility names and IDs are pulled in from the EIA
    860 tables.

    Args:
        freq (str): a pandas timeseries offset alias. The original data is
            reported monthly, so the best time frequencies to use here are
            probably month start (freq='MS') and year start (freq='YS').
        start_date & end_date: date-like objects, including strings of the
            form 'YYYY-MM-DD' which will be used to specify the date range of
            records to be pulled.  Dates are inclusive.
        testing (bool): True if we're using the pudl_test DB, False if we're
            using the live PUDL DB.  False by default.

    Returns:
        bf_df: a pandas dataframe.

    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    bf_eia923_tbl = pt['boiler_fuel_eia923']
    bf_eia923_select = sa.sql.select([bf_eia923_tbl, ])
    if start_date is not None:
        bf_eia923_select = bf_eia923_select.where(
            bf_eia923_tbl.c.report_date >= start_date
        )
    if end_date is not None:
        bf_eia923_select = bf_eia923_select.where(
            bf_eia923_tbl.c.report_date <= end_date
        )
    bf_df = pd.read_sql(bf_eia923_select, pudl_engine)
    bf_df = bf_df.rename(columns={'plant_id': 'plant_id_eia'})

    # The total heat content is also useful in its own right, and we'll keep it
    # around.  Also needed to calculate average heat content per unit of fuel.
    bf_df['total_heat_content_mmbtu'] = bf_df['fuel_qty_consumed'] * \
        bf_df['fuel_mmbtu_per_unit']

    # Create a date index for grouping based on freq
    by = ['plant_id_eia', 'boiler_id', 'fuel_type_pudl']
    if freq is not None:
        # In order to calculate the weighted average sulfur
        # content and ash content we need to calculate these totals.
        bf_df['total_sulfur_content'] = bf_df['fuel_qty_consumed'] * \
            bf_df['sulfur_content_pct']
        bf_df['total_ash_content'] = bf_df['fuel_qty_consumed'] * \
            bf_df['ash_content_pct']
        bf_df = bf_df.set_index(pd.DatetimeIndex(bf_df.report_date))
        by = by + [pd.Grouper(freq=freq)]
        bf_gb = bf_df.groupby(by=by)

        # Sum up these totals within each group, and recalculate the per-unit
        # values (weighted in this case by fuel_qty_consumed)
        bf_df = bf_gb.agg({'total_heat_content_mmbtu': np.sum,
                           'fuel_qty_consumed': np.sum,
                           'total_sulfur_content': np.sum,
                           'total_ash_content': np.sum})

        bf_df['fuel_mmbtu_per_unit'] = \
            bf_df['total_heat_content_mmbtu'] / bf_df['fuel_qty_consumed']
        bf_df['sulfur_content_pct'] = \
            bf_df['total_sulfur_content'] / bf_df['fuel_qty_consumed']
        bf_df['ash_content_pct'] = \
            bf_df['total_ash_content'] / bf_df['fuel_qty_consumed']
        bf_df = bf_df.reset_index()
        bf_df = bf_df.drop(['total_ash_content', 'total_sulfur_content'],
                           axis=1)

    # Grab some basic plant & utility information to add.
    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)
    out_df = analysis.merge_on_date_year(bf_df, pu_eia, on=['plant_id_eia'])
    if freq is None:
        out_df = out_df.drop(['id'], axis=1)

    out_df = out_df.dropna(subset=[
        'plant_id_eia',
        'plant_id_pudl',
        'operator_id',
        'util_id_pudl',
        'boiler_id',
    ])

    first_cols = [
        'report_date',
        'plant_id_eia',
        'plant_id_pudl',
        'plant_name',
        'operator_id',
        'util_id_pudl',
        'operator_name',
        'boiler_id',
    ]

    # Re-arrange the columns for easier readability:
    out_df = organize_cols(out_df, first_cols)

    out_df['operator_id'] = out_df.operator_id.astype(int)
    out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)
    out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)

    return out_df
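
# A short worked example of the quantity-weighted averages recomputed above,
# with invented numbers: 100 units of fuel at 2.0% sulfur plus 300 units at
# 1.0% sulfur.
qty = [100.0, 300.0]
sulfur_pct = [2.0, 1.0]
total_sulfur = sum(q * s for q, s in zip(qty, sulfur_pct))  # 500.0
weighted_avg = total_sulfur / sum(qty)  # 1.25%, vs. a naive mean of 1.5%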
Example #5
def fuel_receipts_costs_eia923(freq=None, testing=False,
                               start_date=None, end_date=None):
    """
    Pull records from the fuel_receipts_costs_eia923 table, in a given date range.

    Optionally, aggregate the records at a monthly or longer timescale, as well
    as by fuel type within a plant, by setting freq to something other than
    the default None value.

    If the records are not being aggregated, then all of the fields found in
    the PUDL database are available.  If they are being aggregated, then the
    following fields are preserved, and appropriately summed or re-calculated
    based on the specified aggregation. In both cases, new total values are
    calculated, for total fuel heat content and total fuel cost.
     - plant_id
     - report_date
     - fuel_type_pudl (formerly energy_source_simple)
     - fuel_quantity (sum)
     - fuel_cost_per_mmbtu (weighted average)
     - total_fuel_cost (sum)
     - total_heat_content_mmbtu (sum)
     - heat_content_mmbtu_per_unit (weighted average)
     - sulfur_content_pct (weighted average)
     - ash_content_pct (weighted average)
     - mercury_content_ppm (weighted average)

    In addition, plant and utility names and IDs are pulled in from the EIA
    860 tables.

    Args:
        freq (str): a pandas timeseries offset alias. The original data is
            reported monthly, so the best time frequencies to use here are
            probably month start (freq='MS') and year start (freq='YS').
        start_date & end_date: date-like objects, including strings of the
            form 'YYYY-MM-DD' which will be used to specify the date range of
            records to be pulled.  Dates are inclusive.
        testing (bool): True if we're using the pudl_test DB, False if we're
            using the live PUDL DB. False by default.

    Returns:
        frc_df: a pandas dataframe.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    # Most of the fields we want come directly from Fuel Receipts & Costs
    frc_tbl = pt['fuel_receipts_costs_eia923']
    frc_select = sa.sql.select([frc_tbl, ])

    # Need to re-integrate the MSHA coalmine info:
    cmi_tbl = pt['coalmine_info_eia923']
    cmi_select = sa.sql.select([cmi_tbl, ])
    cmi_df = pd.read_sql(cmi_select, pudl_engine)

    if start_date is not None:
        frc_select = frc_select.where(
            frc_tbl.c.report_date >= start_date)
    if end_date is not None:
        frc_select = frc_select.where(
            frc_tbl.c.report_date <= end_date)

    frc_df = pd.read_sql(frc_select, pudl_engine)
    frc_df = frc_df.rename(columns={'plant_id': 'plant_id_eia'})

    frc_df = pd.merge(frc_df, cmi_df,
                      how='left',
                      left_on='coalmine_id',
                      right_on='id')

    cols_to_drop = ['fuel_receipt_id', 'coalmine_id', 'id']
    frc_df = frc_df.drop(cols_to_drop, axis=1)

    # Calculate a few totals that are commonly needed:
    frc_df['total_heat_content_mmbtu'] = \
        frc_df['heat_content_mmbtu_per_unit'] * frc_df['fuel_quantity']
    frc_df['total_fuel_cost'] = \
        frc_df['total_heat_content_mmbtu'] * frc_df['fuel_cost_per_mmbtu']

    by = ['plant_id_eia', 'fuel_type_pudl']
    if freq is not None:
        # Create a date index for temporal resampling:
        frc_df = frc_df.set_index(pd.DatetimeIndex(frc_df.report_date))
        by = by + [pd.Grouper(freq=freq)]
        # Sum up these values so we can calculate quantity weighted averages
        frc_df['total_ash_content'] = \
            frc_df['ash_content_pct'] * frc_df['fuel_quantity']
        frc_df['total_sulfur_content'] = \
            frc_df['sulfur_content_pct'] * frc_df['fuel_quantity']
        frc_df['total_mercury_content'] = \
            frc_df['mercury_content_ppm'] * frc_df['fuel_quantity']

        frc_gb = frc_df.groupby(by=by)
        frc_df = frc_gb.agg({
            'fuel_quantity': np.sum,
            'total_heat_content_mmbtu': np.sum,
            'total_fuel_cost': np.sum,
            'total_sulfur_content': np.sum,
            'total_ash_content': np.sum,
            'total_mercury_content': np.sum,
        })

        frc_df['fuel_cost_per_mmbtu'] = \
            frc_df['total_fuel_cost'] / frc_df['total_heat_content_mmbtu']
        frc_df['heat_content_mmbtu_per_unit'] = \
            frc_df['total_heat_content_mmbtu'] / frc_df['fuel_quantity']
        frc_df['sulfur_content_pct'] = \
            frc_df['total_sulfur_content'] / frc_df['fuel_quantity']
        frc_df['ash_content_pct'] = \
            frc_df['total_ash_content'] / frc_df['fuel_quantity']
        frc_df['mercury_content_ppm'] = \
            frc_df['total_mercury_content'] / frc_df['fuel_quantity']
        frc_df = frc_df.reset_index()
        frc_df = frc_df.drop(['total_ash_content',
                              'total_sulfur_content',
                              'total_mercury_content'], axis=1)

    # Bring in some generic plant & utility information:
    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)
    out_df = analysis.merge_on_date_year(frc_df, pu_eia, on=['plant_id_eia'])

    # Drop any records where we've failed to get the 860 data merged in...
    out_df = out_df.dropna(subset=['operator_id', 'operator_name'])

    if freq is None:
        # There are a couple of invalid records with no specified fuel.
        out_df = out_df.dropna(subset=['fuel_group'])

    first_cols = ['report_date',
                  'plant_id_eia',
                  'plant_id_pudl',
                  'plant_name',
                  'operator_id',
                  'util_id_pudl',
                  'operator_name', ]

    # Re-arrange the columns for easier readability:
    out_df = organize_cols(out_df, first_cols)

    # Clean up the types of a few columns...
    out_df['plant_id_eia'] = out_df.plant_id_eia.astype(int)
    out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)
    out_df['operator_id'] = out_df.operator_id.astype(int)
    out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)

    return out_df
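
# A hypothetical usage sketch (the date bounds are invented): pull fuel
# receipts aggregated to annual frequency, by plant and PUDL fuel type.
frc_annual = fuel_receipts_costs_eia923(freq='YS',
                                        start_date='2015-01-01',
                                        end_date='2016-12-31')
# In frc_annual, fuel_cost_per_mmbtu and the other per-unit values have been
# recomputed from the summed totals, i.e. as quantity-weighted averages.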
Example #6
def generation_fuel_eia923(freq=None, testing=False,
                           start_date=None, end_date=None):
    """
    Pull records from the generation_fuel_eia923 table, in a given date range.

    Optionally, aggregate the records over some timescale -- monthly, yearly,
    quarterly, etc. as well as by fuel type within a plant.

    If the records are not being aggregated, all of the database fields are
    available. If they're being aggregated, then we preserve the following
    fields. Per-unit values are re-calculated based on the aggregated totals.
    Totals are summed across whatever time range is being used, within a
    given plant and fuel type.
     - plant_id
     - report_date
     - fuel_type_pudl
     - fuel_consumed_total
     - fuel_consumed_for_electricity
     - fuel_mmbtu_per_unit
     - fuel_consumed_total_mmbtu
     - fuel_consumed_for_electricity_mmbtu
     - net_generation_mwh

    In addition, plant and utility names and IDs are pulled in from the EIA
    860 tables.

    Args:
        testing (bool): True if we are connecting to the pudl_test DB, False
            if we're using the live DB.  False by default.
        freq (str): a pandas timeseries offset alias. The original data is
            reported monthly, so the best time frequencies to use here are
            probably month start (freq='MS') and year start (freq='YS').
        start_date & end_date: date-like objects, including strings of the
            form 'YYYY-MM-DD' which will be used to specify the date range of
            records to be pulled.  Dates are inclusive.

    Returns:
        gf_df: a pandas dataframe.
    """
    pudl_engine = pudl.db_connect_pudl(testing=testing)
    gf_tbl = pt['generation_fuel_eia923']
    gf_select = sa.sql.select([gf_tbl, ])
    if start_date is not None:
        gf_select = gf_select.where(
            gf_tbl.c.report_date >= start_date)
    if end_date is not None:
        gf_select = gf_select.where(
            gf_tbl.c.report_date <= end_date)

    gf_df = pd.read_sql(gf_select, pudl_engine)
    gf_df = gf_df.rename(columns={'plant_id': 'plant_id_eia'})

    cols_to_drop = ['id']
    gf_df = gf_df.drop(cols_to_drop, axis=1)

    # fuel_type_pudl was formerly aer_fuel_category
    by = ['plant_id_eia', 'fuel_type_pudl']
    if freq is not None:
        # Create a date index for temporal resampling:
        gf_df = gf_df.set_index(pd.DatetimeIndex(gf_df.report_date))
        by = by + [pd.Grouper(freq=freq)]

        # Sum up these values so we can calculate quantity weighted averages
        gf_gb = gf_df.groupby(by=by)
        gf_df = gf_gb.agg({
            'fuel_consumed_total': np.sum,
            'fuel_consumed_for_electricity': np.sum,
            'fuel_consumed_total_mmbtu': np.sum,
            'fuel_consumed_for_electricity_mmbtu': np.sum,
            'net_generation_mwh': np.sum,
        })
        gf_df['fuel_mmbtu_per_unit'] = \
            gf_df['fuel_consumed_total_mmbtu'] / gf_df['fuel_consumed_total']

        gf_df = gf_df.reset_index()

    # Bring in some generic plant & utility information:
    pu_eia = plants_utils_eia(start_date=start_date,
                              end_date=end_date,
                              testing=testing)
    out_df = analysis.merge_on_date_year(gf_df, pu_eia, on=['plant_id_eia'])
    # Drop any records where we've failed to get the 860 data merged in...
    out_df = out_df.dropna(subset=[
        'plant_id_eia',
        'plant_id_pudl',
        'plant_name',
        'operator_id',
        'util_id_pudl',
        'operator_name',
    ])

    first_cols = ['report_date',
                  'plant_id_eia',
                  'plant_id_pudl',
                  'plant_name',
                  'operator_id',
                  'util_id_pudl',
                  'operator_name', ]

    out_df = organize_cols(out_df, first_cols)

    # Clean up the types of a few columns...
    out_df['plant_id_eia'] = out_df.plant_id_eia.astype(int)
    out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)
    out_df['operator_id'] = out_df.operator_id.astype(int)
    out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)

    return out_df
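
# A hypothetical usage sketch (date bounds invented): annual generation and
# fuel consumption per plant and PUDL fuel type.
gf_annual = generation_fuel_eia923(freq='YS',
                                   start_date='2014-01-01',
                                   end_date='2016-12-31')
# fuel_mmbtu_per_unit in gf_annual is recomputed from the aggregated totals:
# fuel_consumed_total_mmbtu / fuel_consumed_total.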
Example #7
def heat_rate_by_unit(pudl_out, verbose=False):
    """Calculate heat rates (mmBTU/MWh) within separable generation units.

    Assumes a "good" Boiler Generator Association (bga) i.e. one that only
    contains boilers and generators which have been completely associated at
    some point in the past.

    The BGA dataframe needs to have the following columns:
     - report_date (annual)
     - plant_id_eia
     - unit_id_pudl
     - generator_id
     - boiler_id

    The unit_id is associated with generation records based on report_date,
    plant_id_eia, and generator_id. Analogously, the unit_id is associated
    with boiler fuel consumption records based on report_date, plant_id_eia,
    and boiler_id.

    Then the total net generation and fuel consumption per unit per time period
    are calculated, allowing the calculation of a per unit heat rate. That
    per unit heat rate is returned in a dataframe containing:
     - report_date
     - plant_id_eia
     - unit_id_pudl
     - net_generation_mwh
     - total_heat_content_mmbtu
     - heat_rate_mmbtu_mwh
    """
    # pudl_out must have a freq, otherwise capacity factor will fail and merges
    # between tables with different frequencies will fail
    assert pudl_out.freq is not None,\
        "pudl_out must include a frequency for mcoe"

    # Create a dataframe containing only the unit-generator mappings:
    bga_gens = pudl_out.bga()[[
        'report_date', 'plant_id_eia', 'generator_id', 'unit_id_pudl'
    ]].drop_duplicates()
    # Merge those unit ids into the generation data:
    gen_w_unit = analysis.merge_on_date_year(
        pudl_out.gen_eia923(), bga_gens, on=['plant_id_eia', 'generator_id'])
    # Sum up the net generation per unit for each time period:
    gen_gb = gen_w_unit.groupby(
        ['report_date', 'plant_id_eia', 'unit_id_pudl'])
    gen_by_unit = gen_gb.agg({'net_generation_mwh': analysis.sum_na})
    gen_by_unit = gen_by_unit.reset_index()

    # Create a dataframe containing only the unit-boiler mappings:
    bga_boils = pudl_out.bga()[[
        'report_date', 'plant_id_eia', 'boiler_id', 'unit_id_pudl'
    ]].drop_duplicates()
    # Merge those unit ids into the boiler fuel consumption data:
    bf_w_unit = analysis.merge_on_date_year(pudl_out.bf_eia923(),
                                            bga_boils,
                                            on=['plant_id_eia', 'boiler_id'])
    # Sum up all the fuel consumption per unit for each time period:
    bf_gb = bf_w_unit.groupby(['report_date', 'plant_id_eia', 'unit_id_pudl'])
    bf_by_unit = bf_gb.agg({'total_heat_content_mmbtu': analysis.sum_na})
    bf_by_unit = bf_by_unit.reset_index()

    # Merge together the per-unit generation and fuel consumption data so we
    # can calculate a per-unit heat rate:
    hr_by_unit = pd.merge(gen_by_unit,
                          bf_by_unit,
                          on=['report_date', 'plant_id_eia', 'unit_id_pudl'],
                          validate='one_to_one')
    hr_by_unit['heat_rate_mmbtu_mwh'] = \
        hr_by_unit.total_heat_content_mmbtu / hr_by_unit.net_generation_mwh

    return hr_by_unit
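
# analysis.sum_na() is not shown here. A plausible sketch of its intent,
# judging from this usage: unlike a plain skipna sum, return NaN when *all*
# inputs are NaN, so units with no data at all don't show up as zero
# generation or zero fuel consumption. The real implementation may differ.
import numpy as np

def sum_na(series):
    if series.isnull().all():
        return np.nan
    return series.sum()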
Example #8
def mcoe(pudl_out,
         min_heat_rate=5.5,
         min_fuel_cost_per_mwh=0.0,
         min_cap_fact=0.0,
         max_cap_fact=1.5,
         verbose=False):
    """
    Compile marginal cost of electricity (MCOE) at the generator level.

    Use data from EIA 923, EIA 860, and (eventually) FERC Form 1 to estimate
    the MCOE of individual generating units. The calculation is performed at
    the time resolution, and for the period, indicated by the pudl_out object
    that is passed in.

    Args:
        pudl_out: a PudlOutput object, specifying the time resolution and
            date range for which the calculations should be performed.
        min_heat_rate: lowest plausible heat rate, in mmBTU/MWh. Any MCOE
            records with lower heat rates are presumed to be invalid, and are
            discarded before returning.
        min_cap_fact, max_cap_fact: minimum & maximum generator capacity
            factor. Generator records with a capacity factor outside this
            range will be filtered out before returning. This allows the user
            to exclude generators that aren't being used enough to yield
            valid results.
        min_fuel_cost_per_mwh: minimum fuel cost on a per MWh basis that is
            required for a generator record to be considered valid. For some
            reason there are now a large number of $0 fuel cost records, which
            previously would have been NaN.

    Returns:
        mcoe_out: a dataframe organized by date and generator, with lots of
        juicy information about the generators -- including fuel cost on a per
        MWh and MMBTU basis, heat rates, and net generation.
    """
    # Bring together the fuel cost and capacity factor dataframes, which
    # also include heat rate information.
    mcoe_out = pd.merge(pudl_out.fuel_cost(verbose=verbose),
                        pudl_out.capacity_factor(verbose=verbose)[[
                            'report_date', 'plant_id_eia', 'generator_id',
                            'capacity_factor'
                        ]],
                        on=['report_date', 'plant_id_eia', 'generator_id'],
                        how='left')

    # Bring the PUDL Unit IDs into the output dataframe so we can see how
    # the generators are really grouped.
    mcoe_out = analysis.merge_on_date_year(mcoe_out,
                                           pudl_out.bga(verbose=verbose)[[
                                               'report_date', 'plant_id_eia',
                                               'unit_id_pudl', 'generator_id'
                                           ]].drop_duplicates(),
                                           how='left',
                                           on=['plant_id_eia', 'generator_id'])

    # Instead of getting the total MMBTU through this multiplication... we
    # could also calculate the total fuel consumed on a per-unit basis, from
    # the boiler_fuel table, and then determine what proportion should be
    # distributed to each generator based on its heat-rate and net generation.
    mcoe_out['total_mmbtu'] = \
        mcoe_out.net_generation_mwh * mcoe_out.heat_rate_mmbtu_mwh
    mcoe_out['total_fuel_cost'] = \
        mcoe_out.total_mmbtu * mcoe_out.fuel_cost_per_mmbtu

    simplified_gens_eia860 = pudl_out.gens_eia860().drop([
        'plant_id_pudl', 'plant_name', 'operator_id', 'util_id_pudl',
        'operator_name', 'fuel_type_count', 'fuel_type_pudl'
    ], axis=1)
    mcoe_out = analysis.merge_on_date_year(mcoe_out,
                                           simplified_gens_eia860,
                                           on=['plant_id_eia', 'generator_id'])

    first_cols = [
        'report_date', 'plant_id_eia', 'plant_id_pudl', 'unit_id_pudl',
        'generator_id', 'plant_name', 'operator_id', 'util_id_pudl',
        'operator_name'
    ]
    mcoe_out = outputs.organize_cols(mcoe_out, first_cols)
    mcoe_out = mcoe_out.sort_values(
        ['plant_id_eia', 'unit_id_pudl', 'generator_id', 'report_date'])

    # Filter the output based on the range of validity supplied by the user:
    if min_heat_rate is not None:
        mcoe_out = mcoe_out[mcoe_out.heat_rate_mmbtu_mwh >= min_heat_rate]
    if min_fuel_cost_per_mwh is not None:
        mcoe_out = mcoe_out[mcoe_out.fuel_cost_per_mwh > min_fuel_cost_per_mwh]
    if min_cap_fact is not None:
        mcoe_out = mcoe_out[mcoe_out.capacity_factor >= min_cap_fact]
    if max_cap_fact is not None:
        mcoe_out = mcoe_out[mcoe_out.capacity_factor <= max_cap_fact]

    return mcoe_out
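
# A worked numeric example (invented values) of the core MCOE arithmetic
# above: a generator with a 10 mmBTU/MWh heat rate burning $3/mmBTU fuel,
# netting 1,000 MWh in the period.
heat_rate_mmbtu_mwh = 10.0
fuel_cost_per_mmbtu = 3.0
net_generation_mwh = 1000.0

total_mmbtu = net_generation_mwh * heat_rate_mmbtu_mwh    # 10,000 mmBTU
total_fuel_cost = total_mmbtu * fuel_cost_per_mmbtu       # $30,000
fuel_cost_per_mwh = total_fuel_cost / net_generation_mwh  # $30/MWh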
Example #9
def capacity_factor(pudl_out, min_cap_fact=0, max_cap_fact=1.5, verbose=False):
    """
    Calculate the capacity factor for each generator.

    Capacity Factor is calculated by using the net generation from eia923 and
    the nameplate capacity from eia860. The net gen and capacity are pulled
    into one dataframe, then the dates from that dataframe are pulled out to
    determine the hours in each period based on the frequency. The number of
    hours is used in calculating the capacity factor. Then capacity factors
    outside the range specified by min_cap_fact and max_cap_fact are
    replaced with NaN.
    """
    # pudl_out must have a freq, otherwise capacity factor will fail and merges
    # between tables with different frequencies will fail
    assert pudl_out.freq is not None,\
        "pudl_out must include a frequency for mcoe"

    # Only include columns to be used
    gens_eia860 = pudl_out.gens_eia860()[[
        'plant_id_eia', 'report_date', 'generator_id', 'nameplate_capacity_mw'
    ]]
    gen_eia923 = pudl_out.gen_eia923()[[
        'plant_id_eia', 'report_date', 'generator_id', 'net_generation_mwh'
    ]]

    # merge the generation and capacity to calculate capacity factor
    capacity_factor = analysis.merge_on_date_year(
        gen_eia923, gens_eia860, on=['plant_id_eia', 'generator_id'])

    # get a unique set of dates to generate the number of hours
    dates = capacity_factor['report_date'].drop_duplicates()
    dates_to_hours = pd.DataFrame(data={
        'report_date': dates,
        'hours': dates.apply(
            lambda d: (pd.date_range(d, periods=2, freq=pudl_out.freq)[1] -
                       pd.date_range(d, periods=2, freq=pudl_out.freq)[0]) /
            pd.Timedelta(hours=1))
    })

    # merge in the hours for the calculation
    capacity_factor = capacity_factor.merge(dates_to_hours, on=['report_date'])

    # actually calculate capacity factor wooo!
    capacity_factor['capacity_factor'] = \
        capacity_factor['net_generation_mwh'] / \
        (capacity_factor['nameplate_capacity_mw'] * capacity_factor['hours'])

    # Replace unrealistic capacity factors with NaN
    capacity_factor.loc[capacity_factor['capacity_factor'] < min_cap_fact,
                        'capacity_factor'] = np.nan
    capacity_factor.loc[capacity_factor['capacity_factor'] >= max_cap_fact,
                        'capacity_factor'] = np.nan

    # drop the hours column, since we don't need it anymore
    capacity_factor.drop(['hours'], axis=1, inplace=True)

    return capacity_factor
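
# A worked numeric example (invented values) of the capacity factor formula
# above: a 100 MW generator that nets 36,000 MWh over a 720-hour month.
net_generation_mwh = 36000.0
nameplate_capacity_mw = 100.0
hours = 720.0
cap_fact = net_generation_mwh / (nameplate_capacity_mw * hours)  # 0.5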
Example #10
def gens_with_bga(bga_eia860, gen_eia923):
    """
    Label EIA generators based on which type of associations they have.

    Given the boiler generator associations, and the generation records,
    label each generator according to which kinds of associations it has
    had. Three boolean values are set for each generator, for each time
    period:
        - boiler_generator_assn: True if the generator has any boilers
          associated with it, False otherwise.
        - plant_assn: True if all the generators associated with a given
          plant_id have a boiler associated with them, False otherwise.
        - complete_assn: True if the generator has *ever* been part of a
          plant in which all generators had a boiler associated with them,
          False otherwise.

    Returns:
        A dataframe containing plant_id_eia, generator_id, boiler_id, and
        the three boolean columns mentioned above.
    """
    # All generators from the Boiler Generator Association table (860)
    bga8 = bga_eia860[[
        'report_date', 'plant_id_eia', 'generator_id', 'boiler_id'
    ]]

    # All generators from the generation_eia923 table, by year.
    gens9 = gen_eia923[['report_date', 'plant_id_eia',
                        'generator_id']].drop_duplicates()

    # Merge in the boiler associations across all the different years of
    # generator - plant associations.
    gens = analysis.merge_on_date_year(gens9,
                                       bga8,
                                       how='left',
                                       on=['plant_id_eia', 'generator_id'])
    # Set a boolean flag on each record indicating whether the plant-generator
    # pairing has a boiler associated with it.
    gens['boiler_generator_assn'] = \
        np.where(gens.boiler_id.isnull(), False, True)

    # Find all the generator records that were ever missing a boiler:
    unassociated_generators = gens[~gens['boiler_generator_assn']]
    # Create a list of plants with unassociated generators, by year.
    unassociated_plants = unassociated_generators.\
        drop_duplicates(subset=['plant_id_eia', 'report_date']).\
        drop(['generator_id', 'boiler_id', 'boiler_generator_assn'], axis=1)
    # Tag those plant-years as being unassociated
    unassociated_plants['plant_assn'] = False

    # Merge the plant association flag back in to the generators
    gens = pd.merge(gens,
                    unassociated_plants,
                    how='left',
                    on=['plant_id_eia', 'report_date'])
    # Tag the rest of the generators as being part of a plant association...
    # This may or may not be true. Need to filter out partially associated
    # plants in the next step.
    gens['plant_assn'] = gens.plant_assn.fillna(value=True)

    # Using the associated plants, extract the generator/boiler combos
    # that represent complete plants at any time to preserve
    # associations (i.e. if a coal plant had its boilers and generators
    # fully associated in the bga table in 2011 and then adds a
    # combined cycle plant, the coal boiler/gen combo will be saved).

    # Remove the report_date:
    gens_complete = gens.drop('report_date', axis=1)
    # Select only those generators tagged as being part of a complete plant:
    gens_complete = gens_complete[gens_complete['plant_assn']]

    gens_complete = gens_complete.drop_duplicates(
        subset=['plant_id_eia', 'generator_id', 'boiler_id'])
    gens_complete['complete_assn'] = True

    gens = gens.merge(gens_complete[[
        'plant_id_eia', 'generator_id', 'boiler_id', 'complete_assn'
    ]],
                      how='left',
                      on=['plant_id_eia', 'generator_id', 'boiler_id'])
    gens['complete_assn'] = gens.complete_assn.fillna(value=False)

    return gens
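
# A toy illustration of the three flags: suppose plant 1 reports generators
# 'a' (boiler b1 associated) and 'b' (no boiler) in 2011, and both have
# boilers in 2012. For 2011, boiler_generator_assn is True for 'a' and False
# for 'b', and plant_assn is False for both, since the plant is only
# partially associated that year. For 2012 both flags are True, and because
# the plant is complete in 2012, the ('a', b1) combo gets complete_assn=True
# wherever it appears -- including in the 2011 records.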
Example #11
def heat_rate(bga, gen_eia923, bf_eia923, gens_eia860, min_heat_rate=5.5):
    """Calculate heat rates (mmBTU/MWh) within separable generation units."""
    generation_w_boilers = \
        analysis.merge_on_date_year(gen_eia923, bga, how='left',
                                    on=['plant_id_eia', 'generator_id'])

    # Calculate net generation from all generators associated with each boiler
    gb1 = generation_w_boilers.groupby(
        by=['plant_id_eia', 'report_date', 'boiler_id'])
    gen_by_boiler = gb1.net_generation_mwh.sum().to_frame().reset_index()
    gen_by_boiler.rename(
        columns={'net_generation_mwh': 'net_generation_mwh_boiler'},
        inplace=True)

    # Calculate net generation per unique boiler generator combo
    gb2 = generation_w_boilers.groupby(
        by=['plant_id_eia', 'report_date', 'boiler_id', 'generator_id'])
    gen_by_bg = gb2.net_generation_mwh.sum().to_frame().reset_index()
    gen_by_bg.rename(
        columns={'net_generation_mwh': 'net_generation_mwh_boiler_gen'},
        inplace=True)

    # squish them together
    gen_by_bg_and_boiler = \
        pd.merge(gen_by_boiler, gen_by_bg,
                 on=['plant_id_eia', 'report_date', 'boiler_id'], how='left')

    # Bring in boiler fuel consumption and boiler generator associations
    bg = analysis.merge_on_date_year(bf_eia923,
                                     bga,
                                     how='left',
                                     on=['plant_id_eia', 'boiler_id'])
    # Merge boiler fuel consumption in with our per-boiler and boiler
    # generator combo net generation calculations
    bg = pd.merge(
        bg,
        gen_by_bg_and_boiler,
        how='left',
        on=['plant_id_eia', 'report_date', 'boiler_id', 'generator_id'])

    # Use the proportion of the generation of each generator to allot mmBTU
    bg['proportion_of_gen_by_boil_gen'] = \
        bg['net_generation_mwh_boiler_gen'] / bg['net_generation_mwh_boiler']
    bg['fuel_consumed_mmbtu_generator'] = \
        bg['proportion_of_gen_by_boil_gen'] * bg['total_heat_content_mmbtu']

    # Generators with no generation and no associated fuel consumption result
    # in some 0/0 = NaN values, which propagate when summed. For our purposes
    # they should be set to zero, since those generators are contributing
    # nothing to either the fuel consumed or the proportion of net generation.
    bg['proportion_of_gen_by_boil_gen'] = \
        bg.proportion_of_gen_by_boil_gen.fillna(0)
    bg['fuel_consumed_mmbtu_generator'] = \
        bg.fuel_consumed_mmbtu_generator.fillna(0)

    # Get total heat consumed per time period by each generator.
    # Before this, the bg dataframe has multiple records for each generator
    # when there are multiple boilers associated with a generator. This step
    # squishes the boiler level data into generators to be compared to the
    # generator level net generation.
    bg_gb = bg.groupby(by=['plant_id_eia', 'report_date', 'generator_id'])
    bg = bg_gb.fuel_consumed_mmbtu_generator.sum().to_frame().reset_index()
    # Now that we have the fuel consumed per generator, bring the net
    # generation per generator back in:
    hr = pd.merge(bg,
                  gen_eia923,
                  how='left',
                  on=['plant_id_eia', 'report_date', 'generator_id'])
    # Finally, calculate heat rate
    hr['heat_rate_mmbtu_mwh'] = \
        hr['fuel_consumed_mmbtu_generator'] / \
        hr['net_generation_mwh']

    # Importing the plant association tag to filter out the
    # generators that are a part of plants that aren't in the bga table
    gens = gens_with_bga(bga, gen_eia923)
    # This is a per-generator table now -- so we don't want the boiler_id
    # And we only want the ones with complete associations.
    gens_assn = gens[gens['complete_assn']].drop('boiler_id', axis=1)
    gens_assn = gens_assn.drop_duplicates(
        subset=['plant_id_eia', 'generator_id', 'report_date'])
    hr = pd.merge(hr,
                  gens_assn,
                  on=['plant_id_eia', 'report_date', 'generator_id'])

    # Only keep the generators with reasonable heat rates
    hr = hr[hr.heat_rate_mmbtu_mwh >= min_heat_rate]

    hr = analysis.merge_on_date_year(hr,
                                     gens_eia860[[
                                         'report_date', 'plant_id_eia',
                                         'generator_id', 'fuel_type_pudl',
                                         'fuel_type_count'
                                     ]],
                                     how='inner',
                                     on=['plant_id_eia', 'generator_id'])

    # Sort it a bit and clean up some types
    first_cols = [
        'report_date', 'operator_id', 'operator_name', 'plant_id_eia',
        'plant_name', 'generator_id'
    ]
    hr = outputs.organize_cols(hr, first_cols)
    hr['util_id_pudl'] = hr.util_id_pudl.astype(int)
    hr['operator_id'] = hr.operator_id.astype(int)
    hr = hr.sort_values(
        by=['operator_id', 'plant_id_eia', 'generator_id', 'report_date'])
    return hr
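
# A worked example (invented numbers) of the proportional fuel allocation at
# the heart of this function: one boiler consuming 1,200 mmBTU serves two
# generators that produced 300 MWh and 100 MWh respectively.
net_gen_boiler = 400.0           # MWh, total generation tied to the boiler
net_gen_by_gen = [300.0, 100.0]  # MWh per boiler/generator combo
boiler_fuel_mmbtu = 1200.0

shares = [g / net_gen_boiler for g in net_gen_by_gen]      # 0.75, 0.25
fuel_by_gen = [s * boiler_fuel_mmbtu for s in shares]      # 900.0, 300.0
heat_rates = [f / g for f, g in zip(fuel_by_gen, net_gen_by_gen)]
# Both generators come out at 3.0 mmBTU/MWh here, since fuel was allotted in
# proportion to net generation.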